// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2025 Intel Corporation */

#include "idpf.h"
#include "idpf_virtchnl.h"
#include "xdp.h"

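/**
 * idpf_rxq_for_each - invoke a callback on each Rx queue of a vport
 * @vport: vport to iterate over
 * @fn: callback to invoke on each Rx queue
 * @arg: cookie passed through to @fn
 *
 * Walk all Rx queue groups in both the split and the single queue model
 * and call @fn for every Rx queue, stopping at the first non-zero return
 * value.
 *
 * Return: 0 on success, -ENETDOWN if the queues are not allocated,
 * or the first error returned by @fn.
 */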
static int idpf_rxq_for_each(const struct idpf_vport *vport,
			     int (*fn)(struct idpf_rx_queue *rxq, void *arg),
			     void *arg)
{
	bool splitq = idpf_is_queue_model_split(vport->rxq_model);

	if (!vport->rxq_grps)
		return -ENETDOWN;

	for (u32 i = 0; i < vport->num_rxq_grp; i++) {
		const struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
		u32 num_rxq;

		if (splitq)
			num_rxq = rx_qgrp->splitq.num_rxq_sets;
		else
			num_rxq = rx_qgrp->singleq.num_rxq;

		for (u32 j = 0; j < num_rxq; j++) {
			struct idpf_rx_queue *q;
			int err;

			if (splitq)
				q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
			else
				q = rx_qgrp->singleq.rxqs[j];

			err = fn(q, arg);
			if (err)
				return err;
		}
	}

	return 0;
}

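/**
 * __idpf_xdp_rxq_info_init - register XDP Rx queue info for an Rx queue
 * @rxq: Rx queue to register
 * @arg: unused
 *
 * Register the queue in the XDP core, attach its page_pool as the memory
 * model and, for the split queue model, publish the XDPSQs the queue may
 * use for ``XDP_TX``.
 *
 * Return: 0 on success, -errno on XDP registration failure.
 */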
static int __idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq, void *arg)
{
	const struct idpf_vport *vport = rxq->q_vector->vport;
	bool split = idpf_is_queue_model_split(vport->rxq_model);
	const struct page_pool *pp;
	int err;

	err = __xdp_rxq_info_reg(&rxq->xdp_rxq, vport->netdev, rxq->idx,
				 rxq->q_vector->napi.napi_id,
				 rxq->rx_buf_size);
	if (err)
		return err;

	pp = split ? rxq->bufq_sets[0].bufq.pp : rxq->pp;
	xdp_rxq_info_attach_page_pool(&rxq->xdp_rxq, pp);

	if (!split)
		return 0;

	rxq->xdpsqs = &vport->txqs[vport->xdp_txq_offset];
	rxq->num_xdp_txq = vport->num_xdp_txq;

	return 0;
}

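/**
 * idpf_xdp_rxq_info_init_all - initialize RxQ info for all Rx queues in vport
 * @vport: vport to set up the info for
 *
 * Return: 0 on success, -errno on failure.
 */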
int idpf_xdp_rxq_info_init_all(const struct idpf_vport *vport)
{
	return idpf_rxq_for_each(vport, __idpf_xdp_rxq_info_init, NULL);
}

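/**
 * __idpf_xdp_rxq_info_deinit - unregister XDP Rx queue info for an Rx queue
 * @rxq: Rx queue to unregister
 * @arg: queue model of the vport, passed as a pointer-sized cookie
 *
 * Return: always 0.
 */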
static int __idpf_xdp_rxq_info_deinit(struct idpf_rx_queue *rxq, void *arg)
{
	if (idpf_is_queue_model_split((size_t)arg)) {
		rxq->xdpsqs = NULL;
		rxq->num_xdp_txq = 0;
	}

	xdp_rxq_info_detach_mem_model(&rxq->xdp_rxq);
	xdp_rxq_info_unreg(&rxq->xdp_rxq);

	return 0;
}

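/**
 * idpf_xdp_rxq_info_deinit_all - deinit RxQ info for all Rx queues in vport
 * @vport: vport to detach the info from
 */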
void idpf_xdp_rxq_info_deinit_all(const struct idpf_vport *vport)
{
	idpf_rxq_for_each(vport, __idpf_xdp_rxq_info_deinit,
			  (void *)(size_t)vport->rxq_model);
}

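/**
 * idpf_xdp_rxq_assign_prog - assign an XDP program to an Rx queue
 * @rxq: Rx queue to assign the program to
 * @arg: the &bpf_prog to assign, %NULL to remove the current one
 *
 * Grab a reference to the new program, RCU-swap it into the queue and
 * release the old one, relying on the RTNL lock for synchronization.
 *
 * Return: always 0.
 */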
static int idpf_xdp_rxq_assign_prog(struct idpf_rx_queue *rxq, void *arg)
{
	struct bpf_prog *prog = arg;
	struct bpf_prog *old;

	if (prog)
		bpf_prog_inc(prog);

	old = rcu_replace_pointer(rxq->xdp_prog, prog, lockdep_rtnl_is_held());
	if (old)
		bpf_prog_put(old);

	return 0;
}

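/**
 * idpf_xdp_copy_prog_to_rqs - set the XDP program for all Rx queues of a vport
 * @vport: vport to set up XDP for
 * @xdp_prog: XDP program to propagate, %NULL to remove the current one
 */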
void idpf_xdp_copy_prog_to_rqs(const struct idpf_vport *vport,
			       struct bpf_prog *xdp_prog)
{
	idpf_rxq_for_each(vport, idpf_xdp_rxq_assign_prog, xdp_prog);
}

static void idpf_xdp_tx_timer(struct work_struct *work);

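/**
 * idpf_xdpsqs_get - initialize the XDPSQs of a vport
 * @vport: vport to set up the XDPSQs for
 *
 * Convert the regular Tx queues starting at the XDP queue offset into
 * XDPSQs: attach the completion queues, drop the refill queues, mark the
 * queues as interrupt-less XDP ones and set up the locks, cleaning timers
 * and completion thresholds.
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */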
int idpf_xdpsqs_get(const struct idpf_vport *vport)
{
	struct libeth_xdpsq_timer **timers __free(kvfree) = NULL;
	struct net_device *dev;
	u32 sqs;

	if (!idpf_xdp_enabled(vport))
		return 0;

	timers = kvcalloc(vport->num_xdp_txq, sizeof(*timers), GFP_KERNEL);
	if (!timers)
		return -ENOMEM;

	for (u32 i = 0; i < vport->num_xdp_txq; i++) {
		timers[i] = kzalloc_node(sizeof(*timers[i]), GFP_KERNEL,
					 cpu_to_mem(i));
		if (!timers[i]) {
			for (int j = i - 1; j >= 0; j--)
				kfree(timers[j]);

			return -ENOMEM;
		}
	}

	dev = vport->netdev;
	sqs = vport->xdp_txq_offset;

	for (u32 i = sqs; i < vport->num_txq; i++) {
		struct idpf_tx_queue *xdpsq = vport->txqs[i];

		xdpsq->complq = xdpsq->txq_grp->complq;
		kfree(xdpsq->refillq);
		xdpsq->refillq = NULL;

		idpf_queue_clear(FLOW_SCH_EN, xdpsq);
		idpf_queue_clear(FLOW_SCH_EN, xdpsq->complq);
		idpf_queue_set(NOIRQ, xdpsq);
		idpf_queue_set(XDP, xdpsq);
		idpf_queue_set(XDP, xdpsq->complq);

		xdpsq->timer = timers[i - sqs];
		libeth_xdpsq_get(&xdpsq->xdp_lock, dev, vport->xdpsq_share);
		libeth_xdpsq_init_timer(xdpsq->timer, xdpsq, &xdpsq->xdp_lock,
					idpf_xdp_tx_timer);

		xdpsq->pending = 0;
		xdpsq->xdp_tx = 0;
		xdpsq->thresh = libeth_xdp_queue_threshold(xdpsq->desc_count);
	}

	return 0;
}

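/**
 * idpf_xdpsqs_put - release the XDPSQs of a vport
 * @vport: vport to release the XDPSQs of
 *
 * Undo what idpf_xdpsqs_get() did: stop and free the cleaning timers,
 * release the locks and clear the XDP-specific queue flags.
 */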
void idpf_xdpsqs_put(const struct idpf_vport *vport)
{
	struct net_device *dev;
	u32 sqs;

	if (!idpf_xdp_enabled(vport))
		return;

	dev = vport->netdev;
	sqs = vport->xdp_txq_offset;

	for (u32 i = sqs; i < vport->num_txq; i++) {
		struct idpf_tx_queue *xdpsq = vport->txqs[i];

		if (!idpf_queue_has_clear(XDP, xdpsq))
			continue;

		libeth_xdpsq_deinit_timer(xdpsq->timer);
		libeth_xdpsq_put(&xdpsq->xdp_lock, dev);

		kfree(xdpsq->timer);
		xdpsq->refillq = NULL;
		idpf_queue_clear(NOIRQ, xdpsq);
	}
}

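/**
 * idpf_xdp_parse_cqe - parse one XDPSQ completion descriptor
 * @desc: completion descriptor to parse
 * @gen: expected generation bit value
 *
 * Return: the SQ head position reported by the descriptor on success,
 * -ENODATA if there is no new completion (generation bit mismatch),
 * -EINVAL on an unexpected completion type.
 */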
static int idpf_xdp_parse_cqe(const struct idpf_splitq_4b_tx_compl_desc *desc,
			      bool gen)
{
	u32 val;

#ifdef __LIBETH_WORD_ACCESS
	val = *(const u32 *)desc;
#else
	val = ((u32)le16_to_cpu(desc->q_head_compl_tag.q_head) << 16) |
	      le16_to_cpu(desc->qid_comptype_gen);
#endif
	if (!!(val & IDPF_TXD_COMPLQ_GEN_M) != gen)
		return -ENODATA;

	if (unlikely((val & GENMASK(IDPF_TXD_COMPLQ_GEN_S - 1, 0)) !=
		     FIELD_PREP(IDPF_TXD_COMPLQ_COMPL_TYPE_M,
				IDPF_TXD_COMPLT_RS)))
		return -EINVAL;

	return upper_16_bits(val);
}

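/**
 * idpf_xdpsq_poll - poll the completion queue of an XDPSQ
 * @xdpsq: XDPSQ to poll the completions for
 * @budget: max number of completions to process
 *
 * Walk the completion ring, flipping the cached generation bit on each
 * wrap, and convert the head position reported by HW into the number of
 * descriptors done since the previous poll.
 *
 * Return: number of done Tx descriptors.
 */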
static u32 idpf_xdpsq_poll(struct idpf_tx_queue *xdpsq, u32 budget)
{
	struct idpf_compl_queue *cq = xdpsq->complq;
	u32 tx_ntc = xdpsq->next_to_clean;
	u32 tx_cnt = xdpsq->desc_count;
	u32 ntc = cq->next_to_clean;
	u32 cnt = cq->desc_count;
	u32 done_frames;
	bool gen;

	gen = idpf_queue_has(GEN_CHK, cq);

	for (done_frames = 0; done_frames < budget; ) {
		int ret;

		ret = idpf_xdp_parse_cqe(&cq->comp_4b[ntc], gen);
		if (ret >= 0) {
			done_frames = ret > tx_ntc ? ret - tx_ntc :
						     ret + tx_cnt - tx_ntc;
			goto next;
		}

		switch (ret) {
		case -ENODATA:
			goto out;
		case -EINVAL:
			break;
		}

next:
		if (unlikely(++ntc == cnt)) {
			ntc = 0;
			gen = !gen;
			idpf_queue_change(GEN_CHK, cq);
		}
	}

out:
	cq->next_to_clean = ntc;

	return done_frames;
}

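/**
 * idpf_xdpsq_complete - clean an XDPSQ after polling its completion queue
 * @_xdpsq: XDPSQ to clean, passed as a cookie from libeth_xdp
 * @budget: max number of descriptors to clean
 *
 * Free or recycle the buffers of all the descriptors reported as done,
 * bulking the frame returns, and update the queue counters.
 *
 * Return: number of descriptors cleaned.
 */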
static u32 idpf_xdpsq_complete(void *_xdpsq, u32 budget)
{
	struct libeth_xdpsq_napi_stats ss = { };
	struct idpf_tx_queue *xdpsq = _xdpsq;
	u32 tx_ntc = xdpsq->next_to_clean;
	u32 tx_cnt = xdpsq->desc_count;
	struct xdp_frame_bulk bq;
	struct libeth_cq_pp cp = {
		.dev	= xdpsq->dev,
		.bq	= &bq,
		.xss	= &ss,
		.napi	= true,
	};
	u32 done_frames;

	done_frames = idpf_xdpsq_poll(xdpsq, budget);
	if (unlikely(!done_frames))
		return 0;

	xdp_frame_bulk_init(&bq);

	for (u32 i = 0; likely(i < done_frames); i++) {
		libeth_xdp_complete_tx(&xdpsq->tx_buf[tx_ntc], &cp);

		if (unlikely(++tx_ntc == tx_cnt))
			tx_ntc = 0;
	}

	xdp_flush_frame_bulk(&bq);

	xdpsq->next_to_clean = tx_ntc;
	xdpsq->pending -= done_frames;
	xdpsq->xdp_tx -= cp.xdp_tx;

	return done_frames;
}

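/**
 * idpf_xdp_tx_prep - lock an XDPSQ and prepare it for sending frames
 * @_xdpsq: XDPSQ to prepare, passed as a cookie from libeth_xdp
 * @sq: filled with the queue parameters needed for the xmit itself
 *
 * Take the XDPSQ lock and, when the number of free descriptors falls
 * below the threshold, complete pending descriptors first to make room.
 *
 * Return: number of free descriptors on the ring.
 */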
static u32 idpf_xdp_tx_prep(void *_xdpsq, struct libeth_xdpsq *sq)
{
	struct idpf_tx_queue *xdpsq = _xdpsq;
	u32 free;

	libeth_xdpsq_lock(&xdpsq->xdp_lock);

	free = xdpsq->desc_count - xdpsq->pending;
	if (free < xdpsq->thresh)
		free += idpf_xdpsq_complete(xdpsq, xdpsq->thresh);

	*sq = (struct libeth_xdpsq){
		.sqes		= xdpsq->tx_buf,
		.descs		= xdpsq->desc_ring,
		.count		= xdpsq->desc_count,
		.lock		= &xdpsq->xdp_lock,
		.ntu		= &xdpsq->next_to_use,
		.pending	= &xdpsq->pending,
		.xdp_tx		= &xdpsq->xdp_tx,
	};

	return free;
}

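/* Generate the XDPSQ cleaning timer, the ``XDP_TX`` bulk flush and the
 * .ndo_xdp_xmit() bulk flush helpers from the libeth_xdp templates.
 */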
LIBETH_XDP_DEFINE_START();
LIBETH_XDP_DEFINE_TIMER(static idpf_xdp_tx_timer, idpf_xdpsq_complete);
LIBETH_XDP_DEFINE_FLUSH_TX(idpf_xdp_tx_flush_bulk, idpf_xdp_tx_prep,
			   idpf_xdp_tx_xmit);
LIBETH_XDP_DEFINE_FLUSH_XMIT(static idpf_xdp_xmit_flush_bulk, idpf_xdp_tx_prep,
			     idpf_xdp_tx_xmit);
LIBETH_XDP_DEFINE_END();

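/**
 * idpf_xdp_xmit - send frames queued by ``XDP_REDIRECT`` to this interface
 * @dev: network device
 * @n: number of frames to transmit
 * @frames: frames to transmit
 * @flags: transmit flags (``XDP_XMIT_FLUSH`` or zero)
 *
 * Return: number of frames sent or -errno on error.
 */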
int idpf_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
		  u32 flags)
{
	const struct idpf_netdev_priv *np = netdev_priv(dev);
	const struct idpf_vport *vport = np->vport;

	if (unlikely(!netif_carrier_ok(dev) || !vport->link_up))
		return -ENETDOWN;

	return libeth_xdp_xmit_do_bulk(dev, n, frames, flags,
				       &vport->txqs[vport->xdp_txq_offset],
				       vport->num_xdp_txq,
				       idpf_xdp_xmit_flush_bulk,
				       idpf_xdp_tx_finalize);
}

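/**
 * idpf_xdpmo_rx_hash - implement the ``xmo_rx_hash`` XDP metadata callback
 * @ctx: XDP buffer the hash is requested for
 * @hash: filled with the RSS hash from the Rx descriptor
 * @rss_type: filled with the hash type decoded from the packet type
 *
 * Return: 0 on success, -ENODATA if the descriptor carries no valid hash.
 */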
static int idpf_xdpmo_rx_hash(const struct xdp_md *ctx, u32 *hash,
			      enum xdp_rss_hash_type *rss_type)
{
	const struct libeth_xdp_buff *xdp = (typeof(xdp))ctx;
	struct idpf_xdp_rx_desc desc __uninitialized;
	const struct idpf_rx_queue *rxq;
	struct libeth_rx_pt pt;

	rxq = libeth_xdp_buff_to_rq(xdp, typeof(*rxq), xdp_rxq);

	idpf_xdp_get_qw0(&desc, xdp->desc);

	pt = rxq->rx_ptype_lkup[idpf_xdp_rx_pt(&desc)];
	if (!libeth_rx_pt_has_hash(rxq->xdp_rxq.dev, pt))
		return -ENODATA;

	idpf_xdp_get_qw2(&desc, xdp->desc);

	return libeth_xdpmo_rx_hash(hash, rss_type, idpf_xdp_rx_hash(&desc),
				    pt);
}

static const struct xdp_metadata_ops idpf_xdpmo = {
	.xmo_rx_hash		= idpf_xdpmo_rx_hash,
};

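/**
 * idpf_xdp_set_features - set the XDP feature bits for a vport's netdev
 * @vport: vport to configure
 *
 * XDP is supported in the split queue model only. The redirect target
 * feature is announced separately once a program is actually installed.
 */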
void idpf_xdp_set_features(const struct idpf_vport *vport)
{
	if (!idpf_is_queue_model_split(vport->rxq_model))
		return;

	libeth_xdp_set_features_noredir(vport->netdev, &idpf_xdpmo);
}

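/**
 * idpf_xdp_setup_prog - attach or detach an XDP program on a vport
 * @vport: vport to configure
 * @xdp: XDP command buffer carrying the program and the extack
 *
 * When the queue layout does not change (or the vport cannot be reset
 * right now), only swap the program pointers and propagate the new
 * program to the Rx queues. Otherwise, store the program in the vport
 * config and trigger a soft reset so the queues are re-created with the
 * XDPSQs in place.
 *
 * Return: 0 on success, -errno on failure.
 */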
static int idpf_xdp_setup_prog(struct idpf_vport *vport,
			       const struct netdev_bpf *xdp)
{
	const struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
	struct bpf_prog *old, *prog = xdp->prog;
	struct idpf_vport_config *cfg;
	int ret;

	cfg = vport->adapter->vport_config[vport->idx];

	if (test_bit(IDPF_REMOVE_IN_PROG, vport->adapter->flags) ||
	    !test_bit(IDPF_VPORT_REG_NETDEV, cfg->flags) ||
	    !!vport->xdp_prog == !!prog) {
		if (np->state == __IDPF_VPORT_UP)
			idpf_xdp_copy_prog_to_rqs(vport, prog);

		old = xchg(&vport->xdp_prog, prog);
		if (old)
			bpf_prog_put(old);

		cfg->user_config.xdp_prog = prog;

		return 0;
	}

	if (!vport->num_xdp_txq && vport->num_txq == cfg->max_q.max_txq) {
		NL_SET_ERR_MSG_MOD(xdp->extack,
				   "No Tx queues available for XDP, please decrease the number of regular SQs");
		return -ENOSPC;
	}

	old = cfg->user_config.xdp_prog;
	cfg->user_config.xdp_prog = prog;

	ret = idpf_initiate_soft_reset(vport, IDPF_SR_Q_CHANGE);
	if (ret) {
		NL_SET_ERR_MSG_MOD(xdp->extack,
				   "Could not reopen the vport after XDP setup");

		cfg->user_config.xdp_prog = old;
		old = prog;
	}

	if (old)
		bpf_prog_put(old);

	libeth_xdp_set_redirect(vport->netdev, vport->xdp_prog);

	return ret;
}

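/**
 * idpf_xdp - implement the .ndo_bpf callback
 * @dev: network device
 * @xdp: XDP command
 *
 * Return: 0 on success, -EOPNOTSUPP on an unsupported command or queue
 * model, -errno otherwise.
 */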
int idpf_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	struct idpf_vport *vport;
	int ret;

	idpf_vport_ctrl_lock(dev);
	vport = idpf_netdev_to_vport(dev);

	if (!idpf_is_queue_model_split(vport->txq_model))
		goto notsupp;

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		ret = idpf_xdp_setup_prog(vport, xdp);
		break;
	default:
notsupp:
		ret = -EOPNOTSUPP;
		break;
	}

	idpf_vport_ctrl_unlock(dev);

	return ret;
}