// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2025 Intel Corporation */

#include <net/libeth/xdp.h>

#include "idpf.h"
#include "xdp.h"

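/**
 * idpf_rxq_for_each - invoke a callback on every Rx queue of a vport
 * @vport: vport whose Rx queues to walk
 * @fn: callback to invoke on each queue
 * @arg: opaque argument passed through to @fn
 *
 * Walks all Rx queue groups, resolving the queue pointers according to
 * the queue model (splitq vs. singleq), and stops on the first error.
 *
 * Return: 0 on success, -ENETDOWN if the queue groups are not allocated,
 * or the first non-zero value returned by @fn.
 */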
static int idpf_rxq_for_each(const struct idpf_vport *vport,
			     int (*fn)(struct idpf_rx_queue *rxq, void *arg),
			     void *arg)
{
	bool splitq = idpf_is_queue_model_split(vport->rxq_model);

	if (!vport->rxq_grps)
		return -ENETDOWN;

	for (u32 i = 0; i < vport->num_rxq_grp; i++) {
		const struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
		u32 num_rxq;

		if (splitq)
			num_rxq = rx_qgrp->splitq.num_rxq_sets;
		else
			num_rxq = rx_qgrp->singleq.num_rxq;

		for (u32 j = 0; j < num_rxq; j++) {
			struct idpf_rx_queue *q;
			int err;

			if (splitq)
				q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
			else
				q = rx_qgrp->singleq.rxqs[j];

			err = fn(q, arg);
			if (err)
				return err;
		}
	}

	return 0;
}

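/**
 * __idpf_xdp_rxq_info_init - register XDP Rx queue info for one Rx queue
 * @rxq: Rx queue to register
 * @arg: unused
 *
 * Registers the queue with the XDP core and attaches its page_pool (the
 * first buffer queue's pool in splitq mode) as the memory model.
 *
 * Return: 0 on success, -errno on registration failure.
 */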
static int __idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq, void *arg)
{
	const struct idpf_vport *vport = rxq->q_vector->vport;
	bool split = idpf_is_queue_model_split(vport->rxq_model);
	const struct page_pool *pp;
	int err;

	err = __xdp_rxq_info_reg(&rxq->xdp_rxq, vport->netdev, rxq->idx,
				 rxq->q_vector->napi.napi_id,
				 rxq->rx_buf_size);
	if (err)
		return err;

	pp = split ? rxq->bufq_sets[0].bufq.pp : rxq->pp;
	xdp_rxq_info_attach_page_pool(&rxq->xdp_rxq, pp);

	if (!split)
		return 0;

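	/*
	 * Splitq only: point the Rx queue at the vport's XDPSQs so the
	 * Tx side of XDP (e.g. XDP_TX) can be serviced.
	 */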
	rxq->xdpsqs = &vport->txqs[vport->xdp_txq_offset];
	rxq->num_xdp_txq = vport->num_xdp_txq;

	return 0;
}

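/**
 * idpf_xdp_rxq_info_init_all - initialize XDP Rx queue info for a vport
 * @vport: vport whose Rx queues to initialize
 *
 * Return: 0 on success, -errno on failure.
 */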
int idpf_xdp_rxq_info_init_all(const struct idpf_vport *vport)
{
	return idpf_rxq_for_each(vport, __idpf_xdp_rxq_info_init, NULL);
}

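/**
 * __idpf_xdp_rxq_info_deinit - unregister XDP Rx queue info for one Rx queue
 * @rxq: Rx queue to unregister
 * @arg: the vport's queue model, cast through a pointer
 *
 * Detaches the memory model and unregisters the queue from the XDP core;
 * for splitq, also clears the XDPSQ pointers set at init time.
 *
 * Return: always 0 (to satisfy the idpf_rxq_for_each() callback type).
 */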
static int __idpf_xdp_rxq_info_deinit(struct idpf_rx_queue *rxq, void *arg)
{
	if (idpf_is_queue_model_split((size_t)arg)) {
		rxq->xdpsqs = NULL;
		rxq->num_xdp_txq = 0;
	}

	xdp_rxq_info_detach_mem_model(&rxq->xdp_rxq);
	xdp_rxq_info_unreg(&rxq->xdp_rxq);

	return 0;
}

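/**
 * idpf_xdp_rxq_info_deinit_all - deinitialize XDP Rx queue info for a vport
 * @vport: vport whose Rx queues to deinitialize
 */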
void idpf_xdp_rxq_info_deinit_all(const struct idpf_vport *vport)
{
	idpf_rxq_for_each(vport, __idpf_xdp_rxq_info_deinit,
			  (void *)(size_t)vport->rxq_model);
}

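/**
 * idpf_xdpsqs_get - set up the vport's Tx queues reserved for XDP
 * @vport: vport whose XDPSQs to set up
 *
 * Allocates one completion timer per XDPSQ (on the memory node of the
 * corresponding CPU), then converts every Tx queue starting at the XDP
 * offset to XDP mode.
 *
 * Return: 0 on success or with XDP disabled, -ENOMEM on allocation failure.
 */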
int idpf_xdpsqs_get(const struct idpf_vport *vport)
{
	struct libeth_xdpsq_timer **timers __free(kvfree) = NULL;
	struct net_device *dev;
	u32 sqs;

	if (!idpf_xdp_enabled(vport))
		return 0;

	timers = kvcalloc(vport->num_xdp_txq, sizeof(*timers), GFP_KERNEL);
	if (!timers)
		return -ENOMEM;

	for (u32 i = 0; i < vport->num_xdp_txq; i++) {
		timers[i] = kzalloc_node(sizeof(*timers[i]), GFP_KERNEL,
					 cpu_to_mem(i));
		if (!timers[i]) {
			for (int j = i - 1; j >= 0; j--)
				kfree(timers[j]);

			return -ENOMEM;
		}
	}

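	/*
	 * Convert the regular Tx queues above the XDP offset into XDPSQs:
	 * they complete onto their group's completion queue, need no refill
	 * queues, and run without flow scheduling or interrupts.
	 */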
	dev = vport->netdev;
	sqs = vport->xdp_txq_offset;

	for (u32 i = sqs; i < vport->num_txq; i++) {
		struct idpf_tx_queue *xdpsq = vport->txqs[i];

		xdpsq->complq = xdpsq->txq_grp->complq;
		kfree(xdpsq->refillq);
		xdpsq->refillq = NULL;

		idpf_queue_clear(FLOW_SCH_EN, xdpsq);
		idpf_queue_clear(FLOW_SCH_EN, xdpsq->complq);
		idpf_queue_set(NOIRQ, xdpsq);
		idpf_queue_set(XDP, xdpsq);
		idpf_queue_set(XDP, xdpsq->complq);

		xdpsq->timer = timers[i - sqs];
		libeth_xdpsq_get(&xdpsq->xdp_lock, dev, vport->xdpsq_share);

		xdpsq->pending = 0;
		xdpsq->xdp_tx = 0;
		xdpsq->thresh = libeth_xdp_queue_threshold(xdpsq->desc_count);
	}

	return 0;
}

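/**
 * idpf_xdpsqs_put - release the vport's Tx queues reserved for XDP
 * @vport: vport whose XDPSQs to release
 *
 * Undoes idpf_xdpsqs_get(): drops the libeth XDPSQ lock reference and
 * frees the per-queue completion timers.
 */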
void idpf_xdpsqs_put(const struct idpf_vport *vport)
{
	struct net_device *dev;
	u32 sqs;

	if (!idpf_xdp_enabled(vport))
		return;

	dev = vport->netdev;
	sqs = vport->xdp_txq_offset;

	for (u32 i = sqs; i < vport->num_txq; i++) {
		struct idpf_tx_queue *xdpsq = vport->txqs[i];

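		/* Test-and-clear the XDP flag: skip queues that were never
		 * converted by idpf_xdpsqs_get().
		 */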
		if (!idpf_queue_has_clear(XDP, xdpsq))
			continue;

		libeth_xdpsq_put(&xdpsq->xdp_lock, dev);

		kfree(xdpsq->timer);
		xdpsq->refillq = NULL;
		idpf_queue_clear(NOIRQ, xdpsq);
	}
}
169