xref: /linux/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c (revision 8be4d31cb8aaeea27bde4b7ddb26e28a89062ebf)
// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2024 Marvell.
 *
 */

#include <linux/bpf_trace.h>
#include <linux/stringify.h>
#include <net/xdp_sock_drv.h>
#include <net/xdp.h>

#include "otx2_common.h"
#include "otx2_struct.h"
#include "otx2_xsk.h"

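/* Allocate one receive buffer from the queue's XSK buffer pool and return its
 * DMA address, rounded up with OTX2_DATA_ALIGN(). The xdp_buff pointer is
 * stashed in pool->xdp[] so it can be released later, and xdp->data is moved
 * by the same delta so it stays in sync with the aligned DMA address.
 */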
int otx2_xsk_pool_alloc_buf(struct otx2_nic *pfvf, struct otx2_pool *pool,
			    dma_addr_t *dma, int idx)
{
	struct xdp_buff *xdp;
	int delta;

	xdp = xsk_buff_alloc(pool->xsk_pool);
	if (!xdp)
		return -ENOMEM;

	pool->xdp[pool->xdp_top++] = xdp;
	*dma = OTX2_DATA_ALIGN(xsk_buff_xdp_get_dma(xdp));
	/* Adjust xdp->data for unaligned addresses */
	delta = *dma - xsk_buff_xdp_get_dma(xdp);
	xdp->data += delta;

	return 0;
}

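/* Queue AF mbox requests that disable the RQ context for 'qidx' and the NPA
 * aura and pool contexts for 'aura_id', then issue them as one synchronous
 * mbox message. Called with pfvf->mbox.lock held.
 */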
static int otx2_xsk_ctx_disable(struct otx2_nic *pfvf, u16 qidx, int aura_id)
{
	struct nix_cn10k_aq_enq_req *cn10k_rq_aq;
	struct npa_aq_enq_req *aura_aq;
	struct npa_aq_enq_req *pool_aq;
	struct nix_aq_enq_req *rq_aq;

	if (test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) {
		cn10k_rq_aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
		if (!cn10k_rq_aq)
			return -ENOMEM;
		cn10k_rq_aq->qidx = qidx;
		cn10k_rq_aq->rq.ena = 0;
		cn10k_rq_aq->rq_mask.ena = 1;
		cn10k_rq_aq->ctype = NIX_AQ_CTYPE_RQ;
		cn10k_rq_aq->op = NIX_AQ_INSTOP_WRITE;
	} else {
		rq_aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
		if (!rq_aq)
			return -ENOMEM;
		rq_aq->qidx = qidx;
		rq_aq->rq.ena = 0;
		rq_aq->rq_mask.ena = 1;
		rq_aq->ctype = NIX_AQ_CTYPE_RQ;
		rq_aq->op = NIX_AQ_INSTOP_WRITE;
	}

	aura_aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
	if (!aura_aq)
		goto fail;

	aura_aq->aura_id = aura_id;
	aura_aq->aura.ena = 0;
	aura_aq->aura_mask.ena = 1;
	aura_aq->ctype = NPA_AQ_CTYPE_AURA;
	aura_aq->op = NPA_AQ_INSTOP_WRITE;

	pool_aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
	if (!pool_aq)
		goto fail;

	pool_aq->aura_id = aura_id;
	pool_aq->pool.ena = 0;
	pool_aq->pool_mask.ena = 1;
	pool_aq->ctype = NPA_AQ_CTYPE_POOL;
	pool_aq->op = NPA_AQ_INSTOP_WRITE;

	return otx2_sync_mbox_msg(&pfvf->mbox);

fail:
	otx2_mbox_reset(&pfvf->mbox.mbox, 0);
	return -ENOMEM;
}

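/* Quiesce an RX queue before its buffer pool changes: flush pending RX CQEs,
 * drain and free every buffer still held by the aura, then disable the RQ,
 * aura and pool contexts via the AF mbox.
 */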
static void otx2_clean_up_rq(struct otx2_nic *pfvf, int qidx)
{
	struct otx2_qset *qset = &pfvf->qset;
	struct otx2_cq_queue *cq;
	struct otx2_pool *pool;
	u64 iova;

	/* If the DOWN flag is set, the queues are already freed */
	if (pfvf->flags & OTX2_FLAG_INTF_DOWN)
		return;

	cq = &qset->cq[qidx];
	if (cq)
		otx2_cleanup_rx_cqes(pfvf, cq, qidx);

	pool = &pfvf->qset.pool[qidx];
	iova = otx2_aura_allocptr(pfvf, qidx);
	while (iova) {
		iova -= OTX2_HEAD_ROOM;
		otx2_free_bufs(pfvf, pool, iova, pfvf->rbsize);
		iova = otx2_aura_allocptr(pfvf, qidx);
	}

	mutex_lock(&pfvf->mbox.lock);
	otx2_xsk_ctx_disable(pfvf, qidx, qidx);
	mutex_unlock(&pfvf->mbox.lock);
}

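/* Attach an AF_XDP zero-copy buffer pool to queue 'qidx': DMA-map the pool,
 * mark the queue as zero-copy, tear down the existing RQ buffers, drop the
 * queue from the RSS table and wake its NAPI context so RX restarts from the
 * new pool.
 */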
int otx2_xsk_pool_enable(struct otx2_nic *pf, struct xsk_buff_pool *pool, u16 qidx)
{
	u16 rx_queues = pf->hw.rx_queues;
	u16 tx_queues = pf->hw.tx_queues;
	int err;

	if (qidx >= rx_queues || qidx >= tx_queues)
		return -EINVAL;

	err = xsk_pool_dma_map(pool, pf->dev, DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
	if (err)
		return err;

	set_bit(qidx, pf->af_xdp_zc_qidx);
	otx2_clean_up_rq(pf, qidx);
	/* Reconfigure RSS table as 'qidx' cannot be part of RSS now */
	otx2_set_rss_table(pf, DEFAULT_RSS_CONTEXT_GROUP, NULL);
	/* Kick start the NAPI context so that receiving will start */
	return otx2_xsk_wakeup(pf->netdev, qidx, XDP_WAKEUP_RX);
}

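/* Detach the AF_XDP zero-copy pool from queue 'qidx': clear the XDP send
 * queue's pool pointer, clean up the RQ, unmap the pool's DMA mappings and
 * put the queue back into the RSS table.
 */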
int otx2_xsk_pool_disable(struct otx2_nic *pf, u16 qidx)
{
	struct net_device *netdev = pf->netdev;
	struct xsk_buff_pool *pool;
	struct otx2_snd_queue *sq;

	pool = xsk_get_pool_from_qid(netdev, qidx);
	if (!pool)
		return -EINVAL;

	sq = &pf->qset.sq[qidx + pf->hw.tx_queues];
	sq->xsk_pool = NULL;
	otx2_clean_up_rq(pf, qidx);
	clear_bit(qidx, pf->af_xdp_zc_qidx);
	xsk_pool_dma_unmap(pool, DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
	/* Reconfigure RSS table as 'qidx' needs to be part of RSS again */
	otx2_set_rss_table(pf, DEFAULT_RSS_CONTEXT_GROUP, NULL);

	return 0;
}

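/* Single entry point for XSK pool setup: a non-NULL pool enables zero-copy
 * on 'qidx', a NULL pool disables it.
 */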
int otx2_xsk_pool_setup(struct otx2_nic *pf, struct xsk_buff_pool *pool, u16 qidx)
{
	if (pool)
		return otx2_xsk_pool_enable(pf, pool, qidx);

	return otx2_xsk_pool_disable(pf, qidx);
}

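/* Wake the NAPI context serving 'queue_id'. If NAPI is already scheduled it
 * is only marked as missed; otherwise the queue's completion interrupt is
 * raised so the poll handler runs and processes XSK RX/TX work.
 */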
int otx2_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
{
	struct otx2_nic *pf = netdev_priv(dev);
	struct otx2_cq_poll *cq_poll = NULL;
	struct otx2_qset *qset = &pf->qset;

	if (pf->flags & OTX2_FLAG_INTF_DOWN)
		return -ENETDOWN;

	if (queue_id >= pf->hw.rx_queues || queue_id >= pf->hw.tx_queues)
		return -EINVAL;

	cq_poll = &qset->napi[queue_id];
	if (!cq_poll)
		return -EINVAL;

	/* Trigger interrupt */
	if (!napi_if_scheduled_mark_missed(&cq_poll->napi)) {
		otx2_write64(pf, NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx), BIT_ULL(0));
		otx2_write64(pf, NIX_LF_CINTX_INT_W1S(cq_poll->cint_idx), BIT_ULL(0));
	}

	return 0;
}

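/* Point the XDP send queue at the XSK pool registered for 'qidx', but only
 * when the queue has been marked for zero-copy operation.
 */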
void otx2_attach_xsk_buff(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, int qidx)
{
	if (test_bit(qidx, pfvf->af_xdp_zc_qidx))
		sq->xsk_pool = xsk_get_pool_from_qid(pfvf->netdev, qidx);
}

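/* Build and submit one SQE for an XSK TX frame: fill the SQE header (aura id,
 * don't-free and completion flags), add a single SG descriptor for the
 * frame's DMA address and length, then push the SQE to hardware through
 * sqe_flush().
 */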
static void otx2_xsk_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len,
				   u16 qidx)
{
	struct nix_sqe_hdr_s *sqe_hdr;
	struct otx2_snd_queue *sq;
	int offset;

	sq = &pfvf->qset.sq[qidx];
	memset(sq->sqe_base + 8, 0, sq->sqe_size - 8);

	sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base);

	if (!sqe_hdr->total) {
		sqe_hdr->aura = sq->aura_id;
		sqe_hdr->df = 1;
		sqe_hdr->sq = qidx;
		sqe_hdr->pnc = 1;
	}
	sqe_hdr->total = len;
	sqe_hdr->sqe_id = sq->head;

	offset = sizeof(*sqe_hdr);

	otx2_xdp_sqe_add_sg(sq, NULL, iova, len, &offset, OTX2_AF_XDP_FRAME);
	sqe_hdr->sizem1 = (offset / 16) - 1;
	pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
}

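/* Zero-copy TX work run from NAPI: cap the budget at the number of free SQEs,
 * peek a batch of descriptors from the XSK TX ring and append one SQE per
 * descriptor to the queue's send queue.
 */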
void otx2_zc_napi_handler(struct otx2_nic *pfvf, struct xsk_buff_pool *pool,
			  int queue, int budget)
{
	struct xdp_desc *xdp_desc = pool->tx_descs;
	int i, batch;

	budget = min(budget, otx2_read_free_sqe(pfvf, queue));
	batch = xsk_tx_peek_release_desc_batch(pool, budget);
	if (!batch)
		return;

	for (i = 0; i < batch; i++) {
		dma_addr_t dma_addr;

		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc[i].addr);
		otx2_xsk_sq_append_pkt(pfvf, dma_addr, xdp_desc[i].len, queue);
	}
}