// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2015-2019 Netronome Systems, Inc. */

#include "nfp_app.h"
#include "nfp_net_dp.h"
#include "nfp_net_xsk.h"

/**
 * nfp_net_rx_alloc_one() - Allocate and map page frag for RX
 * @dp: NFP Net data path struct
 * @dma_addr: Pointer to storage for DMA address (output param)
 *
 * This function will allocate a new page frag and map it for DMA.
 *
 * Return: allocated page frag or NULL on failure.
 */
void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
{
	void *frag;

	if (!dp->xdp_prog) {
		frag = netdev_alloc_frag(dp->fl_bufsz);
	} else {
		struct page *page;

		page = alloc_page(GFP_KERNEL);
		frag = page ? page_address(page) : NULL;
	}
	if (!frag) {
		nn_dp_warn(dp, "Failed to alloc receive page frag\n");
		return NULL;
	}

	*dma_addr = nfp_net_dma_map_rx(dp, frag);
	if (dma_mapping_error(dp->dev, *dma_addr)) {
		nfp_net_free_frag(frag, dp->xdp_prog);
		nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
		return NULL;
	}

	return frag;
}

/**
 * nfp_net_tx_ring_init() - Fill in the boilerplate for a TX ring
 * @tx_ring: TX ring structure
 * @dp: NFP Net data path struct
 * @r_vec: IRQ vector servicing this ring
 * @idx: Ring index
 * @is_xdp: Is this an XDP TX ring?
 */
static void
nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring, struct nfp_net_dp *dp,
		     struct nfp_net_r_vector *r_vec, unsigned int idx,
		     bool is_xdp)
{
	struct nfp_net *nn = r_vec->nfp_net;

	tx_ring->idx = idx;
	tx_ring->r_vec = r_vec;
	tx_ring->is_xdp = is_xdp;
	u64_stats_init(&tx_ring->r_vec->tx_sync);

	tx_ring->qcidx = tx_ring->idx * nn->stride_tx;
	tx_ring->txrwb = dp->txrwb ? &dp->txrwb[idx] : NULL;
	tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx);
}

/**
 * nfp_net_rx_ring_init() - Fill in the boilerplate for a RX ring
 * @rx_ring: RX ring structure
 * @r_vec: IRQ vector servicing this ring
 * @idx: Ring index
 */
static void
nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,
		     struct nfp_net_r_vector *r_vec, unsigned int idx)
{
	struct nfp_net *nn = r_vec->nfp_net;

	rx_ring->idx = idx;
	rx_ring->r_vec = r_vec;
	u64_stats_init(&rx_ring->r_vec->rx_sync);

	rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx;
	rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx);
}

/**
 * nfp_net_rx_ring_reset() - Reflect freelist state in SW after disable
 * @rx_ring: RX ring structure
 *
 * Assumes that the device is stopped, must be idempotent.
 */
void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring)
{
	unsigned int wr_idx, last_idx;

	/* wr_p == rd_p means ring was never fed FL bufs. RX rings are always
	 * kept at cnt - 1 FL bufs.
	 */
	if (rx_ring->wr_p == 0 && rx_ring->rd_p == 0)
		return;

	/* Move the empty entry to the end of the list */
	wr_idx = D_IDX(rx_ring, rx_ring->wr_p);
	last_idx = rx_ring->cnt - 1;
	if (rx_ring->r_vec->xsk_pool) {
		rx_ring->xsk_rxbufs[wr_idx] = rx_ring->xsk_rxbufs[last_idx];
		memset(&rx_ring->xsk_rxbufs[last_idx], 0,
		       sizeof(*rx_ring->xsk_rxbufs));
	} else {
		rx_ring->rxbufs[wr_idx] = rx_ring->rxbufs[last_idx];
		memset(&rx_ring->rxbufs[last_idx], 0, sizeof(*rx_ring->rxbufs));
	}

	memset(rx_ring->rxds, 0, rx_ring->size);
	rx_ring->wr_p = 0;
	rx_ring->rd_p = 0;
}

/**
 * nfp_net_rx_ring_bufs_free() - Free any buffers currently on the RX ring
 * @dp: NFP Net data path struct
 * @rx_ring: RX ring to remove buffers from
 *
 * Assumes that the device is stopped and buffers are in [0, ring->cnt - 1)
 * entries. After device is disabled nfp_net_rx_ring_reset() must be called
 * to restore required ring geometry.
 */
static void
nfp_net_rx_ring_bufs_free(struct nfp_net_dp *dp,
			  struct nfp_net_rx_ring *rx_ring)
{
	unsigned int i;

	if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx))
		return;

	for (i = 0; i < rx_ring->cnt - 1; i++) {
		/* NULL frag can only happen when initial filling of the ring
		 * fails to allocate enough buffers and calls here to free
		 * already allocated ones.
		 */
		if (!rx_ring->rxbufs[i].frag)
			continue;

		nfp_net_dma_unmap_rx(dp, rx_ring->rxbufs[i].dma_addr);
		nfp_net_free_frag(rx_ring->rxbufs[i].frag, dp->xdp_prog);
		rx_ring->rxbufs[i].dma_addr = 0;
		rx_ring->rxbufs[i].frag = NULL;
	}
}

/**
 * nfp_net_rx_ring_bufs_alloc() - Fill RX ring with buffers (don't give to FW)
 * @dp: NFP Net data path struct
 * @rx_ring: RX ring to fill with buffers
 */
static int
nfp_net_rx_ring_bufs_alloc(struct nfp_net_dp *dp,
			   struct nfp_net_rx_ring *rx_ring)
{
	struct nfp_net_rx_buf *rxbufs;
	unsigned int i;

	if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx))
		return 0;

	rxbufs = rx_ring->rxbufs;

	for (i = 0; i < rx_ring->cnt - 1; i++) {
		rxbufs[i].frag = nfp_net_rx_alloc_one(dp, &rxbufs[i].dma_addr);
		if (!rxbufs[i].frag) {
			nfp_net_rx_ring_bufs_free(dp, rx_ring);
			return -ENOMEM;
		}
	}

	return 0;
}

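/**
 * nfp_net_tx_rings_prepare() - Allocate all TX rings for a data path
 * @nn: NFP Net device structure
 * @dp: NFP Net data path struct
 *
 * Allocate the TX ring array, the optional TX write-back area and every
 * ring's descriptors and buffers. Rings beyond @dp->num_stack_tx_rings are
 * initialized as XDP rings sharing the IRQ vectors of the stack rings.
 * On failure everything allocated so far is freed again.
 *
 * Return: 0 on success, -ENOMEM otherwise.
 */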
int nfp_net_tx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp)
{
	unsigned int r;

	dp->tx_rings = kzalloc_objs(*dp->tx_rings, dp->num_tx_rings);
	if (!dp->tx_rings)
		return -ENOMEM;

	if (dp->ctrl & NFP_NET_CFG_CTRL_TXRWB) {
		dp->txrwb = dma_alloc_coherent(dp->dev,
					       dp->num_tx_rings * sizeof(u64),
					       &dp->txrwb_dma, GFP_KERNEL);
		if (!dp->txrwb)
			goto err_free_rings;
	}

	for (r = 0; r < dp->num_tx_rings; r++) {
		int bias = 0;

		if (r >= dp->num_stack_tx_rings)
			bias = dp->num_stack_tx_rings;

		nfp_net_tx_ring_init(&dp->tx_rings[r], dp,
				     &nn->r_vecs[r - bias], r, bias);

		if (nfp_net_tx_ring_alloc(dp, &dp->tx_rings[r]))
			goto err_free_prev;

		if (nfp_net_tx_ring_bufs_alloc(dp, &dp->tx_rings[r]))
			goto err_free_ring;
	}

	return 0;

err_free_prev:
	while (r--) {
		nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]);
err_free_ring:
		nfp_net_tx_ring_free(dp, &dp->tx_rings[r]);
	}
	if (dp->txrwb)
		dma_free_coherent(dp->dev, dp->num_tx_rings * sizeof(u64),
				  dp->txrwb, dp->txrwb_dma);
err_free_rings:
	kfree(dp->tx_rings);
	return -ENOMEM;
}

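/**
 * nfp_net_tx_rings_free() - Free all TX rings of a data path
 * @dp: NFP Net data path struct
 *
 * Counterpart of nfp_net_tx_rings_prepare(); frees the ring buffers,
 * descriptor memory, the TX write-back area and the ring array itself.
 */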
void nfp_net_tx_rings_free(struct nfp_net_dp *dp)
{
	unsigned int r;

	for (r = 0; r < dp->num_tx_rings; r++) {
		nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]);
		nfp_net_tx_ring_free(dp, &dp->tx_rings[r]);
	}

	if (dp->txrwb)
		dma_free_coherent(dp->dev, dp->num_tx_rings * sizeof(u64),
				  dp->txrwb, dp->txrwb_dma);
	kfree(dp->tx_rings);
}

/**
 * nfp_net_rx_ring_free() - Free resources allocated to a RX ring
 * @rx_ring: RX ring to free
 */
static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
{
	struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
	struct nfp_net_dp *dp = &r_vec->nfp_net->dp;

	if (dp->netdev)
		xdp_rxq_info_unreg(&rx_ring->xdp_rxq);

	if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx))
		kvfree(rx_ring->xsk_rxbufs);
	else
		kvfree(rx_ring->rxbufs);

	if (rx_ring->rxds)
		dma_free_coherent(dp->dev, rx_ring->size,
				  rx_ring->rxds, rx_ring->dma);

	rx_ring->cnt = 0;
	rx_ring->rxbufs = NULL;
	rx_ring->xsk_rxbufs = NULL;
	rx_ring->rxds = NULL;
	rx_ring->dma = 0;
	rx_ring->size = 0;
}

/**
 * nfp_net_rx_ring_alloc() - Allocate resource for a RX ring
 * @dp: NFP Net data path struct
 * @rx_ring: RX ring to allocate
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int
nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
{
	enum xdp_mem_type mem_type;
	size_t rxbuf_sw_desc_sz;
	int err;

	if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx)) {
		mem_type = MEM_TYPE_XSK_BUFF_POOL;
		rxbuf_sw_desc_sz = sizeof(*rx_ring->xsk_rxbufs);
	} else {
		mem_type = MEM_TYPE_PAGE_ORDER0;
		rxbuf_sw_desc_sz = sizeof(*rx_ring->rxbufs);
	}

	if (dp->netdev) {
		err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, dp->netdev,
				       rx_ring->idx, rx_ring->r_vec->napi.napi_id);
		if (err < 0)
			return err;

		err = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, mem_type, NULL);
		if (err)
			goto err_alloc;
	}

	rx_ring->cnt = dp->rxd_cnt;
	rx_ring->size = array_size(rx_ring->cnt, sizeof(*rx_ring->rxds));
	rx_ring->rxds = dma_alloc_coherent(dp->dev, rx_ring->size,
					   &rx_ring->dma,
					   GFP_KERNEL | __GFP_NOWARN);
	if (!rx_ring->rxds) {
		netdev_warn(dp->netdev, "failed to allocate RX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n",
			    rx_ring->cnt);
		goto err_alloc;
	}

	if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx)) {
		rx_ring->xsk_rxbufs = kvcalloc(rx_ring->cnt, rxbuf_sw_desc_sz,
					       GFP_KERNEL);
		if (!rx_ring->xsk_rxbufs)
			goto err_alloc;
	} else {
		rx_ring->rxbufs = kvcalloc(rx_ring->cnt, rxbuf_sw_desc_sz,
					   GFP_KERNEL);
		if (!rx_ring->rxbufs)
			goto err_alloc;
	}

	return 0;

err_alloc:
	nfp_net_rx_ring_free(rx_ring);
	return -ENOMEM;
}

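/**
 * nfp_net_rx_rings_prepare() - Allocate all RX rings for a data path
 * @nn: NFP Net device structure
 * @dp: NFP Net data path struct
 *
 * Allocate the RX ring array and, for each ring, its descriptor memory and
 * receive buffers. On failure all rings allocated so far are freed again.
 *
 * Return: 0 on success, -ENOMEM otherwise.
 */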
int nfp_net_rx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp)
{
	unsigned int r;

	dp->rx_rings = kzalloc_objs(*dp->rx_rings, dp->num_rx_rings);
	if (!dp->rx_rings)
		return -ENOMEM;

	for (r = 0; r < dp->num_rx_rings; r++) {
		nfp_net_rx_ring_init(&dp->rx_rings[r], &nn->r_vecs[r], r);

		if (nfp_net_rx_ring_alloc(dp, &dp->rx_rings[r]))
			goto err_free_prev;

		if (nfp_net_rx_ring_bufs_alloc(dp, &dp->rx_rings[r]))
			goto err_free_ring;
	}

	return 0;

err_free_prev:
	while (r--) {
		nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]);
err_free_ring:
		nfp_net_rx_ring_free(&dp->rx_rings[r]);
	}
	kfree(dp->rx_rings);
	return -ENOMEM;
}

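/**
 * nfp_net_rx_rings_free() - Free all RX rings of a data path
 * @dp: NFP Net data path struct
 *
 * Counterpart of nfp_net_rx_rings_prepare(); frees the ring buffers,
 * descriptor memory and the ring array itself.
 */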
void nfp_net_rx_rings_free(struct nfp_net_dp *dp)
{
	unsigned int r;

	for (r = 0; r < dp->num_rx_rings; r++) {
		nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]);
		nfp_net_rx_ring_free(&dp->rx_rings[r]);
	}

	kfree(dp->rx_rings);
}

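/**
 * nfp_net_rx_ring_hw_cfg_write() - Write RX ring config to the device
 * @nn: NFP Net device structure
 * @rx_ring: RX ring to configure
 * @idx: Ring index
 *
 * Program the ring's DMA address, size (as log2) and MSI-X vector into the
 * per-ring registers in the control BAR.
 */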
void
nfp_net_rx_ring_hw_cfg_write(struct nfp_net *nn,
			     struct nfp_net_rx_ring *rx_ring, unsigned int idx)
{
	/* Write the DMA address, size and MSI-X info to the device */
	nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), rx_ring->dma);
	nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), ilog2(rx_ring->cnt));
	nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), rx_ring->r_vec->irq_entry);
}

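/**
 * nfp_net_tx_ring_hw_cfg_write() - Write TX ring config to the device
 * @nn: NFP Net device structure
 * @tx_ring: TX ring to configure
 * @idx: Ring index
 *
 * Program the ring's DMA address, size (as log2) and MSI-X vector into the
 * per-ring registers in the control BAR. If the ring uses a write-back
 * buffer, clear it and program its DMA address as well.
 */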
void
nfp_net_tx_ring_hw_cfg_write(struct nfp_net *nn,
			     struct nfp_net_tx_ring *tx_ring, unsigned int idx)
{
	nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), tx_ring->dma);
	if (tx_ring->txrwb) {
		*tx_ring->txrwb = 0;
		nn_writeq(nn, NFP_NET_CFG_TXR_WB_ADDR(idx),
			  nn->dp.txrwb_dma + idx * sizeof(u64));
	}
	nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), ilog2(tx_ring->cnt));
	nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), tx_ring->r_vec->irq_entry);
}

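/**
 * nfp_net_vec_clear_ring_data() - Clear ring config for a vector
 * @nn: NFP Net device structure
 * @idx: Ring index
 *
 * Zero the RX and TX ring address, size and vector registers for @idx,
 * undoing what the hw_cfg_write helpers above have programmed.
 */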
void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx)
{
	nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), 0);
	nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), 0);
	nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), 0);

	nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), 0);
	nn_writeq(nn, NFP_NET_CFG_TXR_WB_ADDR(idx), 0);
	nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), 0);
	nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), 0);
}

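/**
 * nfp_net_tx() - Common transmit entry point
 * @skb: SKB to transmit
 * @netdev: netdev structure
 *
 * Dispatch the SKB to the xmit handler of the active datapath ops.
 *
 * Return: netdev_tx_t status returned by the datapath xmit handler.
 */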
netdev_tx_t nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);

	return nn->dp.ops->xmit(skb, netdev);
}

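/**
 * __nfp_ctrl_tx() - Send a control message without taking the vector lock
 * @nn: NFP Net device structure
 * @skb: Control message SKB
 *
 * Hand @skb to the datapath's ctrl_tx_one handler on ring vector 0.
 * Unlike nfp_ctrl_tx(), no locking is done here, so the caller must
 * ensure mutual exclusion itself.
 *
 * Return: true if the message was queued, false otherwise.
 */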
bool __nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
{
	struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];

	return nn->dp.ops->ctrl_tx_one(nn, r_vec, skb, false);
}

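/**
 * nfp_ctrl_tx() - Send a control message
 * @nn: NFP Net device structure
 * @skb: Control message SKB
 *
 * Locked counterpart of __nfp_ctrl_tx(); takes the ring vector lock
 * around the ctrl_tx_one call.
 *
 * Return: true if the message was queued, false otherwise.
 */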
bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
{
	struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];
	bool ret;

	spin_lock_bh(&r_vec->lock);
	ret = nn->dp.ops->ctrl_tx_one(nn, r_vec, skb, false);
	spin_unlock_bh(&r_vec->lock);

	return ret;
}

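/**
 * nfp_net_vlan_strip() - Record a HW-stripped VLAN tag in the skb
 * @skb: Received packet
 * @rxd: RX descriptor for the packet
 * @meta: Parsed packet metadata
 *
 * Take the VLAN tag reported either in the RX descriptor or in the
 * prepended metadata and store it in the skb for VLAN acceleration.
 *
 * Return: false if the metadata carries an unknown TPID, true otherwise.
 */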
bool nfp_net_vlan_strip(struct sk_buff *skb, const struct nfp_net_rx_desc *rxd,
			const struct nfp_meta_parsed *meta)
{
	u16 tpid = 0, tci = 0;

	if (rxd->rxd.flags & PCIE_DESC_RX_VLAN) {
		tpid = ETH_P_8021Q;
		tci = le16_to_cpu(rxd->rxd.vlan);
	} else if (meta->vlan.stripped) {
		if (meta->vlan.tpid == NFP_NET_VLAN_CTAG)
			tpid = ETH_P_8021Q;
		else if (meta->vlan.tpid == NFP_NET_VLAN_STAG)
			tpid = ETH_P_8021AD;
		else
			return false;

		tci = meta->vlan.tci;
	}
	if (tpid)
		__vlan_hwaccel_put_tag(skb, htons(tpid), tci);

	return true;
}