// SPDX-License-Identifier: GPL-2.0

#include <linux/netdevice.h>
#include <net/netdev_lock.h>
#include <net/xsk_buff_pool.h>
#include <net/xdp_sock.h>
#include <net/xdp_sock_drv.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"

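/* Add an XSK socket to the pool's RCU-protected list of TX sockets.
 * Sockets without a TX ring are not tracked here.
 */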
void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs)
{
	unsigned long flags;

	if (!xs->tx)
		return;

	spin_lock_irqsave(&pool->xsk_tx_list_lock, flags);
	list_add_rcu(&xs->tx_list, &pool->xsk_tx_list);
	spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags);
}

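/* Remove an XSK socket from the pool's TX list. The RCU list variant
 * lets concurrent readers walking xsk_tx_list proceed safely.
 */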
void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs)
{
	unsigned long flags;

	if (!xs->tx)
		return;

	spin_lock_irqsave(&pool->xsk_tx_list_lock, flags);
	list_del_rcu(&xs->tx_list);
	spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags);
}

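/* Free all memory owned by the pool itself. Safe to call with NULL. */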
void xp_destroy(struct xsk_buff_pool *pool)
{
	if (!pool)
		return;

	kvfree(pool->tx_descs);
	kvfree(pool->heads);
	kvfree(pool);
}

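/* Allocate the pool's array of TX descriptors, one per entry in the
 * socket's TX ring.
 */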
int xp_alloc_tx_descs(struct xsk_buff_pool *pool, struct xdp_sock *xs)
{
	pool->tx_descs = kvcalloc(xs->tx->nentries, sizeof(*pool->tx_descs),
				  GFP_KERNEL);
	if (!pool->tx_descs)
		return -ENOMEM;

	return 0;
}

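/* Create a buffer pool on top of a UMEM and initialize every buffer
 * (xdp_buff_xsk) in it. In aligned mode each buffer is tied to a fixed
 * chunk address up front; in unaligned mode the buffers are kept on
 * free_heads and get their addresses when allocated.
 */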
struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
						struct xdp_umem *umem)
{
	bool unaligned = umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG;
	struct xsk_buff_pool *pool;
	struct xdp_buff_xsk *xskb;
	u32 i, entries;

	entries = unaligned ? umem->chunks : 0;
	pool = kvzalloc(struct_size(pool, free_heads, entries), GFP_KERNEL);
	if (!pool)
		goto out;

	pool->heads = kvcalloc(umem->chunks, sizeof(*pool->heads), GFP_KERNEL);
	if (!pool->heads)
		goto out;

	if (xs->tx)
		if (xp_alloc_tx_descs(pool, xs))
			goto out;

	pool->chunk_mask = ~((u64)umem->chunk_size - 1);
	pool->addrs_cnt = umem->size;
	pool->heads_cnt = umem->chunks;
	pool->free_heads_cnt = umem->chunks;
	pool->headroom = umem->headroom;
	pool->chunk_size = umem->chunk_size;
	pool->chunk_shift = ffs(umem->chunk_size) - 1;
	pool->unaligned = unaligned;
	pool->frame_len = umem->chunk_size - umem->headroom -
			  XDP_PACKET_HEADROOM;
	pool->umem = umem;
	pool->addrs = umem->addrs;
	pool->tx_metadata_len = umem->tx_metadata_len;
	pool->tx_sw_csum = umem->flags & XDP_UMEM_TX_SW_CSUM;
	INIT_LIST_HEAD(&pool->free_list);
	INIT_LIST_HEAD(&pool->xskb_list);
	INIT_LIST_HEAD(&pool->xsk_tx_list);
	spin_lock_init(&pool->xsk_tx_list_lock);
	spin_lock_init(&pool->cq_lock);
	refcount_set(&pool->users, 1);

	pool->fq = xs->fq_tmp;
	pool->cq = xs->cq_tmp;

	for (i = 0; i < pool->free_heads_cnt; i++) {
		xskb = &pool->heads[i];
		xskb->pool = pool;
		xskb->xdp.frame_sz = umem->chunk_size - umem->headroom;
		INIT_LIST_HEAD(&xskb->list_node);
		if (pool->unaligned)
			pool->free_heads[i] = xskb;
		else
			xp_init_xskb_addr(xskb, pool, (u64)i * pool->chunk_size);
	}

	return pool;

out:
	xp_destroy(pool);
	return NULL;
}

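/* Point every buffer in the pool at the driver's RX queue info so that
 * frames received into these buffers carry the right rxq metadata.
 */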
void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq)
{
	u32 i;

	for (i = 0; i < pool->heads_cnt; i++)
		pool->heads[i].xdp.rxq = rxq;
}
EXPORT_SYMBOL(xp_set_rxq_info);

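/* Copy driver-private data into the cb area of every buffer, as
 * described by the offset/source/length triple in @desc.
 */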
void xp_fill_cb(struct xsk_buff_pool *pool, struct xsk_cb_desc *desc)
{
	u32 i;

	for (i = 0; i < pool->heads_cnt; i++) {
		struct xdp_buff_xsk *xskb = &pool->heads[i];

		memcpy(xskb->cb + desc->off, desc->src, desc->bytes);
	}
}
EXPORT_SYMBOL(xp_fill_cb);

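/* Ask the driver to tear down its zero-copy state for this queue by
 * issuing XDP_SETUP_XSK_POOL with a NULL pool. No-op in copy mode.
 */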
static void xp_disable_drv_zc(struct xsk_buff_pool *pool)
{
	struct netdev_bpf bpf;
	int err;

	ASSERT_RTNL();

	if (pool->umem->zc) {
		bpf.command = XDP_SETUP_XSK_POOL;
		bpf.xsk.pool = NULL;
		bpf.xsk.queue_id = pool->queue_id;

		err = pool->netdev->netdev_ops->ndo_bpf(pool->netdev, &bpf);

		if (err)
			WARN(1, "Failed to disable zero-copy!\n");
	}
}

#define NETDEV_XDP_ACT_ZC	(NETDEV_XDP_ACT_BASIC |		\
				 NETDEV_XDP_ACT_REDIRECT |	\
				 NETDEV_XDP_ACT_XSK_ZEROCOPY)

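/* Bind the pool to a netdev queue. Copy mode is always possible;
 * zero-copy is attempted unless XDP_COPY was requested, and a failed
 * zero-copy setup falls back to copy mode unless XDP_ZEROCOPY was
 * explicitly forced.
 */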
int xp_assign_dev(struct xsk_buff_pool *pool,
		  struct net_device *netdev, u16 queue_id, u16 flags)
{
	bool force_zc, force_copy;
	struct netdev_bpf bpf;
	int err = 0;

	ASSERT_RTNL();

	force_zc = flags & XDP_ZEROCOPY;
	force_copy = flags & XDP_COPY;

	if (force_zc && force_copy)
		return -EINVAL;

	if (xsk_get_pool_from_qid(netdev, queue_id))
		return -EBUSY;

	pool->netdev = netdev;
	pool->queue_id = queue_id;
	err = xsk_reg_pool_at_qid(netdev, pool, queue_id);
	if (err)
		return err;

	if (flags & XDP_USE_SG)
		pool->umem->flags |= XDP_UMEM_SG_FLAG;

	if (flags & XDP_USE_NEED_WAKEUP)
		pool->uses_need_wakeup = true;
	/* Tx needs to be explicitly woken up the first time. This also
	 * covers drivers that do not implement the feature; they will
	 * always have to call sendto() or poll().
	 */
	pool->cached_need_wakeup = XDP_WAKEUP_TX;

	dev_hold(netdev);

	if (force_copy)
		/* For copy-mode, we are done. */
		return 0;

	if ((netdev->xdp_features & NETDEV_XDP_ACT_ZC) != NETDEV_XDP_ACT_ZC) {
		err = -EOPNOTSUPP;
		goto err_unreg_pool;
	}

	if (netdev->xdp_zc_max_segs == 1 && (flags & XDP_USE_SG)) {
		err = -EOPNOTSUPP;
		goto err_unreg_pool;
	}

	if (dev_get_min_mp_channel_count(netdev)) {
		err = -EBUSY;
		goto err_unreg_pool;
	}

	bpf.command = XDP_SETUP_XSK_POOL;
	bpf.xsk.pool = pool;
	bpf.xsk.queue_id = queue_id;

	netdev_ops_assert_locked(netdev);
	err = netdev->netdev_ops->ndo_bpf(netdev, &bpf);
	if (err)
		goto err_unreg_pool;

	if (!pool->dma_pages) {
		WARN(1, "Driver did not DMA map zero-copy buffers");
		err = -EINVAL;
		goto err_unreg_xsk;
	}
	pool->umem->zc = true;
	pool->xdp_zc_max_segs = netdev->xdp_zc_max_segs;
	return 0;

err_unreg_xsk:
	xp_disable_drv_zc(pool);
err_unreg_pool:
	if (!force_zc)
		err = 0; /* fallback to copy mode */
	if (err) {
		xsk_clear_pool_at_qid(netdev, queue_id);
		dev_put(netdev);
	}
	return err;
}

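/* Bind a pool that shares another socket's UMEM, deriving the bind
 * flags (zero-copy and need_wakeup) from the original socket's pool.
 */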
int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_sock *umem_xs,
			 struct net_device *dev, u16 queue_id)
{
	u16 flags;
	struct xdp_umem *umem = umem_xs->umem;

	/* One fill and completion ring required for each queue id. */
	if (!pool->fq || !pool->cq)
		return -EINVAL;

	flags = umem->zc ? XDP_ZEROCOPY : XDP_COPY;
	if (umem_xs->pool->uses_need_wakeup)
		flags |= XDP_USE_NEED_WAKEUP;

	return xp_assign_dev(pool, dev, queue_id, flags);
}

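/* Undo xp_assign_dev(): disable zero-copy in the driver, unregister the
 * pool from the queue id and drop the netdev reference.
 */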
void xp_clear_dev(struct xsk_buff_pool *pool)
{
	if (!pool->netdev)
		return;

	xp_disable_drv_zc(pool);
	xsk_clear_pool_at_qid(pool->netdev, pool->queue_id);
	dev_put(pool->netdev);
	pool->netdev = NULL;
}

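/* Final teardown, run from a workqueue so the RTNL lock can be taken:
 * detach from the device, destroy the fill and completion rings,
 * release the UMEM and free the pool.
 */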
static void xp_release_deferred(struct work_struct *work)
{
	struct xsk_buff_pool *pool = container_of(work, struct xsk_buff_pool,
						  work);

	rtnl_lock();
	xp_clear_dev(pool);
	rtnl_unlock();

	if (pool->fq) {
		xskq_destroy(pool->fq);
		pool->fq = NULL;
	}

	if (pool->cq) {
		xskq_destroy(pool->cq);
		pool->cq = NULL;
	}

	xdp_put_umem(pool->umem, false);
	xp_destroy(pool);
}

void xp_get_pool(struct xsk_buff_pool *pool)
{
	refcount_inc(&pool->users);
}

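/* Drop a reference to the pool. The last put schedules the deferred
 * release work instead of freeing synchronously, because teardown needs
 * the RTNL lock.
 */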
bool xp_put_pool(struct xsk_buff_pool *pool)
{
	if (!pool)
		return false;

	if (refcount_dec_and_test(&pool->users)) {
		INIT_WORK(&pool->work, xp_release_deferred);
		schedule_work(&pool->work);
		return true;
	}

	return false;
}

static struct xsk_dma_map *xp_find_dma_map(struct xsk_buff_pool *pool)
{
	struct xsk_dma_map *dma_map;

	list_for_each_entry(dma_map, &pool->umem->xsk_dma_list, list) {
		if (dma_map->netdev == pool->netdev)
			return dma_map;
	}

	return NULL;
}

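/* Allocate and register a per-(UMEM, netdev) DMA mapping descriptor
 * that tracks one DMA address per page of the UMEM.
 */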
static struct xsk_dma_map *xp_create_dma_map(struct device *dev, struct net_device *netdev,
					     u32 nr_pages, struct xdp_umem *umem)
{
	struct xsk_dma_map *dma_map;

	dma_map = kzalloc(sizeof(*dma_map), GFP_KERNEL);
	if (!dma_map)
		return NULL;

	dma_map->dma_pages = kvcalloc(nr_pages, sizeof(*dma_map->dma_pages), GFP_KERNEL);
	if (!dma_map->dma_pages) {
		kfree(dma_map);
		return NULL;
	}

	dma_map->netdev = netdev;
	dma_map->dev = dev;
	dma_map->dma_pages_cnt = nr_pages;
	refcount_set(&dma_map->users, 1);
	list_add(&dma_map->list, &umem->xsk_dma_list);
	return dma_map;
}

static void xp_destroy_dma_map(struct xsk_dma_map *dma_map)
{
	list_del(&dma_map->list);
	kvfree(dma_map->dma_pages);
	kfree(dma_map);
}

static void __xp_dma_unmap(struct xsk_dma_map *dma_map, unsigned long attrs)
{
	dma_addr_t *dma;
	u32 i;

	for (i = 0; i < dma_map->dma_pages_cnt; i++) {
		dma = &dma_map->dma_pages[i];
		if (*dma) {
			*dma &= ~XSK_NEXT_PG_CONTIG_MASK;
			dma_unmap_page_attrs(dma_map->dev, *dma, PAGE_SIZE,
					     DMA_BIDIRECTIONAL, attrs);
			*dma = 0;
		}
	}

	xp_destroy_dma_map(dma_map);
}

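/* Release the pool's view of the DMA mapping. The underlying pages are
 * only unmapped once the last pool sharing the mapping drops its
 * reference.
 */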
void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs)
{
	struct xsk_dma_map *dma_map;

	if (!pool->dma_pages)
		return;

	dma_map = xp_find_dma_map(pool);
	if (!dma_map) {
		WARN(1, "Could not find dma_map for device");
		return;
	}

	if (refcount_dec_and_test(&dma_map->users))
		__xp_dma_unmap(dma_map, attrs);

	kvfree(pool->dma_pages);
	pool->dma_pages = NULL;
	pool->dma_pages_cnt = 0;
	pool->dev = NULL;
}
EXPORT_SYMBOL(xp_dma_unmap);

static void xp_check_dma_contiguity(struct xsk_dma_map *dma_map)
{
	u32 i;

	for (i = 0; i < dma_map->dma_pages_cnt - 1; i++) {
		if (dma_map->dma_pages[i] + PAGE_SIZE == dma_map->dma_pages[i + 1])
			dma_map->dma_pages[i] |= XSK_NEXT_PG_CONTIG_MASK;
		else
			dma_map->dma_pages[i] &= ~XSK_NEXT_PG_CONTIG_MASK;
	}
}

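/* Populate the pool's copy of the per-page DMA addresses from the
 * shared dma_map, and for aligned pools precompute each buffer's DMA
 * address from its fixed chunk address.
 */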
static int xp_init_dma_info(struct xsk_buff_pool *pool, struct xsk_dma_map *dma_map)
{
	if (!pool->unaligned) {
		u32 i;

		for (i = 0; i < pool->heads_cnt; i++) {
			struct xdp_buff_xsk *xskb = &pool->heads[i];
			u64 orig_addr;

			orig_addr = xskb->xdp.data_hard_start - pool->addrs - pool->headroom;
			xp_init_xskb_dma(xskb, pool, dma_map->dma_pages, orig_addr);
		}
	}

	pool->dma_pages = kvcalloc(dma_map->dma_pages_cnt, sizeof(*pool->dma_pages), GFP_KERNEL);
	if (!pool->dma_pages)
		return -ENOMEM;

	pool->dev = dma_map->dev;
	pool->dma_pages_cnt = dma_map->dma_pages_cnt;
	memcpy(pool->dma_pages, dma_map->dma_pages,
	       pool->dma_pages_cnt * sizeof(*pool->dma_pages));

	return 0;
}

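/* DMA map all pages of the UMEM for the given device. If another pool
 * bound to the same netdev already mapped this UMEM, the existing
 * mapping is reused and only its refcount is bumped.
 */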
int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
	       unsigned long attrs, struct page **pages, u32 nr_pages)
{
	struct xsk_dma_map *dma_map;
	dma_addr_t dma;
	int err;
	u32 i;

	dma_map = xp_find_dma_map(pool);
	if (dma_map) {
		err = xp_init_dma_info(pool, dma_map);
		if (err)
			return err;

		refcount_inc(&dma_map->users);
		return 0;
	}

	dma_map = xp_create_dma_map(dev, pool->netdev, nr_pages, pool->umem);
	if (!dma_map)
		return -ENOMEM;

	for (i = 0; i < dma_map->dma_pages_cnt; i++) {
		dma = dma_map_page_attrs(dev, pages[i], 0, PAGE_SIZE,
					 DMA_BIDIRECTIONAL, attrs);
		if (dma_mapping_error(dev, dma)) {
			__xp_dma_unmap(dma_map, attrs);
			return -ENOMEM;
		}
		dma_map->dma_pages[i] = dma;
	}

	if (pool->unaligned)
		xp_check_dma_contiguity(dma_map);

	err = xp_init_dma_info(pool, dma_map);
	if (err) {
		__xp_dma_unmap(dma_map, attrs);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL(xp_dma_map);

static bool xp_addr_crosses_non_contig_pg(struct xsk_buff_pool *pool,
					  u64 addr)
{
	return xp_desc_crosses_non_contig_pg(pool, addr, pool->chunk_size);
}

static bool xp_check_unaligned(struct xsk_buff_pool *pool, u64 *addr)
{
	*addr = xp_unaligned_extract_addr(*addr);
	if (*addr >= pool->addrs_cnt ||
	    *addr + pool->chunk_size > pool->addrs_cnt ||
	    xp_addr_crosses_non_contig_pg(pool, *addr))
		return false;
	return true;
}

static bool xp_check_aligned(struct xsk_buff_pool *pool, u64 *addr)
{
	*addr = xp_aligned_extract_addr(pool, *addr);
	return *addr < pool->addrs_cnt;
}

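/* Turn a validated fill-ring address into a buffer. Aligned pools can
 * index the heads array directly; unaligned pools pop a free head and
 * initialize its address (and DMA address, if mapped) on the fly.
 */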
static struct xdp_buff_xsk *xp_get_xskb(struct xsk_buff_pool *pool, u64 addr)
{
	struct xdp_buff_xsk *xskb;

	if (pool->unaligned) {
		xskb = pool->free_heads[--pool->free_heads_cnt];
		xp_init_xskb_addr(xskb, pool, addr);
		if (pool->dma_pages)
			xp_init_xskb_dma(xskb, pool, pool->dma_pages, addr);
	} else {
		xskb = &pool->heads[xp_aligned_extract_idx(pool, addr)];
	}

	return xskb;
}

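/* Slow path: pull one address from the fill ring, skipping and counting
 * invalid descriptors until a valid one is found.
 */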
static struct xdp_buff_xsk *__xp_alloc(struct xsk_buff_pool *pool)
{
	struct xdp_buff_xsk *xskb;
	u64 addr;
	bool ok;

	if (pool->free_heads_cnt == 0)
		return NULL;

	for (;;) {
		if (!xskq_cons_peek_addr_unchecked(pool->fq, &addr)) {
			pool->fq->queue_empty_descs++;
			return NULL;
		}

		ok = pool->unaligned ? xp_check_unaligned(pool, &addr) :
		     xp_check_aligned(pool, &addr);
		if (!ok) {
			pool->fq->invalid_descs++;
			xskq_cons_release(pool->fq);
			continue;
		}
		break;
	}

	xskb = xp_get_xskb(pool, addr);

	xskq_cons_release(pool->fq);
	return xskb;
}

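/* Allocate a single buffer: prefer the pool-internal free list and fall
 * back to the fill ring. The returned xdp_buff has its data pointers
 * reset and, if the device requires it, its DMA range synced for device
 * use.
 */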
struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool)
{
	struct xdp_buff_xsk *xskb;

	if (!pool->free_list_cnt) {
		xskb = __xp_alloc(pool);
		if (!xskb)
			return NULL;
	} else {
		pool->free_list_cnt--;
		xskb = list_first_entry(&pool->free_list, struct xdp_buff_xsk,
					list_node);
		list_del_init(&xskb->list_node);
	}

	xskb->xdp.data = xskb->xdp.data_hard_start + XDP_PACKET_HEADROOM;
	xskb->xdp.data_meta = xskb->xdp.data;
	xskb->xdp.flags = 0;

	if (pool->dev)
		xp_dma_sync_for_device(pool, xskb->dma, pool->frame_len);

	return &xskb->xdp;
}
EXPORT_SYMBOL(xp_alloc);

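/* Batched refill from the fill ring: read up to @max addresses without
 * releasing them one by one, drop invalid descriptors, and release the
 * whole batch at the end.
 */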
static u32 xp_alloc_new_from_fq(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
{
	u32 i, cached_cons, nb_entries;

	if (max > pool->free_heads_cnt)
		max = pool->free_heads_cnt;
	max = xskq_cons_nb_entries(pool->fq, max);

	cached_cons = pool->fq->cached_cons;
	nb_entries = max;
	i = max;
	while (i--) {
		struct xdp_buff_xsk *xskb;
		u64 addr;
		bool ok;

		__xskq_cons_read_addr_unchecked(pool->fq, cached_cons++, &addr);

		ok = pool->unaligned ? xp_check_unaligned(pool, &addr) :
		     xp_check_aligned(pool, &addr);
		if (unlikely(!ok)) {
			pool->fq->invalid_descs++;
			nb_entries--;
			continue;
		}

		xskb = xp_get_xskb(pool, addr);

		*xdp = &xskb->xdp;
		xdp++;
	}

	xskq_cons_release_n(pool->fq, max);
	return nb_entries;
}

static u32 xp_alloc_reused(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 nb_entries)
{
	struct xdp_buff_xsk *xskb;
	u32 i;

	nb_entries = min_t(u32, nb_entries, pool->free_list_cnt);

	i = nb_entries;
	while (i--) {
		xskb = list_first_entry(&pool->free_list, struct xdp_buff_xsk, list_node);
		list_del_init(&xskb->list_node);

		*xdp = &xskb->xdp;
		xdp++;
	}
	pool->free_list_cnt -= nb_entries;

	return nb_entries;
}

static u32 xp_alloc_slow(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
			 u32 max)
{
	int i;

	for (i = 0; i < max; i++) {
		struct xdp_buff *buff;

		buff = xp_alloc(pool);
		if (unlikely(!buff))
			return i;
		*xdp = buff;
		xdp++;
	}

	return max;
}

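/* Allocate up to @max buffers into @xdp. Devices that need DMA syncing
 * take the one-by-one slow path; otherwise reused buffers are handed
 * out first and the remainder comes from the fill ring in one batch.
 * Returns the number of buffers actually allocated.
 */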
u32 xp_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
{
	u32 nb_entries1 = 0, nb_entries2;

	if (unlikely(pool->dev && dma_dev_need_sync(pool->dev)))
		return xp_alloc_slow(pool, xdp, max);

	if (unlikely(pool->free_list_cnt)) {
		nb_entries1 = xp_alloc_reused(pool, xdp, max);
		if (nb_entries1 == max)
			return nb_entries1;

		max -= nb_entries1;
		xdp += nb_entries1;
	}

	nb_entries2 = xp_alloc_new_from_fq(pool, xdp, max);
	if (!nb_entries2)
		pool->fq->queue_empty_descs++;

	return nb_entries1 + nb_entries2;
}
EXPORT_SYMBOL(xp_alloc_batch);

bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
	u32 req_count, avail_count;

	if (pool->free_list_cnt >= count)
		return true;

	req_count = count - pool->free_list_cnt;
	avail_count = xskq_cons_nb_entries(pool->fq, req_count);
	if (!avail_count)
		pool->fq->queue_empty_descs++;

	return avail_count >= req_count;
}
EXPORT_SYMBOL(xp_can_alloc);

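/* Return a buffer to the pool's free list. Buffers already on a list
 * are left alone, which makes double-free attempts harmless.
 */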
void xp_free(struct xdp_buff_xsk *xskb)
{
	if (!list_empty(&xskb->list_node))
		return;

	xskb->pool->free_list_cnt++;
	list_add(&xskb->list_node, &xskb->pool->free_list);
}
EXPORT_SYMBOL(xp_free);

static u64 __xp_raw_get_addr(const struct xsk_buff_pool *pool, u64 addr)
{
	return pool->unaligned ? xp_unaligned_add_offset_to_addr(addr) : addr;
}

static void *__xp_raw_get_data(const struct xsk_buff_pool *pool, u64 addr)
{
	return pool->addrs + addr;
}

void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
	return __xp_raw_get_data(pool, __xp_raw_get_addr(pool, addr));
}
EXPORT_SYMBOL(xp_raw_get_data);

static dma_addr_t __xp_raw_get_dma(const struct xsk_buff_pool *pool, u64 addr)
{
	return (pool->dma_pages[addr >> PAGE_SHIFT] &
		~XSK_NEXT_PG_CONTIG_MASK) +
		(addr & ~PAGE_MASK);
}

dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr)
{
	return __xp_raw_get_dma(pool, __xp_raw_get_addr(pool, addr));
}
EXPORT_SYMBOL(xp_raw_get_dma);

/**
 * xp_raw_get_ctx - get &xdp_desc context
 * @pool: XSK buff pool the desc address belongs to
 * @addr: desc address (from userspace)
 *
 * Helper for getting a desc's DMA address and metadata pointer, if present.
 * Saves one call on the hotpath, a double calculation of the actual address,
 * and inline checks for metadata presence and sanity.
 *
 * Return: new &xdp_desc_ctx struct containing the desc's DMA address and
 * metadata pointer, if it is present and valid (initialized to %NULL
 * otherwise).
 */
struct xdp_desc_ctx xp_raw_get_ctx(const struct xsk_buff_pool *pool, u64 addr)
{
	struct xdp_desc_ctx ret;

	addr = __xp_raw_get_addr(pool, addr);

	ret.dma = __xp_raw_get_dma(pool, addr);
	ret.meta = __xsk_buff_get_metadata(pool, __xp_raw_get_data(pool, addr));

	return ret;
}
EXPORT_SYMBOL(xp_raw_get_ctx);