1 // SPDX-License-Identifier: GPL-2.0
2 /* XDP sockets
3 *
4 * AF_XDP sockets provide a channel between XDP programs and userspace
5 * applications.
6 * Copyright(c) 2018 Intel Corporation.
7 *
8 * Author(s): Björn Töpel <bjorn.topel@intel.com>
9 * Magnus Karlsson <magnus.karlsson@intel.com>
10 */
11
12 #define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__
13
14 #include <linux/if_xdp.h>
15 #include <linux/init.h>
16 #include <linux/sched/mm.h>
17 #include <linux/sched/signal.h>
18 #include <linux/sched/task.h>
19 #include <linux/socket.h>
20 #include <linux/file.h>
21 #include <linux/uaccess.h>
22 #include <linux/net.h>
23 #include <linux/netdevice.h>
24 #include <linux/rculist.h>
25 #include <linux/vmalloc.h>
26 #include <net/xdp_sock_drv.h>
27 #include <net/busy_poll.h>
28 #include <net/netdev_lock.h>
29 #include <net/netdev_rx_queue.h>
30 #include <net/xdp.h>
31
32 #include "xsk_queue.h"
33 #include "xdp_umem.h"
34 #include "xsk.h"
35
36 #define TX_BATCH_SIZE 32
37 #define MAX_PER_SOCKET_BUDGET (TX_BATCH_SIZE)
38
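/* Wakeup flagging helpers used by zero-copy drivers. Setting
 * XDP_RING_NEED_WAKEUP on the fill ring (Rx) or on every Tx ring bound to
 * the pool tells user space that it must kick the kernel (e.g. via poll()
 * or sendto()) before more work will be done. The cached_need_wakeup
 * bitmask avoids touching the user-visible rings when the flag is already
 * in the desired state.
 */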
39 void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
40 {
41 if (pool->cached_need_wakeup & XDP_WAKEUP_RX)
42 return;
43
44 pool->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
45 pool->cached_need_wakeup |= XDP_WAKEUP_RX;
46 }
47 EXPORT_SYMBOL(xsk_set_rx_need_wakeup);
48
49 void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
50 {
51 struct xdp_sock *xs;
52
53 if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
54 return;
55
56 rcu_read_lock();
57 list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
58 xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
59 }
60 rcu_read_unlock();
61
62 pool->cached_need_wakeup |= XDP_WAKEUP_TX;
63 }
64 EXPORT_SYMBOL(xsk_set_tx_need_wakeup);
65
66 void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
67 {
68 if (!(pool->cached_need_wakeup & XDP_WAKEUP_RX))
69 return;
70
71 pool->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
72 pool->cached_need_wakeup &= ~XDP_WAKEUP_RX;
73 }
74 EXPORT_SYMBOL(xsk_clear_rx_need_wakeup);
75
76 void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
77 {
78 struct xdp_sock *xs;
79
80 if (!(pool->cached_need_wakeup & XDP_WAKEUP_TX))
81 return;
82
83 rcu_read_lock();
84 list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
85 xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
86 }
87 rcu_read_unlock();
88
89 pool->cached_need_wakeup &= ~XDP_WAKEUP_TX;
90 }
91 EXPORT_SYMBOL(xsk_clear_tx_need_wakeup);
92
93 bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
94 {
95 return pool->uses_need_wakeup;
96 }
97 EXPORT_SYMBOL(xsk_uses_need_wakeup);
98
99 struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
100 u16 queue_id)
101 {
102 if (queue_id < dev->real_num_rx_queues)
103 return dev->_rx[queue_id].pool;
104 if (queue_id < dev->real_num_tx_queues)
105 return dev->_tx[queue_id].pool;
106
107 return NULL;
108 }
109 EXPORT_SYMBOL(xsk_get_pool_from_qid);
110
111 void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
112 {
113 if (queue_id < dev->num_rx_queues)
114 dev->_rx[queue_id].pool = NULL;
115 if (queue_id < dev->num_tx_queues)
116 dev->_tx[queue_id].pool = NULL;
117 }
118
119 /* The buffer pool is stored both in the _rx struct and the _tx struct as we do
120 * not know if the device has more tx queues than rx, or the opposite.
121 * This might also change during run time.
122 */
123 int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
124 u16 queue_id)
125 {
126 if (queue_id >= max_t(unsigned int,
127 dev->real_num_rx_queues,
128 dev->real_num_tx_queues))
129 return -EINVAL;
130
131 if (queue_id < dev->real_num_rx_queues)
132 dev->_rx[queue_id].pool = pool;
133 if (queue_id < dev->real_num_tx_queues)
134 dev->_tx[queue_id].pool = pool;
135
136 return 0;
137 }
138
139 static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff_xsk *xskb, u32 len,
140 u32 flags)
141 {
142 u64 addr;
143 int err;
144
145 addr = xp_get_handle(xskb, xskb->pool);
146 err = xskq_prod_reserve_desc(xs->rx, addr, len, flags);
147 if (err) {
148 xs->rx_queue_full++;
149 return err;
150 }
151
152 xp_release(xskb);
153 return 0;
154 }
155
156 static int xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
157 {
158 struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
159 u32 frags = xdp_buff_has_frags(xdp);
160 struct xdp_buff_xsk *pos, *tmp;
161 struct list_head *xskb_list;
162 u32 contd = 0;
163 int err;
164
165 if (frags)
166 contd = XDP_PKT_CONTD;
167
168 err = __xsk_rcv_zc(xs, xskb, len, contd);
169 if (err)
170 goto err;
171 if (likely(!frags))
172 return 0;
173
174 xskb_list = &xskb->pool->xskb_list;
175 list_for_each_entry_safe(pos, tmp, xskb_list, list_node) {
176 if (list_is_singular(xskb_list))
177 contd = 0;
178 len = pos->xdp.data_end - pos->xdp.data;
179 err = __xsk_rcv_zc(xs, pos, len, contd);
180 if (err)
181 goto err;
182 list_del(&pos->list_node);
183 }
184
185 return 0;
186 err:
187 xsk_buff_free(xdp);
188 return err;
189 }
190
191 static void *xsk_copy_xdp_start(struct xdp_buff *from)
192 {
193 if (unlikely(xdp_data_meta_unsupported(from)))
194 return from->data;
195 else
196 return from->data_meta;
197 }
198
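/* Copy up to to_len bytes into one Rx buffer, pulling data from the current
 * source area and advancing to the next skb fragment whenever the source is
 * exhausted. Returns the number of bytes copied into "to"; the caller keeps
 * invoking this until rem (the remaining packet bytes) reaches zero.
 */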
199 static u32 xsk_copy_xdp(void *to, void **from, u32 to_len,
200 u32 *from_len, skb_frag_t **frag, u32 rem)
201 {
202 u32 copied = 0;
203
204 while (1) {
205 u32 copy_len = min_t(u32, *from_len, to_len);
206
207 memcpy(to, *from, copy_len);
208 copied += copy_len;
209 if (rem == copied)
210 return copied;
211
212 if (*from_len == copy_len) {
213 *from = skb_frag_address(*frag);
214 *from_len = skb_frag_size((*frag)++);
215 } else {
216 *from += copy_len;
217 *from_len -= copy_len;
218 }
219 if (to_len == copy_len)
220 return copied;
221
222 to_len -= copy_len;
223 to += copy_len;
224 }
225 }
226
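/* Copy-mode receive. A packet that fits in a single Rx frame is copied into
 * one buffer allocated from the pool (including any metadata in front of
 * xdp->data). Larger or multi-buffer packets are spread over num_desc
 * buffers that are chained together with the XDP_PKT_CONTD option.
 */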
227 static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
228 {
229 u32 frame_size = xsk_pool_get_rx_frame_size(xs->pool);
230 void *copy_from = xsk_copy_xdp_start(xdp), *copy_to;
231 u32 from_len, meta_len, rem, num_desc;
232 struct xdp_buff_xsk *xskb;
233 struct xdp_buff *xsk_xdp;
234 skb_frag_t *frag;
235
236 from_len = xdp->data_end - copy_from;
237 meta_len = xdp->data - copy_from;
238 rem = len + meta_len;
239
240 if (len <= frame_size && !xdp_buff_has_frags(xdp)) {
241 int err;
242
243 xsk_xdp = xsk_buff_alloc(xs->pool);
244 if (!xsk_xdp) {
245 xs->rx_dropped++;
246 return -ENOMEM;
247 }
248 memcpy(xsk_xdp->data - meta_len, copy_from, rem);
249 xskb = container_of(xsk_xdp, struct xdp_buff_xsk, xdp);
250 err = __xsk_rcv_zc(xs, xskb, len, 0);
251 if (err) {
252 xsk_buff_free(xsk_xdp);
253 return err;
254 }
255
256 return 0;
257 }
258
259 num_desc = (len - 1) / frame_size + 1;
260
261 if (!xsk_buff_can_alloc(xs->pool, num_desc)) {
262 xs->rx_dropped++;
263 return -ENOMEM;
264 }
265 if (xskq_prod_nb_free(xs->rx, num_desc) < num_desc) {
266 xs->rx_queue_full++;
267 return -ENOBUFS;
268 }
269
270 if (xdp_buff_has_frags(xdp)) {
271 struct skb_shared_info *sinfo;
272
273 sinfo = xdp_get_shared_info_from_buff(xdp);
274 frag = &sinfo->frags[0];
275 }
276
277 do {
278 u32 to_len = frame_size + meta_len;
279 u32 copied;
280
281 xsk_xdp = xsk_buff_alloc(xs->pool);
282 copy_to = xsk_xdp->data - meta_len;
283
284 copied = xsk_copy_xdp(copy_to, &copy_from, to_len, &from_len, &frag, rem);
285 rem -= copied;
286
287 xskb = container_of(xsk_xdp, struct xdp_buff_xsk, xdp);
288 __xsk_rcv_zc(xs, xskb, copied - meta_len, rem ? XDP_PKT_CONTD : 0);
289 meta_len = 0;
290 } while (rem);
291
292 return 0;
293 }
294
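/* Report the Tx ring as writeable only while no more than half of its
 * entries are in use, so user space is not woken up for every single
 * completed descriptor.
 */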
295 static bool xsk_tx_writeable(struct xdp_sock *xs)
296 {
297 if (xskq_cons_present_entries(xs->tx) > xs->tx->nentries / 2)
298 return false;
299
300 return true;
301 }
302
303 static bool xsk_is_bound(struct xdp_sock *xs)
304 {
305 if (READ_ONCE(xs->state) == XSK_BOUND) {
306 /* Matches smp_wmb() in bind(). */
307 smp_rmb();
308 return true;
309 }
310 return false;
311 }
312
313 static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
314 {
315 if (!xsk_is_bound(xs))
316 return -ENXIO;
317
318 if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
319 return -EINVAL;
320
321 if (len > xsk_pool_get_rx_frame_size(xs->pool) && !xs->sg) {
322 xs->rx_dropped++;
323 return -ENOSPC;
324 }
325
326 return 0;
327 }
328
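/* Publish all reserved Rx descriptors to user space, release the fill ring
 * entries that were consumed, and wake up any reader blocked on the socket.
 */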
329 static void xsk_flush(struct xdp_sock *xs)
330 {
331 xskq_prod_submit(xs->rx);
332 __xskq_cons_release(xs->pool->fq);
333 sock_def_readable(&xs->sk);
334 }
335
336 int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
337 {
338 u32 len = xdp_get_buff_len(xdp);
339 int err;
340
341 spin_lock_bh(&xs->rx_lock);
342 err = xsk_rcv_check(xs, xdp, len);
343 if (!err) {
344 err = __xsk_rcv(xs, xdp, len);
345 xsk_flush(xs);
346 }
347 spin_unlock_bh(&xs->rx_lock);
348 return err;
349 }
350
351 static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
352 {
353 u32 len = xdp_get_buff_len(xdp);
354 int err;
355
356 err = xsk_rcv_check(xs, xdp, len);
357 if (err)
358 return err;
359
360 if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) {
361 len = xdp->data_end - xdp->data;
362 return xsk_rcv_zc(xs, xdp, len);
363 }
364
365 err = __xsk_rcv(xs, xdp, len);
366 if (!err)
367 xdp_return_buff(xdp);
368 return err;
369 }
370
371 int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
372 {
373 int err;
374
375 err = xsk_rcv(xs, xdp);
376 if (err)
377 return err;
378
379 if (!xs->flush_node.prev) {
380 struct list_head *flush_list = bpf_net_ctx_get_xskmap_flush_list();
381
382 list_add(&xs->flush_node, flush_list);
383 }
384
385 return 0;
386 }
387
388 void __xsk_map_flush(struct list_head *flush_list)
389 {
390 struct xdp_sock *xs, *tmp;
391
392 list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
393 xsk_flush(xs);
394 __list_del_clearprev(&xs->flush_node);
395 }
396 }
397
398 void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
399 {
400 xskq_prod_submit_n(pool->cq, nb_entries);
401 }
402 EXPORT_SYMBOL(xsk_tx_completed);
403
404 void xsk_tx_release(struct xsk_buff_pool *pool)
405 {
406 struct xdp_sock *xs;
407
408 rcu_read_lock();
409 list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
410 __xskq_cons_release(xs->tx);
411 if (xsk_tx_writeable(xs))
412 xs->sk.sk_write_space(&xs->sk);
413 }
414 rcu_read_unlock();
415 }
416 EXPORT_SYMBOL(xsk_tx_release);
417
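/* Round-robin over all sockets sharing the pool and hand out one Tx
 * descriptor at a time. Each socket may consume at most
 * MAX_PER_SOCKET_BUDGET descriptors per scan; if the whole list is walked
 * without finding a descriptor while some socket was budget-limited, the
 * budgets are reset and the scan restarts.
 */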
418 bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc)
419 {
420 bool budget_exhausted = false;
421 struct xdp_sock *xs;
422
423 rcu_read_lock();
424 again:
425 list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
426 if (xs->tx_budget_spent >= MAX_PER_SOCKET_BUDGET) {
427 budget_exhausted = true;
428 continue;
429 }
430
431 if (!xskq_cons_peek_desc(xs->tx, desc, pool)) {
432 if (xskq_has_descs(xs->tx))
433 xskq_cons_release(xs->tx);
434 continue;
435 }
436
437 xs->tx_budget_spent++;
438
439 /* This is the backpressure mechanism for the Tx path.
440 * Reserve space in the completion queue and only proceed
441 * if there is space in it. This avoids having to implement
442 * any buffering in the Tx path.
443 */
444 if (xskq_prod_reserve_addr(pool->cq, desc->addr))
445 goto out;
446
447 xskq_cons_release(xs->tx);
448 rcu_read_unlock();
449 return true;
450 }
451
452 if (budget_exhausted) {
453 list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list)
454 xs->tx_budget_spent = 0;
455
456 budget_exhausted = false;
457 goto again;
458 }
459
460 out:
461 rcu_read_unlock();
462 return false;
463 }
464 EXPORT_SYMBOL(xsk_tx_peek_desc);
465
466 static u32 xsk_tx_peek_release_fallback(struct xsk_buff_pool *pool, u32 max_entries)
467 {
468 struct xdp_desc *descs = pool->tx_descs;
469 u32 nb_pkts = 0;
470
471 while (nb_pkts < max_entries && xsk_tx_peek_desc(pool, &descs[nb_pkts]))
472 nb_pkts++;
473
474 xsk_tx_release(pool);
475 return nb_pkts;
476 }
477
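/* Batched Tx peek/release. The fast path only applies when a single socket
 * is bound to the pool; with multiple shared-umem sockets it falls back to
 * the one-descriptor-at-a-time xsk_tx_peek_desc() loop above.
 */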
478 u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 nb_pkts)
479 {
480 struct xdp_sock *xs;
481
482 rcu_read_lock();
483 if (!list_is_singular(&pool->xsk_tx_list)) {
484 /* Fallback to the non-batched version */
485 rcu_read_unlock();
486 return xsk_tx_peek_release_fallback(pool, nb_pkts);
487 }
488
489 xs = list_first_or_null_rcu(&pool->xsk_tx_list, struct xdp_sock, tx_list);
490 if (!xs) {
491 nb_pkts = 0;
492 goto out;
493 }
494
495 nb_pkts = xskq_cons_nb_entries(xs->tx, nb_pkts);
496
497 /* This is the backpressure mechanism for the Tx path. Try to
498 * reserve space in the completion queue for all packets, but
499 * if there are fewer slots available, just process that many
500 * packets. This avoids having to implement any buffering in
501 * the Tx path.
502 */
503 nb_pkts = xskq_prod_nb_free(pool->cq, nb_pkts);
504 if (!nb_pkts)
505 goto out;
506
507 nb_pkts = xskq_cons_read_desc_batch(xs->tx, pool, nb_pkts);
508 if (!nb_pkts) {
509 xs->tx->queue_empty_descs++;
510 goto out;
511 }
512
513 __xskq_cons_release(xs->tx);
514 xskq_prod_write_addr_batch(pool->cq, pool->tx_descs, nb_pkts);
515 xs->sk.sk_write_space(&xs->sk);
516
517 out:
518 rcu_read_unlock();
519 return nb_pkts;
520 }
521 EXPORT_SYMBOL(xsk_tx_peek_release_desc_batch);
522
523 static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
524 {
525 struct net_device *dev = xs->dev;
526
527 return dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
528 }
529
530 static int xsk_cq_reserve_addr_locked(struct xsk_buff_pool *pool, u64 addr)
531 {
532 unsigned long flags;
533 int ret;
534
535 spin_lock_irqsave(&pool->cq_lock, flags);
536 ret = xskq_prod_reserve_addr(pool->cq, addr);
537 spin_unlock_irqrestore(&pool->cq_lock, flags);
538
539 return ret;
540 }
541
542 static void xsk_cq_submit_locked(struct xsk_buff_pool *pool, u32 n)
543 {
544 unsigned long flags;
545
546 spin_lock_irqsave(&pool->cq_lock, flags);
547 xskq_prod_submit_n(pool->cq, n);
548 spin_unlock_irqrestore(&pool->cq_lock, flags);
549 }
550
551 static void xsk_cq_cancel_locked(struct xsk_buff_pool *pool, u32 n)
552 {
553 unsigned long flags;
554
555 spin_lock_irqsave(&pool->cq_lock, flags);
556 xskq_prod_cancel_n(pool->cq, n);
557 spin_unlock_irqrestore(&pool->cq_lock, flags);
558 }
559
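/* The number of Tx descriptors backing an skb is stashed in
 * skb_shinfo(skb)->destructor_arg so that the right number of completion
 * queue entries can be submitted (or cancelled) when the skb is freed.
 */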
560 static u32 xsk_get_num_desc(struct sk_buff *skb)
561 {
562 return skb ? (long)skb_shinfo(skb)->destructor_arg : 0;
563 }
564
565 static void xsk_destruct_skb(struct sk_buff *skb)
566 {
567 struct xsk_tx_metadata_compl *compl = &skb_shinfo(skb)->xsk_meta;
568
569 if (compl->tx_timestamp) {
570 /* sw completion timestamp, not a real one */
571 *compl->tx_timestamp = ktime_get_tai_fast_ns();
572 }
573
574 xsk_cq_submit_locked(xdp_sk(skb->sk)->pool, xsk_get_num_desc(skb));
575 sock_wfree(skb);
576 }
577
578 static void xsk_set_destructor_arg(struct sk_buff *skb)
579 {
580 long num = xsk_get_num_desc(xdp_sk(skb->sk)->skb) + 1;
581
582 skb_shinfo(skb)->destructor_arg = (void *)num;
583 }
584
585 static void xsk_consume_skb(struct sk_buff *skb)
586 {
587 struct xdp_sock *xs = xdp_sk(skb->sk);
588
589 skb->destructor = sock_wfree;
590 xsk_cq_cancel_locked(xs->pool, xsk_get_num_desc(skb));
591 /* Free skb without triggering the perf drop trace */
592 consume_skb(skb);
593 xs->skb = NULL;
594 }
595
596 static void xsk_drop_skb(struct sk_buff *skb)
597 {
598 xdp_sk(skb->sk)->tx->invalid_descs += xsk_get_num_desc(skb);
599 xsk_consume_skb(skb);
600 }
601
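/* Build an skb for devices with IFF_TX_SKB_NO_LINEAR: the umem pages backing
 * the descriptor are attached as page fragments instead of being copied into
 * a linear buffer.
 */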
602 static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs,
603 struct xdp_desc *desc)
604 {
605 struct xsk_buff_pool *pool = xs->pool;
606 u32 hr, len, ts, offset, copy, copied;
607 struct sk_buff *skb = xs->skb;
608 struct page *page;
609 void *buffer;
610 int err, i;
611 u64 addr;
612
613 if (!skb) {
614 hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(xs->dev->needed_headroom));
615
616 skb = sock_alloc_send_skb(&xs->sk, hr, 1, &err);
617 if (unlikely(!skb))
618 return ERR_PTR(err);
619
620 skb_reserve(skb, hr);
621 }
622
623 addr = desc->addr;
624 len = desc->len;
625 ts = pool->unaligned ? len : pool->chunk_size;
626
627 buffer = xsk_buff_raw_get_data(pool, addr);
628 offset = offset_in_page(buffer);
629 addr = buffer - pool->addrs;
630
631 for (copied = 0, i = skb_shinfo(skb)->nr_frags; copied < len; i++) {
632 if (unlikely(i >= MAX_SKB_FRAGS))
633 return ERR_PTR(-EOVERFLOW);
634
635 page = pool->umem->pgs[addr >> PAGE_SHIFT];
636 get_page(page);
637
638 copy = min_t(u32, PAGE_SIZE - offset, len - copied);
639 skb_fill_page_desc(skb, i, page, offset, copy);
640
641 copied += copy;
642 addr += copy;
643 offset = 0;
644 }
645
646 skb->len += len;
647 skb->data_len += len;
648 skb->truesize += ts;
649
650 refcount_add(ts, &xs->sk.sk_wmem_alloc);
651
652 return skb;
653 }
654
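/* Build an skb for one Tx descriptor in copy mode. The first descriptor of a
 * packet is copied into the linear area (and may carry XDP_TX_METADATA such
 * as a checksum offload request or a launch time); subsequent descriptors of
 * a multi-buffer packet are copied into freshly allocated pages and added as
 * fragments.
 */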
655 static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
656 struct xdp_desc *desc)
657 {
658 struct xsk_tx_metadata *meta = NULL;
659 struct net_device *dev = xs->dev;
660 struct sk_buff *skb = xs->skb;
661 bool first_frag = false;
662 int err;
663
664 if (dev->priv_flags & IFF_TX_SKB_NO_LINEAR) {
665 skb = xsk_build_skb_zerocopy(xs, desc);
666 if (IS_ERR(skb)) {
667 err = PTR_ERR(skb);
668 goto free_err;
669 }
670 } else {
671 u32 hr, tr, len;
672 void *buffer;
673
674 buffer = xsk_buff_raw_get_data(xs->pool, desc->addr);
675 len = desc->len;
676
677 if (!skb) {
678 first_frag = true;
679
680 hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(dev->needed_headroom));
681 tr = dev->needed_tailroom;
682 skb = sock_alloc_send_skb(&xs->sk, hr + len + tr, 1, &err);
683 if (unlikely(!skb))
684 goto free_err;
685
686 skb_reserve(skb, hr);
687 skb_put(skb, len);
688
689 err = skb_store_bits(skb, 0, buffer, len);
690 if (unlikely(err))
691 goto free_err;
692 } else {
693 int nr_frags = skb_shinfo(skb)->nr_frags;
694 struct page *page;
695 u8 *vaddr;
696
697 if (unlikely(nr_frags == (MAX_SKB_FRAGS - 1) && xp_mb_desc(desc))) {
698 err = -EOVERFLOW;
699 goto free_err;
700 }
701
702 page = alloc_page(xs->sk.sk_allocation);
703 if (unlikely(!page)) {
704 err = -EAGAIN;
705 goto free_err;
706 }
707
708 vaddr = kmap_local_page(page);
709 memcpy(vaddr, buffer, len);
710 kunmap_local(vaddr);
711
712 skb_add_rx_frag(skb, nr_frags, page, 0, len, PAGE_SIZE);
713 refcount_add(PAGE_SIZE, &xs->sk.sk_wmem_alloc);
714 }
715
716 if (first_frag && desc->options & XDP_TX_METADATA) {
717 if (unlikely(xs->pool->tx_metadata_len == 0)) {
718 err = -EINVAL;
719 goto free_err;
720 }
721
722 meta = buffer - xs->pool->tx_metadata_len;
723 if (unlikely(!xsk_buff_valid_tx_metadata(meta))) {
724 err = -EINVAL;
725 goto free_err;
726 }
727
728 if (meta->flags & XDP_TXMD_FLAGS_CHECKSUM) {
729 if (unlikely(meta->request.csum_start +
730 meta->request.csum_offset +
731 sizeof(__sum16) > len)) {
732 err = -EINVAL;
733 goto free_err;
734 }
735
736 skb->csum_start = hr + meta->request.csum_start;
737 skb->csum_offset = meta->request.csum_offset;
738 skb->ip_summed = CHECKSUM_PARTIAL;
739
740 if (unlikely(xs->pool->tx_sw_csum)) {
741 err = skb_checksum_help(skb);
742 if (err)
743 goto free_err;
744 }
745 }
746
747 if (meta->flags & XDP_TXMD_FLAGS_LAUNCH_TIME)
748 skb->skb_mstamp_ns = meta->request.launch_time;
749 }
750 }
751
752 skb->dev = dev;
753 skb->priority = READ_ONCE(xs->sk.sk_priority);
754 skb->mark = READ_ONCE(xs->sk.sk_mark);
755 skb->destructor = xsk_destruct_skb;
756 xsk_tx_metadata_to_compl(meta, &skb_shinfo(skb)->xsk_meta);
757 xsk_set_destructor_arg(skb);
758
759 return skb;
760
761 free_err:
762 if (first_frag && skb)
763 kfree_skb(skb);
764
765 if (err == -EOVERFLOW) {
766 /* Drop the packet */
767 xsk_set_destructor_arg(xs->skb);
768 xsk_drop_skb(xs->skb);
769 xskq_cons_release(xs->tx);
770 } else {
771 /* Let application retry */
772 xsk_cq_cancel_locked(xs->pool, 1);
773 }
774
775 return ERR_PTR(err);
776 }
777
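/* Copy-mode transmit path, driven from sendmsg() and poll(). Up to
 * TX_BATCH_SIZE descriptors are turned into skbs and handed straight to the
 * driver via __dev_direct_xmit(); completion queue space is reserved up
 * front as the backpressure mechanism.
 */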
778 static int __xsk_generic_xmit(struct sock *sk)
779 {
780 struct xdp_sock *xs = xdp_sk(sk);
781 u32 max_batch = TX_BATCH_SIZE;
782 bool sent_frame = false;
783 struct xdp_desc desc;
784 struct sk_buff *skb;
785 int err = 0;
786
787 mutex_lock(&xs->mutex);
788
789 /* Since we dropped the RCU read lock, the socket state might have changed. */
790 if (unlikely(!xsk_is_bound(xs))) {
791 err = -ENXIO;
792 goto out;
793 }
794
795 if (xs->queue_id >= xs->dev->real_num_tx_queues)
796 goto out;
797
798 while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
799 if (max_batch-- == 0) {
800 err = -EAGAIN;
801 goto out;
802 }
803
804 /* This is the backpressure mechanism for the Tx path.
805 * Reserve space in the completion queue and only proceed
806 * if there is space in it. This avoids having to implement
807 * any buffering in the Tx path.
808 */
809 if (xsk_cq_reserve_addr_locked(xs->pool, desc.addr))
810 goto out;
811
812 skb = xsk_build_skb(xs, &desc);
813 if (IS_ERR(skb)) {
814 err = PTR_ERR(skb);
815 if (err != -EOVERFLOW)
816 goto out;
817 err = 0;
818 continue;
819 }
820
821 xskq_cons_release(xs->tx);
822
823 if (xp_mb_desc(&desc)) {
824 xs->skb = skb;
825 continue;
826 }
827
828 err = __dev_direct_xmit(skb, xs->queue_id);
829 if (err == NETDEV_TX_BUSY) {
830 /* Tell user-space to retry the send */
831 xskq_cons_cancel_n(xs->tx, xsk_get_num_desc(skb));
832 xsk_consume_skb(skb);
833 err = -EAGAIN;
834 goto out;
835 }
836
837 /* Ignore NET_XMIT_CN as packet might have been sent */
838 if (err == NET_XMIT_DROP) {
839 /* SKB completed but not sent */
840 err = -EBUSY;
841 xs->skb = NULL;
842 goto out;
843 }
844
845 sent_frame = true;
846 xs->skb = NULL;
847 }
848
849 if (xskq_has_descs(xs->tx)) {
850 if (xs->skb)
851 xsk_drop_skb(xs->skb);
852 xskq_cons_release(xs->tx);
853 }
854
855 out:
856 if (sent_frame)
857 if (xsk_tx_writeable(xs))
858 sk->sk_write_space(sk);
859
860 mutex_unlock(&xs->mutex);
861 return err;
862 }
863
864 static int xsk_generic_xmit(struct sock *sk)
865 {
866 int ret;
867
868 /* Drop the RCU lock since the SKB path might sleep. */
869 rcu_read_unlock();
870 ret = __xsk_generic_xmit(sk);
871 /* Reacquire the RCU lock before going into common code. */
872 rcu_read_lock();
873
874 return ret;
875 }
876
877 static bool xsk_no_wakeup(struct sock *sk)
878 {
879 #ifdef CONFIG_NET_RX_BUSY_POLL
880 /* Prefer busy-polling, skip the wakeup. */
881 return READ_ONCE(sk->sk_prefer_busy_poll) && READ_ONCE(sk->sk_ll_usec) &&
882 napi_id_valid(READ_ONCE(sk->sk_napi_id));
883 #else
884 return false;
885 #endif
886 }
887
888 static int xsk_check_common(struct xdp_sock *xs)
889 {
890 if (unlikely(!xsk_is_bound(xs)))
891 return -ENXIO;
892 if (unlikely(!(xs->dev->flags & IFF_UP)))
893 return -ENETDOWN;
894
895 return 0;
896 }
897
898 static int __xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
899 {
900 bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
901 struct sock *sk = sock->sk;
902 struct xdp_sock *xs = xdp_sk(sk);
903 struct xsk_buff_pool *pool;
904 int err;
905
906 err = xsk_check_common(xs);
907 if (err)
908 return err;
909 if (unlikely(need_wait))
910 return -EOPNOTSUPP;
911 if (unlikely(!xs->tx))
912 return -ENOBUFS;
913
914 if (sk_can_busy_loop(sk))
915 sk_busy_loop(sk, 1); /* only support non-blocking sockets */
916
917 if (xs->zc && xsk_no_wakeup(sk))
918 return 0;
919
920 pool = xs->pool;
921 if (pool->cached_need_wakeup & XDP_WAKEUP_TX) {
922 if (xs->zc)
923 return xsk_wakeup(xs, XDP_WAKEUP_TX);
924 return xsk_generic_xmit(sk);
925 }
926 return 0;
927 }
928
929 static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
930 {
931 int ret;
932
933 rcu_read_lock();
934 ret = __xsk_sendmsg(sock, m, total_len);
935 rcu_read_unlock();
936
937 return ret;
938 }
939
940 static int __xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
941 {
942 bool need_wait = !(flags & MSG_DONTWAIT);
943 struct sock *sk = sock->sk;
944 struct xdp_sock *xs = xdp_sk(sk);
945 int err;
946
947 err = xsk_check_common(xs);
948 if (err)
949 return err;
950 if (unlikely(!xs->rx))
951 return -ENOBUFS;
952 if (unlikely(need_wait))
953 return -EOPNOTSUPP;
954
955 if (sk_can_busy_loop(sk))
956 sk_busy_loop(sk, 1); /* only support non-blocking sockets */
957
958 if (xsk_no_wakeup(sk))
959 return 0;
960
961 if (xs->pool->cached_need_wakeup & XDP_WAKEUP_RX && xs->zc)
962 return xsk_wakeup(xs, XDP_WAKEUP_RX);
963 return 0;
964 }
965
966 static int xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
967 {
968 int ret;
969
970 rcu_read_lock();
971 ret = __xsk_recvmsg(sock, m, len, flags);
972 rcu_read_unlock();
973
974 return ret;
975 }
976
977 static __poll_t xsk_poll(struct file *file, struct socket *sock,
978 struct poll_table_struct *wait)
979 {
980 __poll_t mask = 0;
981 struct sock *sk = sock->sk;
982 struct xdp_sock *xs = xdp_sk(sk);
983 struct xsk_buff_pool *pool;
984
985 sock_poll_wait(file, sock, wait);
986
987 rcu_read_lock();
988 if (xsk_check_common(xs))
989 goto out;
990
991 pool = xs->pool;
992
993 if (pool->cached_need_wakeup) {
994 if (xs->zc)
995 xsk_wakeup(xs, pool->cached_need_wakeup);
996 else if (xs->tx)
997 /* Poll needs to drive Tx also in copy mode */
998 xsk_generic_xmit(sk);
999 }
1000
1001 if (xs->rx && !xskq_prod_is_empty(xs->rx))
1002 mask |= EPOLLIN | EPOLLRDNORM;
1003 if (xs->tx && xsk_tx_writeable(xs))
1004 mask |= EPOLLOUT | EPOLLWRNORM;
1005 out:
1006 rcu_read_unlock();
1007 return mask;
1008 }
1009
1010 static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
1011 bool umem_queue)
1012 {
1013 struct xsk_queue *q;
1014
1015 if (entries == 0 || *queue || !is_power_of_2(entries))
1016 return -EINVAL;
1017
1018 q = xskq_create(entries, umem_queue);
1019 if (!q)
1020 return -ENOMEM;
1021
1022 /* Make sure queue is ready before it can be seen by others */
1023 smp_wmb();
1024 WRITE_ONCE(*queue, q);
1025 return 0;
1026 }
1027
1028 static void xsk_unbind_dev(struct xdp_sock *xs)
1029 {
1030 struct net_device *dev = xs->dev;
1031
1032 if (xs->state != XSK_BOUND)
1033 return;
1034 WRITE_ONCE(xs->state, XSK_UNBOUND);
1035
1036 /* Wait for driver to stop using the xdp socket. */
1037 xp_del_xsk(xs->pool, xs);
1038 synchronize_net();
1039 dev_put(dev);
1040 }
1041
1042 static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs,
1043 struct xdp_sock __rcu ***map_entry)
1044 {
1045 struct xsk_map *map = NULL;
1046 struct xsk_map_node *node;
1047
1048 *map_entry = NULL;
1049
1050 spin_lock_bh(&xs->map_list_lock);
1051 node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node,
1052 node);
1053 if (node) {
1054 bpf_map_inc(&node->map->map);
1055 map = node->map;
1056 *map_entry = node->map_entry;
1057 }
1058 spin_unlock_bh(&xs->map_list_lock);
1059 return map;
1060 }
1061
1062 static void xsk_delete_from_maps(struct xdp_sock *xs)
1063 {
1064 /* This function removes the current XDP socket from all the
1065 * maps it resides in. We need to take extra care here, due to
1066 * the two locks involved. Each map has a lock synchronizing
1067 * updates to the entries, and each socket has a lock that
1068 * synchronizes access to the list of maps (map_list). For
1069 * deadlock avoidance the locks need to be taken in the order
1070 * "map lock"->"socket map list lock". We start off by
1071 * accessing the socket map list, and take a reference to the
1072 * map to guarantee existence between the
1073 * xsk_get_map_list_entry() and xsk_map_try_sock_delete()
1074 * calls. Then we ask the map to remove the socket, which
1075 * tries to remove the socket from the map. Note that there
1076 * might be updates to the map between
1077 * xsk_get_map_list_entry() and xsk_map_try_sock_delete().
1078 */
1079 struct xdp_sock __rcu **map_entry = NULL;
1080 struct xsk_map *map;
1081
1082 while ((map = xsk_get_map_list_entry(xs, &map_entry))) {
1083 xsk_map_try_sock_delete(map, xs, map_entry);
1084 bpf_map_put(&map->map);
1085 }
1086 }
1087
1088 static int xsk_release(struct socket *sock)
1089 {
1090 struct sock *sk = sock->sk;
1091 struct xdp_sock *xs = xdp_sk(sk);
1092 struct net *net;
1093
1094 if (!sk)
1095 return 0;
1096
1097 net = sock_net(sk);
1098
1099 if (xs->skb)
1100 xsk_drop_skb(xs->skb);
1101
1102 mutex_lock(&net->xdp.lock);
1103 sk_del_node_init_rcu(sk);
1104 mutex_unlock(&net->xdp.lock);
1105
1106 sock_prot_inuse_add(net, sk->sk_prot, -1);
1107
1108 xsk_delete_from_maps(xs);
1109 mutex_lock(&xs->mutex);
1110 xsk_unbind_dev(xs);
1111 mutex_unlock(&xs->mutex);
1112
1113 xskq_destroy(xs->rx);
1114 xskq_destroy(xs->tx);
1115 xskq_destroy(xs->fq_tmp);
1116 xskq_destroy(xs->cq_tmp);
1117
1118 sock_orphan(sk);
1119 sock->sk = NULL;
1120
1121 sock_put(sk);
1122
1123 return 0;
1124 }
1125
1126 static struct socket *xsk_lookup_xsk_from_fd(int fd)
1127 {
1128 struct socket *sock;
1129 int err;
1130
1131 sock = sockfd_lookup(fd, &err);
1132 if (!sock)
1133 return ERR_PTR(-ENOTSOCK);
1134
1135 if (sock->sk->sk_family != PF_XDP) {
1136 sockfd_put(sock);
1137 return ERR_PTR(-ENOPROTOOPT);
1138 }
1139
1140 return sock;
1141 }
1142
1143 static bool xsk_validate_queues(struct xdp_sock *xs)
1144 {
1145 return xs->fq_tmp && xs->cq_tmp;
1146 }
1147
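/* Bind the socket to a device/queue pair, optionally sharing a umem or
 * buffer pool with another socket via XDP_SHARED_UMEM.
 *
 * For reference, a minimal user-space setup sketch that leads up to this
 * bind() call (illustrative values only; umem_area, umem_size and ifindex
 * are placeholders, error handling and the ring mmap()s described by
 * XDP_MMAP_OFFSETS are omitted):
 *
 *	int fd = socket(AF_XDP, SOCK_RAW, 0);
 *
 *	struct xdp_umem_reg mr = {
 *		.addr = (__u64)(uintptr_t)umem_area,	/. user-allocated, page-aligned ./
 *		.len = umem_size,
 *		.chunk_size = 2048,
 *		.headroom = 0,
 *	};
 *	int ring_sz = 2048;	/. ring sizes must be powers of two ./
 *
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING, &ring_sz, sizeof(ring_sz));
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_COMPLETION_RING, &ring_sz, sizeof(ring_sz));
 *	setsockopt(fd, SOL_XDP, XDP_RX_RING, &ring_sz, sizeof(ring_sz));
 *	setsockopt(fd, SOL_XDP, XDP_TX_RING, &ring_sz, sizeof(ring_sz));
 *
 *	struct sockaddr_xdp sxdp = {
 *		.sxdp_family = AF_XDP,
 *		.sxdp_ifindex = ifindex,	/. e.g. from if_nametoindex() ./
 *		.sxdp_queue_id = 0,
 *		.sxdp_flags = XDP_USE_NEED_WAKEUP,
 *	};
 *	bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
 */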
1148 static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
1149 {
1150 struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
1151 struct sock *sk = sock->sk;
1152 struct xdp_sock *xs = xdp_sk(sk);
1153 struct net_device *dev;
1154 int bound_dev_if;
1155 u32 flags, qid;
1156 int err = 0;
1157
1158 if (addr_len < sizeof(struct sockaddr_xdp))
1159 return -EINVAL;
1160 if (sxdp->sxdp_family != AF_XDP)
1161 return -EINVAL;
1162
1163 flags = sxdp->sxdp_flags;
1164 if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY |
1165 XDP_USE_NEED_WAKEUP | XDP_USE_SG))
1166 return -EINVAL;
1167
1168 bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
1169 if (bound_dev_if && bound_dev_if != sxdp->sxdp_ifindex)
1170 return -EINVAL;
1171
1172 rtnl_lock();
1173 mutex_lock(&xs->mutex);
1174 if (xs->state != XSK_READY) {
1175 err = -EBUSY;
1176 goto out_release;
1177 }
1178
1179 dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
1180 if (!dev) {
1181 err = -ENODEV;
1182 goto out_release;
1183 }
1184
1185 netdev_lock_ops(dev);
1186
1187 if (!xs->rx && !xs->tx) {
1188 err = -EINVAL;
1189 goto out_unlock;
1190 }
1191
1192 qid = sxdp->sxdp_queue_id;
1193
1194 if (flags & XDP_SHARED_UMEM) {
1195 struct xdp_sock *umem_xs;
1196 struct socket *sock;
1197
1198 if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY) ||
1199 (flags & XDP_USE_NEED_WAKEUP) || (flags & XDP_USE_SG)) {
1200 /* Cannot specify flags for shared sockets. */
1201 err = -EINVAL;
1202 goto out_unlock;
1203 }
1204
1205 if (xs->umem) {
1206 /* We already have our own. */
1207 err = -EINVAL;
1208 goto out_unlock;
1209 }
1210
1211 sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
1212 if (IS_ERR(sock)) {
1213 err = PTR_ERR(sock);
1214 goto out_unlock;
1215 }
1216
1217 umem_xs = xdp_sk(sock->sk);
1218 if (!xsk_is_bound(umem_xs)) {
1219 err = -EBADF;
1220 sockfd_put(sock);
1221 goto out_unlock;
1222 }
1223
1224 if (umem_xs->queue_id != qid || umem_xs->dev != dev) {
1225 /* Share the umem with another socket on another qid
1226 * and/or device.
1227 */
1228 xs->pool = xp_create_and_assign_umem(xs,
1229 umem_xs->umem);
1230 if (!xs->pool) {
1231 err = -ENOMEM;
1232 sockfd_put(sock);
1233 goto out_unlock;
1234 }
1235
1236 err = xp_assign_dev_shared(xs->pool, umem_xs, dev,
1237 qid);
1238 if (err) {
1239 xp_destroy(xs->pool);
1240 xs->pool = NULL;
1241 sockfd_put(sock);
1242 goto out_unlock;
1243 }
1244 } else {
1245 /* Share the buffer pool with the other socket. */
1246 if (xs->fq_tmp || xs->cq_tmp) {
1247 /* Do not allow setting your own fq or cq. */
1248 err = -EINVAL;
1249 sockfd_put(sock);
1250 goto out_unlock;
1251 }
1252
1253 xp_get_pool(umem_xs->pool);
1254 xs->pool = umem_xs->pool;
1255
1256 /* If underlying shared umem was created without Tx
1257 * ring, allocate Tx descs array that Tx batching API
1258 * utilizes
1259 */
1260 if (xs->tx && !xs->pool->tx_descs) {
1261 err = xp_alloc_tx_descs(xs->pool, xs);
1262 if (err) {
1263 xp_put_pool(xs->pool);
1264 xs->pool = NULL;
1265 sockfd_put(sock);
1266 goto out_unlock;
1267 }
1268 }
1269 }
1270
1271 xdp_get_umem(umem_xs->umem);
1272 WRITE_ONCE(xs->umem, umem_xs->umem);
1273 sockfd_put(sock);
1274 } else if (!xs->umem || !xsk_validate_queues(xs)) {
1275 err = -EINVAL;
1276 goto out_unlock;
1277 } else {
1278 /* This xsk has its own umem. */
1279 xs->pool = xp_create_and_assign_umem(xs, xs->umem);
1280 if (!xs->pool) {
1281 err = -ENOMEM;
1282 goto out_unlock;
1283 }
1284
1285 err = xp_assign_dev(xs->pool, dev, qid, flags);
1286 if (err) {
1287 xp_destroy(xs->pool);
1288 xs->pool = NULL;
1289 goto out_unlock;
1290 }
1291 }
1292
1293 /* FQ and CQ are now owned by the buffer pool and cleaned up with it. */
1294 xs->fq_tmp = NULL;
1295 xs->cq_tmp = NULL;
1296
1297 xs->dev = dev;
1298 xs->zc = xs->umem->zc;
1299 xs->sg = !!(xs->umem->flags & XDP_UMEM_SG_FLAG);
1300 xs->queue_id = qid;
1301 xp_add_xsk(xs->pool, xs);
1302
1303 if (xs->zc && qid < dev->real_num_rx_queues) {
1304 struct netdev_rx_queue *rxq;
1305
1306 rxq = __netif_get_rx_queue(dev, qid);
1307 if (rxq->napi)
1308 __sk_mark_napi_id_once(sk, rxq->napi->napi_id);
1309 }
1310
1311 out_unlock:
1312 if (err) {
1313 dev_put(dev);
1314 } else {
1315 /* Matches smp_rmb() in bind() for shared umem
1316 * sockets, and xsk_is_bound().
1317 */
1318 smp_wmb();
1319 WRITE_ONCE(xs->state, XSK_BOUND);
1320 }
1321 netdev_unlock_ops(dev);
1322 out_release:
1323 mutex_unlock(&xs->mutex);
1324 rtnl_unlock();
1325 return err;
1326 }
1327
1328 struct xdp_umem_reg_v1 {
1329 __u64 addr; /* Start of packet data area */
1330 __u64 len; /* Length of packet data area */
1331 __u32 chunk_size;
1332 __u32 headroom;
1333 };
1334
1335 static int xsk_setsockopt(struct socket *sock, int level, int optname,
1336 sockptr_t optval, unsigned int optlen)
1337 {
1338 struct sock *sk = sock->sk;
1339 struct xdp_sock *xs = xdp_sk(sk);
1340 int err;
1341
1342 if (level != SOL_XDP)
1343 return -ENOPROTOOPT;
1344
1345 switch (optname) {
1346 case XDP_RX_RING:
1347 case XDP_TX_RING:
1348 {
1349 struct xsk_queue **q;
1350 int entries;
1351
1352 if (optlen < sizeof(entries))
1353 return -EINVAL;
1354 if (copy_from_sockptr(&entries, optval, sizeof(entries)))
1355 return -EFAULT;
1356
1357 mutex_lock(&xs->mutex);
1358 if (xs->state != XSK_READY) {
1359 mutex_unlock(&xs->mutex);
1360 return -EBUSY;
1361 }
1362 q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
1363 err = xsk_init_queue(entries, q, false);
1364 if (!err && optname == XDP_TX_RING)
1365 /* Tx needs to be explicitly woken up the first time */
1366 xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
1367 mutex_unlock(&xs->mutex);
1368 return err;
1369 }
1370 case XDP_UMEM_REG:
1371 {
1372 size_t mr_size = sizeof(struct xdp_umem_reg);
1373 struct xdp_umem_reg mr = {};
1374 struct xdp_umem *umem;
1375
1376 if (optlen < sizeof(struct xdp_umem_reg_v1))
1377 return -EINVAL;
1378 else if (optlen < sizeof(mr))
1379 mr_size = sizeof(struct xdp_umem_reg_v1);
1380
1381 BUILD_BUG_ON(sizeof(struct xdp_umem_reg_v1) >= sizeof(struct xdp_umem_reg));
1382
1383 /* Make sure the last field of the struct doesn't have
1384 * uninitialized padding. All padding has to be explicit
1385 * and has to be set to zero by the userspace to make
1386 * struct xdp_umem_reg extensible in the future.
1387 */
1388 BUILD_BUG_ON(offsetof(struct xdp_umem_reg, tx_metadata_len) +
1389 sizeof_field(struct xdp_umem_reg, tx_metadata_len) !=
1390 sizeof(struct xdp_umem_reg));
1391
1392 if (copy_from_sockptr(&mr, optval, mr_size))
1393 return -EFAULT;
1394
1395 mutex_lock(&xs->mutex);
1396 if (xs->state != XSK_READY || xs->umem) {
1397 mutex_unlock(&xs->mutex);
1398 return -EBUSY;
1399 }
1400
1401 umem = xdp_umem_create(&mr);
1402 if (IS_ERR(umem)) {
1403 mutex_unlock(&xs->mutex);
1404 return PTR_ERR(umem);
1405 }
1406
1407 /* Make sure umem is ready before it can be seen by others */
1408 smp_wmb();
1409 WRITE_ONCE(xs->umem, umem);
1410 mutex_unlock(&xs->mutex);
1411 return 0;
1412 }
1413 case XDP_UMEM_FILL_RING:
1414 case XDP_UMEM_COMPLETION_RING:
1415 {
1416 struct xsk_queue **q;
1417 int entries;
1418
1419 if (optlen < sizeof(entries))
1420 return -EINVAL;
1421 if (copy_from_sockptr(&entries, optval, sizeof(entries)))
1422 return -EFAULT;
1423
1424 mutex_lock(&xs->mutex);
1425 if (xs->state != XSK_READY) {
1426 mutex_unlock(&xs->mutex);
1427 return -EBUSY;
1428 }
1429
1430 q = (optname == XDP_UMEM_FILL_RING) ? &xs->fq_tmp :
1431 &xs->cq_tmp;
1432 err = xsk_init_queue(entries, q, true);
1433 mutex_unlock(&xs->mutex);
1434 return err;
1435 }
1436 default:
1437 break;
1438 }
1439
1440 return -ENOPROTOOPT;
1441 }
1442
1443 static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring)
1444 {
1445 ring->producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
1446 ring->consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
1447 ring->desc = offsetof(struct xdp_rxtx_ring, desc);
1448 }
1449
1450 static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
1451 {
1452 ring->producer = offsetof(struct xdp_umem_ring, ptrs.producer);
1453 ring->consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
1454 ring->desc = offsetof(struct xdp_umem_ring, desc);
1455 }
1456
1457 struct xdp_statistics_v1 {
1458 __u64 rx_dropped;
1459 __u64 rx_invalid_descs;
1460 __u64 tx_invalid_descs;
1461 };
1462
1463 static int xsk_getsockopt(struct socket *sock, int level, int optname,
1464 char __user *optval, int __user *optlen)
1465 {
1466 struct sock *sk = sock->sk;
1467 struct xdp_sock *xs = xdp_sk(sk);
1468 int len;
1469
1470 if (level != SOL_XDP)
1471 return -ENOPROTOOPT;
1472
1473 if (get_user(len, optlen))
1474 return -EFAULT;
1475 if (len < 0)
1476 return -EINVAL;
1477
1478 switch (optname) {
1479 case XDP_STATISTICS:
1480 {
1481 struct xdp_statistics stats = {};
1482 bool extra_stats = true;
1483 size_t stats_size;
1484
1485 if (len < sizeof(struct xdp_statistics_v1)) {
1486 return -EINVAL;
1487 } else if (len < sizeof(stats)) {
1488 extra_stats = false;
1489 stats_size = sizeof(struct xdp_statistics_v1);
1490 } else {
1491 stats_size = sizeof(stats);
1492 }
1493
1494 mutex_lock(&xs->mutex);
1495 stats.rx_dropped = xs->rx_dropped;
1496 if (extra_stats) {
1497 stats.rx_ring_full = xs->rx_queue_full;
1498 stats.rx_fill_ring_empty_descs =
1499 xs->pool ? xskq_nb_queue_empty_descs(xs->pool->fq) : 0;
1500 stats.tx_ring_empty_descs = xskq_nb_queue_empty_descs(xs->tx);
1501 } else {
1502 stats.rx_dropped += xs->rx_queue_full;
1503 }
1504 stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
1505 stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
1506 mutex_unlock(&xs->mutex);
1507
1508 if (copy_to_user(optval, &stats, stats_size))
1509 return -EFAULT;
1510 if (put_user(stats_size, optlen))
1511 return -EFAULT;
1512
1513 return 0;
1514 }
1515 case XDP_MMAP_OFFSETS:
1516 {
1517 struct xdp_mmap_offsets off;
1518 struct xdp_mmap_offsets_v1 off_v1;
1519 bool flags_supported = true;
1520 void *to_copy;
1521
1522 if (len < sizeof(off_v1))
1523 return -EINVAL;
1524 else if (len < sizeof(off))
1525 flags_supported = false;
1526
1527 if (flags_supported) {
1528 /* xdp_ring_offset is identical to xdp_ring_offset_v1
1529 * except for the flags field added to the end.
1530 */
1531 xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
1532 &off.rx);
1533 xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
1534 &off.tx);
1535 xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
1536 &off.fr);
1537 xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
1538 &off.cr);
1539 off.rx.flags = offsetof(struct xdp_rxtx_ring,
1540 ptrs.flags);
1541 off.tx.flags = offsetof(struct xdp_rxtx_ring,
1542 ptrs.flags);
1543 off.fr.flags = offsetof(struct xdp_umem_ring,
1544 ptrs.flags);
1545 off.cr.flags = offsetof(struct xdp_umem_ring,
1546 ptrs.flags);
1547
1548 len = sizeof(off);
1549 to_copy = &off;
1550 } else {
1551 xsk_enter_rxtx_offsets(&off_v1.rx);
1552 xsk_enter_rxtx_offsets(&off_v1.tx);
1553 xsk_enter_umem_offsets(&off_v1.fr);
1554 xsk_enter_umem_offsets(&off_v1.cr);
1555
1556 len = sizeof(off_v1);
1557 to_copy = &off_v1;
1558 }
1559
1560 if (copy_to_user(optval, to_copy, len))
1561 return -EFAULT;
1562 if (put_user(len, optlen))
1563 return -EFAULT;
1564
1565 return 0;
1566 }
1567 case XDP_OPTIONS:
1568 {
1569 struct xdp_options opts = {};
1570
1571 if (len < sizeof(opts))
1572 return -EINVAL;
1573
1574 mutex_lock(&xs->mutex);
1575 if (xs->zc)
1576 opts.flags |= XDP_OPTIONS_ZEROCOPY;
1577 mutex_unlock(&xs->mutex);
1578
1579 len = sizeof(opts);
1580 if (copy_to_user(optval, &opts, len))
1581 return -EFAULT;
1582 if (put_user(len, optlen))
1583 return -EFAULT;
1584
1585 return 0;
1586 }
1587 default:
1588 break;
1589 }
1590
1591 return -EOPNOTSUPP;
1592 }
1593
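/* Map one of the four rings into user space. The ring is selected by the
 * mmap() page offset (XDP_PGOFF_RX_RING, XDP_PGOFF_TX_RING,
 * XDP_UMEM_PGOFF_FILL_RING or XDP_UMEM_PGOFF_COMPLETION_RING); the backing
 * memory is vmalloc'ed, hence remap_vmalloc_range().
 */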
1594 static int xsk_mmap(struct file *file, struct socket *sock,
1595 struct vm_area_struct *vma)
1596 {
1597 loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
1598 unsigned long size = vma->vm_end - vma->vm_start;
1599 struct xdp_sock *xs = xdp_sk(sock->sk);
1600 int state = READ_ONCE(xs->state);
1601 struct xsk_queue *q = NULL;
1602
1603 if (state != XSK_READY && state != XSK_BOUND)
1604 return -EBUSY;
1605
1606 if (offset == XDP_PGOFF_RX_RING) {
1607 q = READ_ONCE(xs->rx);
1608 } else if (offset == XDP_PGOFF_TX_RING) {
1609 q = READ_ONCE(xs->tx);
1610 } else {
1611 /* Matches the smp_wmb() in XDP_UMEM_REG */
1612 smp_rmb();
1613 if (offset == XDP_UMEM_PGOFF_FILL_RING)
1614 q = state == XSK_READY ? READ_ONCE(xs->fq_tmp) :
1615 READ_ONCE(xs->pool->fq);
1616 else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
1617 q = state == XSK_READY ? READ_ONCE(xs->cq_tmp) :
1618 READ_ONCE(xs->pool->cq);
1619 }
1620
1621 if (!q)
1622 return -EINVAL;
1623
1624 /* Matches the smp_wmb() in xsk_init_queue */
1625 smp_rmb();
1626 if (size > q->ring_vmalloc_size)
1627 return -EINVAL;
1628
1629 return remap_vmalloc_range(vma, q->ring, 0);
1630 }
1631
1632 static int xsk_notifier(struct notifier_block *this,
1633 unsigned long msg, void *ptr)
1634 {
1635 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1636 struct net *net = dev_net(dev);
1637 struct sock *sk;
1638
1639 switch (msg) {
1640 case NETDEV_UNREGISTER:
1641 mutex_lock(&net->xdp.lock);
1642 sk_for_each(sk, &net->xdp.list) {
1643 struct xdp_sock *xs = xdp_sk(sk);
1644
1645 mutex_lock(&xs->mutex);
1646 if (xs->dev == dev) {
1647 sk->sk_err = ENETDOWN;
1648 if (!sock_flag(sk, SOCK_DEAD))
1649 sk_error_report(sk);
1650
1651 xsk_unbind_dev(xs);
1652
1653 /* Clear device references. */
1654 xp_clear_dev(xs->pool);
1655 }
1656 mutex_unlock(&xs->mutex);
1657 }
1658 mutex_unlock(&net->xdp.lock);
1659 break;
1660 }
1661 return NOTIFY_DONE;
1662 }
1663
1664 static struct proto xsk_proto = {
1665 .name = "XDP",
1666 .owner = THIS_MODULE,
1667 .obj_size = sizeof(struct xdp_sock),
1668 };
1669
1670 static const struct proto_ops xsk_proto_ops = {
1671 .family = PF_XDP,
1672 .owner = THIS_MODULE,
1673 .release = xsk_release,
1674 .bind = xsk_bind,
1675 .connect = sock_no_connect,
1676 .socketpair = sock_no_socketpair,
1677 .accept = sock_no_accept,
1678 .getname = sock_no_getname,
1679 .poll = xsk_poll,
1680 .ioctl = sock_no_ioctl,
1681 .listen = sock_no_listen,
1682 .shutdown = sock_no_shutdown,
1683 .setsockopt = xsk_setsockopt,
1684 .getsockopt = xsk_getsockopt,
1685 .sendmsg = xsk_sendmsg,
1686 .recvmsg = xsk_recvmsg,
1687 .mmap = xsk_mmap,
1688 };
1689
1690 static void xsk_destruct(struct sock *sk)
1691 {
1692 struct xdp_sock *xs = xdp_sk(sk);
1693
1694 if (!sock_flag(sk, SOCK_DEAD))
1695 return;
1696
1697 if (!xp_put_pool(xs->pool))
1698 xdp_put_umem(xs->umem, !xs->pool);
1699 }
1700
1701 static int xsk_create(struct net *net, struct socket *sock, int protocol,
1702 int kern)
1703 {
1704 struct xdp_sock *xs;
1705 struct sock *sk;
1706
1707 if (!ns_capable(net->user_ns, CAP_NET_RAW))
1708 return -EPERM;
1709 if (sock->type != SOCK_RAW)
1710 return -ESOCKTNOSUPPORT;
1711
1712 if (protocol)
1713 return -EPROTONOSUPPORT;
1714
1715 sock->state = SS_UNCONNECTED;
1716
1717 sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
1718 if (!sk)
1719 return -ENOBUFS;
1720
1721 sock->ops = &xsk_proto_ops;
1722
1723 sock_init_data(sock, sk);
1724
1725 sk->sk_family = PF_XDP;
1726
1727 sk->sk_destruct = xsk_destruct;
1728
1729 sock_set_flag(sk, SOCK_RCU_FREE);
1730
1731 xs = xdp_sk(sk);
1732 xs->state = XSK_READY;
1733 mutex_init(&xs->mutex);
1734 spin_lock_init(&xs->rx_lock);
1735
1736 INIT_LIST_HEAD(&xs->map_list);
1737 spin_lock_init(&xs->map_list_lock);
1738
1739 mutex_lock(&net->xdp.lock);
1740 sk_add_node_rcu(sk, &net->xdp.list);
1741 mutex_unlock(&net->xdp.lock);
1742
1743 sock_prot_inuse_add(net, &xsk_proto, 1);
1744
1745 return 0;
1746 }
1747
1748 static const struct net_proto_family xsk_family_ops = {
1749 .family = PF_XDP,
1750 .create = xsk_create,
1751 .owner = THIS_MODULE,
1752 };
1753
1754 static struct notifier_block xsk_netdev_notifier = {
1755 .notifier_call = xsk_notifier,
1756 };
1757
1758 static int __net_init xsk_net_init(struct net *net)
1759 {
1760 mutex_init(&net->xdp.lock);
1761 INIT_HLIST_HEAD(&net->xdp.list);
1762 return 0;
1763 }
1764
1765 static void __net_exit xsk_net_exit(struct net *net)
1766 {
1767 WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
1768 }
1769
1770 static struct pernet_operations xsk_net_ops = {
1771 .init = xsk_net_init,
1772 .exit = xsk_net_exit,
1773 };
1774
1775 static int __init xsk_init(void)
1776 {
1777 int err;
1778
1779 err = proto_register(&xsk_proto, 0 /* no slab */);
1780 if (err)
1781 goto out;
1782
1783 err = sock_register(&xsk_family_ops);
1784 if (err)
1785 goto out_proto;
1786
1787 err = register_pernet_subsys(&xsk_net_ops);
1788 if (err)
1789 goto out_sk;
1790
1791 err = register_netdevice_notifier(&xsk_netdev_notifier);
1792 if (err)
1793 goto out_pernet;
1794
1795 return 0;
1796
1797 out_pernet:
1798 unregister_pernet_subsys(&xsk_net_ops);
1799 out_sk:
1800 sock_unregister(PF_XDP);
1801 out_proto:
1802 proto_unregister(&xsk_proto);
1803 out:
1804 return err;
1805 }
1806
1807 fs_initcall(xsk_init);
1808