// SPDX-License-Identifier: GPL-2.0
/* XDP sockets
 *
 * AF_XDP sockets allow a channel between XDP programs and userspace
 * applications.
 * Copyright(c) 2018 Intel Corporation.
 *
 * Author(s): Björn Töpel <bjorn.topel@intel.com>
 *	      Magnus Karlsson <magnus.karlsson@intel.com>
 */

#define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__

#include <linux/if_xdp.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <net/xdp_sock_drv.h>
#include <net/busy_poll.h>
#include <net/xdp.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"

#define TX_BATCH_SIZE 16

static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);

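/* The need_wakeup flags below let the driver tell user space whether it must
 * issue a syscall (poll/sendto/recvmsg) to get the fill or Tx rings
 * processed. The state is mirrored in pool->cached_need_wakeup so that the
 * mmap'ed ring flags are only written when the state actually changes.
 */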
void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
	if (pool->cached_need_wakeup & XDP_WAKEUP_RX)
		return;

	pool->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
	pool->cached_need_wakeup |= XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_set_rx_need_wakeup);

void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	pool->cached_need_wakeup |= XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_set_tx_need_wakeup);

void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{
	if (!(pool->cached_need_wakeup & XDP_WAKEUP_RX))
		return;

	pool->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	pool->cached_need_wakeup &= ~XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_clear_rx_need_wakeup);

void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	if (!(pool->cached_need_wakeup & XDP_WAKEUP_TX))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	pool->cached_need_wakeup &= ~XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_clear_tx_need_wakeup);

bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
{
	return pool->uses_need_wakeup;
}
EXPORT_SYMBOL(xsk_uses_need_wakeup);

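/* Per-queue buffer pool lookup used by drivers: a pool registered with
 * xsk_reg_pool_at_qid() below is reachable from both the Rx and the Tx
 * queue structs for the given queue id.
 */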
struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
					    u16 queue_id)
{
	if (queue_id < dev->real_num_rx_queues)
		return dev->_rx[queue_id].pool;
	if (queue_id < dev->real_num_tx_queues)
		return dev->_tx[queue_id].pool;

	return NULL;
}
EXPORT_SYMBOL(xsk_get_pool_from_qid);

void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
{
	if (queue_id < dev->real_num_rx_queues)
		dev->_rx[queue_id].pool = NULL;
	if (queue_id < dev->real_num_tx_queues)
		dev->_tx[queue_id].pool = NULL;
}

/* The buffer pool is stored both in the _rx struct and the _tx struct as we do
 * not know if the device has more tx queues than rx, or the opposite.
 * This might also change during run time.
 */
int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
			u16 queue_id)
{
	if (queue_id >= max_t(unsigned int,
			      dev->real_num_rx_queues,
			      dev->real_num_tx_queues))
		return -EINVAL;

	if (queue_id < dev->real_num_rx_queues)
		dev->_rx[queue_id].pool = pool;
	if (queue_id < dev->real_num_tx_queues)
		dev->_tx[queue_id].pool = pool;

	return 0;
}

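/* xp_release() returns a buffer's metadata (xdp_buff_xsk) to the pool's free
 * list. xp_get_handle() converts a buffer back into the u64 address expected
 * on the Rx ring; in unaligned mode the offset within the chunk is encoded
 * in the upper bits (XSK_UNALIGNED_BUF_OFFSET_SHIFT).
 */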
void xp_release(struct xdp_buff_xsk *xskb)
{
	xskb->pool->free_heads[xskb->pool->free_heads_cnt++] = xskb;
}

static u64 xp_get_handle(struct xdp_buff_xsk *xskb)
{
	u64 offset = xskb->xdp.data - xskb->xdp.data_hard_start;

	offset += xskb->pool->headroom;
	if (!xskb->pool->unaligned)
		return xskb->orig_addr + offset;
	return xskb->orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
}

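/* Zero-copy receive: publish the buffer's handle and length on the socket's
 * Rx ring. On success the xdp_buff_xsk goes back to the pool's free list via
 * xp_release(); if the ring is full, rx_queue_full is bumped and the caller
 * keeps ownership of the buffer.
 */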
static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
	u64 addr;
	int err;

	addr = xp_get_handle(xskb);
	err = xskq_prod_reserve_desc(xs->rx, addr, len);
	if (err) {
		xs->rx_queue_full++;
		return err;
	}

	xp_release(xskb);
	return 0;
}

static void xsk_copy_xdp(struct xdp_buff *to, struct xdp_buff *from, u32 len)
{
	void *from_buf, *to_buf;
	u32 metalen;

	if (unlikely(xdp_data_meta_unsupported(from))) {
		from_buf = from->data;
		to_buf = to->data;
		metalen = 0;
	} else {
		from_buf = from->data_meta;
		metalen = from->data - from->data_meta;
		to_buf = to->data - metalen;
	}

	memcpy(to_buf, from_buf, len + metalen);
}

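/* Copy-mode receive: allocate a buffer from the pool, copy the frame (and
 * any metadata in front of it) into it and place it on the Rx ring. Frames
 * larger than the pool's Rx frame size, or an exhausted pool, count as
 * drops.
 */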
static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len,
		     bool explicit_free)
{
	struct xdp_buff *xsk_xdp;
	int err;

	if (len > xsk_pool_get_rx_frame_size(xs->pool)) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	xsk_xdp = xsk_buff_alloc(xs->pool);
	if (!xsk_xdp) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	xsk_copy_xdp(xsk_xdp, xdp, len);
	err = __xsk_rcv_zc(xs, xsk_xdp, len);
	if (err) {
		xsk_buff_free(xsk_xdp);
		return err;
	}
	if (explicit_free)
		xdp_return_buff(xdp);
	return 0;
}

static bool xsk_is_bound(struct xdp_sock *xs)
{
	if (READ_ONCE(xs->state) == XSK_BOUND) {
		/* Matches smp_wmb() in bind(). */
		smp_rmb();
		return true;
	}
	return false;
}

static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp,
		   bool explicit_free)
{
	u32 len;

	if (!xsk_is_bound(xs))
		return -EINVAL;

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
		return -EINVAL;

	sk_mark_napi_id_once_xdp(&xs->sk, xdp);
	len = xdp->data_end - xdp->data;

	return xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL ?
		__xsk_rcv_zc(xs, xdp, len) :
		__xsk_rcv(xs, xdp, len, explicit_free);
}

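/* Make the descriptors reserved on the Rx ring visible to user space,
 * release consumed fill ring entries and wake up the socket.
 */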
static void xsk_flush(struct xdp_sock *xs)
{
	xskq_prod_submit(xs->rx);
	__xskq_cons_release(xs->pool->fq);
	sock_def_readable(&xs->sk);
}

int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	int err;

	spin_lock_bh(&xs->rx_lock);
	err = xsk_rcv(xs, xdp, false);
	xsk_flush(xs);
	spin_unlock_bh(&xs->rx_lock);
	return err;
}

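/* Called from the XDP_REDIRECT path. The frame is received on the socket and
 * the socket is queued on the per-CPU flush list, so that __xsk_map_flush()
 * can batch the Rx ring submit and the wakeup at the end of the NAPI poll.
 */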
int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	int err;

	err = xsk_rcv(xs, xdp, true);
	if (err)
		return err;

	if (!xs->flush_node.prev)
		list_add(&xs->flush_node, flush_list);

	return 0;
}

void __xsk_map_flush(void)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	struct xdp_sock *xs, *tmp;

	list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
		xsk_flush(xs);
		__list_del_clearprev(&xs->flush_node);
	}
}

void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{
	xskq_prod_submit_n(pool->cq, nb_entries);
}
EXPORT_SYMBOL(xsk_tx_completed);

void xsk_tx_release(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		__xskq_cons_release(xs->tx);
		xs->sk.sk_write_space(&xs->sk);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(xsk_tx_release);

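/* Used by drivers to fetch the next Tx descriptor from any socket sharing
 * this pool. A completion queue slot is reserved up front (see the comment
 * below), so the driver never has to buffer completions itself.
 */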
bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		if (!xskq_cons_peek_desc(xs->tx, desc, pool)) {
			xs->tx->queue_empty_descs++;
			continue;
		}

		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		if (xskq_prod_reserve_addr(pool->cq, desc->addr))
			goto out;

		xskq_cons_release(xs->tx);
		rcu_read_unlock();
		return true;
	}

out:
	rcu_read_unlock();
	return false;
}
EXPORT_SYMBOL(xsk_tx_peek_desc);

static u32 xsk_tx_peek_release_fallback(struct xsk_buff_pool *pool, struct xdp_desc *descs,
					u32 max_entries)
{
	u32 nb_pkts = 0;

	while (nb_pkts < max_entries && xsk_tx_peek_desc(pool, &descs[nb_pkts]))
		nb_pkts++;

	xsk_tx_release(pool);
	return nb_pkts;
}

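/* Batched variant of xsk_tx_peek_desc()/xsk_tx_release(). The batch path is
 * only taken when a single socket is attached to the pool; with shared umem
 * sockets it falls back to the one-descriptor-at-a-time version above.
 */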
u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, struct xdp_desc *descs,
				   u32 max_entries)
{
	struct xdp_sock *xs;
	u32 nb_pkts;

	rcu_read_lock();
	if (!list_is_singular(&pool->xsk_tx_list)) {
		/* Fall back to the non-batched version */
		rcu_read_unlock();
		return xsk_tx_peek_release_fallback(pool, descs, max_entries);
	}

	xs = list_first_or_null_rcu(&pool->xsk_tx_list, struct xdp_sock, tx_list);
	if (!xs) {
		nb_pkts = 0;
		goto out;
	}

	nb_pkts = xskq_cons_peek_desc_batch(xs->tx, descs, pool, max_entries);
	if (!nb_pkts) {
		xs->tx->queue_empty_descs++;
		goto out;
	}

	/* This is the backpressure mechanism for the Tx path. Try to
	 * reserve space in the completion queue for all packets, but
	 * if there are fewer slots available, just process that many
	 * packets. This avoids having to implement any buffering in
	 * the Tx path.
	 */
	nb_pkts = xskq_prod_reserve_addr_batch(pool->cq, descs, nb_pkts);
	if (!nb_pkts)
		goto out;

	xskq_cons_release_n(xs->tx, nb_pkts);
	__xskq_cons_release(xs->tx);
	xs->sk.sk_write_space(&xs->sk);

out:
	rcu_read_unlock();
	return nb_pkts;
}
EXPORT_SYMBOL(xsk_tx_peek_release_desc_batch);

static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
{
	struct net_device *dev = xs->dev;
	int err;

	rcu_read_lock();
	err = dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
	rcu_read_unlock();

	return err;
}

static int xsk_zc_xmit(struct xdp_sock *xs)
{
	return xsk_wakeup(xs, XDP_WAKEUP_TX);
}

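/* skb destructor for copy-mode Tx: when the skb has been consumed, the
 * descriptor address stashed in destructor_arg is posted to the completion
 * queue so user space can reuse the umem chunk.
 */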
static void xsk_destruct_skb(struct sk_buff *skb)
{
	u64 addr = (u64)(long)skb_shinfo(skb)->destructor_arg;
	struct xdp_sock *xs = xdp_sk(skb->sk);
	unsigned long flags;

	spin_lock_irqsave(&xs->tx_completion_lock, flags);
	xskq_prod_submit_addr(xs->pool->cq, addr);
	spin_unlock_irqrestore(&xs->tx_completion_lock, flags);

	sock_wfree(skb);
}

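/* Copy-mode (SKB) transmit path: up to TX_BATCH_SIZE descriptors are copied
 * into freshly allocated skbs and sent with __dev_direct_xmit() on the bound
 * queue. Completion queue space is reserved before each send as
 * backpressure.
 */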
static int xsk_generic_xmit(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);
	u32 max_batch = TX_BATCH_SIZE;
	bool sent_frame = false;
	struct xdp_desc desc;
	struct sk_buff *skb;
	int err = 0;

	mutex_lock(&xs->mutex);

	if (xs->queue_id >= xs->dev->real_num_tx_queues)
		goto out;

	while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
		char *buffer;
		u64 addr;
		u32 len;

		if (max_batch-- == 0) {
			err = -EAGAIN;
			goto out;
		}

		len = desc.len;
		skb = sock_alloc_send_skb(sk, len, 1, &err);
		if (unlikely(!skb))
			goto out;

		skb_put(skb, len);
		addr = desc.addr;
		buffer = xsk_buff_raw_get_data(xs->pool, addr);
		err = skb_store_bits(skb, 0, buffer, len);
		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		if (unlikely(err) || xskq_prod_reserve(xs->pool->cq)) {
			kfree_skb(skb);
			goto out;
		}

		skb->dev = xs->dev;
		skb->priority = sk->sk_priority;
		skb->mark = sk->sk_mark;
		skb_shinfo(skb)->destructor_arg = (void *)(long)desc.addr;
		skb->destructor = xsk_destruct_skb;

		err = __dev_direct_xmit(skb, xs->queue_id);
		if (err == NETDEV_TX_BUSY) {
			/* Tell user-space to retry the send */
			skb->destructor = sock_wfree;
			/* Free skb without triggering the perf drop trace */
			consume_skb(skb);
			err = -EAGAIN;
			goto out;
		}

		xskq_cons_release(xs->tx);
		/* Ignore NET_XMIT_CN as packet might have been sent */
		if (err == NET_XMIT_DROP) {
			/* SKB completed but not sent */
			err = -EBUSY;
			goto out;
		}

		sent_frame = true;
	}

	xs->tx->queue_empty_descs++;

out:
	if (sent_frame)
		sk->sk_write_space(sk);

	mutex_unlock(&xs->mutex);
	return err;
}

static int __xsk_sendmsg(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (unlikely(!(xs->dev->flags & IFF_UP)))
		return -ENETDOWN;
	if (unlikely(!xs->tx))
		return -ENOBUFS;

	return xs->zc ? xsk_zc_xmit(xs) : xsk_generic_xmit(sk);
}

static bool xsk_no_wakeup(struct sock *sk)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	/* Prefer busy-polling, skip the wakeup. */
	return READ_ONCE(sk->sk_prefer_busy_poll) && READ_ONCE(sk->sk_ll_usec) &&
		READ_ONCE(sk->sk_napi_id) >= MIN_NAPI_ID;
#else
	return false;
#endif
}

static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct xsk_buff_pool *pool;

	if (unlikely(!xsk_is_bound(xs)))
		return -ENXIO;
	if (unlikely(need_wait))
		return -EOPNOTSUPP;

	if (sk_can_busy_loop(sk))
		sk_busy_loop(sk, 1); /* only support non-blocking sockets */

	if (xsk_no_wakeup(sk))
		return 0;

	pool = xs->pool;
	if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
		return __xsk_sendmsg(sk);
	return 0;
}

static int xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
{
	bool need_wait = !(flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);

	if (unlikely(!(xs->dev->flags & IFF_UP)))
		return -ENETDOWN;
	if (unlikely(!xs->rx))
		return -ENOBUFS;
	if (unlikely(!xsk_is_bound(xs)))
		return -ENXIO;
	if (unlikely(need_wait))
		return -EOPNOTSUPP;

	if (sk_can_busy_loop(sk))
		sk_busy_loop(sk, 1); /* only support non-blocking sockets */

	if (xsk_no_wakeup(sk))
		return 0;

	if (xs->pool->cached_need_wakeup & XDP_WAKEUP_RX && xs->zc)
		return xsk_wakeup(xs, XDP_WAKEUP_RX);
	return 0;
}

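/* poll() doubles as the wakeup mechanism: if the driver asked for a wakeup,
 * it is issued here (or, in copy mode, Tx is driven directly), and then the
 * Rx/Tx rings are checked for readable/writable state.
 */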
static __poll_t xsk_poll(struct file *file, struct socket *sock,
			     struct poll_table_struct *wait)
{
	__poll_t mask = datagram_poll(file, sock, wait);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct xsk_buff_pool *pool;

	if (unlikely(!xsk_is_bound(xs)))
		return mask;

	pool = xs->pool;

	if (pool->cached_need_wakeup) {
		if (xs->zc)
			xsk_wakeup(xs, pool->cached_need_wakeup);
		else
			/* Poll needs to drive Tx also in copy mode */
			__xsk_sendmsg(sk);
	}

	if (xs->rx && !xskq_prod_is_empty(xs->rx))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (xs->tx && !xskq_cons_is_full(xs->tx))
		mask |= EPOLLOUT | EPOLLWRNORM;

	return mask;
}

static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
			  bool umem_queue)
{
	struct xsk_queue *q;

	if (entries == 0 || *queue || !is_power_of_2(entries))
		return -EINVAL;

	q = xskq_create(entries, umem_queue);
	if (!q)
		return -ENOMEM;

	/* Make sure queue is ready before it can be seen by others */
	smp_wmb();
	WRITE_ONCE(*queue, q);
	return 0;
}

static void xsk_unbind_dev(struct xdp_sock *xs)
{
	struct net_device *dev = xs->dev;

	if (xs->state != XSK_BOUND)
		return;
	WRITE_ONCE(xs->state, XSK_UNBOUND);

	/* Wait for driver to stop using the xdp socket. */
	xp_del_xsk(xs->pool, xs);
	xs->dev = NULL;
	synchronize_net();
	dev_put(dev);
}

static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs,
					      struct xdp_sock ***map_entry)
{
	struct xsk_map *map = NULL;
	struct xsk_map_node *node;

	*map_entry = NULL;

	spin_lock_bh(&xs->map_list_lock);
	node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node,
					node);
	if (node) {
		bpf_map_inc(&node->map->map);
		map = node->map;
		*map_entry = node->map_entry;
	}
	spin_unlock_bh(&xs->map_list_lock);
	return map;
}

static void xsk_delete_from_maps(struct xdp_sock *xs)
{
	/* This function removes the current XDP socket from all the
	 * maps it resides in. We need to take extra care here, due to
	 * the two locks involved. Each map has a lock synchronizing
	 * updates to the entries, and each socket has a lock that
	 * synchronizes access to the list of maps (map_list). For
	 * deadlock avoidance the locks need to be taken in the order
	 * "map lock"->"socket map list lock". We start off by
	 * accessing the socket map list, and take a reference to the
	 * map to guarantee existence between the
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete()
	 * calls. Then we ask the map to remove the socket, which
	 * tries to remove the socket from the map. Note that there
	 * might be updates to the map between
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete().
	 */
	struct xdp_sock **map_entry = NULL;
	struct xsk_map *map;

	while ((map = xsk_get_map_list_entry(xs, &map_entry))) {
		xsk_map_try_sock_delete(map, xs, map_entry);
		bpf_map_put(&map->map);
	}
}

static int xsk_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net *net;

	if (!sk)
		return 0;

	net = sock_net(sk);

	mutex_lock(&net->xdp.lock);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&net->xdp.lock);

	local_bh_disable();
	sock_prot_inuse_add(net, sk->sk_prot, -1);
	local_bh_enable();

	xsk_delete_from_maps(xs);
	mutex_lock(&xs->mutex);
	xsk_unbind_dev(xs);
	mutex_unlock(&xs->mutex);

	xskq_destroy(xs->rx);
	xskq_destroy(xs->tx);
	xskq_destroy(xs->fq_tmp);
	xskq_destroy(xs->cq_tmp);

	sock_orphan(sk);
	sock->sk = NULL;

	sk_refcnt_debug_release(sk);
	sock_put(sk);

	return 0;
}

static struct socket *xsk_lookup_xsk_from_fd(int fd)
{
	struct socket *sock;
	int err;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	if (sock->sk->sk_family != PF_XDP) {
		sockfd_put(sock);
		return ERR_PTR(-ENOPROTOOPT);
	}

	return sock;
}

static bool xsk_validate_queues(struct xdp_sock *xs)
{
	return xs->fq_tmp && xs->cq_tmp;
}

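/* bind() attaches the socket to a <netdev, queue_id> pair. Without
 * XDP_SHARED_UMEM the socket must have registered its own umem plus fill and
 * completion rings, and a new buffer pool is created for it. With
 * XDP_SHARED_UMEM the umem (and, for the same device/queue, the buffer pool)
 * is taken from the socket given in sxdp_shared_umem_fd instead.
 */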
static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev;
	u32 flags, qid;
	int err = 0;

	if (addr_len < sizeof(struct sockaddr_xdp))
		return -EINVAL;
	if (sxdp->sxdp_family != AF_XDP)
		return -EINVAL;

	flags = sxdp->sxdp_flags;
	if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY |
		      XDP_USE_NEED_WAKEUP))
		return -EINVAL;

	rtnl_lock();
	mutex_lock(&xs->mutex);
	if (xs->state != XSK_READY) {
		err = -EBUSY;
		goto out_release;
	}

	dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
	if (!dev) {
		err = -ENODEV;
		goto out_release;
	}

	if (!xs->rx && !xs->tx) {
		err = -EINVAL;
		goto out_unlock;
	}

	qid = sxdp->sxdp_queue_id;

	if (flags & XDP_SHARED_UMEM) {
		struct xdp_sock *umem_xs;
		struct socket *sock;

		if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY) ||
		    (flags & XDP_USE_NEED_WAKEUP)) {
			/* Cannot specify flags for shared sockets. */
			err = -EINVAL;
			goto out_unlock;
		}

		if (xs->umem) {
			/* We already have our own. */
			err = -EINVAL;
			goto out_unlock;
		}

		sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
		if (IS_ERR(sock)) {
			err = PTR_ERR(sock);
			goto out_unlock;
		}

		umem_xs = xdp_sk(sock->sk);
		if (!xsk_is_bound(umem_xs)) {
			err = -EBADF;
			sockfd_put(sock);
			goto out_unlock;
		}

		if (umem_xs->queue_id != qid || umem_xs->dev != dev) {
			/* Share the umem with another socket on another qid
			 * and/or device.
			 */
			xs->pool = xp_create_and_assign_umem(xs,
							     umem_xs->umem);
			if (!xs->pool) {
				err = -ENOMEM;
				sockfd_put(sock);
				goto out_unlock;
			}

			err = xp_assign_dev_shared(xs->pool, umem_xs->umem,
						   dev, qid);
			if (err) {
				xp_destroy(xs->pool);
				xs->pool = NULL;
				sockfd_put(sock);
				goto out_unlock;
			}
		} else {
			/* Share the buffer pool with the other socket. */
			if (xs->fq_tmp || xs->cq_tmp) {
				/* Do not allow setting your own fq or cq. */
				err = -EINVAL;
				sockfd_put(sock);
				goto out_unlock;
			}

			xp_get_pool(umem_xs->pool);
			xs->pool = umem_xs->pool;
		}

		xdp_get_umem(umem_xs->umem);
		WRITE_ONCE(xs->umem, umem_xs->umem);
		sockfd_put(sock);
	} else if (!xs->umem || !xsk_validate_queues(xs)) {
		err = -EINVAL;
		goto out_unlock;
	} else {
		/* This xsk has its own umem. */
		xs->pool = xp_create_and_assign_umem(xs, xs->umem);
		if (!xs->pool) {
			err = -ENOMEM;
			goto out_unlock;
		}

		err = xp_assign_dev(xs->pool, dev, qid, flags);
		if (err) {
			xp_destroy(xs->pool);
			xs->pool = NULL;
			goto out_unlock;
		}
	}

	xs->dev = dev;
	xs->zc = xs->umem->zc;
	xs->queue_id = qid;
	xp_add_xsk(xs->pool, xs);

out_unlock:
	if (err) {
		dev_put(dev);
	} else {
		/* Matches smp_rmb() in bind() for shared umem
		 * sockets, and xsk_is_bound().
		 */
		smp_wmb();
		WRITE_ONCE(xs->state, XSK_BOUND);
	}
out_release:
	mutex_unlock(&xs->mutex);
	rtnl_unlock();
	return err;
}

struct xdp_umem_reg_v1 {
	__u64 addr; /* Start of packet data area */
	__u64 len; /* Length of packet data area */
	__u32 chunk_size;
	__u32 headroom;
};

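/* The socket options below configure the umem and the four rings. A rough
 * sketch of the user-space setup (illustrative only, not part of this file):
 *
 *	struct xdp_umem_reg mr = { ... };
 *	int entries = 2048;		(must be a power of two)
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING, &entries, sizeof(entries));
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_COMPLETION_RING, &entries, sizeof(entries));
 *	setsockopt(fd, SOL_XDP, XDP_RX_RING, &entries, sizeof(entries));
 *	setsockopt(fd, SOL_XDP, XDP_TX_RING, &entries, sizeof(entries));
 *
 * followed by mmap() of each ring (see xsk_mmap()) and then bind().
 */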
static int xsk_setsockopt(struct socket *sock, int level, int optname,
			  sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int err;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	switch (optname) {
	case XDP_RX_RING:
	case XDP_TX_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (optlen < sizeof(entries))
			return -EINVAL;
		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}
		q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
		err = xsk_init_queue(entries, q, false);
		if (!err && optname == XDP_TX_RING)
			/* Tx needs to be explicitly woken up the first time */
			xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
		mutex_unlock(&xs->mutex);
		return err;
	}
	case XDP_UMEM_REG:
	{
		size_t mr_size = sizeof(struct xdp_umem_reg);
		struct xdp_umem_reg mr = {};
		struct xdp_umem *umem;

		if (optlen < sizeof(struct xdp_umem_reg_v1))
			return -EINVAL;
		else if (optlen < sizeof(mr))
			mr_size = sizeof(struct xdp_umem_reg_v1);

		if (copy_from_sockptr(&mr, optval, mr_size))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY || xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		umem = xdp_umem_create(&mr);
		if (IS_ERR(umem)) {
			mutex_unlock(&xs->mutex);
			return PTR_ERR(umem);
		}

		/* Make sure umem is ready before it can be seen by others */
		smp_wmb();
		WRITE_ONCE(xs->umem, umem);
		mutex_unlock(&xs->mutex);
		return 0;
	}
	case XDP_UMEM_FILL_RING:
	case XDP_UMEM_COMPLETION_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (optlen < sizeof(entries))
			return -EINVAL;
		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		q = (optname == XDP_UMEM_FILL_RING) ? &xs->fq_tmp :
			&xs->cq_tmp;
		err = xsk_init_queue(entries, q, true);
		mutex_unlock(&xs->mutex);
		return err;
	}
	default:
		break;
	}

	return -ENOPROTOOPT;
}

static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_rxtx_ring, desc);
}

static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_umem_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_umem_ring, desc);
}

struct xdp_statistics_v1 {
	__u64 rx_dropped;
	__u64 rx_invalid_descs;
	__u64 tx_invalid_descs;
};

static int xsk_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int len;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case XDP_STATISTICS:
	{
		struct xdp_statistics stats = {};
		bool extra_stats = true;
		size_t stats_size;

		if (len < sizeof(struct xdp_statistics_v1)) {
			return -EINVAL;
		} else if (len < sizeof(stats)) {
			extra_stats = false;
			stats_size = sizeof(struct xdp_statistics_v1);
		} else {
			stats_size = sizeof(stats);
		}

		mutex_lock(&xs->mutex);
		stats.rx_dropped = xs->rx_dropped;
		if (extra_stats) {
			stats.rx_ring_full = xs->rx_queue_full;
			stats.rx_fill_ring_empty_descs =
				xs->pool ? xskq_nb_queue_empty_descs(xs->pool->fq) : 0;
			stats.tx_ring_empty_descs = xskq_nb_queue_empty_descs(xs->tx);
		} else {
			stats.rx_dropped += xs->rx_queue_full;
		}
		stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
		stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
		mutex_unlock(&xs->mutex);

		if (copy_to_user(optval, &stats, stats_size))
			return -EFAULT;
		if (put_user(stats_size, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_MMAP_OFFSETS:
	{
		struct xdp_mmap_offsets off;
		struct xdp_mmap_offsets_v1 off_v1;
		bool flags_supported = true;
		void *to_copy;

		if (len < sizeof(off_v1))
			return -EINVAL;
		else if (len < sizeof(off))
			flags_supported = false;

		if (flags_supported) {
			/* xdp_ring_offset is identical to xdp_ring_offset_v1
			 * except for the flags field added to the end.
			 */
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.rx);
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.tx);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.fr);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.cr);
			off.rx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.tx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.fr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);
			off.cr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);

			len = sizeof(off);
			to_copy = &off;
		} else {
			xsk_enter_rxtx_offsets(&off_v1.rx);
			xsk_enter_rxtx_offsets(&off_v1.tx);
			xsk_enter_umem_offsets(&off_v1.fr);
			xsk_enter_umem_offsets(&off_v1.cr);

			len = sizeof(off_v1);
			to_copy = &off_v1;
		}

		if (copy_to_user(optval, to_copy, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_OPTIONS:
	{
		struct xdp_options opts = {};

		if (len < sizeof(opts))
			return -EINVAL;

		mutex_lock(&xs->mutex);
		if (xs->zc)
			opts.flags |= XDP_OPTIONS_ZEROCOPY;
		mutex_unlock(&xs->mutex);

		len = sizeof(opts);
		if (copy_to_user(optval, &opts, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	default:
		break;
	}

	return -EOPNOTSUPP;
}

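/* mmap() maps one of the four rings into user space. The ring is selected by
 * the page offset (XDP_PGOFF_RX_RING, XDP_PGOFF_TX_RING,
 * XDP_UMEM_PGOFF_FILL_RING or XDP_UMEM_PGOFF_COMPLETION_RING) and must have
 * been created with setsockopt() first.
 */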
static int xsk_mmap(struct file *file, struct socket *sock,
		    struct vm_area_struct *vma)
{
	loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct xdp_sock *xs = xdp_sk(sock->sk);
	struct xsk_queue *q = NULL;
	unsigned long pfn;
	struct page *qpg;

	if (READ_ONCE(xs->state) != XSK_READY)
		return -EBUSY;

	if (offset == XDP_PGOFF_RX_RING) {
		q = READ_ONCE(xs->rx);
	} else if (offset == XDP_PGOFF_TX_RING) {
		q = READ_ONCE(xs->tx);
	} else {
		/* Matches the smp_wmb() in XDP_UMEM_REG */
		smp_rmb();
		if (offset == XDP_UMEM_PGOFF_FILL_RING)
			q = READ_ONCE(xs->fq_tmp);
		else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
			q = READ_ONCE(xs->cq_tmp);
	}

	if (!q)
		return -EINVAL;

	/* Matches the smp_wmb() in xsk_init_queue */
	smp_rmb();
	qpg = virt_to_head_page(q->ring);
	if (size > page_size(qpg))
		return -EINVAL;

	pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;
	return remap_pfn_range(vma, vma->vm_start, pfn,
			       size, vma->vm_page_prot);
}

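/* Netdevice notifier: on NETDEV_UNREGISTER every socket bound to the
 * disappearing device is unbound, its pool's device references are cleared
 * and user space is notified through sk_err = ENETDOWN.
 */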
static int xsk_notifier(struct notifier_block *this,
			unsigned long msg, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct sock *sk;

	switch (msg) {
	case NETDEV_UNREGISTER:
		mutex_lock(&net->xdp.lock);
		sk_for_each(sk, &net->xdp.list) {
			struct xdp_sock *xs = xdp_sk(sk);

			mutex_lock(&xs->mutex);
			if (xs->dev == dev) {
				sk->sk_err = ENETDOWN;
				if (!sock_flag(sk, SOCK_DEAD))
					sk->sk_error_report(sk);

				xsk_unbind_dev(xs);

				/* Clear device references. */
				xp_clear_dev(xs->pool);
			}
			mutex_unlock(&xs->mutex);
		}
		mutex_unlock(&net->xdp.lock);
		break;
	}
	return NOTIFY_DONE;
}

static struct proto xsk_proto = {
	.name =		"XDP",
	.owner =	THIS_MODULE,
	.obj_size =	sizeof(struct xdp_sock),
};

static const struct proto_ops xsk_proto_ops = {
	.family		= PF_XDP,
	.owner		= THIS_MODULE,
	.release	= xsk_release,
	.bind		= xsk_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.poll		= xsk_poll,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= xsk_setsockopt,
	.getsockopt	= xsk_getsockopt,
	.sendmsg	= xsk_sendmsg,
	.recvmsg	= xsk_recvmsg,
	.mmap		= xsk_mmap,
	.sendpage	= sock_no_sendpage,
};

static void xsk_destruct(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (!sock_flag(sk, SOCK_DEAD))
		return;

	if (!xp_put_pool(xs->pool))
		xdp_put_umem(xs->umem, !xs->pool);

	sk_refcnt_debug_dec(sk);
}

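/* Socket creation: AF_XDP sockets require CAP_NET_RAW, are always SOCK_RAW
 * with protocol 0, and start out in the XSK_READY state until bind().
 */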
static int xsk_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct xdp_sock *xs;
	struct sock *sk;

	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (protocol)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
	if (!sk)
		return -ENOBUFS;

	sock->ops = &xsk_proto_ops;

	sock_init_data(sock, sk);

	sk->sk_family = PF_XDP;

	sk->sk_destruct = xsk_destruct;
	sk_refcnt_debug_inc(sk);

	sock_set_flag(sk, SOCK_RCU_FREE);

	xs = xdp_sk(sk);
	xs->state = XSK_READY;
	mutex_init(&xs->mutex);
	spin_lock_init(&xs->rx_lock);
	spin_lock_init(&xs->tx_completion_lock);

	INIT_LIST_HEAD(&xs->map_list);
	spin_lock_init(&xs->map_list_lock);

	mutex_lock(&net->xdp.lock);
	sk_add_node_rcu(sk, &net->xdp.list);
	mutex_unlock(&net->xdp.lock);

	local_bh_disable();
	sock_prot_inuse_add(net, &xsk_proto, 1);
	local_bh_enable();

	return 0;
}

static const struct net_proto_family xsk_family_ops = {
	.family = PF_XDP,
	.create = xsk_create,
	.owner	= THIS_MODULE,
};

static struct notifier_block xsk_netdev_notifier = {
	.notifier_call	= xsk_notifier,
};

static int __net_init xsk_net_init(struct net *net)
{
	mutex_init(&net->xdp.lock);
	INIT_HLIST_HEAD(&net->xdp.list);
	return 0;
}

static void __net_exit xsk_net_exit(struct net *net)
{
	WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
}

static struct pernet_operations xsk_net_ops = {
	.init = xsk_net_init,
	.exit = xsk_net_exit,
};

static int __init xsk_init(void)
{
	int err, cpu;

	err = proto_register(&xsk_proto, 0 /* no slab */);
	if (err)
		goto out;

	err = sock_register(&xsk_family_ops);
	if (err)
		goto out_proto;

	err = register_pernet_subsys(&xsk_net_ops);
	if (err)
		goto out_sk;

	err = register_netdevice_notifier(&xsk_netdev_notifier);
	if (err)
		goto out_pernet;

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(xskmap_flush_list, cpu));
	return 0;

out_pernet:
	unregister_pernet_subsys(&xsk_net_ops);
out_sk:
	sock_unregister(PF_XDP);
out_proto:
	proto_unregister(&xsk_proto);
out:
	return err;
}

fs_initcall(xsk_init);