xref: /linux/net/xdp/xsk.c (revision 3d2c3d2eea9acdbee5b5742d15d021069b49d3f9)
1 // SPDX-License-Identifier: GPL-2.0
2 /* XDP sockets
3  *
4  * AF_XDP sockets allow a channel between XDP programs and userspace
5  * applications.
6  * Copyright(c) 2018 Intel Corporation.
7  *
8  * Author(s): Björn Töpel <bjorn.topel@intel.com>
9  *	      Magnus Karlsson <magnus.karlsson@intel.com>
10  */
11 
12 #define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__
13 
14 #include <linux/if_xdp.h>
15 #include <linux/init.h>
16 #include <linux/sched/mm.h>
17 #include <linux/sched/signal.h>
18 #include <linux/sched/task.h>
19 #include <linux/socket.h>
20 #include <linux/file.h>
21 #include <linux/uaccess.h>
22 #include <linux/net.h>
23 #include <linux/netdevice.h>
24 #include <linux/rculist.h>
25 #include <linux/vmalloc.h>
26 
27 #include <net/netdev_queues.h>
28 #include <net/xdp_sock_drv.h>
29 #include <net/busy_poll.h>
30 #include <net/netdev_lock.h>
31 #include <net/netdev_rx_queue.h>
32 #include <net/xdp.h>
33 
34 #include "../core/dev.h"
35 
36 #include "xsk_queue.h"
37 #include "xdp_umem.h"
38 #include "xsk.h"
39 
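/* TX_BATCH_SIZE is the default per-call budget of __xsk_generic_xmit(): it is
 * the initial value of xs->max_tx_budget (tunable via the
 * XDP_MAX_TX_SKB_BUDGET socket option). MAX_PER_SOCKET_BUDGET bounds how many
 * descriptors xsk_tx_peek_desc() consumes from one socket before moving on,
 * so that sockets sharing a pool do not starve each other.
 */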
40 #define TX_BATCH_SIZE 32
41 #define MAX_PER_SOCKET_BUDGET 32
42 
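/* Completion bookkeeping for copy-mode Tx. A single-descriptor skb stores its
 * umem address directly in skb_shinfo()->destructor_arg with bit 0 set as a
 * tag (see xsk_skb_destructor_set_addr()). Once an skb spans more than one
 * descriptor, a struct xsk_addrs is allocated from xsk_tx_generic_cache and
 * destructor_arg points to it instead; addrs[] then holds one umem address
 * per descriptor so that xsk_destruct_skb() can publish all of them to the
 * completion ring.
 */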
43 struct xsk_addrs {
44 	u32 num_descs;
45 	u64 addrs[MAX_SKB_FRAGS + 1];
46 };
47 
48 static struct kmem_cache *xsk_tx_generic_cache;
49 
50 void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
51 {
52 	if (pool->cached_need_wakeup & XDP_WAKEUP_RX)
53 		return;
54 
55 	pool->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
56 	pool->cached_need_wakeup |= XDP_WAKEUP_RX;
57 }
58 EXPORT_SYMBOL(xsk_set_rx_need_wakeup);
59 
60 void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
61 {
62 	struct xdp_sock *xs;
63 
64 	if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
65 		return;
66 
67 	rcu_read_lock();
68 	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
69 		xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
70 	}
71 	rcu_read_unlock();
72 
73 	pool->cached_need_wakeup |= XDP_WAKEUP_TX;
74 }
75 EXPORT_SYMBOL(xsk_set_tx_need_wakeup);
76 
77 void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
78 {
79 	if (!(pool->cached_need_wakeup & XDP_WAKEUP_RX))
80 		return;
81 
82 	pool->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
83 	pool->cached_need_wakeup &= ~XDP_WAKEUP_RX;
84 }
85 EXPORT_SYMBOL(xsk_clear_rx_need_wakeup);
86 
87 void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
88 {
89 	struct xdp_sock *xs;
90 
91 	if (!(pool->cached_need_wakeup & XDP_WAKEUP_TX))
92 		return;
93 
94 	rcu_read_lock();
95 	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
96 		xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
97 	}
98 	rcu_read_unlock();
99 
100 	pool->cached_need_wakeup &= ~XDP_WAKEUP_TX;
101 }
102 EXPORT_SYMBOL(xsk_clear_tx_need_wakeup);
103 
104 bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
105 {
106 	return pool->uses_need_wakeup;
107 }
108 EXPORT_SYMBOL(xsk_uses_need_wakeup);
109 
110 struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
111 					    u16 queue_id)
112 {
113 	if (queue_id < dev->real_num_rx_queues)
114 		return dev->_rx[queue_id].pool;
115 	if (queue_id < dev->real_num_tx_queues)
116 		return dev->_tx[queue_id].pool;
117 
118 	return NULL;
119 }
120 EXPORT_SYMBOL(xsk_get_pool_from_qid);
121 
122 static void __xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
123 {
124 	if (queue_id < dev->num_rx_queues)
125 		dev->_rx[queue_id].pool = NULL;
126 	if (queue_id < dev->num_tx_queues)
127 		dev->_tx[queue_id].pool = NULL;
128 }
129 
130 void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
131 {
132 	struct netdev_rx_queue *hw_rxq;
133 
134 	if (!netif_rxq_is_leased(dev, queue_id))
135 		return __xsk_clear_pool_at_qid(dev, queue_id);
136 	WARN_ON_ONCE(!netif_is_queue_leasee(dev));
137 
138 	hw_rxq = __netif_get_rx_queue(dev, queue_id)->lease;
139 
140 	netdev_lock(hw_rxq->dev);
141 	queue_id = get_netdev_rx_queue_index(hw_rxq);
142 	__xsk_clear_pool_at_qid(hw_rxq->dev, queue_id);
143 	netdev_unlock(hw_rxq->dev);
144 }
145 
146 static int __xsk_reg_pool_at_qid(struct net_device *dev,
147 				 struct xsk_buff_pool *pool, u16 queue_id)
148 {
149 	if (xsk_get_pool_from_qid(dev, queue_id))
150 		return -EBUSY;
151 
152 	if (queue_id < dev->real_num_rx_queues)
153 		dev->_rx[queue_id].pool = pool;
154 	if (queue_id < dev->real_num_tx_queues)
155 		dev->_tx[queue_id].pool = pool;
156 
157 	return 0;
158 }
159 
160 /* The buffer pool is stored in both the _rx struct and the _tx struct as we do
161  * not know whether the device has more tx queues than rx, or the opposite.
162  * This might also change at runtime.
163  */
164 int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
165 			u16 queue_id)
166 {
167 	struct netdev_rx_queue *hw_rxq;
168 	int ret;
169 
170 	if (queue_id >= max(dev->real_num_rx_queues,
171 			    dev->real_num_tx_queues))
172 		return -EINVAL;
173 
174 	if (queue_id >= dev->real_num_rx_queues ||
175 	    !netif_rxq_is_leased(dev, queue_id))
176 		return __xsk_reg_pool_at_qid(dev, pool, queue_id);
177 	if (!netif_is_queue_leasee(dev))
178 		return -EBUSY;
179 
180 	hw_rxq = __netif_get_rx_queue(dev, queue_id)->lease;
181 
182 	netdev_lock(hw_rxq->dev);
183 	queue_id = get_netdev_rx_queue_index(hw_rxq);
184 	ret = __xsk_reg_pool_at_qid(hw_rxq->dev, pool, queue_id);
185 	netdev_unlock(hw_rxq->dev);
186 
187 	return ret;
188 }
189 
190 static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff_xsk *xskb, u32 len,
191 			u32 flags)
192 {
193 	u64 addr;
194 	int err;
195 
196 	addr = xp_get_handle(xskb, xskb->pool);
197 	err = xskq_prod_reserve_desc(xs->rx, addr, len, flags);
198 	if (err) {
199 		xs->rx_queue_full++;
200 		return err;
201 	}
202 
203 	xp_release(xskb);
204 	return 0;
205 }
206 
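/* Zero-copy receive: the xdp_buff already lives in this socket's buffer pool,
 * so each constituent xdp_buff_xsk is handed to the Rx ring as a descriptor.
 * For multi-buffer packets every fragment except the last is flagged with
 * XDP_PKT_CONTD.
 */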
207 static int xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
208 {
209 	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
210 	u32 frags = xdp_buff_has_frags(xdp);
211 	struct xdp_buff_xsk *pos, *tmp;
212 	struct list_head *xskb_list;
213 	u32 contd = 0;
214 	u32 num_desc;
215 	int err;
216 
217 	if (likely(!frags)) {
218 		err = __xsk_rcv_zc(xs, xskb, len, contd);
219 		if (err)
220 			goto err;
221 		return 0;
222 	}
223 
224 	contd = XDP_PKT_CONTD;
225 	num_desc = xdp_get_shared_info_from_buff(xdp)->nr_frags + 1;
226 	if (xskq_prod_nb_free(xs->rx, num_desc) < num_desc) {
227 		xs->rx_queue_full++;
228 		err = -ENOBUFS;
229 		goto err;
230 	}
231 
232 	__xsk_rcv_zc(xs, xskb, len, contd);
233 	xskb_list = &xskb->pool->xskb_list;
234 	list_for_each_entry_safe(pos, tmp, xskb_list, list_node) {
235 		if (list_is_singular(xskb_list))
236 			contd = 0;
237 		len = pos->xdp.data_end - pos->xdp.data;
238 		__xsk_rcv_zc(xs, pos, len, contd);
239 		list_del_init(&pos->list_node);
240 	}
241 
242 	return 0;
243 err:
244 	xsk_buff_free(xdp);
245 	return err;
246 }
247 
248 static void *xsk_copy_xdp_start(struct xdp_buff *from)
249 {
250 	if (unlikely(xdp_data_meta_unsupported(from)))
251 		return from->data;
252 	else
253 		return from->data_meta;
254 }
255 
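/* Copy packet bytes (and any leading metadata) from the source xdp_buff into
 * one destination Rx frame, advancing through the source frags as they run
 * out. Returns the number of bytes written to this frame; the caller loops
 * until rem, the bytes remaining in the whole packet, reaches zero.
 */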
256 static u32 xsk_copy_xdp(void *to, void **from, u32 to_len,
257 			u32 *from_len, skb_frag_t **frag, u32 rem)
258 {
259 	u32 copied = 0;
260 
261 	while (1) {
262 		u32 copy_len = min_t(u32, *from_len, to_len);
263 
264 		memcpy(to, *from, copy_len);
265 		copied += copy_len;
266 		if (rem == copied)
267 			return copied;
268 
269 		if (*from_len == copy_len) {
270 			*from = skb_frag_address(*frag);
271 			*from_len = skb_frag_size((*frag)++);
272 		} else {
273 			*from += copy_len;
274 			*from_len -= copy_len;
275 		}
276 		if (to_len == copy_len)
277 			return copied;
278 
279 		to_len -= copy_len;
280 		to += copy_len;
281 	}
282 }
283 
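/* Copy-mode receive: copy the packet (and its metadata, if any) out of the
 * driver's xdp_buff into buffers allocated from this socket's pool and post
 * them to the Rx ring. Large or fragmented packets are split across as many
 * pool frames as needed, with all but the last marked XDP_PKT_CONTD.
 */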
284 static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
285 {
286 	u32 frame_size = __xsk_pool_get_rx_frame_size(xs->pool);
287 	void *copy_from = xsk_copy_xdp_start(xdp), *copy_to;
288 	u32 from_len, meta_len, rem, num_desc;
289 	struct xdp_buff_xsk *xskb;
290 	struct xdp_buff *xsk_xdp;
291 	skb_frag_t *frag;
292 
293 	from_len = xdp->data_end - copy_from;
294 	meta_len = xdp->data - copy_from;
295 	rem = len + meta_len;
296 
297 	if (len <= frame_size && !xdp_buff_has_frags(xdp)) {
298 		int err;
299 
300 		xsk_xdp = xsk_buff_alloc(xs->pool);
301 		if (!xsk_xdp) {
302 			xs->rx_dropped++;
303 			return -ENOMEM;
304 		}
305 		memcpy(xsk_xdp->data - meta_len, copy_from, rem);
306 		xskb = container_of(xsk_xdp, struct xdp_buff_xsk, xdp);
307 		err = __xsk_rcv_zc(xs, xskb, len, 0);
308 		if (err) {
309 			xsk_buff_free(xsk_xdp);
310 			return err;
311 		}
312 
313 		return 0;
314 	}
315 
316 	num_desc = (len - 1) / frame_size + 1;
317 
318 	if (!xsk_buff_can_alloc(xs->pool, num_desc)) {
319 		xs->rx_dropped++;
320 		return -ENOMEM;
321 	}
322 	if (xskq_prod_nb_free(xs->rx, num_desc) < num_desc) {
323 		xs->rx_queue_full++;
324 		return -ENOBUFS;
325 	}
326 
327 	if (xdp_buff_has_frags(xdp)) {
328 		struct skb_shared_info *sinfo;
329 
330 		sinfo = xdp_get_shared_info_from_buff(xdp);
331 		frag = &sinfo->frags[0];
332 	}
333 
334 	do {
335 		u32 to_len = frame_size + meta_len;
336 		u32 copied;
337 
338 		xsk_xdp = xsk_buff_alloc(xs->pool);
339 		copy_to = xsk_xdp->data - meta_len;
340 
341 		copied = xsk_copy_xdp(copy_to, &copy_from, to_len, &from_len, &frag, rem);
342 		rem -= copied;
343 
344 		xskb = container_of(xsk_xdp, struct xdp_buff_xsk, xdp);
345 		__xsk_rcv_zc(xs, xskb, copied - meta_len, rem ? XDP_PKT_CONTD : 0);
346 		meta_len = 0;
347 	} while (rem);
348 
349 	return 0;
350 }
351 
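/* The Tx ring is considered writeable as long as no more than half of its
 * entries are outstanding. This gates EPOLLOUT in xsk_poll() and the
 * sk_write_space() wakeups issued after descriptors have been consumed.
 */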
352 static bool xsk_tx_writeable(struct xdp_sock *xs)
353 {
354 	if (xskq_cons_present_entries(xs->tx) > xs->tx->nentries / 2)
355 		return false;
356 
357 	return true;
358 }
359 
360 static void __xsk_tx_release(struct xdp_sock *xs)
361 {
362 	__xskq_cons_release(xs->tx);
363 	if (xsk_tx_writeable(xs))
364 		xs->sk.sk_write_space(&xs->sk);
365 }
366 
367 static bool xsk_is_bound(struct xdp_sock *xs)
368 {
369 	if (READ_ONCE(xs->state) == XSK_BOUND) {
370 		/* Matches smp_wmb() in bind(). */
371 		smp_rmb();
372 		return true;
373 	}
374 	return false;
375 }
376 
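/* A packet is accepted only if it arrived on the device/queue this socket is
 * bound to, or, if that rx queue carries a lease, on the queue the lease
 * points to.
 */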
377 static bool xsk_dev_queue_valid(const struct xdp_sock *xs,
378 				const struct xdp_rxq_info *info)
379 {
380 	struct net_device *dev = xs->dev;
381 	u32 queue_index = xs->queue_id;
382 	struct netdev_rx_queue *rxq;
383 
384 	if (info->dev == dev &&
385 	    info->queue_index == queue_index)
386 		return true;
387 
388 	if (queue_index < dev->real_num_rx_queues) {
389 		rxq = READ_ONCE(__netif_get_rx_queue(dev, queue_index)->lease);
390 		if (!rxq)
391 			return false;
392 
393 		dev = rxq->dev;
394 		queue_index = get_netdev_rx_queue_index(rxq);
395 
396 		return info->dev == dev &&
397 		       info->queue_index == queue_index;
398 	}
399 	return false;
400 }
401 
402 static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
403 {
404 	if (!xsk_is_bound(xs))
405 		return -ENXIO;
406 	if (!xsk_dev_queue_valid(xs, xdp->rxq))
407 		return -EINVAL;
408 
409 	if (len > __xsk_pool_get_rx_frame_size(xs->pool) && !xs->sg) {
410 		xs->rx_dropped++;
411 		return -ENOSPC;
412 	}
413 
414 	return 0;
415 }
416 
417 static void xsk_flush(struct xdp_sock *xs)
418 {
419 	xskq_prod_submit(xs->rx);
420 	__xskq_cons_release(xs->pool->fq);
421 	sock_def_readable(&xs->sk);
422 }
423 
424 int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
425 {
426 	u32 len = xdp_get_buff_len(xdp);
427 	int err;
428 
429 	err = xsk_rcv_check(xs, xdp, len);
430 	if (!err) {
431 		spin_lock_bh(&xs->pool->rx_lock);
432 		err = __xsk_rcv(xs, xdp, len);
433 		xsk_flush(xs);
434 		spin_unlock_bh(&xs->pool->rx_lock);
435 	}
436 
437 	return err;
438 }
439 
440 static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
441 {
442 	u32 len = xdp_get_buff_len(xdp);
443 	int err;
444 
445 	err = xsk_rcv_check(xs, xdp, len);
446 	if (err)
447 		return err;
448 
449 	if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) {
450 		len = xdp->data_end - xdp->data;
451 		return xsk_rcv_zc(xs, xdp, len);
452 	}
453 
454 	err = __xsk_rcv(xs, xdp, len);
455 	if (!err)
456 		xdp_return_buff(xdp);
457 	return err;
458 }
459 
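/* Entry point for XDP_REDIRECT into an xskmap. On success the socket is added
 * to the xskmap flush list (unless already queued) so that the Rx producer
 * update and readable wakeup in xsk_flush() are deferred to __xsk_map_flush()
 * instead of being performed per packet.
 */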
460 int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
461 {
462 	int err;
463 
464 	err = xsk_rcv(xs, xdp);
465 	if (err)
466 		return err;
467 
468 	if (!xs->flush_node.prev) {
469 		struct list_head *flush_list = bpf_net_ctx_get_xskmap_flush_list();
470 
471 		list_add(&xs->flush_node, flush_list);
472 	}
473 
474 	return 0;
475 }
476 
477 void __xsk_map_flush(struct list_head *flush_list)
478 {
479 	struct xdp_sock *xs, *tmp;
480 
481 	list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
482 		xsk_flush(xs);
483 		__list_del_clearprev(&xs->flush_node);
484 	}
485 }
486 
487 void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
488 {
489 	xskq_prod_submit_n(pool->cq, nb_entries);
490 }
491 EXPORT_SYMBOL(xsk_tx_completed);
492 
493 void xsk_tx_release(struct xsk_buff_pool *pool)
494 {
495 	struct xdp_sock *xs;
496 
497 	rcu_read_lock();
498 	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list)
499 		__xsk_tx_release(xs);
500 	rcu_read_unlock();
501 }
502 EXPORT_SYMBOL(xsk_tx_release);
503 
504 bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc)
505 {
506 	bool budget_exhausted = false;
507 	struct xdp_sock *xs;
508 
509 	rcu_read_lock();
510 again:
511 	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
512 		if (xs->tx_budget_spent >= MAX_PER_SOCKET_BUDGET) {
513 			budget_exhausted = true;
514 			continue;
515 		}
516 
517 		if (!xskq_cons_peek_desc(xs->tx, desc, pool)) {
518 			if (xskq_has_descs(xs->tx))
519 				xskq_cons_release(xs->tx);
520 			continue;
521 		}
522 
523 		xs->tx_budget_spent++;
524 
525 		/* This is the backpressure mechanism for the Tx path.
526 		 * Reserve space in the completion queue and only proceed
527 		 * if there is space in it. This avoids having to implement
528 		 * any buffering in the Tx path.
529 		 */
530 		if (xskq_prod_reserve_addr(pool->cq, desc->addr))
531 			goto out;
532 
533 		xskq_cons_release(xs->tx);
534 		rcu_read_unlock();
535 		return true;
536 	}
537 
538 	if (budget_exhausted) {
539 		list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list)
540 			xs->tx_budget_spent = 0;
541 
542 		budget_exhausted = false;
543 		goto again;
544 	}
545 
546 out:
547 	rcu_read_unlock();
548 	return false;
549 }
550 EXPORT_SYMBOL(xsk_tx_peek_desc);
551 
552 static u32 xsk_tx_peek_release_fallback(struct xsk_buff_pool *pool, u32 max_entries)
553 {
554 	struct xdp_desc *descs = pool->tx_descs;
555 	u32 nb_pkts = 0;
556 
557 	while (nb_pkts < max_entries && xsk_tx_peek_desc(pool, &descs[nb_pkts]))
558 		nb_pkts++;
559 
560 	xsk_tx_release(pool);
561 	return nb_pkts;
562 }
563 
564 u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 nb_pkts)
565 {
566 	struct xdp_sock *xs;
567 
568 	rcu_read_lock();
569 	if (!list_is_singular(&pool->xsk_tx_list)) {
570 		/* Fall back to the non-batched version */
571 		rcu_read_unlock();
572 		return xsk_tx_peek_release_fallback(pool, nb_pkts);
573 	}
574 
575 	xs = list_first_or_null_rcu(&pool->xsk_tx_list, struct xdp_sock, tx_list);
576 	if (!xs) {
577 		nb_pkts = 0;
578 		goto out;
579 	}
580 
581 	nb_pkts = xskq_cons_nb_entries(xs->tx, nb_pkts);
582 
583 	/* This is the backpressure mechanism for the Tx path. Try to
584 	 * reserve space in the completion queue for all packets, but
585 	 * if there are fewer slots available, just process that many
586 	 * packets. This avoids having to implement any buffering in
587 	 * the Tx path.
588 	 */
589 	nb_pkts = xskq_prod_nb_free(pool->cq, nb_pkts);
590 	if (!nb_pkts)
591 		goto out;
592 
593 	nb_pkts = xskq_cons_read_desc_batch(xs->tx, pool, nb_pkts);
594 	if (!nb_pkts) {
595 		xs->tx->queue_empty_descs++;
596 		goto out;
597 	}
598 
599 	__xskq_cons_release(xs->tx);
600 	xskq_prod_write_addr_batch(pool->cq, pool->tx_descs, nb_pkts);
601 	xs->sk.sk_write_space(&xs->sk);
602 
603 out:
604 	rcu_read_unlock();
605 	return nb_pkts;
606 }
607 EXPORT_SYMBOL(xsk_tx_peek_release_desc_batch);
608 
609 static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
610 {
611 	struct net_device *dev = xs->dev;
612 
613 	return dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
614 }
615 
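/* Reserve one completion ring slot before building an skb in copy mode. The
 * slot is filled in and submitted from the skb destructor once the skb is
 * freed after transmission (see xsk_destruct_skb()), or cancelled again via
 * xsk_cq_cancel_locked() if the skb is dropped.
 */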
616 static int xsk_cq_reserve_locked(struct xsk_buff_pool *pool)
617 {
618 	int ret;
619 
620 	spin_lock(&pool->cq->cq_cached_prod_lock);
621 	ret = xskq_prod_reserve(pool->cq);
622 	spin_unlock(&pool->cq->cq_cached_prod_lock);
623 
624 	return ret;
625 }
626 
627 static bool xsk_skb_destructor_is_addr(struct sk_buff *skb)
628 {
629 	return (uintptr_t)skb_shinfo(skb)->destructor_arg & 0x1UL;
630 }
631 
632 static u64 xsk_skb_destructor_get_addr(struct sk_buff *skb)
633 {
634 	return (u64)((uintptr_t)skb_shinfo(skb)->destructor_arg & ~0x1UL);
635 }
636 
637 static void xsk_skb_destructor_set_addr(struct sk_buff *skb, u64 addr)
638 {
639 	skb_shinfo(skb)->destructor_arg = (void *)((uintptr_t)addr | 0x1UL);
640 }
641 
642 static void xsk_inc_num_desc(struct sk_buff *skb)
643 {
644 	struct xsk_addrs *xsk_addr;
645 
646 	if (!xsk_skb_destructor_is_addr(skb)) {
647 		xsk_addr = (struct xsk_addrs *)skb_shinfo(skb)->destructor_arg;
648 		xsk_addr->num_descs++;
649 	}
650 }
651 
652 static u32 xsk_get_num_desc(struct sk_buff *skb)
653 {
654 	struct xsk_addrs *xsk_addr;
655 
656 	if (xsk_skb_destructor_is_addr(skb))
657 		return 1;
658 
659 	xsk_addr = (struct xsk_addrs *)skb_shinfo(skb)->destructor_arg;
660 
661 	return xsk_addr->num_descs;
662 }
663 
664 static void xsk_cq_submit_addr_locked(struct xsk_buff_pool *pool,
665 				      struct sk_buff *skb)
666 {
667 	u32 num_descs = xsk_get_num_desc(skb);
668 	struct xsk_addrs *xsk_addr;
669 	u32 descs_processed = 0;
670 	unsigned long flags;
671 	u32 idx, i;
672 
673 	spin_lock_irqsave(&pool->cq_prod_lock, flags);
674 	idx = xskq_get_prod(pool->cq);
675 
676 	if (unlikely(num_descs > 1)) {
677 		xsk_addr = (struct xsk_addrs *)skb_shinfo(skb)->destructor_arg;
678 
679 		for (i = 0; i < num_descs; i++) {
680 			xskq_prod_write_addr(pool->cq, idx + descs_processed,
681 					     xsk_addr->addrs[i]);
682 			descs_processed++;
683 		}
684 		kmem_cache_free(xsk_tx_generic_cache, xsk_addr);
685 	} else {
686 		xskq_prod_write_addr(pool->cq, idx,
687 				     xsk_skb_destructor_get_addr(skb));
688 		descs_processed++;
689 	}
690 	xskq_prod_submit_n(pool->cq, descs_processed);
691 	spin_unlock_irqrestore(&pool->cq_prod_lock, flags);
692 }
693 
694 static void xsk_cq_cancel_locked(struct xsk_buff_pool *pool, u32 n)
695 {
696 	spin_lock(&pool->cq->cq_cached_prod_lock);
697 	xskq_prod_cancel_n(pool->cq, n);
698 	spin_unlock(&pool->cq->cq_cached_prod_lock);
699 }
700 
701 INDIRECT_CALLABLE_SCOPE
702 void xsk_destruct_skb(struct sk_buff *skb)
703 {
704 	struct xsk_tx_metadata_compl *compl = &skb_shinfo(skb)->xsk_meta;
705 
706 	if (compl->tx_timestamp) {
707 		/* sw completion timestamp, not a real one */
708 		*compl->tx_timestamp = ktime_get_tai_fast_ns();
709 	}
710 
711 	xsk_cq_submit_addr_locked(xdp_sk(skb->sk)->pool, skb);
712 	sock_wfree(skb);
713 }
714 
715 static void xsk_skb_init_misc(struct sk_buff *skb, struct xdp_sock *xs,
716 			      u64 addr)
717 {
718 	skb->dev = xs->dev;
719 	skb->priority = READ_ONCE(xs->sk.sk_priority);
720 	skb->mark = READ_ONCE(xs->sk.sk_mark);
721 	skb->destructor = xsk_destruct_skb;
722 	xsk_skb_destructor_set_addr(skb, addr);
723 }
724 
725 static void xsk_consume_skb(struct sk_buff *skb)
726 {
727 	struct xdp_sock *xs = xdp_sk(skb->sk);
728 	u32 num_descs = xsk_get_num_desc(skb);
729 	struct xsk_addrs *xsk_addr;
730 
731 	if (unlikely(num_descs > 1)) {
732 		xsk_addr = (struct xsk_addrs *)skb_shinfo(skb)->destructor_arg;
733 		kmem_cache_free(xsk_tx_generic_cache, xsk_addr);
734 	}
735 
736 	skb->destructor = sock_wfree;
737 	xsk_cq_cancel_locked(xs->pool, num_descs);
738 	/* Free skb without triggering the perf drop trace */
739 	consume_skb(skb);
740 	xs->skb = NULL;
741 }
742 
743 static void xsk_drop_skb(struct sk_buff *skb)
744 {
745 	xdp_sk(skb->sk)->tx->invalid_descs += xsk_get_num_desc(skb);
746 	xsk_consume_skb(skb);
747 }
748 
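/* Apply the optional XDP_TX_METADATA request for this descriptor. The
 * metadata block lives in the umem, pool->tx_metadata_len bytes before the
 * packet data. It can request checksum offload (with a software fallback when
 * pool->tx_sw_csum is set) and a launch time; the completion part is carried
 * over to the skb via xsk_tx_metadata_to_compl().
 */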
749 static int xsk_skb_metadata(struct sk_buff *skb, void *buffer,
750 			    struct xdp_desc *desc, struct xsk_buff_pool *pool,
751 			    u32 hr)
752 {
753 	struct xsk_tx_metadata *meta = NULL;
754 
755 	if (unlikely(pool->tx_metadata_len == 0))
756 		return -EINVAL;
757 
758 	meta = buffer - pool->tx_metadata_len;
759 	if (unlikely(!xsk_buff_valid_tx_metadata(meta)))
760 		return -EINVAL;
761 
762 	if (meta->flags & XDP_TXMD_FLAGS_CHECKSUM) {
763 		if (unlikely(meta->request.csum_start +
764 			     meta->request.csum_offset +
765 			     sizeof(__sum16) > desc->len))
766 			return -EINVAL;
767 
768 		skb->csum_start = hr + meta->request.csum_start;
769 		skb->csum_offset = meta->request.csum_offset;
770 		skb->ip_summed = CHECKSUM_PARTIAL;
771 
772 		if (unlikely(pool->tx_sw_csum)) {
773 			int err;
774 
775 			err = skb_checksum_help(skb);
776 			if (err)
777 				return err;
778 		}
779 	}
780 
781 	if (meta->flags & XDP_TXMD_FLAGS_LAUNCH_TIME)
782 		skb->skb_mstamp_ns = meta->request.launch_time;
783 	xsk_tx_metadata_to_compl(meta, &skb_shinfo(skb)->xsk_meta);
784 
785 	return 0;
786 }
787 
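/* Used for devices with IFF_TX_SKB_NO_LINEAR: build an skb whose frags point
 * directly at the umem pages, so the descriptor payload is not copied. The
 * generic path in xsk_build_skb() copies the data into a linear skb (plus
 * allocated pages for any subsequent frags) instead.
 */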
788 static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs,
789 					      struct xdp_desc *desc)
790 {
791 	struct xsk_buff_pool *pool = xs->pool;
792 	u32 hr, len, ts, offset, copy, copied;
793 	struct sk_buff *skb = xs->skb;
794 	struct page *page;
795 	void *buffer;
796 	int err, i;
797 	u64 addr;
798 
799 	addr = desc->addr;
800 	buffer = xsk_buff_raw_get_data(pool, addr);
801 
802 	if (!skb) {
803 		hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(xs->dev->needed_headroom));
804 
805 		skb = sock_alloc_send_skb(&xs->sk, hr, 1, &err);
806 		if (unlikely(!skb))
807 			return ERR_PTR(err);
808 
809 		skb_reserve(skb, hr);
810 
811 		xsk_skb_init_misc(skb, xs, desc->addr);
812 		if (desc->options & XDP_TX_METADATA) {
813 			err = xsk_skb_metadata(skb, buffer, desc, pool, hr);
814 			if (unlikely(err))
815 				return ERR_PTR(err);
816 		}
817 	} else {
818 		struct xsk_addrs *xsk_addr;
819 
820 		if (xsk_skb_destructor_is_addr(skb)) {
821 			xsk_addr = kmem_cache_zalloc(xsk_tx_generic_cache,
822 						     GFP_KERNEL);
823 			if (!xsk_addr)
824 				return ERR_PTR(-ENOMEM);
825 
826 			xsk_addr->num_descs = 1;
827 			xsk_addr->addrs[0] = xsk_skb_destructor_get_addr(skb);
828 			skb_shinfo(skb)->destructor_arg = (void *)xsk_addr;
829 		} else {
830 			xsk_addr = (struct xsk_addrs *)skb_shinfo(skb)->destructor_arg;
831 		}
832 
833 		/* In case of the -EOVERFLOW that can happen below,
834 		 * xsk_consume_skb() will free this xsk_addrs struct as the
835 		 * whole skb is dropped, which releases all addresses stored in it
836 		 */
837 		xsk_addr->addrs[xsk_addr->num_descs] = desc->addr;
838 	}
839 
840 	len = desc->len;
841 	ts = pool->unaligned ? len : pool->chunk_size;
842 
843 	offset = offset_in_page(buffer);
844 	addr = buffer - pool->addrs;
845 
846 	for (copied = 0, i = skb_shinfo(skb)->nr_frags; copied < len; i++) {
847 		if (unlikely(i >= MAX_SKB_FRAGS))
848 			return ERR_PTR(-EOVERFLOW);
849 
850 		page = pool->umem->pgs[addr >> PAGE_SHIFT];
851 		get_page(page);
852 
853 		copy = min_t(u32, PAGE_SIZE - offset, len - copied);
854 		skb_fill_page_desc(skb, i, page, offset, copy);
855 
856 		copied += copy;
857 		addr += copy;
858 		offset = 0;
859 	}
860 
861 	skb->len += len;
862 	skb->data_len += len;
863 	skb->truesize += ts;
864 
865 	refcount_add(ts, &xs->sk.sk_wmem_alloc);
866 
867 	return skb;
868 }
869 
870 static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
871 				     struct xdp_desc *desc)
872 {
873 	struct net_device *dev = xs->dev;
874 	struct sk_buff *skb = xs->skb;
875 	int err;
876 
877 	if (dev->priv_flags & IFF_TX_SKB_NO_LINEAR) {
878 		skb = xsk_build_skb_zerocopy(xs, desc);
879 		if (IS_ERR(skb)) {
880 			err = PTR_ERR(skb);
881 			skb = NULL;
882 			goto free_err;
883 		}
884 	} else {
885 		u32 hr, tr, len;
886 		void *buffer;
887 
888 		buffer = xsk_buff_raw_get_data(xs->pool, desc->addr);
889 		len = desc->len;
890 
891 		if (!skb) {
892 			hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(dev->needed_headroom));
893 			tr = dev->needed_tailroom;
894 			skb = sock_alloc_send_skb(&xs->sk, hr + len + tr, 1, &err);
895 			if (unlikely(!skb))
896 				goto free_err;
897 
898 			skb_reserve(skb, hr);
899 			skb_put(skb, len);
900 
901 			err = skb_store_bits(skb, 0, buffer, len);
902 			if (unlikely(err))
903 				goto free_err;
904 
905 			xsk_skb_init_misc(skb, xs, desc->addr);
906 			if (desc->options & XDP_TX_METADATA) {
907 				err = xsk_skb_metadata(skb, buffer, desc,
908 						       xs->pool, hr);
909 				if (unlikely(err))
910 					goto free_err;
911 			}
912 		} else {
913 			int nr_frags = skb_shinfo(skb)->nr_frags;
914 			struct xsk_addrs *xsk_addr;
915 			struct page *page;
916 			u8 *vaddr;
917 
918 			if (xsk_skb_destructor_is_addr(skb)) {
919 				xsk_addr = kmem_cache_zalloc(xsk_tx_generic_cache,
920 							     GFP_KERNEL);
921 				if (!xsk_addr) {
922 					err = -ENOMEM;
923 					goto free_err;
924 				}
925 
926 				xsk_addr->num_descs = 1;
927 				xsk_addr->addrs[0] = xsk_skb_destructor_get_addr(skb);
928 				skb_shinfo(skb)->destructor_arg = (void *)xsk_addr;
929 			} else {
930 				xsk_addr = (struct xsk_addrs *)skb_shinfo(skb)->destructor_arg;
931 			}
932 
933 			if (unlikely(nr_frags == (MAX_SKB_FRAGS - 1) && xp_mb_desc(desc))) {
934 				err = -EOVERFLOW;
935 				goto free_err;
936 			}
937 
938 			page = alloc_page(xs->sk.sk_allocation);
939 			if (unlikely(!page)) {
940 				err = -EAGAIN;
941 				goto free_err;
942 			}
943 
944 			vaddr = kmap_local_page(page);
945 			memcpy(vaddr, buffer, len);
946 			kunmap_local(vaddr);
947 
948 			skb_add_rx_frag(skb, nr_frags, page, 0, len, PAGE_SIZE);
949 			refcount_add(PAGE_SIZE, &xs->sk.sk_wmem_alloc);
950 
951 			xsk_addr->addrs[xsk_addr->num_descs] = desc->addr;
952 		}
953 	}
954 
955 	xsk_inc_num_desc(skb);
956 
957 	return skb;
958 
959 free_err:
960 	if (skb && !skb_shinfo(skb)->nr_frags)
961 		kfree_skb(skb);
962 
963 	if (err == -EOVERFLOW) {
964 		/* Drop the packet */
965 		xsk_inc_num_desc(xs->skb);
966 		xsk_drop_skb(xs->skb);
967 		xskq_cons_release(xs->tx);
968 	} else {
969 		/* Let application retry */
970 		xsk_cq_cancel_locked(xs->pool, 1);
971 	}
972 
973 	return ERR_PTR(err);
974 }
975 
976 static int __xsk_generic_xmit(struct sock *sk)
977 {
978 	struct xdp_sock *xs = xdp_sk(sk);
979 	bool sent_frame = false;
980 	struct xdp_desc desc;
981 	struct sk_buff *skb;
982 	u32 max_batch;
983 	int err = 0;
984 
985 	mutex_lock(&xs->mutex);
986 
987 	/* Since we dropped the RCU read lock, the socket state might have changed. */
988 	if (unlikely(!xsk_is_bound(xs))) {
989 		err = -ENXIO;
990 		goto out;
991 	}
992 
993 	if (xs->queue_id >= xs->dev->real_num_tx_queues)
994 		goto out;
995 
996 	max_batch = READ_ONCE(xs->max_tx_budget);
997 	while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
998 		if (max_batch-- == 0) {
999 			err = -EAGAIN;
1000 			goto out;
1001 		}
1002 
1003 		/* This is the backpressure mechanism for the Tx path.
1004 		 * Reserve space in the completion queue and only proceed
1005 		 * if there is space in it. This avoids having to implement
1006 		 * any buffering in the Tx path.
1007 		 */
1008 		err = xsk_cq_reserve_locked(xs->pool);
1009 		if (err) {
1010 			err = -EAGAIN;
1011 			goto out;
1012 		}
1013 
1014 		skb = xsk_build_skb(xs, &desc);
1015 		if (IS_ERR(skb)) {
1016 			err = PTR_ERR(skb);
1017 			if (err != -EOVERFLOW)
1018 				goto out;
1019 			err = 0;
1020 			continue;
1021 		}
1022 
1023 		xskq_cons_release(xs->tx);
1024 
1025 		if (xp_mb_desc(&desc)) {
1026 			xs->skb = skb;
1027 			continue;
1028 		}
1029 
1030 		err = __dev_direct_xmit(skb, xs->queue_id);
1031 		if (err == NETDEV_TX_BUSY) {
1032 			/* Tell user-space to retry the send */
1033 			xskq_cons_cancel_n(xs->tx, xsk_get_num_desc(skb));
1034 			xsk_consume_skb(skb);
1035 			err = -EAGAIN;
1036 			goto out;
1037 		}
1038 
1039 		/* Ignore NET_XMIT_CN as packet might have been sent */
1040 		if (err == NET_XMIT_DROP) {
1041 			/* SKB completed but not sent */
1042 			err = -EBUSY;
1043 			xs->skb = NULL;
1044 			goto out;
1045 		}
1046 
1047 		sent_frame = true;
1048 		xs->skb = NULL;
1049 	}
1050 
1051 	if (xskq_has_descs(xs->tx)) {
1052 		if (xs->skb)
1053 			xsk_drop_skb(xs->skb);
1054 		xskq_cons_release(xs->tx);
1055 	}
1056 
1057 out:
1058 	if (sent_frame)
1059 		__xsk_tx_release(xs);
1060 
1061 	mutex_unlock(&xs->mutex);
1062 	return err;
1063 }
1064 
1065 static int xsk_generic_xmit(struct sock *sk)
1066 {
1067 	int ret;
1068 
1069 	/* Drop the RCU lock since the SKB path might sleep. */
1070 	rcu_read_unlock();
1071 	ret = __xsk_generic_xmit(sk);
1072 	/* Reacquire RCU lock before going into common code. */
1073 	rcu_read_lock();
1074 
1075 	return ret;
1076 }
1077 
1078 static bool xsk_no_wakeup(struct sock *sk)
1079 {
1080 #ifdef CONFIG_NET_RX_BUSY_POLL
1081 	/* Prefer busy-polling, skip the wakeup. */
1082 	return READ_ONCE(sk->sk_prefer_busy_poll) && READ_ONCE(sk->sk_ll_usec) &&
1083 		napi_id_valid(READ_ONCE(sk->sk_napi_id));
1084 #else
1085 	return false;
1086 #endif
1087 }
1088 
1089 static int xsk_check_common(struct xdp_sock *xs)
1090 {
1091 	if (unlikely(!xsk_is_bound(xs)))
1092 		return -ENXIO;
1093 	if (unlikely(!(xs->dev->flags & IFF_UP)))
1094 		return -ENETDOWN;
1095 
1096 	return 0;
1097 }
1098 
1099 static int __xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
1100 {
1101 	bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
1102 	struct sock *sk = sock->sk;
1103 	struct xdp_sock *xs = xdp_sk(sk);
1104 	struct xsk_buff_pool *pool;
1105 	int err;
1106 
1107 	err = xsk_check_common(xs);
1108 	if (err)
1109 		return err;
1110 	if (unlikely(need_wait))
1111 		return -EOPNOTSUPP;
1112 	if (unlikely(!xs->tx))
1113 		return -ENOBUFS;
1114 
1115 	if (sk_can_busy_loop(sk))
1116 		sk_busy_loop(sk, 1); /* only support non-blocking sockets */
1117 
1118 	if (xs->zc && xsk_no_wakeup(sk))
1119 		return 0;
1120 
1121 	pool = xs->pool;
1122 	if (pool->cached_need_wakeup & XDP_WAKEUP_TX) {
1123 		if (xs->zc)
1124 			return xsk_wakeup(xs, XDP_WAKEUP_TX);
1125 		return xsk_generic_xmit(sk);
1126 	}
1127 	return 0;
1128 }
1129 
1130 static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
1131 {
1132 	int ret;
1133 
1134 	rcu_read_lock();
1135 	ret = __xsk_sendmsg(sock, m, total_len);
1136 	rcu_read_unlock();
1137 
1138 	return ret;
1139 }
1140 
1141 static int __xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
1142 {
1143 	bool need_wait = !(flags & MSG_DONTWAIT);
1144 	struct sock *sk = sock->sk;
1145 	struct xdp_sock *xs = xdp_sk(sk);
1146 	int err;
1147 
1148 	err = xsk_check_common(xs);
1149 	if (err)
1150 		return err;
1151 	if (unlikely(!xs->rx))
1152 		return -ENOBUFS;
1153 	if (unlikely(need_wait))
1154 		return -EOPNOTSUPP;
1155 
1156 	if (sk_can_busy_loop(sk))
1157 		sk_busy_loop(sk, 1); /* only support non-blocking sockets */
1158 
1159 	if (xsk_no_wakeup(sk))
1160 		return 0;
1161 
1162 	if (xs->pool->cached_need_wakeup & XDP_WAKEUP_RX && xs->zc)
1163 		return xsk_wakeup(xs, XDP_WAKEUP_RX);
1164 	return 0;
1165 }
1166 
1167 static int xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
1168 {
1169 	int ret;
1170 
1171 	rcu_read_lock();
1172 	ret = __xsk_recvmsg(sock, m, len, flags);
1173 	rcu_read_unlock();
1174 
1175 	return ret;
1176 }
1177 
1178 static __poll_t xsk_poll(struct file *file, struct socket *sock,
1179 			     struct poll_table_struct *wait)
1180 {
1181 	__poll_t mask = 0;
1182 	struct sock *sk = sock->sk;
1183 	struct xdp_sock *xs = xdp_sk(sk);
1184 	struct xsk_buff_pool *pool;
1185 
1186 	sock_poll_wait(file, sock, wait);
1187 
1188 	rcu_read_lock();
1189 	if (xsk_check_common(xs))
1190 		goto out;
1191 
1192 	pool = xs->pool;
1193 
1194 	if (pool->cached_need_wakeup) {
1195 		if (xs->zc)
1196 			xsk_wakeup(xs, pool->cached_need_wakeup);
1197 		else if (xs->tx)
1198 			/* Poll also needs to drive Tx in copy mode */
1199 			xsk_generic_xmit(sk);
1200 	}
1201 
1202 	if (xs->rx && !xskq_prod_is_empty(xs->rx))
1203 		mask |= EPOLLIN | EPOLLRDNORM;
1204 	if (xs->tx && xsk_tx_writeable(xs))
1205 		mask |= EPOLLOUT | EPOLLWRNORM;
1206 out:
1207 	rcu_read_unlock();
1208 	return mask;
1209 }
1210 
1211 static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
1212 			  bool umem_queue)
1213 {
1214 	struct xsk_queue *q;
1215 
1216 	if (entries == 0 || *queue || !is_power_of_2(entries))
1217 		return -EINVAL;
1218 
1219 	q = xskq_create(entries, umem_queue);
1220 	if (!q)
1221 		return -ENOMEM;
1222 
1223 	/* Make sure queue is ready before it can be seen by others */
1224 	smp_wmb();
1225 	WRITE_ONCE(*queue, q);
1226 	return 0;
1227 }
1228 
1229 static void xsk_unbind_dev(struct xdp_sock *xs)
1230 {
1231 	struct net_device *dev = xs->dev;
1232 
1233 	if (xs->state != XSK_BOUND)
1234 		return;
1235 	WRITE_ONCE(xs->state, XSK_UNBOUND);
1236 
1237 	/* Wait for driver to stop using the xdp socket. */
1238 	xp_del_xsk(xs->pool, xs);
1239 	synchronize_net();
1240 	dev_put(dev);
1241 }
1242 
1243 static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs,
1244 					      struct xdp_sock __rcu ***map_entry)
1245 {
1246 	struct xsk_map *map = NULL;
1247 	struct xsk_map_node *node;
1248 
1249 	*map_entry = NULL;
1250 
1251 	spin_lock_bh(&xs->map_list_lock);
1252 	node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node,
1253 					node);
1254 	if (node) {
1255 		bpf_map_inc(&node->map->map);
1256 		map = node->map;
1257 		*map_entry = node->map_entry;
1258 	}
1259 	spin_unlock_bh(&xs->map_list_lock);
1260 	return map;
1261 }
1262 
1263 static void xsk_delete_from_maps(struct xdp_sock *xs)
1264 {
1265 	/* This function removes the current XDP socket from all the
1266 	 * maps it resides in. We need to take extra care here, due to
1267 	 * the two locks involved. Each map has a lock synchronizing
1268 	 * updates to the entries, and each socket has a lock that
1269 	 * synchronizes access to the list of maps (map_list). For
1270 	 * deadlock avoidance the locks need to be taken in the order
1271 	 * "map lock"->"socket map list lock". We start off by
1272 	 * accessing the socket map list, and take a reference to the
1273 	 * map to guarantee existence between the
1274 	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete()
1275 	 * calls. Then we ask the map to remove the socket, which
1276 	 * tries to remove the socket from the map. Note that there
1277 	 * might be updates to the map between
1278 	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete().
1279 	 */
1280 	struct xdp_sock __rcu **map_entry = NULL;
1281 	struct xsk_map *map;
1282 
1283 	while ((map = xsk_get_map_list_entry(xs, &map_entry))) {
1284 		xsk_map_try_sock_delete(map, xs, map_entry);
1285 		bpf_map_put(&map->map);
1286 	}
1287 }
1288 
1289 static int xsk_release(struct socket *sock)
1290 {
1291 	struct sock *sk = sock->sk;
1292 	struct xdp_sock *xs = xdp_sk(sk);
1293 	struct net *net;
1294 
1295 	if (!sk)
1296 		return 0;
1297 
1298 	net = sock_net(sk);
1299 
1300 	if (xs->skb)
1301 		xsk_drop_skb(xs->skb);
1302 
1303 	mutex_lock(&net->xdp.lock);
1304 	sk_del_node_init_rcu(sk);
1305 	mutex_unlock(&net->xdp.lock);
1306 
1307 	sock_prot_inuse_add(net, sk->sk_prot, -1);
1308 
1309 	xsk_delete_from_maps(xs);
1310 	mutex_lock(&xs->mutex);
1311 	xsk_unbind_dev(xs);
1312 	mutex_unlock(&xs->mutex);
1313 
1314 	xskq_destroy(xs->rx);
1315 	xskq_destroy(xs->tx);
1316 	xskq_destroy(xs->fq_tmp);
1317 	xskq_destroy(xs->cq_tmp);
1318 
1319 	sock_orphan(sk);
1320 	sock->sk = NULL;
1321 
1322 	sock_put(sk);
1323 
1324 	return 0;
1325 }
1326 
1327 static struct socket *xsk_lookup_xsk_from_fd(int fd)
1328 {
1329 	struct socket *sock;
1330 	int err;
1331 
1332 	sock = sockfd_lookup(fd, &err);
1333 	if (!sock)
1334 		return ERR_PTR(-ENOTSOCK);
1335 
1336 	if (sock->sk->sk_family != PF_XDP) {
1337 		sockfd_put(sock);
1338 		return ERR_PTR(-ENOPROTOOPT);
1339 	}
1340 
1341 	return sock;
1342 }
1343 
1344 static bool xsk_validate_queues(struct xdp_sock *xs)
1345 {
1346 	return xs->fq_tmp && xs->cq_tmp;
1347 }
1348 
1349 static int xsk_bind(struct socket *sock, struct sockaddr_unsized *addr, int addr_len)
1350 {
1351 	struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
1352 	struct sock *sk = sock->sk;
1353 	struct xdp_sock *xs = xdp_sk(sk);
1354 	struct net_device *dev;
1355 	int bound_dev_if;
1356 	u32 flags, qid;
1357 	int err = 0;
1358 
1359 	if (addr_len < sizeof(struct sockaddr_xdp))
1360 		return -EINVAL;
1361 	if (sxdp->sxdp_family != AF_XDP)
1362 		return -EINVAL;
1363 
1364 	flags = sxdp->sxdp_flags;
1365 	if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY |
1366 		      XDP_USE_NEED_WAKEUP | XDP_USE_SG))
1367 		return -EINVAL;
1368 
1369 	bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
1370 	if (bound_dev_if && bound_dev_if != sxdp->sxdp_ifindex)
1371 		return -EINVAL;
1372 
1373 	rtnl_lock();
1374 	mutex_lock(&xs->mutex);
1375 	if (xs->state != XSK_READY) {
1376 		err = -EBUSY;
1377 		goto out_release;
1378 	}
1379 
1380 	dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
1381 	if (!dev) {
1382 		err = -ENODEV;
1383 		goto out_release;
1384 	}
1385 
1386 	netdev_lock_ops(dev);
1387 
1388 	if (!xs->rx && !xs->tx) {
1389 		err = -EINVAL;
1390 		goto out_unlock;
1391 	}
1392 
1393 	qid = sxdp->sxdp_queue_id;
1394 
1395 	if (flags & XDP_SHARED_UMEM) {
1396 		struct xdp_sock *umem_xs;
1397 		struct socket *sock;
1398 
1399 		if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY) ||
1400 		    (flags & XDP_USE_NEED_WAKEUP) || (flags & XDP_USE_SG)) {
1401 			/* Cannot specify flags for shared sockets. */
1402 			err = -EINVAL;
1403 			goto out_unlock;
1404 		}
1405 
1406 		if (xs->umem) {
1407 			/* We already have our own. */
1408 			err = -EINVAL;
1409 			goto out_unlock;
1410 		}
1411 
1412 		sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
1413 		if (IS_ERR(sock)) {
1414 			err = PTR_ERR(sock);
1415 			goto out_unlock;
1416 		}
1417 
1418 		umem_xs = xdp_sk(sock->sk);
1419 		if (!xsk_is_bound(umem_xs)) {
1420 			err = -EBADF;
1421 			sockfd_put(sock);
1422 			goto out_unlock;
1423 		}
1424 
1425 		if (umem_xs->queue_id != qid || umem_xs->dev != dev) {
1426 			/* One fill and completion ring required for each queue id. */
1427 			if (!xsk_validate_queues(xs)) {
1428 				err = -EINVAL;
1429 				sockfd_put(sock);
1430 				goto out_unlock;
1431 			}
1432 
1433 			/* Share the umem with another socket on another qid
1434 			 * and/or device.
1435 			 */
1436 			xs->pool = xp_create_and_assign_umem(xs,
1437 							     umem_xs->umem);
1438 			if (!xs->pool) {
1439 				err = -ENOMEM;
1440 				sockfd_put(sock);
1441 				goto out_unlock;
1442 			}
1443 
1444 			err = xp_assign_dev_shared(xs->pool, umem_xs, dev,
1445 						   qid);
1446 			if (err) {
1447 				xp_destroy(xs->pool);
1448 				xs->pool = NULL;
1449 				sockfd_put(sock);
1450 				goto out_unlock;
1451 			}
1452 		} else {
1453 			/* Share the buffer pool with the other socket. */
1454 			if (xs->fq_tmp || xs->cq_tmp) {
1455 				/* Do not allow setting your own fq or cq. */
1456 				err = -EINVAL;
1457 				sockfd_put(sock);
1458 				goto out_unlock;
1459 			}
1460 
1461 			xp_get_pool(umem_xs->pool);
1462 			xs->pool = umem_xs->pool;
1463 
1464 			/* If the underlying shared umem was created without a Tx
1465 			 * ring, allocate the Tx descriptor array that the Tx
1466 			 * batching API uses
1467 			 */
1468 			if (xs->tx && !xs->pool->tx_descs) {
1469 				err = xp_alloc_tx_descs(xs->pool, xs);
1470 				if (err) {
1471 					xp_put_pool(xs->pool);
1472 					xs->pool = NULL;
1473 					sockfd_put(sock);
1474 					goto out_unlock;
1475 				}
1476 			}
1477 		}
1478 
1479 		xdp_get_umem(umem_xs->umem);
1480 		WRITE_ONCE(xs->umem, umem_xs->umem);
1481 		sockfd_put(sock);
1482 	} else if (!xs->umem || !xsk_validate_queues(xs)) {
1483 		err = -EINVAL;
1484 		goto out_unlock;
1485 	} else {
1486 		/* This xsk has its own umem. */
1487 		xs->pool = xp_create_and_assign_umem(xs, xs->umem);
1488 		if (!xs->pool) {
1489 			err = -ENOMEM;
1490 			goto out_unlock;
1491 		}
1492 
1493 		err = xp_assign_dev(xs->pool, dev, qid, flags);
1494 		if (err) {
1495 			xp_destroy(xs->pool);
1496 			xs->pool = NULL;
1497 			goto out_unlock;
1498 		}
1499 	}
1500 
1501 	/* FQ and CQ are now owned by the buffer pool and cleaned up with it. */
1502 	xs->fq_tmp = NULL;
1503 	xs->cq_tmp = NULL;
1504 
1505 	xs->dev = dev;
1506 	xs->zc = xs->umem->zc;
1507 	xs->sg = !!(xs->umem->flags & XDP_UMEM_SG_FLAG);
1508 	xs->queue_id = qid;
1509 	xp_add_xsk(xs->pool, xs);
1510 
1511 	if (qid < dev->real_num_rx_queues) {
1512 		struct netdev_rx_queue *rxq;
1513 
1514 		rxq = __netif_get_rx_queue(dev, qid);
1515 		if (rxq->napi)
1516 			__sk_mark_napi_id_once(sk, rxq->napi->napi_id);
1517 	}
1518 
1519 out_unlock:
1520 	if (err) {
1521 		dev_put(dev);
1522 	} else {
1523 		/* Matches smp_rmb() in bind() for shared umem
1524 		 * sockets, and xsk_is_bound().
1525 		 */
1526 		smp_wmb();
1527 		WRITE_ONCE(xs->state, XSK_BOUND);
1528 	}
1529 	netdev_unlock_ops(dev);
1530 out_release:
1531 	mutex_unlock(&xs->mutex);
1532 	rtnl_unlock();
1533 	return err;
1534 }
1535 
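/* Older layout of struct xdp_umem_reg, without the later-added trailing
 * fields (the current struct ends with tx_metadata_len). XDP_UMEM_REG still
 * accepts setsockopt() calls of this smaller size for backwards
 * compatibility; see the mr_size handling in xsk_setsockopt().
 */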
1536 struct xdp_umem_reg_v1 {
1537 	__u64 addr; /* Start of packet data area */
1538 	__u64 len; /* Length of packet data area */
1539 	__u32 chunk_size;
1540 	__u32 headroom;
1541 };
1542 
1543 static int xsk_setsockopt(struct socket *sock, int level, int optname,
1544 			  sockptr_t optval, unsigned int optlen)
1545 {
1546 	struct sock *sk = sock->sk;
1547 	struct xdp_sock *xs = xdp_sk(sk);
1548 	int err;
1549 
1550 	if (level != SOL_XDP)
1551 		return -ENOPROTOOPT;
1552 
1553 	switch (optname) {
1554 	case XDP_RX_RING:
1555 	case XDP_TX_RING:
1556 	{
1557 		struct xsk_queue **q;
1558 		int entries;
1559 
1560 		if (optlen < sizeof(entries))
1561 			return -EINVAL;
1562 		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
1563 			return -EFAULT;
1564 
1565 		mutex_lock(&xs->mutex);
1566 		if (xs->state != XSK_READY) {
1567 			mutex_unlock(&xs->mutex);
1568 			return -EBUSY;
1569 		}
1570 		q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
1571 		err = xsk_init_queue(entries, q, false);
1572 		if (!err && optname == XDP_TX_RING)
1573 			/* Tx needs to be explicitly woken up the first time */
1574 			xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
1575 		mutex_unlock(&xs->mutex);
1576 		return err;
1577 	}
1578 	case XDP_UMEM_REG:
1579 	{
1580 		size_t mr_size = sizeof(struct xdp_umem_reg);
1581 		struct xdp_umem_reg mr = {};
1582 		struct xdp_umem *umem;
1583 
1584 		if (optlen < sizeof(struct xdp_umem_reg_v1))
1585 			return -EINVAL;
1586 		else if (optlen < sizeof(mr))
1587 			mr_size = sizeof(struct xdp_umem_reg_v1);
1588 
1589 		BUILD_BUG_ON(sizeof(struct xdp_umem_reg_v1) >= sizeof(struct xdp_umem_reg));
1590 
1591 		/* Make sure the last field of the struct doesn't have
1592 		 * uninitialized padding. All padding has to be explicit
1593 		 * and has to be set to zero by userspace to make
1594 		 * struct xdp_umem_reg extensible in the future.
1595 		 */
1596 		BUILD_BUG_ON(offsetof(struct xdp_umem_reg, tx_metadata_len) +
1597 			     sizeof_field(struct xdp_umem_reg, tx_metadata_len) !=
1598 			     sizeof(struct xdp_umem_reg));
1599 
1600 		if (copy_from_sockptr(&mr, optval, mr_size))
1601 			return -EFAULT;
1602 
1603 		mutex_lock(&xs->mutex);
1604 		if (xs->state != XSK_READY || xs->umem) {
1605 			mutex_unlock(&xs->mutex);
1606 			return -EBUSY;
1607 		}
1608 
1609 		umem = xdp_umem_create(&mr);
1610 		if (IS_ERR(umem)) {
1611 			mutex_unlock(&xs->mutex);
1612 			return PTR_ERR(umem);
1613 		}
1614 
1615 		/* Make sure umem is ready before it can be seen by others */
1616 		smp_wmb();
1617 		WRITE_ONCE(xs->umem, umem);
1618 		mutex_unlock(&xs->mutex);
1619 		return 0;
1620 	}
1621 	case XDP_UMEM_FILL_RING:
1622 	case XDP_UMEM_COMPLETION_RING:
1623 	{
1624 		struct xsk_queue **q;
1625 		int entries;
1626 
1627 		if (optlen < sizeof(entries))
1628 			return -EINVAL;
1629 		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
1630 			return -EFAULT;
1631 
1632 		mutex_lock(&xs->mutex);
1633 		if (xs->state != XSK_READY) {
1634 			mutex_unlock(&xs->mutex);
1635 			return -EBUSY;
1636 		}
1637 
1638 		q = (optname == XDP_UMEM_FILL_RING) ? &xs->fq_tmp :
1639 			&xs->cq_tmp;
1640 		err = xsk_init_queue(entries, q, true);
1641 		mutex_unlock(&xs->mutex);
1642 		return err;
1643 	}
1644 	case XDP_MAX_TX_SKB_BUDGET:
1645 	{
1646 		unsigned int budget;
1647 
1648 		if (optlen != sizeof(budget))
1649 			return -EINVAL;
1650 		if (copy_from_sockptr(&budget, optval, sizeof(budget)))
1651 			return -EFAULT;
1652 		if (!xs->tx ||
1653 		    budget < TX_BATCH_SIZE || budget > xs->tx->nentries)
1654 			return -EACCES;
1655 
1656 		WRITE_ONCE(xs->max_tx_budget, budget);
1657 		return 0;
1658 	}
1659 	default:
1660 		break;
1661 	}
1662 
1663 	return -ENOPROTOOPT;
1664 }
1665 
1666 static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring)
1667 {
1668 	ring->producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
1669 	ring->consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
1670 	ring->desc = offsetof(struct xdp_rxtx_ring, desc);
1671 }
1672 
1673 static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
1674 {
1675 	ring->producer = offsetof(struct xdp_umem_ring, ptrs.producer);
1676 	ring->consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
1677 	ring->desc = offsetof(struct xdp_umem_ring, desc);
1678 }
1679 
1680 struct xdp_statistics_v1 {
1681 	__u64 rx_dropped;
1682 	__u64 rx_invalid_descs;
1683 	__u64 tx_invalid_descs;
1684 };
1685 
1686 static int xsk_getsockopt(struct socket *sock, int level, int optname,
1687 			  char __user *optval, int __user *optlen)
1688 {
1689 	struct sock *sk = sock->sk;
1690 	struct xdp_sock *xs = xdp_sk(sk);
1691 	int len;
1692 
1693 	if (level != SOL_XDP)
1694 		return -ENOPROTOOPT;
1695 
1696 	if (get_user(len, optlen))
1697 		return -EFAULT;
1698 	if (len < 0)
1699 		return -EINVAL;
1700 
1701 	switch (optname) {
1702 	case XDP_STATISTICS:
1703 	{
1704 		struct xdp_statistics stats = {};
1705 		bool extra_stats = true;
1706 		size_t stats_size;
1707 
1708 		if (len < sizeof(struct xdp_statistics_v1)) {
1709 			return -EINVAL;
1710 		} else if (len < sizeof(stats)) {
1711 			extra_stats = false;
1712 			stats_size = sizeof(struct xdp_statistics_v1);
1713 		} else {
1714 			stats_size = sizeof(stats);
1715 		}
1716 
1717 		mutex_lock(&xs->mutex);
1718 		stats.rx_dropped = xs->rx_dropped;
1719 		if (extra_stats) {
1720 			stats.rx_ring_full = xs->rx_queue_full;
1721 			stats.rx_fill_ring_empty_descs =
1722 				xs->pool ? xskq_nb_queue_empty_descs(xs->pool->fq) : 0;
1723 			stats.tx_ring_empty_descs = xskq_nb_queue_empty_descs(xs->tx);
1724 		} else {
1725 			stats.rx_dropped += xs->rx_queue_full;
1726 		}
1727 		stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
1728 		stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
1729 		mutex_unlock(&xs->mutex);
1730 
1731 		if (copy_to_user(optval, &stats, stats_size))
1732 			return -EFAULT;
1733 		if (put_user(stats_size, optlen))
1734 			return -EFAULT;
1735 
1736 		return 0;
1737 	}
1738 	case XDP_MMAP_OFFSETS:
1739 	{
1740 		struct xdp_mmap_offsets off;
1741 		struct xdp_mmap_offsets_v1 off_v1;
1742 		bool flags_supported = true;
1743 		void *to_copy;
1744 
1745 		if (len < sizeof(off_v1))
1746 			return -EINVAL;
1747 		else if (len < sizeof(off))
1748 			flags_supported = false;
1749 
1750 		if (flags_supported) {
1751 			/* xdp_ring_offset is identical to xdp_ring_offset_v1
1752 			 * except for the flags field added to the end.
1753 			 */
1754 			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
1755 					       &off.rx);
1756 			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
1757 					       &off.tx);
1758 			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
1759 					       &off.fr);
1760 			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
1761 					       &off.cr);
1762 			off.rx.flags = offsetof(struct xdp_rxtx_ring,
1763 						ptrs.flags);
1764 			off.tx.flags = offsetof(struct xdp_rxtx_ring,
1765 						ptrs.flags);
1766 			off.fr.flags = offsetof(struct xdp_umem_ring,
1767 						ptrs.flags);
1768 			off.cr.flags = offsetof(struct xdp_umem_ring,
1769 						ptrs.flags);
1770 
1771 			len = sizeof(off);
1772 			to_copy = &off;
1773 		} else {
1774 			xsk_enter_rxtx_offsets(&off_v1.rx);
1775 			xsk_enter_rxtx_offsets(&off_v1.tx);
1776 			xsk_enter_umem_offsets(&off_v1.fr);
1777 			xsk_enter_umem_offsets(&off_v1.cr);
1778 
1779 			len = sizeof(off_v1);
1780 			to_copy = &off_v1;
1781 		}
1782 
1783 		if (copy_to_user(optval, to_copy, len))
1784 			return -EFAULT;
1785 		if (put_user(len, optlen))
1786 			return -EFAULT;
1787 
1788 		return 0;
1789 	}
1790 	case XDP_OPTIONS:
1791 	{
1792 		struct xdp_options opts = {};
1793 
1794 		if (len < sizeof(opts))
1795 			return -EINVAL;
1796 
1797 		mutex_lock(&xs->mutex);
1798 		if (xs->zc)
1799 			opts.flags |= XDP_OPTIONS_ZEROCOPY;
1800 		mutex_unlock(&xs->mutex);
1801 
1802 		len = sizeof(opts);
1803 		if (copy_to_user(optval, &opts, len))
1804 			return -EFAULT;
1805 		if (put_user(len, optlen))
1806 			return -EFAULT;
1807 
1808 		return 0;
1809 	}
1810 	default:
1811 		break;
1812 	}
1813 
1814 	return -EOPNOTSUPP;
1815 }
1816 
1817 static int xsk_mmap(struct file *file, struct socket *sock,
1818 		    struct vm_area_struct *vma)
1819 {
1820 	loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
1821 	unsigned long size = vma->vm_end - vma->vm_start;
1822 	struct xdp_sock *xs = xdp_sk(sock->sk);
1823 	int state = READ_ONCE(xs->state);
1824 	struct xsk_queue *q = NULL;
1825 
1826 	if (state != XSK_READY && state != XSK_BOUND)
1827 		return -EBUSY;
1828 
1829 	if (offset == XDP_PGOFF_RX_RING) {
1830 		q = READ_ONCE(xs->rx);
1831 	} else if (offset == XDP_PGOFF_TX_RING) {
1832 		q = READ_ONCE(xs->tx);
1833 	} else {
1834 		/* Matches the smp_wmb() in XDP_UMEM_REG */
1835 		smp_rmb();
1836 		if (offset == XDP_UMEM_PGOFF_FILL_RING)
1837 			q = state == XSK_READY ? READ_ONCE(xs->fq_tmp) :
1838 						 READ_ONCE(xs->pool->fq);
1839 		else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
1840 			q = state == XSK_READY ? READ_ONCE(xs->cq_tmp) :
1841 						 READ_ONCE(xs->pool->cq);
1842 	}
1843 
1844 	if (!q)
1845 		return -EINVAL;
1846 
1847 	/* Matches the smp_wmb() in xsk_init_queue */
1848 	smp_rmb();
1849 	if (size > q->ring_vmalloc_size)
1850 		return -EINVAL;
1851 
1852 	return remap_vmalloc_range(vma, q->ring, 0);
1853 }
1854 
1855 static int xsk_notifier(struct notifier_block *this,
1856 			unsigned long msg, void *ptr)
1857 {
1858 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1859 	struct net *net = dev_net(dev);
1860 	struct sock *sk;
1861 
1862 	switch (msg) {
1863 	case NETDEV_UNREGISTER:
1864 		mutex_lock(&net->xdp.lock);
1865 		sk_for_each(sk, &net->xdp.list) {
1866 			struct xdp_sock *xs = xdp_sk(sk);
1867 
1868 			mutex_lock(&xs->mutex);
1869 			if (xs->dev == dev) {
1870 				sk->sk_err = ENETDOWN;
1871 				if (!sock_flag(sk, SOCK_DEAD))
1872 					sk_error_report(sk);
1873 
1874 				xsk_unbind_dev(xs);
1875 
1876 				/* Clear device references. */
1877 				xp_clear_dev(xs->pool);
1878 			}
1879 			mutex_unlock(&xs->mutex);
1880 		}
1881 		mutex_unlock(&net->xdp.lock);
1882 		break;
1883 	}
1884 	return NOTIFY_DONE;
1885 }
1886 
1887 static struct proto xsk_proto = {
1888 	.name =		"XDP",
1889 	.owner =	THIS_MODULE,
1890 	.obj_size =	sizeof(struct xdp_sock),
1891 };
1892 
1893 static const struct proto_ops xsk_proto_ops = {
1894 	.family		= PF_XDP,
1895 	.owner		= THIS_MODULE,
1896 	.release	= xsk_release,
1897 	.bind		= xsk_bind,
1898 	.connect	= sock_no_connect,
1899 	.socketpair	= sock_no_socketpair,
1900 	.accept		= sock_no_accept,
1901 	.getname	= sock_no_getname,
1902 	.poll		= xsk_poll,
1903 	.ioctl		= sock_no_ioctl,
1904 	.listen		= sock_no_listen,
1905 	.shutdown	= sock_no_shutdown,
1906 	.setsockopt	= xsk_setsockopt,
1907 	.getsockopt	= xsk_getsockopt,
1908 	.sendmsg	= xsk_sendmsg,
1909 	.recvmsg	= xsk_recvmsg,
1910 	.mmap		= xsk_mmap,
1911 };
1912 
1913 static void xsk_destruct(struct sock *sk)
1914 {
1915 	struct xdp_sock *xs = xdp_sk(sk);
1916 
1917 	if (!sock_flag(sk, SOCK_DEAD))
1918 		return;
1919 
1920 	if (!xp_put_pool(xs->pool))
1921 		xdp_put_umem(xs->umem, !xs->pool);
1922 }
1923 
1924 static int xsk_create(struct net *net, struct socket *sock, int protocol,
1925 		      int kern)
1926 {
1927 	struct xdp_sock *xs;
1928 	struct sock *sk;
1929 
1930 	if (!ns_capable(net->user_ns, CAP_NET_RAW))
1931 		return -EPERM;
1932 	if (sock->type != SOCK_RAW)
1933 		return -ESOCKTNOSUPPORT;
1934 
1935 	if (protocol)
1936 		return -EPROTONOSUPPORT;
1937 
1938 	sock->state = SS_UNCONNECTED;
1939 
1940 	sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
1941 	if (!sk)
1942 		return -ENOBUFS;
1943 
1944 	sock->ops = &xsk_proto_ops;
1945 
1946 	sock_init_data(sock, sk);
1947 
1948 	sk->sk_family = PF_XDP;
1949 
1950 	sk->sk_destruct = xsk_destruct;
1951 
1952 	sock_set_flag(sk, SOCK_RCU_FREE);
1953 
1954 	xs = xdp_sk(sk);
1955 	xs->state = XSK_READY;
1956 	xs->max_tx_budget = TX_BATCH_SIZE;
1957 	mutex_init(&xs->mutex);
1958 
1959 	INIT_LIST_HEAD(&xs->map_list);
1960 	spin_lock_init(&xs->map_list_lock);
1961 
1962 	mutex_lock(&net->xdp.lock);
1963 	sk_add_node_rcu(sk, &net->xdp.list);
1964 	mutex_unlock(&net->xdp.lock);
1965 
1966 	sock_prot_inuse_add(net, &xsk_proto, 1);
1967 
1968 	return 0;
1969 }
1970 
1971 static const struct net_proto_family xsk_family_ops = {
1972 	.family = PF_XDP,
1973 	.create = xsk_create,
1974 	.owner	= THIS_MODULE,
1975 };
1976 
1977 static struct notifier_block xsk_netdev_notifier = {
1978 	.notifier_call	= xsk_notifier,
1979 };
1980 
1981 static int __net_init xsk_net_init(struct net *net)
1982 {
1983 	mutex_init(&net->xdp.lock);
1984 	INIT_HLIST_HEAD(&net->xdp.list);
1985 	return 0;
1986 }
1987 
1988 static void __net_exit xsk_net_exit(struct net *net)
1989 {
1990 	WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
1991 }
1992 
1993 static struct pernet_operations xsk_net_ops = {
1994 	.init = xsk_net_init,
1995 	.exit = xsk_net_exit,
1996 };
1997 
1998 static int __init xsk_init(void)
1999 {
2000 	int err;
2001 
2002 	err = proto_register(&xsk_proto, 0 /* no slab */);
2003 	if (err)
2004 		goto out;
2005 
2006 	err = sock_register(&xsk_family_ops);
2007 	if (err)
2008 		goto out_proto;
2009 
2010 	err = register_pernet_subsys(&xsk_net_ops);
2011 	if (err)
2012 		goto out_sk;
2013 
2014 	err = register_netdevice_notifier(&xsk_netdev_notifier);
2015 	if (err)
2016 		goto out_pernet;
2017 
2018 	xsk_tx_generic_cache = kmem_cache_create("xsk_generic_xmit_cache",
2019 						 sizeof(struct xsk_addrs),
2020 						 0, SLAB_HWCACHE_ALIGN, NULL);
2021 	if (!xsk_tx_generic_cache) {
2022 		err = -ENOMEM;
2023 		goto out_unreg_notif;
2024 	}
2025 
2026 	return 0;
2027 
2028 out_unreg_notif:
2029 	unregister_netdevice_notifier(&xsk_netdev_notifier);
2030 out_pernet:
2031 	unregister_pernet_subsys(&xsk_net_ops);
2032 out_sk:
2033 	sock_unregister(PF_XDP);
2034 out_proto:
2035 	proto_unregister(&xsk_proto);
2036 out:
2037 	return err;
2038 }
2039 
2040 fs_initcall(xsk_init);
2041