xref: /linux/net/xdp/xsk.c (revision b58b13f156c00c2457035b7071eaaac105fe6836)
1 // SPDX-License-Identifier: GPL-2.0
2 /* XDP sockets
3  *
4  * AF_XDP sockets provide a channel between XDP programs and userspace
5  * applications.
6  * Copyright(c) 2018 Intel Corporation.
7  *
8  * Author(s): Björn Töpel <bjorn.topel@intel.com>
9  *	      Magnus Karlsson <magnus.karlsson@intel.com>
10  */
11 
12 #define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__
13 
14 #include <linux/if_xdp.h>
15 #include <linux/init.h>
16 #include <linux/sched/mm.h>
17 #include <linux/sched/signal.h>
18 #include <linux/sched/task.h>
19 #include <linux/socket.h>
20 #include <linux/file.h>
21 #include <linux/uaccess.h>
22 #include <linux/net.h>
23 #include <linux/netdevice.h>
24 #include <linux/rculist.h>
25 #include <linux/vmalloc.h>
26 #include <net/xdp_sock_drv.h>
27 #include <net/busy_poll.h>
28 #include <net/netdev_rx_queue.h>
29 #include <net/xdp.h>
30 
31 #include "xsk_queue.h"
32 #include "xdp_umem.h"
33 #include "xsk.h"
34 
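/* TX_BATCH_SIZE bounds how many Tx descriptors one pass of the generic
 * (copy-mode) transmit path processes; MAX_PER_SOCKET_BUDGET bounds the work
 * done per socket when several sockets share a buffer pool, so that a single
 * socket cannot consume the whole budget.
 */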
35 #define TX_BATCH_SIZE 32
36 #define MAX_PER_SOCKET_BUDGET (TX_BATCH_SIZE)
37 
38 static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);
39 
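/* need_wakeup helpers used by zero-copy drivers: set or clear
 * XDP_RING_NEED_WAKEUP on the fill ring (Rx) or on every Tx ring sharing the
 * pool. The state is cached in the pool so the rings are only written when
 * the flag actually changes.
 */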
40 void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
41 {
42 	if (pool->cached_need_wakeup & XDP_WAKEUP_RX)
43 		return;
44 
45 	pool->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
46 	pool->cached_need_wakeup |= XDP_WAKEUP_RX;
47 }
48 EXPORT_SYMBOL(xsk_set_rx_need_wakeup);
49 
50 void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
51 {
52 	struct xdp_sock *xs;
53 
54 	if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
55 		return;
56 
57 	rcu_read_lock();
58 	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
59 		xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
60 	}
61 	rcu_read_unlock();
62 
63 	pool->cached_need_wakeup |= XDP_WAKEUP_TX;
64 }
65 EXPORT_SYMBOL(xsk_set_tx_need_wakeup);
66 
67 void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
68 {
69 	if (!(pool->cached_need_wakeup & XDP_WAKEUP_RX))
70 		return;
71 
72 	pool->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
73 	pool->cached_need_wakeup &= ~XDP_WAKEUP_RX;
74 }
75 EXPORT_SYMBOL(xsk_clear_rx_need_wakeup);
76 
77 void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
78 {
79 	struct xdp_sock *xs;
80 
81 	if (!(pool->cached_need_wakeup & XDP_WAKEUP_TX))
82 		return;
83 
84 	rcu_read_lock();
85 	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
86 		xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
87 	}
88 	rcu_read_unlock();
89 
90 	pool->cached_need_wakeup &= ~XDP_WAKEUP_TX;
91 }
92 EXPORT_SYMBOL(xsk_clear_tx_need_wakeup);
93 
94 bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
95 {
96 	return pool->uses_need_wakeup;
97 }
98 EXPORT_SYMBOL(xsk_uses_need_wakeup);
99 
100 struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
101 					    u16 queue_id)
102 {
103 	if (queue_id < dev->real_num_rx_queues)
104 		return dev->_rx[queue_id].pool;
105 	if (queue_id < dev->real_num_tx_queues)
106 		return dev->_tx[queue_id].pool;
107 
108 	return NULL;
109 }
110 EXPORT_SYMBOL(xsk_get_pool_from_qid);
111 
112 void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
113 {
114 	if (queue_id < dev->num_rx_queues)
115 		dev->_rx[queue_id].pool = NULL;
116 	if (queue_id < dev->num_tx_queues)
117 		dev->_tx[queue_id].pool = NULL;
118 }
119 
120 /* The buffer pool is stored both in the _rx struct and the _tx struct as we do
121  * not know if the device has more tx queues than rx, or vice versa.
122  * This might also change at run time.
123  */
124 int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
125 			u16 queue_id)
126 {
127 	if (queue_id >= max_t(unsigned int,
128 			      dev->real_num_rx_queues,
129 			      dev->real_num_tx_queues))
130 		return -EINVAL;
131 
132 	if (queue_id < dev->real_num_rx_queues)
133 		dev->_rx[queue_id].pool = pool;
134 	if (queue_id < dev->real_num_tx_queues)
135 		dev->_tx[queue_id].pool = pool;
136 
137 	return 0;
138 }
139 
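/* Post one buffer to the Rx ring. On success the xskb wrapper is released
 * back to the pool; the buffer itself is now owned by the descriptor handed
 * to user space. On failure the Rx ring was full and rx_queue_full is bumped.
 */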
140 static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff_xsk *xskb, u32 len,
141 			u32 flags)
142 {
143 	u64 addr;
144 	int err;
145 
146 	addr = xp_get_handle(xskb);
147 	err = xskq_prod_reserve_desc(xs->rx, addr, len, flags);
148 	if (err) {
149 		xs->rx_queue_full++;
150 		return err;
151 	}
152 
153 	xp_release(xskb);
154 	return 0;
155 }
156 
157 static int xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
158 {
159 	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
160 	u32 frags = xdp_buff_has_frags(xdp);
161 	struct xdp_buff_xsk *pos, *tmp;
162 	struct list_head *xskb_list;
163 	u32 contd = 0;
164 	int err;
165 
166 	if (frags)
167 		contd = XDP_PKT_CONTD;
168 
169 	err = __xsk_rcv_zc(xs, xskb, len, contd);
170 	if (err || likely(!frags))
171 		goto out;
172 
173 	xskb_list = &xskb->pool->xskb_list;
174 	list_for_each_entry_safe(pos, tmp, xskb_list, xskb_list_node) {
175 		if (list_is_singular(xskb_list))
176 			contd = 0;
177 		len = pos->xdp.data_end - pos->xdp.data;
178 		err = __xsk_rcv_zc(xs, pos, len, contd);
179 		if (err)
180 			return err;
181 		list_del(&pos->xskb_list_node);
182 	}
183 
184 out:
185 	return err;
186 }
187 
188 static void *xsk_copy_xdp_start(struct xdp_buff *from)
189 {
190 	if (unlikely(xdp_data_meta_unsupported(from)))
191 		return from->data;
192 	else
193 		return from->data_meta;
194 }
195 
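/* Copy packet data into one destination frame, walking from the linear part
 * of the source xdp_buff into its fragments as needed. Returns the number of
 * bytes written to @to, bounded by @to_len and by @rem (bytes left overall).
 */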
196 static u32 xsk_copy_xdp(void *to, void **from, u32 to_len,
197 			u32 *from_len, skb_frag_t **frag, u32 rem)
198 {
199 	u32 copied = 0;
200 
201 	while (1) {
202 		u32 copy_len = min_t(u32, *from_len, to_len);
203 
204 		memcpy(to, *from, copy_len);
205 		copied += copy_len;
206 		if (rem == copied)
207 			return copied;
208 
209 		if (*from_len == copy_len) {
210 			*from = skb_frag_address(*frag);
211 			*from_len = skb_frag_size((*frag)++);
212 		} else {
213 			*from += copy_len;
214 			*from_len -= copy_len;
215 		}
216 		if (to_len == copy_len)
217 			return copied;
218 
219 		to_len -= copy_len;
220 		to += copy_len;
221 	}
222 }
223 
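/* Copy-mode receive: copy the xdp_buff (including metadata) into buffers
 * allocated from the pool. Packets larger than one frame are split across
 * several Rx descriptors, all but the last carrying XDP_PKT_CONTD.
 */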
224 static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
225 {
226 	u32 frame_size = xsk_pool_get_rx_frame_size(xs->pool);
227 	void *copy_from = xsk_copy_xdp_start(xdp), *copy_to;
228 	u32 from_len, meta_len, rem, num_desc;
229 	struct xdp_buff_xsk *xskb;
230 	struct xdp_buff *xsk_xdp;
231 	skb_frag_t *frag;
232 
233 	from_len = xdp->data_end - copy_from;
234 	meta_len = xdp->data - copy_from;
235 	rem = len + meta_len;
236 
237 	if (len <= frame_size && !xdp_buff_has_frags(xdp)) {
238 		int err;
239 
240 		xsk_xdp = xsk_buff_alloc(xs->pool);
241 		if (!xsk_xdp) {
242 			xs->rx_dropped++;
243 			return -ENOMEM;
244 		}
245 		memcpy(xsk_xdp->data - meta_len, copy_from, rem);
246 		xskb = container_of(xsk_xdp, struct xdp_buff_xsk, xdp);
247 		err = __xsk_rcv_zc(xs, xskb, len, 0);
248 		if (err) {
249 			xsk_buff_free(xsk_xdp);
250 			return err;
251 		}
252 
253 		return 0;
254 	}
255 
256 	num_desc = (len - 1) / frame_size + 1;
257 
258 	if (!xsk_buff_can_alloc(xs->pool, num_desc)) {
259 		xs->rx_dropped++;
260 		return -ENOMEM;
261 	}
262 	if (xskq_prod_nb_free(xs->rx, num_desc) < num_desc) {
263 		xs->rx_queue_full++;
264 		return -ENOBUFS;
265 	}
266 
267 	if (xdp_buff_has_frags(xdp)) {
268 		struct skb_shared_info *sinfo;
269 
270 		sinfo = xdp_get_shared_info_from_buff(xdp);
271 		frag = &sinfo->frags[0];
272 	}
273 
274 	do {
275 		u32 to_len = frame_size + meta_len;
276 		u32 copied;
277 
278 		xsk_xdp = xsk_buff_alloc(xs->pool);
279 		copy_to = xsk_xdp->data - meta_len;
280 
281 		copied = xsk_copy_xdp(copy_to, &copy_from, to_len, &from_len, &frag, rem);
282 		rem -= copied;
283 
284 		xskb = container_of(xsk_xdp, struct xdp_buff_xsk, xdp);
285 		__xsk_rcv_zc(xs, xskb, copied - meta_len, rem ? XDP_PKT_CONTD : 0);
286 		meta_len = 0;
287 	} while (rem);
288 
289 	return 0;
290 }
291 
292 static bool xsk_tx_writeable(struct xdp_sock *xs)
293 {
294 	if (xskq_cons_present_entries(xs->tx) > xs->tx->nentries / 2)
295 		return false;
296 
297 	return true;
298 }
299 
300 static bool xsk_is_bound(struct xdp_sock *xs)
301 {
302 	if (READ_ONCE(xs->state) == XSK_BOUND) {
303 		/* Matches smp_wmb() in bind(). */
304 		smp_rmb();
305 		return true;
306 	}
307 	return false;
308 }
309 
310 static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
311 {
312 	if (!xsk_is_bound(xs))
313 		return -ENXIO;
314 
315 	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
316 		return -EINVAL;
317 
318 	if (len > xsk_pool_get_rx_frame_size(xs->pool) && !xs->sg) {
319 		xs->rx_dropped++;
320 		return -ENOSPC;
321 	}
322 
323 	sk_mark_napi_id_once_xdp(&xs->sk, xdp);
324 	return 0;
325 }
326 
327 static void xsk_flush(struct xdp_sock *xs)
328 {
329 	xskq_prod_submit(xs->rx);
330 	__xskq_cons_release(xs->pool->fq);
331 	sock_def_readable(&xs->sk);
332 }
333 
334 int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
335 {
336 	u32 len = xdp_get_buff_len(xdp);
337 	int err;
338 
339 	spin_lock_bh(&xs->rx_lock);
340 	err = xsk_rcv_check(xs, xdp, len);
341 	if (!err) {
342 		err = __xsk_rcv(xs, xdp, len);
343 		xsk_flush(xs);
344 	}
345 	spin_unlock_bh(&xs->rx_lock);
346 	return err;
347 }
348 
349 static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
350 {
351 	u32 len = xdp_get_buff_len(xdp);
352 	int err;
353 
354 	err = xsk_rcv_check(xs, xdp, len);
355 	if (err)
356 		return err;
357 
358 	if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) {
359 		len = xdp->data_end - xdp->data;
360 		return xsk_rcv_zc(xs, xdp, len);
361 	}
362 
363 	err = __xsk_rcv(xs, xdp, len);
364 	if (!err)
365 		xdp_return_buff(xdp);
366 	return err;
367 }
368 
369 int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
370 {
371 	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
372 	int err;
373 
374 	err = xsk_rcv(xs, xdp);
375 	if (err)
376 		return err;
377 
378 	if (!xs->flush_node.prev)
379 		list_add(&xs->flush_node, flush_list);
380 
381 	return 0;
382 }
383 
384 void __xsk_map_flush(void)
385 {
386 	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
387 	struct xdp_sock *xs, *tmp;
388 
389 	list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
390 		xsk_flush(xs);
391 		__list_del_clearprev(&xs->flush_node);
392 	}
393 }
394 
395 #ifdef CONFIG_DEBUG_NET
396 bool xsk_map_check_flush(void)
397 {
398 	if (list_empty(this_cpu_ptr(&xskmap_flush_list)))
399 		return false;
400 	__xsk_map_flush();
401 	return true;
402 }
403 #endif
404 
405 void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
406 {
407 	xskq_prod_submit_n(pool->cq, nb_entries);
408 }
409 EXPORT_SYMBOL(xsk_tx_completed);
410 
411 void xsk_tx_release(struct xsk_buff_pool *pool)
412 {
413 	struct xdp_sock *xs;
414 
415 	rcu_read_lock();
416 	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
417 		__xskq_cons_release(xs->tx);
418 		if (xsk_tx_writeable(xs))
419 			xs->sk.sk_write_space(&xs->sk);
420 	}
421 	rcu_read_unlock();
422 }
423 EXPORT_SYMBOL(xsk_tx_release);
424 
425 bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc)
426 {
427 	bool budget_exhausted = false;
428 	struct xdp_sock *xs;
429 
430 	rcu_read_lock();
431 again:
432 	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
433 		if (xs->tx_budget_spent >= MAX_PER_SOCKET_BUDGET) {
434 			budget_exhausted = true;
435 			continue;
436 		}
437 
438 		if (!xskq_cons_peek_desc(xs->tx, desc, pool)) {
439 			if (xskq_has_descs(xs->tx))
440 				xskq_cons_release(xs->tx);
441 			continue;
442 		}
443 
444 		xs->tx_budget_spent++;
445 
446 		/* This is the backpressure mechanism for the Tx path.
447 		 * Reserve space in the completion queue and only proceed
448 		 * if there is space in it. This avoids having to implement
449 		 * any buffering in the Tx path.
450 		 */
451 		if (xskq_prod_reserve_addr(pool->cq, desc->addr))
452 			goto out;
453 
454 		xskq_cons_release(xs->tx);
455 		rcu_read_unlock();
456 		return true;
457 	}
458 
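	/* At least one socket was skipped because it exhausted its budget;
	 * reset all budgets and scan the list again so those sockets get
	 * another chance and Tx does not stall.
	 */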
459 	if (budget_exhausted) {
460 		list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list)
461 			xs->tx_budget_spent = 0;
462 
463 		budget_exhausted = false;
464 		goto again;
465 	}
466 
467 out:
468 	rcu_read_unlock();
469 	return false;
470 }
471 EXPORT_SYMBOL(xsk_tx_peek_desc);
472 
473 static u32 xsk_tx_peek_release_fallback(struct xsk_buff_pool *pool, u32 max_entries)
474 {
475 	struct xdp_desc *descs = pool->tx_descs;
476 	u32 nb_pkts = 0;
477 
478 	while (nb_pkts < max_entries && xsk_tx_peek_desc(pool, &descs[nb_pkts]))
479 		nb_pkts++;
480 
481 	xsk_tx_release(pool);
482 	return nb_pkts;
483 }
484 
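/* Batched Tx peek for zero-copy drivers. The batched path is only used when a
 * single socket is tied to the pool; with several sockets it falls back to
 * peeking one descriptor at a time.
 */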
485 u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 nb_pkts)
486 {
487 	struct xdp_sock *xs;
488 
489 	rcu_read_lock();
490 	if (!list_is_singular(&pool->xsk_tx_list)) {
491 		/* Fallback to the non-batched version */
492 		rcu_read_unlock();
493 		return xsk_tx_peek_release_fallback(pool, nb_pkts);
494 	}
495 
496 	xs = list_first_or_null_rcu(&pool->xsk_tx_list, struct xdp_sock, tx_list);
497 	if (!xs) {
498 		nb_pkts = 0;
499 		goto out;
500 	}
501 
502 	nb_pkts = xskq_cons_nb_entries(xs->tx, nb_pkts);
503 
504 	/* This is the backpressure mechanism for the Tx path. Try to
505 	 * reserve space in the completion queue for all packets, but
506 	 * if there are fewer slots available, just process that many
507 	 * packets. This avoids having to implement any buffering in
508 	 * the Tx path.
509 	 */
510 	nb_pkts = xskq_prod_nb_free(pool->cq, nb_pkts);
511 	if (!nb_pkts)
512 		goto out;
513 
514 	nb_pkts = xskq_cons_read_desc_batch(xs->tx, pool, nb_pkts);
515 	if (!nb_pkts) {
516 		xs->tx->queue_empty_descs++;
517 		goto out;
518 	}
519 
520 	__xskq_cons_release(xs->tx);
521 	xskq_prod_write_addr_batch(pool->cq, pool->tx_descs, nb_pkts);
522 	xs->sk.sk_write_space(&xs->sk);
523 
524 out:
525 	rcu_read_unlock();
526 	return nb_pkts;
527 }
528 EXPORT_SYMBOL(xsk_tx_peek_release_desc_batch);
529 
530 static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
531 {
532 	struct net_device *dev = xs->dev;
533 
534 	return dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
535 }
536 
537 static int xsk_cq_reserve_addr_locked(struct xdp_sock *xs, u64 addr)
538 {
539 	unsigned long flags;
540 	int ret;
541 
542 	spin_lock_irqsave(&xs->pool->cq_lock, flags);
543 	ret = xskq_prod_reserve_addr(xs->pool->cq, addr);
544 	spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
545 
546 	return ret;
547 }
548 
549 static void xsk_cq_submit_locked(struct xdp_sock *xs, u32 n)
550 {
551 	unsigned long flags;
552 
553 	spin_lock_irqsave(&xs->pool->cq_lock, flags);
554 	xskq_prod_submit_n(xs->pool->cq, n);
555 	spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
556 }
557 
558 static void xsk_cq_cancel_locked(struct xdp_sock *xs, u32 n)
559 {
560 	unsigned long flags;
561 
562 	spin_lock_irqsave(&xs->pool->cq_lock, flags);
563 	xskq_prod_cancel_n(xs->pool->cq, n);
564 	spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
565 }
566 
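/* The number of Tx descriptors consumed to build an skb is stashed in
 * shinfo->destructor_arg, so the completion ring can be advanced (or the
 * reservations cancelled) by exactly that amount when the skb is freed.
 */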
567 static u32 xsk_get_num_desc(struct sk_buff *skb)
568 {
569 	return skb ? (long)skb_shinfo(skb)->destructor_arg : 0;
570 }
571 
572 static void xsk_destruct_skb(struct sk_buff *skb)
573 {
574 	struct xsk_tx_metadata_compl *compl = &skb_shinfo(skb)->xsk_meta;
575 
576 	if (compl->tx_timestamp) {
577 		/* sw completion timestamp, not a real one */
578 		*compl->tx_timestamp = ktime_get_tai_fast_ns();
579 	}
580 
581 	xsk_cq_submit_locked(xdp_sk(skb->sk), xsk_get_num_desc(skb));
582 	sock_wfree(skb);
583 }
584 
585 static void xsk_set_destructor_arg(struct sk_buff *skb)
586 {
587 	long num = xsk_get_num_desc(xdp_sk(skb->sk)->skb) + 1;
588 
589 	skb_shinfo(skb)->destructor_arg = (void *)num;
590 }
591 
592 static void xsk_consume_skb(struct sk_buff *skb)
593 {
594 	struct xdp_sock *xs = xdp_sk(skb->sk);
595 
596 	skb->destructor = sock_wfree;
597 	xsk_cq_cancel_locked(xs, xsk_get_num_desc(skb));
598 	/* Free skb without triggering the perf drop trace */
599 	consume_skb(skb);
600 	xs->skb = NULL;
601 }
602 
603 static void xsk_drop_skb(struct sk_buff *skb)
604 {
605 	xdp_sk(skb->sk)->tx->invalid_descs += xsk_get_num_desc(skb);
606 	xsk_consume_skb(skb);
607 }
608 
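/* For devices with IFF_TX_SKB_NO_LINEAR: build an skb whose frags point
 * directly at the umem pages, so the packet data is not copied.
 */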
609 static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs,
610 					      struct xdp_desc *desc)
611 {
612 	struct xsk_buff_pool *pool = xs->pool;
613 	u32 hr, len, ts, offset, copy, copied;
614 	struct sk_buff *skb = xs->skb;
615 	struct page *page;
616 	void *buffer;
617 	int err, i;
618 	u64 addr;
619 
620 	if (!skb) {
621 		hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(xs->dev->needed_headroom));
622 
623 		skb = sock_alloc_send_skb(&xs->sk, hr, 1, &err);
624 		if (unlikely(!skb))
625 			return ERR_PTR(err);
626 
627 		skb_reserve(skb, hr);
628 	}
629 
630 	addr = desc->addr;
631 	len = desc->len;
632 	ts = pool->unaligned ? len : pool->chunk_size;
633 
634 	buffer = xsk_buff_raw_get_data(pool, addr);
635 	offset = offset_in_page(buffer);
636 	addr = buffer - pool->addrs;
637 
638 	for (copied = 0, i = skb_shinfo(skb)->nr_frags; copied < len; i++) {
639 		if (unlikely(i >= MAX_SKB_FRAGS))
640 			return ERR_PTR(-EOVERFLOW);
641 
642 		page = pool->umem->pgs[addr >> PAGE_SHIFT];
643 		get_page(page);
644 
645 		copy = min_t(u32, PAGE_SIZE - offset, len - copied);
646 		skb_fill_page_desc(skb, i, page, offset, copy);
647 
648 		copied += copy;
649 		addr += copy;
650 		offset = 0;
651 	}
652 
653 	skb->len += len;
654 	skb->data_len += len;
655 	skb->truesize += ts;
656 
657 	refcount_add(ts, &xs->sk.sk_wmem_alloc);
658 
659 	return skb;
660 }
661 
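/* Copy-mode Tx: build an skb from one Tx descriptor. The first descriptor of
 * a packet is copied into the linear area; further descriptors of a
 * multi-buffer packet are copied into newly allocated page frags. Devices
 * with IFF_TX_SKB_NO_LINEAR take the zerocopy path above instead.
 * XDP_TX_METADATA is only honoured on the first fragment.
 */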
662 static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
663 				     struct xdp_desc *desc)
664 {
665 	struct xsk_tx_metadata *meta = NULL;
666 	struct net_device *dev = xs->dev;
667 	struct sk_buff *skb = xs->skb;
668 	bool first_frag = false;
669 	int err;
670 
671 	if (dev->priv_flags & IFF_TX_SKB_NO_LINEAR) {
672 		skb = xsk_build_skb_zerocopy(xs, desc);
673 		if (IS_ERR(skb)) {
674 			err = PTR_ERR(skb);
675 			goto free_err;
676 		}
677 	} else {
678 		u32 hr, tr, len;
679 		void *buffer;
680 
681 		buffer = xsk_buff_raw_get_data(xs->pool, desc->addr);
682 		len = desc->len;
683 
684 		if (!skb) {
685 			hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(dev->needed_headroom));
686 			tr = dev->needed_tailroom;
687 			skb = sock_alloc_send_skb(&xs->sk, hr + len + tr, 1, &err);
688 			if (unlikely(!skb))
689 				goto free_err;
690 
691 			skb_reserve(skb, hr);
692 			skb_put(skb, len);
693 
694 			err = skb_store_bits(skb, 0, buffer, len);
695 			if (unlikely(err)) {
696 				kfree_skb(skb);
697 				goto free_err;
698 			}
699 
700 			first_frag = true;
701 		} else {
702 			int nr_frags = skb_shinfo(skb)->nr_frags;
703 			struct page *page;
704 			u8 *vaddr;
705 
706 			if (unlikely(nr_frags == (MAX_SKB_FRAGS - 1) && xp_mb_desc(desc))) {
707 				err = -EOVERFLOW;
708 				goto free_err;
709 			}
710 
711 			page = alloc_page(xs->sk.sk_allocation);
712 			if (unlikely(!page)) {
713 				err = -EAGAIN;
714 				goto free_err;
715 			}
716 
717 			vaddr = kmap_local_page(page);
718 			memcpy(vaddr, buffer, len);
719 			kunmap_local(vaddr);
720 
721 			skb_add_rx_frag(skb, nr_frags, page, 0, len, 0);
722 		}
723 
724 		if (first_frag && desc->options & XDP_TX_METADATA) {
725 			if (unlikely(xs->pool->tx_metadata_len == 0)) {
726 				err = -EINVAL;
727 				goto free_err;
728 			}
729 
730 			meta = buffer - xs->pool->tx_metadata_len;
731 			if (unlikely(!xsk_buff_valid_tx_metadata(meta))) {
732 				err = -EINVAL;
733 				goto free_err;
734 			}
735 
736 			if (meta->flags & XDP_TXMD_FLAGS_CHECKSUM) {
737 				if (unlikely(meta->request.csum_start +
738 					     meta->request.csum_offset +
739 					     sizeof(__sum16) > len)) {
740 					err = -EINVAL;
741 					goto free_err;
742 				}
743 
744 				skb->csum_start = hr + meta->request.csum_start;
745 				skb->csum_offset = meta->request.csum_offset;
746 				skb->ip_summed = CHECKSUM_PARTIAL;
747 
748 				if (unlikely(xs->pool->tx_sw_csum)) {
749 					err = skb_checksum_help(skb);
750 					if (err)
751 						goto free_err;
752 				}
753 			}
754 		}
755 	}
756 
757 	skb->dev = dev;
758 	skb->priority = READ_ONCE(xs->sk.sk_priority);
759 	skb->mark = READ_ONCE(xs->sk.sk_mark);
760 	skb->destructor = xsk_destruct_skb;
761 	xsk_tx_metadata_to_compl(meta, &skb_shinfo(skb)->xsk_meta);
762 	xsk_set_destructor_arg(skb);
763 
764 	return skb;
765 
766 free_err:
767 	if (err == -EOVERFLOW) {
768 		/* Drop the packet */
769 		xsk_set_destructor_arg(xs->skb);
770 		xsk_drop_skb(xs->skb);
771 		xskq_cons_release(xs->tx);
772 	} else {
773 		/* Let application retry */
774 		xsk_cq_cancel_locked(xs, 1);
775 	}
776 
777 	return ERR_PTR(err);
778 }
779 
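/* Generic (copy-mode) transmit: turn Tx descriptors into skbs and push them
 * straight to the device queue with __dev_direct_xmit(), at most
 * TX_BATCH_SIZE descriptors per call.
 */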
780 static int __xsk_generic_xmit(struct sock *sk)
781 {
782 	struct xdp_sock *xs = xdp_sk(sk);
783 	u32 max_batch = TX_BATCH_SIZE;
784 	bool sent_frame = false;
785 	struct xdp_desc desc;
786 	struct sk_buff *skb;
787 	int err = 0;
788 
789 	mutex_lock(&xs->mutex);
790 
791 	/* Since we dropped the RCU read lock, the socket state might have changed. */
792 	if (unlikely(!xsk_is_bound(xs))) {
793 		err = -ENXIO;
794 		goto out;
795 	}
796 
797 	if (xs->queue_id >= xs->dev->real_num_tx_queues)
798 		goto out;
799 
800 	while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
801 		if (max_batch-- == 0) {
802 			err = -EAGAIN;
803 			goto out;
804 		}
805 
806 		/* This is the backpressure mechanism for the Tx path.
807 		 * Reserve space in the completion queue and only proceed
808 		 * if there is space in it. This avoids having to implement
809 		 * any buffering in the Tx path.
810 		 */
811 		if (xsk_cq_reserve_addr_locked(xs, desc.addr))
812 			goto out;
813 
814 		skb = xsk_build_skb(xs, &desc);
815 		if (IS_ERR(skb)) {
816 			err = PTR_ERR(skb);
817 			if (err != -EOVERFLOW)
818 				goto out;
819 			err = 0;
820 			continue;
821 		}
822 
823 		xskq_cons_release(xs->tx);
824 
825 		if (xp_mb_desc(&desc)) {
826 			xs->skb = skb;
827 			continue;
828 		}
829 
830 		err = __dev_direct_xmit(skb, xs->queue_id);
831 		if (err == NETDEV_TX_BUSY) {
832 			/* Tell user-space to retry the send */
833 			xskq_cons_cancel_n(xs->tx, xsk_get_num_desc(skb));
834 			xsk_consume_skb(skb);
835 			err = -EAGAIN;
836 			goto out;
837 		}
838 
839 		/* Ignore NET_XMIT_CN as packet might have been sent */
840 		if (err == NET_XMIT_DROP) {
841 			/* SKB completed but not sent */
842 			err = -EBUSY;
843 			xs->skb = NULL;
844 			goto out;
845 		}
846 
847 		sent_frame = true;
848 		xs->skb = NULL;
849 	}
850 
851 	if (xskq_has_descs(xs->tx)) {
852 		if (xs->skb)
853 			xsk_drop_skb(xs->skb);
854 		xskq_cons_release(xs->tx);
855 	}
856 
857 out:
858 	if (sent_frame)
859 		if (xsk_tx_writeable(xs))
860 			sk->sk_write_space(sk);
861 
862 	mutex_unlock(&xs->mutex);
863 	return err;
864 }
865 
866 static int xsk_generic_xmit(struct sock *sk)
867 {
868 	int ret;
869 
870 	/* Drop the RCU lock since the SKB path might sleep. */
871 	rcu_read_unlock();
872 	ret = __xsk_generic_xmit(sk);
873 	/* Reacquire the RCU lock before going into common code. */
874 	rcu_read_lock();
875 
876 	return ret;
877 }
878 
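/* If the application prefers busy polling on this socket, skip waking the
 * driver; the busy-poll loop drives the NAPI context instead.
 */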
879 static bool xsk_no_wakeup(struct sock *sk)
880 {
881 #ifdef CONFIG_NET_RX_BUSY_POLL
882 	/* Prefer busy-polling, skip the wakeup. */
883 	return READ_ONCE(sk->sk_prefer_busy_poll) && READ_ONCE(sk->sk_ll_usec) &&
884 		READ_ONCE(sk->sk_napi_id) >= MIN_NAPI_ID;
885 #else
886 	return false;
887 #endif
888 }
889 
890 static int xsk_check_common(struct xdp_sock *xs)
891 {
892 	if (unlikely(!xsk_is_bound(xs)))
893 		return -ENXIO;
894 	if (unlikely(!(xs->dev->flags & IFF_UP)))
895 		return -ENETDOWN;
896 
897 	return 0;
898 }
899 
900 static int __xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
901 {
902 	bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
903 	struct sock *sk = sock->sk;
904 	struct xdp_sock *xs = xdp_sk(sk);
905 	struct xsk_buff_pool *pool;
906 	int err;
907 
908 	err = xsk_check_common(xs);
909 	if (err)
910 		return err;
911 	if (unlikely(need_wait))
912 		return -EOPNOTSUPP;
913 	if (unlikely(!xs->tx))
914 		return -ENOBUFS;
915 
916 	if (sk_can_busy_loop(sk)) {
917 		if (xs->zc)
918 			__sk_mark_napi_id_once(sk, xsk_pool_get_napi_id(xs->pool));
919 		sk_busy_loop(sk, 1); /* only support non-blocking sockets */
920 	}
921 
922 	if (xs->zc && xsk_no_wakeup(sk))
923 		return 0;
924 
925 	pool = xs->pool;
926 	if (pool->cached_need_wakeup & XDP_WAKEUP_TX) {
927 		if (xs->zc)
928 			return xsk_wakeup(xs, XDP_WAKEUP_TX);
929 		return xsk_generic_xmit(sk);
930 	}
931 	return 0;
932 }
933 
934 static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
935 {
936 	int ret;
937 
938 	rcu_read_lock();
939 	ret = __xsk_sendmsg(sock, m, total_len);
940 	rcu_read_unlock();
941 
942 	return ret;
943 }
944 
945 static int __xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
946 {
947 	bool need_wait = !(flags & MSG_DONTWAIT);
948 	struct sock *sk = sock->sk;
949 	struct xdp_sock *xs = xdp_sk(sk);
950 	int err;
951 
952 	err = xsk_check_common(xs);
953 	if (err)
954 		return err;
955 	if (unlikely(!xs->rx))
956 		return -ENOBUFS;
957 	if (unlikely(need_wait))
958 		return -EOPNOTSUPP;
959 
960 	if (sk_can_busy_loop(sk))
961 		sk_busy_loop(sk, 1); /* only support non-blocking sockets */
962 
963 	if (xsk_no_wakeup(sk))
964 		return 0;
965 
966 	if (xs->pool->cached_need_wakeup & XDP_WAKEUP_RX && xs->zc)
967 		return xsk_wakeup(xs, XDP_WAKEUP_RX);
968 	return 0;
969 }
970 
971 static int xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
972 {
973 	int ret;
974 
975 	rcu_read_lock();
976 	ret = __xsk_recvmsg(sock, m, len, flags);
977 	rcu_read_unlock();
978 
979 	return ret;
980 }
981 
982 static __poll_t xsk_poll(struct file *file, struct socket *sock,
983 			     struct poll_table_struct *wait)
984 {
985 	__poll_t mask = 0;
986 	struct sock *sk = sock->sk;
987 	struct xdp_sock *xs = xdp_sk(sk);
988 	struct xsk_buff_pool *pool;
989 
990 	sock_poll_wait(file, sock, wait);
991 
992 	rcu_read_lock();
993 	if (xsk_check_common(xs))
994 		goto out;
995 
996 	pool = xs->pool;
997 
998 	if (pool->cached_need_wakeup) {
999 		if (xs->zc)
1000 			xsk_wakeup(xs, pool->cached_need_wakeup);
1001 		else if (xs->tx)
1002 			/* Poll needs to drive Tx in copy mode as well */
1003 			xsk_generic_xmit(sk);
1004 	}
1005 
1006 	if (xs->rx && !xskq_prod_is_empty(xs->rx))
1007 		mask |= EPOLLIN | EPOLLRDNORM;
1008 	if (xs->tx && xsk_tx_writeable(xs))
1009 		mask |= EPOLLOUT | EPOLLWRNORM;
1010 out:
1011 	rcu_read_unlock();
1012 	return mask;
1013 }
1014 
1015 static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
1016 			  bool umem_queue)
1017 {
1018 	struct xsk_queue *q;
1019 
1020 	if (entries == 0 || *queue || !is_power_of_2(entries))
1021 		return -EINVAL;
1022 
1023 	q = xskq_create(entries, umem_queue);
1024 	if (!q)
1025 		return -ENOMEM;
1026 
1027 	/* Make sure queue is ready before it can be seen by others */
1028 	smp_wmb();
1029 	WRITE_ONCE(*queue, q);
1030 	return 0;
1031 }
1032 
1033 static void xsk_unbind_dev(struct xdp_sock *xs)
1034 {
1035 	struct net_device *dev = xs->dev;
1036 
1037 	if (xs->state != XSK_BOUND)
1038 		return;
1039 	WRITE_ONCE(xs->state, XSK_UNBOUND);
1040 
1041 	/* Wait for driver to stop using the xdp socket. */
1042 	xp_del_xsk(xs->pool, xs);
1043 	synchronize_net();
1044 	dev_put(dev);
1045 }
1046 
1047 static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs,
1048 					      struct xdp_sock __rcu ***map_entry)
1049 {
1050 	struct xsk_map *map = NULL;
1051 	struct xsk_map_node *node;
1052 
1053 	*map_entry = NULL;
1054 
1055 	spin_lock_bh(&xs->map_list_lock);
1056 	node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node,
1057 					node);
1058 	if (node) {
1059 		bpf_map_inc(&node->map->map);
1060 		map = node->map;
1061 		*map_entry = node->map_entry;
1062 	}
1063 	spin_unlock_bh(&xs->map_list_lock);
1064 	return map;
1065 }
1066 
1067 static void xsk_delete_from_maps(struct xdp_sock *xs)
1068 {
1069 	/* This function removes the current XDP socket from all the
1070 	 * maps it resides in. We need to take extra care here, due to
1071 	 * the two locks involved. Each map has a lock synchronizing
1072 	 * updates to the entries, and each socket has a lock that
1073 	 * synchronizes access to the list of maps (map_list). For
1074 	 * deadlock avoidance the locks need to be taken in the order
1075 	 * "map lock"->"socket map list lock". We start off by
1076 	 * accessing the socket map list, and take a reference to the
1077 	 * map to guarantee existence between the
1078 	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete()
1079 	 * calls. Then we ask the map to remove the socket, which
1080 	 * tries to remove the socket from the map. Note that there
1081 	 * might be updates to the map between
1082 	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete().
1083 	 */
1084 	struct xdp_sock __rcu **map_entry = NULL;
1085 	struct xsk_map *map;
1086 
1087 	while ((map = xsk_get_map_list_entry(xs, &map_entry))) {
1088 		xsk_map_try_sock_delete(map, xs, map_entry);
1089 		bpf_map_put(&map->map);
1090 	}
1091 }
1092 
1093 static int xsk_release(struct socket *sock)
1094 {
1095 	struct sock *sk = sock->sk;
1096 	struct xdp_sock *xs = xdp_sk(sk);
1097 	struct net *net;
1098 
1099 	if (!sk)
1100 		return 0;
1101 
1102 	net = sock_net(sk);
1103 
1104 	if (xs->skb)
1105 		xsk_drop_skb(xs->skb);
1106 
1107 	mutex_lock(&net->xdp.lock);
1108 	sk_del_node_init_rcu(sk);
1109 	mutex_unlock(&net->xdp.lock);
1110 
1111 	sock_prot_inuse_add(net, sk->sk_prot, -1);
1112 
1113 	xsk_delete_from_maps(xs);
1114 	mutex_lock(&xs->mutex);
1115 	xsk_unbind_dev(xs);
1116 	mutex_unlock(&xs->mutex);
1117 
1118 	xskq_destroy(xs->rx);
1119 	xskq_destroy(xs->tx);
1120 	xskq_destroy(xs->fq_tmp);
1121 	xskq_destroy(xs->cq_tmp);
1122 
1123 	sock_orphan(sk);
1124 	sock->sk = NULL;
1125 
1126 	sock_put(sk);
1127 
1128 	return 0;
1129 }
1130 
1131 static struct socket *xsk_lookup_xsk_from_fd(int fd)
1132 {
1133 	struct socket *sock;
1134 	int err;
1135 
1136 	sock = sockfd_lookup(fd, &err);
1137 	if (!sock)
1138 		return ERR_PTR(-ENOTSOCK);
1139 
1140 	if (sock->sk->sk_family != PF_XDP) {
1141 		sockfd_put(sock);
1142 		return ERR_PTR(-ENOPROTOOPT);
1143 	}
1144 
1145 	return sock;
1146 }
1147 
1148 static bool xsk_validate_queues(struct xdp_sock *xs)
1149 {
1150 	return xs->fq_tmp && xs->cq_tmp;
1151 }
1152 
1153 static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
1154 {
1155 	struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
1156 	struct sock *sk = sock->sk;
1157 	struct xdp_sock *xs = xdp_sk(sk);
1158 	struct net_device *dev;
1159 	int bound_dev_if;
1160 	u32 flags, qid;
1161 	int err = 0;
1162 
1163 	if (addr_len < sizeof(struct sockaddr_xdp))
1164 		return -EINVAL;
1165 	if (sxdp->sxdp_family != AF_XDP)
1166 		return -EINVAL;
1167 
1168 	flags = sxdp->sxdp_flags;
1169 	if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY |
1170 		      XDP_USE_NEED_WAKEUP | XDP_USE_SG))
1171 		return -EINVAL;
1172 
1173 	bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
1174 	if (bound_dev_if && bound_dev_if != sxdp->sxdp_ifindex)
1175 		return -EINVAL;
1176 
1177 	rtnl_lock();
1178 	mutex_lock(&xs->mutex);
1179 	if (xs->state != XSK_READY) {
1180 		err = -EBUSY;
1181 		goto out_release;
1182 	}
1183 
1184 	dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
1185 	if (!dev) {
1186 		err = -ENODEV;
1187 		goto out_release;
1188 	}
1189 
1190 	if (!xs->rx && !xs->tx) {
1191 		err = -EINVAL;
1192 		goto out_unlock;
1193 	}
1194 
1195 	qid = sxdp->sxdp_queue_id;
1196 
1197 	if (flags & XDP_SHARED_UMEM) {
1198 		struct xdp_sock *umem_xs;
1199 		struct socket *sock;
1200 
1201 		if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY) ||
1202 		    (flags & XDP_USE_NEED_WAKEUP) || (flags & XDP_USE_SG)) {
1203 			/* Cannot specify flags for shared sockets. */
1204 			err = -EINVAL;
1205 			goto out_unlock;
1206 		}
1207 
1208 		if (xs->umem) {
1209 			/* We already have our own umem. */
1210 			err = -EINVAL;
1211 			goto out_unlock;
1212 		}
1213 
1214 		sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
1215 		if (IS_ERR(sock)) {
1216 			err = PTR_ERR(sock);
1217 			goto out_unlock;
1218 		}
1219 
1220 		umem_xs = xdp_sk(sock->sk);
1221 		if (!xsk_is_bound(umem_xs)) {
1222 			err = -EBADF;
1223 			sockfd_put(sock);
1224 			goto out_unlock;
1225 		}
1226 
1227 		if (umem_xs->queue_id != qid || umem_xs->dev != dev) {
1228 			/* Share the umem with another socket on another qid
1229 			 * and/or device.
1230 			 */
1231 			xs->pool = xp_create_and_assign_umem(xs,
1232 							     umem_xs->umem);
1233 			if (!xs->pool) {
1234 				err = -ENOMEM;
1235 				sockfd_put(sock);
1236 				goto out_unlock;
1237 			}
1238 
1239 			err = xp_assign_dev_shared(xs->pool, umem_xs, dev,
1240 						   qid);
1241 			if (err) {
1242 				xp_destroy(xs->pool);
1243 				xs->pool = NULL;
1244 				sockfd_put(sock);
1245 				goto out_unlock;
1246 			}
1247 		} else {
1248 			/* Share the buffer pool with the other socket. */
1249 			if (xs->fq_tmp || xs->cq_tmp) {
1250 				/* Do not allow setting your own fq or cq. */
1251 				err = -EINVAL;
1252 				sockfd_put(sock);
1253 				goto out_unlock;
1254 			}
1255 
1256 			xp_get_pool(umem_xs->pool);
1257 			xs->pool = umem_xs->pool;
1258 
1259 			/* If the underlying shared umem was created without a Tx
1260 			 * ring, allocate the Tx descriptor array that the Tx
1261 			 * batching API uses.
1262 			 */
1263 			if (xs->tx && !xs->pool->tx_descs) {
1264 				err = xp_alloc_tx_descs(xs->pool, xs);
1265 				if (err) {
1266 					xp_put_pool(xs->pool);
1267 					xs->pool = NULL;
1268 					sockfd_put(sock);
1269 					goto out_unlock;
1270 				}
1271 			}
1272 		}
1273 
1274 		xdp_get_umem(umem_xs->umem);
1275 		WRITE_ONCE(xs->umem, umem_xs->umem);
1276 		sockfd_put(sock);
1277 	} else if (!xs->umem || !xsk_validate_queues(xs)) {
1278 		err = -EINVAL;
1279 		goto out_unlock;
1280 	} else {
1281 		/* This xsk has its own umem. */
1282 		xs->pool = xp_create_and_assign_umem(xs, xs->umem);
1283 		if (!xs->pool) {
1284 			err = -ENOMEM;
1285 			goto out_unlock;
1286 		}
1287 
1288 		err = xp_assign_dev(xs->pool, dev, qid, flags);
1289 		if (err) {
1290 			xp_destroy(xs->pool);
1291 			xs->pool = NULL;
1292 			goto out_unlock;
1293 		}
1294 	}
1295 
1296 	/* FQ and CQ are now owned by the buffer pool and cleaned up with it. */
1297 	xs->fq_tmp = NULL;
1298 	xs->cq_tmp = NULL;
1299 
1300 	xs->dev = dev;
1301 	xs->zc = xs->umem->zc;
1302 	xs->sg = !!(xs->umem->flags & XDP_UMEM_SG_FLAG);
1303 	xs->queue_id = qid;
1304 	xp_add_xsk(xs->pool, xs);
1305 
1306 out_unlock:
1307 	if (err) {
1308 		dev_put(dev);
1309 	} else {
1310 		/* Matches smp_rmb() in bind() for shared umem
1311 		 * sockets, and xsk_is_bound().
1312 		 */
1313 		smp_wmb();
1314 		WRITE_ONCE(xs->state, XSK_BOUND);
1315 	}
1316 out_release:
1317 	mutex_unlock(&xs->mutex);
1318 	rtnl_unlock();
1319 	return err;
1320 }
1321 
1322 struct xdp_umem_reg_v1 {
1323 	__u64 addr; /* Start of packet data area */
1324 	__u64 len; /* Length of packet data area */
1325 	__u32 chunk_size;
1326 	__u32 headroom;
1327 };
1328 
1329 struct xdp_umem_reg_v2 {
1330 	__u64 addr; /* Start of packet data area */
1331 	__u64 len; /* Length of packet data area */
1332 	__u32 chunk_size;
1333 	__u32 headroom;
1334 	__u32 flags;
1335 };
1336 
1337 static int xsk_setsockopt(struct socket *sock, int level, int optname,
1338 			  sockptr_t optval, unsigned int optlen)
1339 {
1340 	struct sock *sk = sock->sk;
1341 	struct xdp_sock *xs = xdp_sk(sk);
1342 	int err;
1343 
1344 	if (level != SOL_XDP)
1345 		return -ENOPROTOOPT;
1346 
1347 	switch (optname) {
1348 	case XDP_RX_RING:
1349 	case XDP_TX_RING:
1350 	{
1351 		struct xsk_queue **q;
1352 		int entries;
1353 
1354 		if (optlen < sizeof(entries))
1355 			return -EINVAL;
1356 		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
1357 			return -EFAULT;
1358 
1359 		mutex_lock(&xs->mutex);
1360 		if (xs->state != XSK_READY) {
1361 			mutex_unlock(&xs->mutex);
1362 			return -EBUSY;
1363 		}
1364 		q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
1365 		err = xsk_init_queue(entries, q, false);
1366 		if (!err && optname == XDP_TX_RING)
1367 			/* Tx needs to be explicitly woken up the first time */
1368 			xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
1369 		mutex_unlock(&xs->mutex);
1370 		return err;
1371 	}
1372 	case XDP_UMEM_REG:
1373 	{
1374 		size_t mr_size = sizeof(struct xdp_umem_reg);
1375 		struct xdp_umem_reg mr = {};
1376 		struct xdp_umem *umem;
1377 
1378 		if (optlen < sizeof(struct xdp_umem_reg_v1))
1379 			return -EINVAL;
1380 		else if (optlen < sizeof(struct xdp_umem_reg_v2))
1381 			mr_size = sizeof(struct xdp_umem_reg_v1);
1382 		else if (optlen < sizeof(mr))
1383 			mr_size = sizeof(struct xdp_umem_reg_v2);
1384 
1385 		if (copy_from_sockptr(&mr, optval, mr_size))
1386 			return -EFAULT;
1387 
1388 		mutex_lock(&xs->mutex);
1389 		if (xs->state != XSK_READY || xs->umem) {
1390 			mutex_unlock(&xs->mutex);
1391 			return -EBUSY;
1392 		}
1393 
1394 		umem = xdp_umem_create(&mr);
1395 		if (IS_ERR(umem)) {
1396 			mutex_unlock(&xs->mutex);
1397 			return PTR_ERR(umem);
1398 		}
1399 
1400 		/* Make sure umem is ready before it can be seen by others */
1401 		smp_wmb();
1402 		WRITE_ONCE(xs->umem, umem);
1403 		mutex_unlock(&xs->mutex);
1404 		return 0;
1405 	}
1406 	case XDP_UMEM_FILL_RING:
1407 	case XDP_UMEM_COMPLETION_RING:
1408 	{
1409 		struct xsk_queue **q;
1410 		int entries;
1411 
1412 		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
1413 			return -EFAULT;
1414 
1415 		mutex_lock(&xs->mutex);
1416 		if (xs->state != XSK_READY) {
1417 			mutex_unlock(&xs->mutex);
1418 			return -EBUSY;
1419 		}
1420 
1421 		q = (optname == XDP_UMEM_FILL_RING) ? &xs->fq_tmp :
1422 			&xs->cq_tmp;
1423 		err = xsk_init_queue(entries, q, true);
1424 		mutex_unlock(&xs->mutex);
1425 		return err;
1426 	}
1427 	default:
1428 		break;
1429 	}
1430 
1431 	return -ENOPROTOOPT;
1432 }
1433 
1434 static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring)
1435 {
1436 	ring->producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
1437 	ring->consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
1438 	ring->desc = offsetof(struct xdp_rxtx_ring, desc);
1439 }
1440 
1441 static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
1442 {
1443 	ring->producer = offsetof(struct xdp_umem_ring, ptrs.producer);
1444 	ring->consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
1445 	ring->desc = offsetof(struct xdp_umem_ring, desc);
1446 }
1447 
1448 struct xdp_statistics_v1 {
1449 	__u64 rx_dropped;
1450 	__u64 rx_invalid_descs;
1451 	__u64 tx_invalid_descs;
1452 };
1453 
1454 static int xsk_getsockopt(struct socket *sock, int level, int optname,
1455 			  char __user *optval, int __user *optlen)
1456 {
1457 	struct sock *sk = sock->sk;
1458 	struct xdp_sock *xs = xdp_sk(sk);
1459 	int len;
1460 
1461 	if (level != SOL_XDP)
1462 		return -ENOPROTOOPT;
1463 
1464 	if (get_user(len, optlen))
1465 		return -EFAULT;
1466 	if (len < 0)
1467 		return -EINVAL;
1468 
1469 	switch (optname) {
1470 	case XDP_STATISTICS:
1471 	{
1472 		struct xdp_statistics stats = {};
1473 		bool extra_stats = true;
1474 		size_t stats_size;
1475 
1476 		if (len < sizeof(struct xdp_statistics_v1)) {
1477 			return -EINVAL;
1478 		} else if (len < sizeof(stats)) {
1479 			extra_stats = false;
1480 			stats_size = sizeof(struct xdp_statistics_v1);
1481 		} else {
1482 			stats_size = sizeof(stats);
1483 		}
1484 
1485 		mutex_lock(&xs->mutex);
1486 		stats.rx_dropped = xs->rx_dropped;
1487 		if (extra_stats) {
1488 			stats.rx_ring_full = xs->rx_queue_full;
1489 			stats.rx_fill_ring_empty_descs =
1490 				xs->pool ? xskq_nb_queue_empty_descs(xs->pool->fq) : 0;
1491 			stats.tx_ring_empty_descs = xskq_nb_queue_empty_descs(xs->tx);
1492 		} else {
1493 			stats.rx_dropped += xs->rx_queue_full;
1494 		}
1495 		stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
1496 		stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
1497 		mutex_unlock(&xs->mutex);
1498 
1499 		if (copy_to_user(optval, &stats, stats_size))
1500 			return -EFAULT;
1501 		if (put_user(stats_size, optlen))
1502 			return -EFAULT;
1503 
1504 		return 0;
1505 	}
1506 	case XDP_MMAP_OFFSETS:
1507 	{
1508 		struct xdp_mmap_offsets off;
1509 		struct xdp_mmap_offsets_v1 off_v1;
1510 		bool flags_supported = true;
1511 		void *to_copy;
1512 
1513 		if (len < sizeof(off_v1))
1514 			return -EINVAL;
1515 		else if (len < sizeof(off))
1516 			flags_supported = false;
1517 
1518 		if (flags_supported) {
1519 			/* xdp_ring_offset is identical to xdp_ring_offset_v1
1520 			 * except for the flags field added to the end.
1521 			 */
1522 			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
1523 					       &off.rx);
1524 			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
1525 					       &off.tx);
1526 			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
1527 					       &off.fr);
1528 			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
1529 					       &off.cr);
1530 			off.rx.flags = offsetof(struct xdp_rxtx_ring,
1531 						ptrs.flags);
1532 			off.tx.flags = offsetof(struct xdp_rxtx_ring,
1533 						ptrs.flags);
1534 			off.fr.flags = offsetof(struct xdp_umem_ring,
1535 						ptrs.flags);
1536 			off.cr.flags = offsetof(struct xdp_umem_ring,
1537 						ptrs.flags);
1538 
1539 			len = sizeof(off);
1540 			to_copy = &off;
1541 		} else {
1542 			xsk_enter_rxtx_offsets(&off_v1.rx);
1543 			xsk_enter_rxtx_offsets(&off_v1.tx);
1544 			xsk_enter_umem_offsets(&off_v1.fr);
1545 			xsk_enter_umem_offsets(&off_v1.cr);
1546 
1547 			len = sizeof(off_v1);
1548 			to_copy = &off_v1;
1549 		}
1550 
1551 		if (copy_to_user(optval, to_copy, len))
1552 			return -EFAULT;
1553 		if (put_user(len, optlen))
1554 			return -EFAULT;
1555 
1556 		return 0;
1557 	}
1558 	case XDP_OPTIONS:
1559 	{
1560 		struct xdp_options opts = {};
1561 
1562 		if (len < sizeof(opts))
1563 			return -EINVAL;
1564 
1565 		mutex_lock(&xs->mutex);
1566 		if (xs->zc)
1567 			opts.flags |= XDP_OPTIONS_ZEROCOPY;
1568 		mutex_unlock(&xs->mutex);
1569 
1570 		len = sizeof(opts);
1571 		if (copy_to_user(optval, &opts, len))
1572 			return -EFAULT;
1573 		if (put_user(len, optlen))
1574 			return -EFAULT;
1575 
1576 		return 0;
1577 	}
1578 	default:
1579 		break;
1580 	}
1581 
1582 	return -EOPNOTSUPP;
1583 }
1584 
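/* mmap() of the rings. The Rx/Tx rings hang off the socket; the fill and
 * completion rings live in fq_tmp/cq_tmp until bind(), after which they are
 * owned by the buffer pool, so the lookup depends on the socket state.
 */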
1585 static int xsk_mmap(struct file *file, struct socket *sock,
1586 		    struct vm_area_struct *vma)
1587 {
1588 	loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
1589 	unsigned long size = vma->vm_end - vma->vm_start;
1590 	struct xdp_sock *xs = xdp_sk(sock->sk);
1591 	int state = READ_ONCE(xs->state);
1592 	struct xsk_queue *q = NULL;
1593 
1594 	if (state != XSK_READY && state != XSK_BOUND)
1595 		return -EBUSY;
1596 
1597 	if (offset == XDP_PGOFF_RX_RING) {
1598 		q = READ_ONCE(xs->rx);
1599 	} else if (offset == XDP_PGOFF_TX_RING) {
1600 		q = READ_ONCE(xs->tx);
1601 	} else {
1602 		/* Matches the smp_wmb() in XDP_UMEM_REG */
1603 		smp_rmb();
1604 		if (offset == XDP_UMEM_PGOFF_FILL_RING)
1605 			q = state == XSK_READY ? READ_ONCE(xs->fq_tmp) :
1606 						 READ_ONCE(xs->pool->fq);
1607 		else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
1608 			q = state == XSK_READY ? READ_ONCE(xs->cq_tmp) :
1609 						 READ_ONCE(xs->pool->cq);
1610 	}
1611 
1612 	if (!q)
1613 		return -EINVAL;
1614 
1615 	/* Matches the smp_wmb() in xsk_init_queue */
1616 	smp_rmb();
1617 	if (size > q->ring_vmalloc_size)
1618 		return -EINVAL;
1619 
1620 	return remap_vmalloc_range(vma, q->ring, 0);
1621 }
1622 
1623 static int xsk_notifier(struct notifier_block *this,
1624 			unsigned long msg, void *ptr)
1625 {
1626 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1627 	struct net *net = dev_net(dev);
1628 	struct sock *sk;
1629 
1630 	switch (msg) {
1631 	case NETDEV_UNREGISTER:
1632 		mutex_lock(&net->xdp.lock);
1633 		sk_for_each(sk, &net->xdp.list) {
1634 			struct xdp_sock *xs = xdp_sk(sk);
1635 
1636 			mutex_lock(&xs->mutex);
1637 			if (xs->dev == dev) {
1638 				sk->sk_err = ENETDOWN;
1639 				if (!sock_flag(sk, SOCK_DEAD))
1640 					sk_error_report(sk);
1641 
1642 				xsk_unbind_dev(xs);
1643 
1644 				/* Clear device references. */
1645 				xp_clear_dev(xs->pool);
1646 			}
1647 			mutex_unlock(&xs->mutex);
1648 		}
1649 		mutex_unlock(&net->xdp.lock);
1650 		break;
1651 	}
1652 	return NOTIFY_DONE;
1653 }
1654 
1655 static struct proto xsk_proto = {
1656 	.name =		"XDP",
1657 	.owner =	THIS_MODULE,
1658 	.obj_size =	sizeof(struct xdp_sock),
1659 };
1660 
1661 static const struct proto_ops xsk_proto_ops = {
1662 	.family		= PF_XDP,
1663 	.owner		= THIS_MODULE,
1664 	.release	= xsk_release,
1665 	.bind		= xsk_bind,
1666 	.connect	= sock_no_connect,
1667 	.socketpair	= sock_no_socketpair,
1668 	.accept		= sock_no_accept,
1669 	.getname	= sock_no_getname,
1670 	.poll		= xsk_poll,
1671 	.ioctl		= sock_no_ioctl,
1672 	.listen		= sock_no_listen,
1673 	.shutdown	= sock_no_shutdown,
1674 	.setsockopt	= xsk_setsockopt,
1675 	.getsockopt	= xsk_getsockopt,
1676 	.sendmsg	= xsk_sendmsg,
1677 	.recvmsg	= xsk_recvmsg,
1678 	.mmap		= xsk_mmap,
1679 };
1680 
1681 static void xsk_destruct(struct sock *sk)
1682 {
1683 	struct xdp_sock *xs = xdp_sk(sk);
1684 
1685 	if (!sock_flag(sk, SOCK_DEAD))
1686 		return;
1687 
1688 	if (!xp_put_pool(xs->pool))
1689 		xdp_put_umem(xs->umem, !xs->pool);
1690 }
1691 
1692 static int xsk_create(struct net *net, struct socket *sock, int protocol,
1693 		      int kern)
1694 {
1695 	struct xdp_sock *xs;
1696 	struct sock *sk;
1697 
1698 	if (!ns_capable(net->user_ns, CAP_NET_RAW))
1699 		return -EPERM;
1700 	if (sock->type != SOCK_RAW)
1701 		return -ESOCKTNOSUPPORT;
1702 
1703 	if (protocol)
1704 		return -EPROTONOSUPPORT;
1705 
1706 	sock->state = SS_UNCONNECTED;
1707 
1708 	sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
1709 	if (!sk)
1710 		return -ENOBUFS;
1711 
1712 	sock->ops = &xsk_proto_ops;
1713 
1714 	sock_init_data(sock, sk);
1715 
1716 	sk->sk_family = PF_XDP;
1717 
1718 	sk->sk_destruct = xsk_destruct;
1719 
1720 	sock_set_flag(sk, SOCK_RCU_FREE);
1721 
1722 	xs = xdp_sk(sk);
1723 	xs->state = XSK_READY;
1724 	mutex_init(&xs->mutex);
1725 	spin_lock_init(&xs->rx_lock);
1726 
1727 	INIT_LIST_HEAD(&xs->map_list);
1728 	spin_lock_init(&xs->map_list_lock);
1729 
1730 	mutex_lock(&net->xdp.lock);
1731 	sk_add_node_rcu(sk, &net->xdp.list);
1732 	mutex_unlock(&net->xdp.lock);
1733 
1734 	sock_prot_inuse_add(net, &xsk_proto, 1);
1735 
1736 	return 0;
1737 }
1738 
1739 static const struct net_proto_family xsk_family_ops = {
1740 	.family = PF_XDP,
1741 	.create = xsk_create,
1742 	.owner	= THIS_MODULE,
1743 };
1744 
1745 static struct notifier_block xsk_netdev_notifier = {
1746 	.notifier_call	= xsk_notifier,
1747 };
1748 
1749 static int __net_init xsk_net_init(struct net *net)
1750 {
1751 	mutex_init(&net->xdp.lock);
1752 	INIT_HLIST_HEAD(&net->xdp.list);
1753 	return 0;
1754 }
1755 
1756 static void __net_exit xsk_net_exit(struct net *net)
1757 {
1758 	WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
1759 }
1760 
1761 static struct pernet_operations xsk_net_ops = {
1762 	.init = xsk_net_init,
1763 	.exit = xsk_net_exit,
1764 };
1765 
1766 static int __init xsk_init(void)
1767 {
1768 	int err, cpu;
1769 
1770 	err = proto_register(&xsk_proto, 0 /* no slab */);
1771 	if (err)
1772 		goto out;
1773 
1774 	err = sock_register(&xsk_family_ops);
1775 	if (err)
1776 		goto out_proto;
1777 
1778 	err = register_pernet_subsys(&xsk_net_ops);
1779 	if (err)
1780 		goto out_sk;
1781 
1782 	err = register_netdevice_notifier(&xsk_netdev_notifier);
1783 	if (err)
1784 		goto out_pernet;
1785 
1786 	for_each_possible_cpu(cpu)
1787 		INIT_LIST_HEAD(&per_cpu(xskmap_flush_list, cpu));
1788 	return 0;
1789 
1790 out_pernet:
1791 	unregister_pernet_subsys(&xsk_net_ops);
1792 out_sk:
1793 	sock_unregister(PF_XDP);
1794 out_proto:
1795 	proto_unregister(&xsk_proto);
1796 out:
1797 	return err;
1798 }
1799 
1800 fs_initcall(xsk_init);
1801