// SPDX-License-Identifier: GPL-2.0
/* XDP sockets
 *
 * AF_XDP sockets allow a channel between XDP programs and userspace
 * applications.
 * Copyright(c) 2018 Intel Corporation.
 *
 * Author(s): Björn Töpel <bjorn.topel@intel.com>
 *	      Magnus Karlsson <magnus.karlsson@intel.com>
 */

#define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__

#include <linux/if_xdp.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/vmalloc.h>
#include <net/xdp_sock_drv.h>
#include <net/busy_poll.h>
#include <net/netdev_rx_queue.h>
#include <net/xdp.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"

#define TX_BATCH_SIZE 32
#define MAX_PER_SOCKET_BUDGET (TX_BATCH_SIZE)
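
/* TX_BATCH_SIZE bounds how many descriptors one send syscall processes
 * in copy mode before returning -EAGAIN, and MAX_PER_SOCKET_BUDGET
 * bounds how much Tx work a single socket may consume per pass when
 * several sockets share one buffer pool, so that a busy socket cannot
 * starve the others.
 */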

void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
	if (pool->cached_need_wakeup & XDP_WAKEUP_RX)
		return;

	pool->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
	pool->cached_need_wakeup |= XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_set_rx_need_wakeup);

void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	pool->cached_need_wakeup |= XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_set_tx_need_wakeup);

void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{
	if (!(pool->cached_need_wakeup & XDP_WAKEUP_RX))
		return;

	pool->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	pool->cached_need_wakeup &= ~XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_clear_rx_need_wakeup);

void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	if (!(pool->cached_need_wakeup & XDP_WAKEUP_TX))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	pool->cached_need_wakeup &= ~XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_clear_tx_need_wakeup);

bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
{
	return pool->uses_need_wakeup;
}
EXPORT_SYMBOL(xsk_uses_need_wakeup);
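
/* When the need_wakeup feature is negotiated, user space is expected to
 * test the ring flag before issuing a syscall. An illustrative (not
 * normative) user-space sketch, where 'tx_ring_flags' stands for the
 * application's mapped Tx ring flags field and 'xsk_fd' for its socket:
 *
 *	if (*tx_ring_flags & XDP_RING_NEED_WAKEUP)
 *		sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
 *
 * For Rx/fill ring wakeups, poll() on the socket serves the same purpose.
 */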

struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
					    u16 queue_id)
{
	if (queue_id < dev->real_num_rx_queues)
		return dev->_rx[queue_id].pool;
	if (queue_id < dev->real_num_tx_queues)
		return dev->_tx[queue_id].pool;

	return NULL;
}
EXPORT_SYMBOL(xsk_get_pool_from_qid);

void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
{
	if (queue_id < dev->num_rx_queues)
		dev->_rx[queue_id].pool = NULL;
	if (queue_id < dev->num_tx_queues)
		dev->_tx[queue_id].pool = NULL;
}

/* The buffer pool is stored both in the _rx struct and the _tx struct as we do
 * not know if the device has more tx queues than rx, or the opposite.
 * This might also change during run time.
 */
int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
			u16 queue_id)
{
	if (queue_id >= max_t(unsigned int,
			      dev->real_num_rx_queues,
			      dev->real_num_tx_queues))
		return -EINVAL;

	if (queue_id < dev->real_num_rx_queues)
		dev->_rx[queue_id].pool = pool;
	if (queue_id < dev->real_num_tx_queues)
		dev->_tx[queue_id].pool = pool;

	return 0;
}
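
/* Zero-copy capable drivers typically use the helpers above when a
 * queue is (re)configured, along these lines (sketch only; 'netdev'
 * and 'qid' are whatever the driver has at hand):
 *
 *	struct xsk_buff_pool *pool = xsk_get_pool_from_qid(netdev, qid);
 *
 *	if (pool)
 *		... set the queue up in zero-copy mode ...
 */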

static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff_xsk *xskb, u32 len,
			u32 flags)
{
	u64 addr;
	int err;

	addr = xp_get_handle(xskb);
	err = xskq_prod_reserve_desc(xs->rx, addr, len, flags);
	if (err) {
		xs->rx_queue_full++;
		return err;
	}

	xp_release(xskb);
	return 0;
}

static int xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
	u32 frags = xdp_buff_has_frags(xdp);
	struct xdp_buff_xsk *pos, *tmp;
	struct list_head *xskb_list;
	u32 contd = 0;
	int err;

	if (frags)
		contd = XDP_PKT_CONTD;

	err = __xsk_rcv_zc(xs, xskb, len, contd);
	if (err)
		goto err;
	if (likely(!frags))
		return 0;

	xskb_list = &xskb->pool->xskb_list;
	list_for_each_entry_safe(pos, tmp, xskb_list, xskb_list_node) {
		if (list_is_singular(xskb_list))
			contd = 0;
		len = pos->xdp.data_end - pos->xdp.data;
		err = __xsk_rcv_zc(xs, pos, len, contd);
		if (err)
			goto err;
		list_del(&pos->xskb_list_node);
	}

	return 0;
err:
	xsk_buff_free(xdp);
	return err;
}

static void *xsk_copy_xdp_start(struct xdp_buff *from)
{
	if (unlikely(xdp_data_meta_unsupported(from)))
		return from->data;
	else
		return from->data_meta;
}

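/* Copy up to @to_len bytes into the current Rx buffer, advancing the
 * source cursor (@from/@from_len) across the linear area and any
 * fragments via @frag. @rem is the number of bytes still outstanding
 * for the packet as a whole. Returns the number of bytes placed in
 * this buffer.
 */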
static u32 xsk_copy_xdp(void *to, void **from, u32 to_len,
			u32 *from_len, skb_frag_t **frag, u32 rem)
{
	u32 copied = 0;

	while (1) {
		u32 copy_len = min_t(u32, *from_len, to_len);

		memcpy(to, *from, copy_len);
		copied += copy_len;
		if (rem == copied)
			return copied;

		if (*from_len == copy_len) {
			*from = skb_frag_address(*frag);
			*from_len = skb_frag_size((*frag)++);
		} else {
			*from += copy_len;
			*from_len -= copy_len;
		}
		if (to_len == copy_len)
			return copied;

		to_len -= copy_len;
		to += copy_len;
	}
}

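/* Copy-mode receive: a packet that fits in a single pool frame takes
 * the fast path below; anything larger, or carrying fragments, is
 * split across ceil(len / frame_size) pool buffers that are chained
 * together with the XDP_PKT_CONTD option flag.
 */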
static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	u32 frame_size = xsk_pool_get_rx_frame_size(xs->pool);
	void *copy_from = xsk_copy_xdp_start(xdp), *copy_to;
	u32 from_len, meta_len, rem, num_desc;
	struct xdp_buff_xsk *xskb;
	struct xdp_buff *xsk_xdp;
	skb_frag_t *frag;

	from_len = xdp->data_end - copy_from;
	meta_len = xdp->data - copy_from;
	rem = len + meta_len;

	if (len <= frame_size && !xdp_buff_has_frags(xdp)) {
		int err;

		xsk_xdp = xsk_buff_alloc(xs->pool);
		if (!xsk_xdp) {
			xs->rx_dropped++;
			return -ENOMEM;
		}
		memcpy(xsk_xdp->data - meta_len, copy_from, rem);
		xskb = container_of(xsk_xdp, struct xdp_buff_xsk, xdp);
		err = __xsk_rcv_zc(xs, xskb, len, 0);
		if (err) {
			xsk_buff_free(xsk_xdp);
			return err;
		}

		return 0;
	}

	num_desc = (len - 1) / frame_size + 1;

	if (!xsk_buff_can_alloc(xs->pool, num_desc)) {
		xs->rx_dropped++;
		return -ENOMEM;
	}
	if (xskq_prod_nb_free(xs->rx, num_desc) < num_desc) {
		xs->rx_queue_full++;
		return -ENOBUFS;
	}

	if (xdp_buff_has_frags(xdp)) {
		struct skb_shared_info *sinfo;

		sinfo = xdp_get_shared_info_from_buff(xdp);
		frag = &sinfo->frags[0];
	}

	do {
		u32 to_len = frame_size + meta_len;
		u32 copied;

		xsk_xdp = xsk_buff_alloc(xs->pool);
		copy_to = xsk_xdp->data - meta_len;

		copied = xsk_copy_xdp(copy_to, &copy_from, to_len, &from_len, &frag, rem);
		rem -= copied;

		xskb = container_of(xsk_xdp, struct xdp_buff_xsk, xdp);
		__xsk_rcv_zc(xs, xskb, copied - meta_len, rem ? XDP_PKT_CONTD : 0);
		meta_len = 0;
	} while (rem);

	return 0;
}

static bool xsk_tx_writeable(struct xdp_sock *xs)
{
	if (xskq_cons_present_entries(xs->tx) > xs->tx->nentries / 2)
		return false;

	return true;
}

static bool xsk_is_bound(struct xdp_sock *xs)
{
	if (READ_ONCE(xs->state) == XSK_BOUND) {
		/* Matches smp_wmb() in bind(). */
		smp_rmb();
		return true;
	}
	return false;
}

static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	if (!xsk_is_bound(xs))
		return -ENXIO;

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
		return -EINVAL;

	if (len > xsk_pool_get_rx_frame_size(xs->pool) && !xs->sg) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	sk_mark_napi_id_once_xdp(&xs->sk, xdp);
	return 0;
}

static void xsk_flush(struct xdp_sock *xs)
{
	xskq_prod_submit(xs->rx);
	__xskq_cons_release(xs->pool->fq);
	sock_def_readable(&xs->sk);
}

int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	u32 len = xdp_get_buff_len(xdp);
	int err;

	spin_lock_bh(&xs->rx_lock);
	err = xsk_rcv_check(xs, xdp, len);
	if (!err) {
		err = __xsk_rcv(xs, xdp, len);
		xsk_flush(xs);
	}
	spin_unlock_bh(&xs->rx_lock);
	return err;
}

static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	u32 len = xdp_get_buff_len(xdp);
	int err;

	err = xsk_rcv_check(xs, xdp, len);
	if (err)
		return err;

	if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) {
		len = xdp->data_end - xdp->data;
		return xsk_rcv_zc(xs, xdp, len);
	}

	err = __xsk_rcv(xs, xdp, len);
	if (!err)
		xdp_return_buff(xdp);
	return err;
}

int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	struct list_head *flush_list = bpf_net_ctx_get_xskmap_flush_list();
	int err;

	err = xsk_rcv(xs, xdp);
	if (err)
		return err;

	if (!xs->flush_node.prev)
		list_add(&xs->flush_node, flush_list);

	return 0;
}

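/* Rx completion is batched: __xsk_map_redirect() only queues the
 * socket on a per-context flush list, and the flush below publishes
 * all queued descriptors and wakes the sockets in one pass at the end
 * of the NAPI cycle.
 */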
void __xsk_map_flush(void)
{
	struct list_head *flush_list = bpf_net_ctx_get_xskmap_flush_list();
	struct xdp_sock *xs, *tmp;

	list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
		xsk_flush(xs);
		__list_del_clearprev(&xs->flush_node);
	}
}

#ifdef CONFIG_DEBUG_NET
bool xsk_map_check_flush(void)
{
	if (list_empty(bpf_net_ctx_get_xskmap_flush_list()))
		return false;
	__xsk_map_flush();
	return true;
}
#endif

void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{
	xskq_prod_submit_n(pool->cq, nb_entries);
}
EXPORT_SYMBOL(xsk_tx_completed);

void xsk_tx_release(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		__xskq_cons_release(xs->tx);
		if (xsk_tx_writeable(xs))
			xs->sk.sk_write_space(&xs->sk);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(xsk_tx_release);

bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc)
{
	bool budget_exhausted = false;
	struct xdp_sock *xs;

	rcu_read_lock();
again:
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		if (xs->tx_budget_spent >= MAX_PER_SOCKET_BUDGET) {
			budget_exhausted = true;
			continue;
		}

		if (!xskq_cons_peek_desc(xs->tx, desc, pool)) {
			if (xskq_has_descs(xs->tx))
				xskq_cons_release(xs->tx);
			continue;
		}

		xs->tx_budget_spent++;

		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		if (xskq_prod_reserve_addr(pool->cq, desc->addr))
			goto out;

		xskq_cons_release(xs->tx);
		rcu_read_unlock();
		return true;
	}

	if (budget_exhausted) {
		list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list)
			xs->tx_budget_spent = 0;

		budget_exhausted = false;
		goto again;
	}

out:
	rcu_read_unlock();
	return false;
}
EXPORT_SYMBOL(xsk_tx_peek_desc);
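
/* A zero-copy driver's Tx path typically drains descriptors roughly
 * like this (illustrative sketch; 'budget' and the DMA posting are the
 * driver's own):
 *
 *	struct xdp_desc desc;
 *
 *	while (budget-- && xsk_tx_peek_desc(pool, &desc)) {
 *		dma_addr_t dma = xsk_buff_raw_get_dma(pool, desc.addr);
 *
 *		... post dma + desc.len to the hardware ring ...
 *	}
 *	xsk_tx_release(pool);
 */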

static u32 xsk_tx_peek_release_fallback(struct xsk_buff_pool *pool, u32 max_entries)
{
	struct xdp_desc *descs = pool->tx_descs;
	u32 nb_pkts = 0;

	while (nb_pkts < max_entries && xsk_tx_peek_desc(pool, &descs[nb_pkts]))
		nb_pkts++;

	xsk_tx_release(pool);
	return nb_pkts;
}

u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 nb_pkts)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	if (!list_is_singular(&pool->xsk_tx_list)) {
		/* Fallback to the non-batched version */
		rcu_read_unlock();
		return xsk_tx_peek_release_fallback(pool, nb_pkts);
	}

	xs = list_first_or_null_rcu(&pool->xsk_tx_list, struct xdp_sock, tx_list);
	if (!xs) {
		nb_pkts = 0;
		goto out;
	}

	nb_pkts = xskq_cons_nb_entries(xs->tx, nb_pkts);

	/* This is the backpressure mechanism for the Tx path. Try to
	 * reserve space in the completion queue for all packets, but
	 * if there are fewer slots available, just process that many
	 * packets. This avoids having to implement any buffering in
	 * the Tx path.
	 */
	nb_pkts = xskq_prod_nb_free(pool->cq, nb_pkts);
	if (!nb_pkts)
		goto out;

	nb_pkts = xskq_cons_read_desc_batch(xs->tx, pool, nb_pkts);
	if (!nb_pkts) {
		xs->tx->queue_empty_descs++;
		goto out;
	}

	__xskq_cons_release(xs->tx);
	xskq_prod_write_addr_batch(pool->cq, pool->tx_descs, nb_pkts);
	xs->sk.sk_write_space(&xs->sk);

out:
	rcu_read_unlock();
	return nb_pkts;
}
EXPORT_SYMBOL(xsk_tx_peek_release_desc_batch);

static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
{
	struct net_device *dev = xs->dev;

	return dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
}

static int xsk_cq_reserve_addr_locked(struct xdp_sock *xs, u64 addr)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&xs->pool->cq_lock, flags);
	ret = xskq_prod_reserve_addr(xs->pool->cq, addr);
	spin_unlock_irqrestore(&xs->pool->cq_lock, flags);

	return ret;
}

static void xsk_cq_submit_locked(struct xdp_sock *xs, u32 n)
{
	unsigned long flags;

	spin_lock_irqsave(&xs->pool->cq_lock, flags);
	xskq_prod_submit_n(xs->pool->cq, n);
	spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
}

static void xsk_cq_cancel_locked(struct xdp_sock *xs, u32 n)
{
	unsigned long flags;

	spin_lock_irqsave(&xs->pool->cq_lock, flags);
	xskq_prod_cancel_n(xs->pool->cq, n);
	spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
}

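/* The number of descriptors consumed to build an skb is stashed in
 * skb_shinfo(skb)->destructor_arg as a plain integer cast to a
 * pointer. xsk_destruct_skb() reads it back to know how many
 * completion queue entries to submit when the skb is freed.
 */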
static u32 xsk_get_num_desc(struct sk_buff *skb)
{
	return skb ? (long)skb_shinfo(skb)->destructor_arg : 0;
}

static void xsk_destruct_skb(struct sk_buff *skb)
{
	struct xsk_tx_metadata_compl *compl = &skb_shinfo(skb)->xsk_meta;

	if (compl->tx_timestamp) {
		/* sw completion timestamp, not a real one */
		*compl->tx_timestamp = ktime_get_tai_fast_ns();
	}

	xsk_cq_submit_locked(xdp_sk(skb->sk), xsk_get_num_desc(skb));
	sock_wfree(skb);
}

static void xsk_set_destructor_arg(struct sk_buff *skb)
{
	long num = xsk_get_num_desc(xdp_sk(skb->sk)->skb) + 1;

	skb_shinfo(skb)->destructor_arg = (void *)num;
}

static void xsk_consume_skb(struct sk_buff *skb)
{
	struct xdp_sock *xs = xdp_sk(skb->sk);

	skb->destructor = sock_wfree;
	xsk_cq_cancel_locked(xs, xsk_get_num_desc(skb));
	/* Free skb without triggering the perf drop trace */
	consume_skb(skb);
	xs->skb = NULL;
}

static void xsk_drop_skb(struct sk_buff *skb)
{
	xdp_sk(skb->sk)->tx->invalid_descs += xsk_get_num_desc(skb);
	xsk_consume_skb(skb);
}

static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs,
					      struct xdp_desc *desc)
{
	struct xsk_buff_pool *pool = xs->pool;
	u32 hr, len, ts, offset, copy, copied;
	struct sk_buff *skb = xs->skb;
	struct page *page;
	void *buffer;
	int err, i;
	u64 addr;

	if (!skb) {
		hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(xs->dev->needed_headroom));

		skb = sock_alloc_send_skb(&xs->sk, hr, 1, &err);
		if (unlikely(!skb))
			return ERR_PTR(err);

		skb_reserve(skb, hr);
	}

	addr = desc->addr;
	len = desc->len;
	ts = pool->unaligned ? len : pool->chunk_size;

	buffer = xsk_buff_raw_get_data(pool, addr);
	offset = offset_in_page(buffer);
	addr = buffer - pool->addrs;

	for (copied = 0, i = skb_shinfo(skb)->nr_frags; copied < len; i++) {
		if (unlikely(i >= MAX_SKB_FRAGS))
			return ERR_PTR(-EOVERFLOW);

		page = pool->umem->pgs[addr >> PAGE_SHIFT];
		get_page(page);

		copy = min_t(u32, PAGE_SIZE - offset, len - copied);
		skb_fill_page_desc(skb, i, page, offset, copy);

		copied += copy;
		addr += copy;
		offset = 0;
	}

	skb->len += len;
	skb->data_len += len;
	skb->truesize += ts;

	refcount_add(ts, &xs->sk.sk_wmem_alloc);

	return skb;
}

static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
				     struct xdp_desc *desc)
{
	struct xsk_tx_metadata *meta = NULL;
	struct net_device *dev = xs->dev;
	struct sk_buff *skb = xs->skb;
	bool first_frag = false;
	int err;

	if (dev->priv_flags & IFF_TX_SKB_NO_LINEAR) {
		skb = xsk_build_skb_zerocopy(xs, desc);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
			goto free_err;
		}
	} else {
		u32 hr, tr, len;
		void *buffer;

		buffer = xsk_buff_raw_get_data(xs->pool, desc->addr);
		len = desc->len;

		if (!skb) {
			hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(dev->needed_headroom));
			tr = dev->needed_tailroom;
			skb = sock_alloc_send_skb(&xs->sk, hr + len + tr, 1, &err);
			if (unlikely(!skb))
				goto free_err;

			skb_reserve(skb, hr);
			skb_put(skb, len);

			err = skb_store_bits(skb, 0, buffer, len);
			if (unlikely(err)) {
				kfree_skb(skb);
				goto free_err;
			}

			first_frag = true;
		} else {
			int nr_frags = skb_shinfo(skb)->nr_frags;
			struct page *page;
			u8 *vaddr;

			if (unlikely(nr_frags == (MAX_SKB_FRAGS - 1) && xp_mb_desc(desc))) {
				err = -EOVERFLOW;
				goto free_err;
			}

			page = alloc_page(xs->sk.sk_allocation);
			if (unlikely(!page)) {
				err = -EAGAIN;
				goto free_err;
			}

			vaddr = kmap_local_page(page);
			memcpy(vaddr, buffer, len);
			kunmap_local(vaddr);

			skb_add_rx_frag(skb, nr_frags, page, 0, len, PAGE_SIZE);
			refcount_add(PAGE_SIZE, &xs->sk.sk_wmem_alloc);
		}

		if (first_frag && desc->options & XDP_TX_METADATA) {
			if (unlikely(xs->pool->tx_metadata_len == 0)) {
				err = -EINVAL;
				goto free_err;
			}

			meta = buffer - xs->pool->tx_metadata_len;
			if (unlikely(!xsk_buff_valid_tx_metadata(meta))) {
				err = -EINVAL;
				goto free_err;
			}

			if (meta->flags & XDP_TXMD_FLAGS_CHECKSUM) {
				if (unlikely(meta->request.csum_start +
					     meta->request.csum_offset +
					     sizeof(__sum16) > len)) {
					err = -EINVAL;
					goto free_err;
				}

				skb->csum_start = hr + meta->request.csum_start;
				skb->csum_offset = meta->request.csum_offset;
				skb->ip_summed = CHECKSUM_PARTIAL;

				if (unlikely(xs->pool->tx_sw_csum)) {
					err = skb_checksum_help(skb);
					if (err)
						goto free_err;
				}
			}
		}
	}

	skb->dev = dev;
	skb->priority = READ_ONCE(xs->sk.sk_priority);
	skb->mark = READ_ONCE(xs->sk.sk_mark);
	skb->destructor = xsk_destruct_skb;
	xsk_tx_metadata_to_compl(meta, &skb_shinfo(skb)->xsk_meta);
	xsk_set_destructor_arg(skb);

	return skb;

free_err:
	if (err == -EOVERFLOW) {
		/* Drop the packet */
		xsk_set_destructor_arg(xs->skb);
		xsk_drop_skb(xs->skb);
		xskq_cons_release(xs->tx);
	} else {
		/* Let application retry */
		xsk_cq_cancel_locked(xs, 1);
	}

	return ERR_PTR(err);
}

static int __xsk_generic_xmit(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);
	u32 max_batch = TX_BATCH_SIZE;
	bool sent_frame = false;
	struct xdp_desc desc;
	struct sk_buff *skb;
	int err = 0;

	mutex_lock(&xs->mutex);

	/* Since we dropped the RCU read lock, the socket state might have changed. */
	if (unlikely(!xsk_is_bound(xs))) {
		err = -ENXIO;
		goto out;
	}

	if (xs->queue_id >= xs->dev->real_num_tx_queues)
		goto out;

	while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
		if (max_batch-- == 0) {
			err = -EAGAIN;
			goto out;
		}

		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		if (xsk_cq_reserve_addr_locked(xs, desc.addr))
			goto out;

		skb = xsk_build_skb(xs, &desc);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
			if (err != -EOVERFLOW)
				goto out;
			err = 0;
			continue;
		}

		xskq_cons_release(xs->tx);

		if (xp_mb_desc(&desc)) {
			xs->skb = skb;
			continue;
		}

		err = __dev_direct_xmit(skb, xs->queue_id);
		if (err == NETDEV_TX_BUSY) {
			/* Tell user-space to retry the send */
			xskq_cons_cancel_n(xs->tx, xsk_get_num_desc(skb));
			xsk_consume_skb(skb);
			err = -EAGAIN;
			goto out;
		}

		/* Ignore NET_XMIT_CN as packet might have been sent */
		if (err == NET_XMIT_DROP) {
			/* SKB completed but not sent */
			err = -EBUSY;
			xs->skb = NULL;
			goto out;
		}

		sent_frame = true;
		xs->skb = NULL;
	}

	if (xskq_has_descs(xs->tx)) {
		if (xs->skb)
			xsk_drop_skb(xs->skb);
		xskq_cons_release(xs->tx);
	}

out:
	if (sent_frame)
		if (xsk_tx_writeable(xs))
			sk->sk_write_space(sk);

	mutex_unlock(&xs->mutex);
	return err;
}

static int xsk_generic_xmit(struct sock *sk)
{
	int ret;

	/* Drop the RCU lock since the SKB path might sleep. */
	rcu_read_unlock();
	ret = __xsk_generic_xmit(sk);
	/* Reacquire RCU lock before going into common code. */
	rcu_read_lock();

	return ret;
}

static bool xsk_no_wakeup(struct sock *sk)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	/* Prefer busy-polling, skip the wakeup. */
	return READ_ONCE(sk->sk_prefer_busy_poll) && READ_ONCE(sk->sk_ll_usec) &&
		READ_ONCE(sk->sk_napi_id) >= MIN_NAPI_ID;
#else
	return false;
#endif
}
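
/* The predicate above corresponds to the standard busy-poll socket
 * options. An application that wants to drive the NAPI context itself
 * would set something like the following before binding (sketch; the
 * usec and budget values are arbitrary examples):
 *
 *	int one = 1, usecs = 20, budget = 64;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_PREFER_BUSY_POLL, &one, sizeof(one));
 *	setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL, &usecs, sizeof(usecs));
 *	setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL_BUDGET, &budget, sizeof(budget));
 */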

static int xsk_check_common(struct xdp_sock *xs)
{
	if (unlikely(!xsk_is_bound(xs)))
		return -ENXIO;
	if (unlikely(!(xs->dev->flags & IFF_UP)))
		return -ENETDOWN;

	return 0;
}

static int __xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct xsk_buff_pool *pool;
	int err;

	err = xsk_check_common(xs);
	if (err)
		return err;
	if (unlikely(need_wait))
		return -EOPNOTSUPP;
	if (unlikely(!xs->tx))
		return -ENOBUFS;

	if (sk_can_busy_loop(sk)) {
		if (xs->zc)
			__sk_mark_napi_id_once(sk, xsk_pool_get_napi_id(xs->pool));
		sk_busy_loop(sk, 1); /* only support non-blocking sockets */
	}

	if (xs->zc && xsk_no_wakeup(sk))
		return 0;

	pool = xs->pool;
	if (pool->cached_need_wakeup & XDP_WAKEUP_TX) {
		if (xs->zc)
			return xsk_wakeup(xs, XDP_WAKEUP_TX);
		return xsk_generic_xmit(sk);
	}
	return 0;
}

static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	int ret;

	rcu_read_lock();
	ret = __xsk_sendmsg(sock, m, total_len);
	rcu_read_unlock();

	return ret;
}

static int __xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
{
	bool need_wait = !(flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int err;

	err = xsk_check_common(xs);
	if (err)
		return err;
	if (unlikely(!xs->rx))
		return -ENOBUFS;
	if (unlikely(need_wait))
		return -EOPNOTSUPP;

	if (sk_can_busy_loop(sk))
		sk_busy_loop(sk, 1); /* only support non-blocking sockets */

	if (xsk_no_wakeup(sk))
		return 0;

	if (xs->pool->cached_need_wakeup & XDP_WAKEUP_RX && xs->zc)
		return xsk_wakeup(xs, XDP_WAKEUP_RX);
	return 0;
}

static int xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
{
	int ret;

	rcu_read_lock();
	ret = __xsk_recvmsg(sock, m, len, flags);
	rcu_read_unlock();

	return ret;
}

static __poll_t xsk_poll(struct file *file, struct socket *sock,
			     struct poll_table_struct *wait)
{
	__poll_t mask = 0;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct xsk_buff_pool *pool;

	sock_poll_wait(file, sock, wait);

	rcu_read_lock();
	if (xsk_check_common(xs))
		goto out;

	pool = xs->pool;

	if (pool->cached_need_wakeup) {
		if (xs->zc)
			xsk_wakeup(xs, pool->cached_need_wakeup);
		else if (xs->tx)
			/* Poll needs to drive Tx also in copy mode */
			xsk_generic_xmit(sk);
	}

	if (xs->rx && !xskq_prod_is_empty(xs->rx))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (xs->tx && xsk_tx_writeable(xs))
		mask |= EPOLLOUT | EPOLLWRNORM;
out:
	rcu_read_unlock();
	return mask;
}

static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
			  bool umem_queue)
{
	struct xsk_queue *q;

	if (entries == 0 || *queue || !is_power_of_2(entries))
		return -EINVAL;

	q = xskq_create(entries, umem_queue);
	if (!q)
		return -ENOMEM;

	/* Make sure queue is ready before it can be seen by others */
	smp_wmb();
	WRITE_ONCE(*queue, q);
	return 0;
}

static void xsk_unbind_dev(struct xdp_sock *xs)
{
	struct net_device *dev = xs->dev;

	if (xs->state != XSK_BOUND)
		return;
	WRITE_ONCE(xs->state, XSK_UNBOUND);

	/* Wait for driver to stop using the xdp socket. */
	xp_del_xsk(xs->pool, xs);
	synchronize_net();
	dev_put(dev);
}

static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs,
					      struct xdp_sock __rcu ***map_entry)
{
	struct xsk_map *map = NULL;
	struct xsk_map_node *node;

	*map_entry = NULL;

	spin_lock_bh(&xs->map_list_lock);
	node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node,
					node);
	if (node) {
		bpf_map_inc(&node->map->map);
		map = node->map;
		*map_entry = node->map_entry;
	}
	spin_unlock_bh(&xs->map_list_lock);
	return map;
}

static void xsk_delete_from_maps(struct xdp_sock *xs)
{
	/* This function removes the current XDP socket from all the
	 * maps it resides in. We need to take extra care here, due to
	 * the two locks involved. Each map has a lock synchronizing
	 * updates to the entries, and each socket has a lock that
	 * synchronizes access to the list of maps (map_list). For
	 * deadlock avoidance the locks need to be taken in the order
	 * "map lock"->"socket map list lock". We start off by
	 * accessing the socket map list, and take a reference to the
	 * map to guarantee existence between the
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete()
	 * calls. Then we ask the map to remove the socket, which
	 * tries to remove the socket from the map. Note that there
	 * might be updates to the map between
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete().
	 */
	struct xdp_sock __rcu **map_entry = NULL;
	struct xsk_map *map;

	while ((map = xsk_get_map_list_entry(xs, &map_entry))) {
		xsk_map_try_sock_delete(map, xs, map_entry);
		bpf_map_put(&map->map);
	}
}

static int xsk_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net *net;

	if (!sk)
		return 0;

	net = sock_net(sk);

	if (xs->skb)
		xsk_drop_skb(xs->skb);

	mutex_lock(&net->xdp.lock);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&net->xdp.lock);

	sock_prot_inuse_add(net, sk->sk_prot, -1);

	xsk_delete_from_maps(xs);
	mutex_lock(&xs->mutex);
	xsk_unbind_dev(xs);
	mutex_unlock(&xs->mutex);

	xskq_destroy(xs->rx);
	xskq_destroy(xs->tx);
	xskq_destroy(xs->fq_tmp);
	xskq_destroy(xs->cq_tmp);

	sock_orphan(sk);
	sock->sk = NULL;

	sock_put(sk);

	return 0;
}

static struct socket *xsk_lookup_xsk_from_fd(int fd)
{
	struct socket *sock;
	int err;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	if (sock->sk->sk_family != PF_XDP) {
		sockfd_put(sock);
		return ERR_PTR(-ENOPROTOOPT);
	}

	return sock;
}

static bool xsk_validate_queues(struct xdp_sock *xs)
{
	return xs->fq_tmp && xs->cq_tmp;
}

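/* Bound from user space with a struct sockaddr_xdp from the uapi
 * header, roughly like this (illustrative sketch; 'ifindex' and
 * 'queue_id' are application-provided):
 *
 *	struct sockaddr_xdp sxdp = {
 *		.sxdp_family = AF_XDP,
 *		.sxdp_ifindex = ifindex,
 *		.sxdp_queue_id = queue_id,
 *		.sxdp_flags = XDP_USE_NEED_WAKEUP,
 *	};
 *
 *	bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
 */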
static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev;
	int bound_dev_if;
	u32 flags, qid;
	int err = 0;

	if (addr_len < sizeof(struct sockaddr_xdp))
		return -EINVAL;
	if (sxdp->sxdp_family != AF_XDP)
		return -EINVAL;

	flags = sxdp->sxdp_flags;
	if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY |
		      XDP_USE_NEED_WAKEUP | XDP_USE_SG))
		return -EINVAL;

	bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
	if (bound_dev_if && bound_dev_if != sxdp->sxdp_ifindex)
		return -EINVAL;

	rtnl_lock();
	mutex_lock(&xs->mutex);
	if (xs->state != XSK_READY) {
		err = -EBUSY;
		goto out_release;
	}

	dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
	if (!dev) {
		err = -ENODEV;
		goto out_release;
	}

	if (!xs->rx && !xs->tx) {
		err = -EINVAL;
		goto out_unlock;
	}

	qid = sxdp->sxdp_queue_id;

	if (flags & XDP_SHARED_UMEM) {
		struct xdp_sock *umem_xs;
		struct socket *sock;

		if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY) ||
		    (flags & XDP_USE_NEED_WAKEUP) || (flags & XDP_USE_SG)) {
			/* Cannot specify flags for shared sockets. */
			err = -EINVAL;
			goto out_unlock;
		}

		if (xs->umem) {
			/* We already have our own. */
			err = -EINVAL;
			goto out_unlock;
		}

		sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
		if (IS_ERR(sock)) {
			err = PTR_ERR(sock);
			goto out_unlock;
		}

		umem_xs = xdp_sk(sock->sk);
		if (!xsk_is_bound(umem_xs)) {
			err = -EBADF;
			sockfd_put(sock);
			goto out_unlock;
		}

		if (umem_xs->queue_id != qid || umem_xs->dev != dev) {
			/* Share the umem with another socket on another qid
			 * and/or device.
			 */
			xs->pool = xp_create_and_assign_umem(xs,
							     umem_xs->umem);
			if (!xs->pool) {
				err = -ENOMEM;
				sockfd_put(sock);
				goto out_unlock;
			}

			err = xp_assign_dev_shared(xs->pool, umem_xs, dev,
						   qid);
			if (err) {
				xp_destroy(xs->pool);
				xs->pool = NULL;
				sockfd_put(sock);
				goto out_unlock;
			}
		} else {
			/* Share the buffer pool with the other socket. */
			if (xs->fq_tmp || xs->cq_tmp) {
				/* Do not allow setting your own fq or cq. */
				err = -EINVAL;
				sockfd_put(sock);
				goto out_unlock;
			}

			xp_get_pool(umem_xs->pool);
			xs->pool = umem_xs->pool;

			/* If underlying shared umem was created without Tx
			 * ring, allocate Tx descs array that Tx batching API
			 * utilizes
			 */
			if (xs->tx && !xs->pool->tx_descs) {
				err = xp_alloc_tx_descs(xs->pool, xs);
				if (err) {
					xp_put_pool(xs->pool);
					xs->pool = NULL;
					sockfd_put(sock);
					goto out_unlock;
				}
			}
		}

		xdp_get_umem(umem_xs->umem);
		WRITE_ONCE(xs->umem, umem_xs->umem);
		sockfd_put(sock);
	} else if (!xs->umem || !xsk_validate_queues(xs)) {
		err = -EINVAL;
		goto out_unlock;
	} else {
		/* This xsk has its own umem. */
		xs->pool = xp_create_and_assign_umem(xs, xs->umem);
		if (!xs->pool) {
			err = -ENOMEM;
			goto out_unlock;
		}

		err = xp_assign_dev(xs->pool, dev, qid, flags);
		if (err) {
			xp_destroy(xs->pool);
			xs->pool = NULL;
			goto out_unlock;
		}
	}

	/* FQ and CQ are now owned by the buffer pool and cleaned up with it. */
	xs->fq_tmp = NULL;
	xs->cq_tmp = NULL;

	xs->dev = dev;
	xs->zc = xs->umem->zc;
	xs->sg = !!(xs->umem->flags & XDP_UMEM_SG_FLAG);
	xs->queue_id = qid;
	xp_add_xsk(xs->pool, xs);

out_unlock:
	if (err) {
		dev_put(dev);
	} else {
		/* Matches smp_rmb() in bind() for shared umem
		 * sockets, and xsk_is_bound().
		 */
		smp_wmb();
		WRITE_ONCE(xs->state, XSK_BOUND);
	}
out_release:
	mutex_unlock(&xs->mutex);
	rtnl_unlock();
	return err;
}
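
/* Earlier revisions of the XDP_UMEM_REG argument, kept so that the
 * setsockopt handler below can accept the shorter optlen of
 * applications built against an older ABI; the handler picks the copy
 * size that matches what the caller passed in.
 */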

struct xdp_umem_reg_v1 {
	__u64 addr; /* Start of packet data area */
	__u64 len; /* Length of packet data area */
	__u32 chunk_size;
	__u32 headroom;
};

struct xdp_umem_reg_v2 {
	__u64 addr; /* Start of packet data area */
	__u64 len; /* Length of packet data area */
	__u32 chunk_size;
	__u32 headroom;
	__u32 flags;
};

static int xsk_setsockopt(struct socket *sock, int level, int optname,
			  sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int err;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	switch (optname) {
	case XDP_RX_RING:
	case XDP_TX_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (optlen < sizeof(entries))
			return -EINVAL;
		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}
		q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
		err = xsk_init_queue(entries, q, false);
		if (!err && optname == XDP_TX_RING)
			/* Tx needs to be explicitly woken up the first time */
			xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
		mutex_unlock(&xs->mutex);
		return err;
	}
	case XDP_UMEM_REG:
	{
		size_t mr_size = sizeof(struct xdp_umem_reg);
		struct xdp_umem_reg mr = {};
		struct xdp_umem *umem;

		if (optlen < sizeof(struct xdp_umem_reg_v1))
			return -EINVAL;
		else if (optlen < sizeof(struct xdp_umem_reg_v2))
			mr_size = sizeof(struct xdp_umem_reg_v1);
		else if (optlen < sizeof(mr))
			mr_size = sizeof(struct xdp_umem_reg_v2);

		if (copy_from_sockptr(&mr, optval, mr_size))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY || xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		umem = xdp_umem_create(&mr);
		if (IS_ERR(umem)) {
			mutex_unlock(&xs->mutex);
			return PTR_ERR(umem);
		}

		/* Make sure umem is ready before it can be seen by others */
		smp_wmb();
		WRITE_ONCE(xs->umem, umem);
		mutex_unlock(&xs->mutex);
		return 0;
	}
	case XDP_UMEM_FILL_RING:
	case XDP_UMEM_COMPLETION_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (optlen < sizeof(entries))
			return -EINVAL;
		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		q = (optname == XDP_UMEM_FILL_RING) ? &xs->fq_tmp :
			&xs->cq_tmp;
		err = xsk_init_queue(entries, q, true);
		mutex_unlock(&xs->mutex);
		return err;
	}
	default:
		break;
	}

	return -ENOPROTOOPT;
}

static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_rxtx_ring, desc);
}

static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_umem_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_umem_ring, desc);
}

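/* Legacy XDP_STATISTICS layout, retained for callers that pass the
 * shorter v1 length; the getsockopt handler folds the ring-full
 * counter into rx_dropped in that case.
 */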
struct xdp_statistics_v1 {
	__u64 rx_dropped;
	__u64 rx_invalid_descs;
	__u64 tx_invalid_descs;
};

static int xsk_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int len;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case XDP_STATISTICS:
	{
		struct xdp_statistics stats = {};
		bool extra_stats = true;
		size_t stats_size;

		if (len < sizeof(struct xdp_statistics_v1)) {
			return -EINVAL;
		} else if (len < sizeof(stats)) {
			extra_stats = false;
			stats_size = sizeof(struct xdp_statistics_v1);
		} else {
			stats_size = sizeof(stats);
		}

		mutex_lock(&xs->mutex);
		stats.rx_dropped = xs->rx_dropped;
		if (extra_stats) {
			stats.rx_ring_full = xs->rx_queue_full;
			stats.rx_fill_ring_empty_descs =
				xs->pool ? xskq_nb_queue_empty_descs(xs->pool->fq) : 0;
			stats.tx_ring_empty_descs = xskq_nb_queue_empty_descs(xs->tx);
		} else {
			stats.rx_dropped += xs->rx_queue_full;
		}
		stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
		stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
		mutex_unlock(&xs->mutex);

		if (copy_to_user(optval, &stats, stats_size))
			return -EFAULT;
		if (put_user(stats_size, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_MMAP_OFFSETS:
	{
		struct xdp_mmap_offsets off;
		struct xdp_mmap_offsets_v1 off_v1;
		bool flags_supported = true;
		void *to_copy;

		if (len < sizeof(off_v1))
			return -EINVAL;
		else if (len < sizeof(off))
			flags_supported = false;

		if (flags_supported) {
			/* xdp_ring_offset is identical to xdp_ring_offset_v1
			 * except for the flags field added to the end.
			 */
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.rx);
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.tx);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.fr);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.cr);
			off.rx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.tx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.fr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);
			off.cr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);

			len = sizeof(off);
			to_copy = &off;
		} else {
			xsk_enter_rxtx_offsets(&off_v1.rx);
			xsk_enter_rxtx_offsets(&off_v1.tx);
			xsk_enter_umem_offsets(&off_v1.fr);
			xsk_enter_umem_offsets(&off_v1.cr);

			len = sizeof(off_v1);
			to_copy = &off_v1;
		}

		if (copy_to_user(optval, to_copy, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_OPTIONS:
	{
		struct xdp_options opts = {};

		if (len < sizeof(opts))
			return -EINVAL;

		mutex_lock(&xs->mutex);
		if (xs->zc)
			opts.flags |= XDP_OPTIONS_ZEROCOPY;
		mutex_unlock(&xs->mutex);

		len = sizeof(opts);
		if (copy_to_user(optval, &opts, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int xsk_mmap(struct file *file, struct socket *sock,
		    struct vm_area_struct *vma)
{
	loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct xdp_sock *xs = xdp_sk(sock->sk);
	int state = READ_ONCE(xs->state);
	struct xsk_queue *q = NULL;

	if (state != XSK_READY && state != XSK_BOUND)
		return -EBUSY;

	if (offset == XDP_PGOFF_RX_RING) {
		q = READ_ONCE(xs->rx);
	} else if (offset == XDP_PGOFF_TX_RING) {
		q = READ_ONCE(xs->tx);
	} else {
		/* Matches the smp_wmb() in XDP_UMEM_REG */
		smp_rmb();
		if (offset == XDP_UMEM_PGOFF_FILL_RING)
			q = state == XSK_READY ? READ_ONCE(xs->fq_tmp) :
						 READ_ONCE(xs->pool->fq);
		else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
			q = state == XSK_READY ? READ_ONCE(xs->cq_tmp) :
						 READ_ONCE(xs->pool->cq);
	}

	if (!q)
		return -EINVAL;

	/* Matches the smp_wmb() in xsk_init_queue */
	smp_rmb();
	if (size > q->ring_vmalloc_size)
		return -EINVAL;

	return remap_vmalloc_range(vma, q->ring, 0);
}
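
/* User space reaches this through plain mmap() on the socket with the
 * fixed XDP_PGOFF_ and XDP_UMEM_PGOFF_ offsets, after sizing the rings
 * via the XDP_MMAP_OFFSETS getsockopt. Roughly (illustrative sketch;
 * 'len' comes from the offsets query):
 *
 *	void *rx_ring = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			     MAP_SHARED | MAP_POPULATE, fd,
 *			     XDP_PGOFF_RX_RING);
 */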

static int xsk_notifier(struct notifier_block *this,
			unsigned long msg, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct sock *sk;

	switch (msg) {
	case NETDEV_UNREGISTER:
		mutex_lock(&net->xdp.lock);
		sk_for_each(sk, &net->xdp.list) {
			struct xdp_sock *xs = xdp_sk(sk);

			mutex_lock(&xs->mutex);
			if (xs->dev == dev) {
				sk->sk_err = ENETDOWN;
				if (!sock_flag(sk, SOCK_DEAD))
					sk_error_report(sk);

				xsk_unbind_dev(xs);

				/* Clear device references. */
				xp_clear_dev(xs->pool);
			}
			mutex_unlock(&xs->mutex);
		}
		mutex_unlock(&net->xdp.lock);
		break;
	}
	return NOTIFY_DONE;
}

static struct proto xsk_proto = {
	.name =		"XDP",
	.owner =	THIS_MODULE,
	.obj_size =	sizeof(struct xdp_sock),
};

static const struct proto_ops xsk_proto_ops = {
	.family		= PF_XDP,
	.owner		= THIS_MODULE,
	.release	= xsk_release,
	.bind		= xsk_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.poll		= xsk_poll,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= xsk_setsockopt,
	.getsockopt	= xsk_getsockopt,
	.sendmsg	= xsk_sendmsg,
	.recvmsg	= xsk_recvmsg,
	.mmap		= xsk_mmap,
};

static void xsk_destruct(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (!sock_flag(sk, SOCK_DEAD))
		return;

	if (!xp_put_pool(xs->pool))
		xdp_put_umem(xs->umem, !xs->pool);
}

static int xsk_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct xdp_sock *xs;
	struct sock *sk;

	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (protocol)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
	if (!sk)
		return -ENOBUFS;

	sock->ops = &xsk_proto_ops;

	sock_init_data(sock, sk);

	sk->sk_family = PF_XDP;

	sk->sk_destruct = xsk_destruct;

	sock_set_flag(sk, SOCK_RCU_FREE);

	xs = xdp_sk(sk);
	xs->state = XSK_READY;
	mutex_init(&xs->mutex);
	spin_lock_init(&xs->rx_lock);

	INIT_LIST_HEAD(&xs->map_list);
	spin_lock_init(&xs->map_list_lock);

	mutex_lock(&net->xdp.lock);
	sk_add_node_rcu(sk, &net->xdp.list);
	mutex_unlock(&net->xdp.lock);

	sock_prot_inuse_add(net, &xsk_proto, 1);

	return 0;
}

static const struct net_proto_family xsk_family_ops = {
	.family = PF_XDP,
	.create = xsk_create,
	.owner	= THIS_MODULE,
};

static struct notifier_block xsk_netdev_notifier = {
	.notifier_call	= xsk_notifier,
};

static int __net_init xsk_net_init(struct net *net)
{
	mutex_init(&net->xdp.lock);
	INIT_HLIST_HEAD(&net->xdp.list);
	return 0;
}

static void __net_exit xsk_net_exit(struct net *net)
{
	WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
}

static struct pernet_operations xsk_net_ops = {
	.init = xsk_net_init,
	.exit = xsk_net_exit,
};

static int __init xsk_init(void)
{
	int err;

	err = proto_register(&xsk_proto, 0 /* no slab */);
	if (err)
		goto out;

	err = sock_register(&xsk_family_ops);
	if (err)
		goto out_proto;

	err = register_pernet_subsys(&xsk_net_ops);
	if (err)
		goto out_sk;

	err = register_netdevice_notifier(&xsk_netdev_notifier);
	if (err)
		goto out_pernet;

	return 0;

out_pernet:
	unregister_pernet_subsys(&xsk_net_ops);
out_sk:
	sock_unregister(PF_XDP);
out_proto:
	proto_unregister(&xsk_proto);
out:
	return err;
}

fs_initcall(xsk_init);