// SPDX-License-Identifier: GPL-2.0
/* XDP sockets
 *
 * AF_XDP sockets provide a channel between XDP programs and userspace
 * applications.
 * Copyright(c) 2018 Intel Corporation.
 *
 * Author(s): Björn Töpel <bjorn.topel@intel.com>
 *	      Magnus Karlsson <magnus.karlsson@intel.com>
 */

#define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__

#include <linux/if_xdp.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/vmalloc.h>
#include <net/xdp_sock_drv.h>
#include <net/busy_poll.h>
#include <net/netdev_rx_queue.h>
#include <net/xdp.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"

#define TX_BATCH_SIZE 32
#define MAX_PER_SOCKET_BUDGET (TX_BATCH_SIZE)

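/* The need_wakeup helpers below implement the wakeup protocol between a
 * zero-copy driver and userspace: when a ring needs servicing by the
 * kernel, the driver sets XDP_RING_NEED_WAKEUP on it and userspace is
 * expected to issue a syscall to kick the kernel. A rough sketch of the
 * userspace side for the Tx ring (illustration only, not kernel code):
 *
 *	if (ring->flags & XDP_RING_NEED_WAKEUP)
 *		sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
 *
 * cached_need_wakeup mirrors the ring flags so that the fast path can
 * test a single word instead of touching the shared rings.
 */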
void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
	if (pool->cached_need_wakeup & XDP_WAKEUP_RX)
		return;

	pool->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
	pool->cached_need_wakeup |= XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_set_rx_need_wakeup);

void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	pool->cached_need_wakeup |= XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_set_tx_need_wakeup);

void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{
	if (!(pool->cached_need_wakeup & XDP_WAKEUP_RX))
		return;

	pool->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	pool->cached_need_wakeup &= ~XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_clear_rx_need_wakeup);

void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	if (!(pool->cached_need_wakeup & XDP_WAKEUP_TX))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	pool->cached_need_wakeup &= ~XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_clear_tx_need_wakeup);

bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
{
	return pool->uses_need_wakeup;
}
EXPORT_SYMBOL(xsk_uses_need_wakeup);

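/* Return the buffer pool previously registered at this queue id with
 * xsk_reg_pool_at_qid(), or NULL if none has been registered.
 */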
struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
					    u16 queue_id)
{
	if (queue_id < dev->real_num_rx_queues)
		return dev->_rx[queue_id].pool;
	if (queue_id < dev->real_num_tx_queues)
		return dev->_tx[queue_id].pool;

	return NULL;
}
EXPORT_SYMBOL(xsk_get_pool_from_qid);

void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
{
	if (queue_id < dev->num_rx_queues)
		dev->_rx[queue_id].pool = NULL;
	if (queue_id < dev->num_tx_queues)
		dev->_tx[queue_id].pool = NULL;
}

/* The buffer pool is stored both in the _rx struct and the _tx struct as we do
 * not know if the device has more tx queues than rx, or the opposite.
 * This might also change during run time.
 */
int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
			u16 queue_id)
{
	if (queue_id >= max_t(unsigned int,
			      dev->real_num_rx_queues,
			      dev->real_num_tx_queues))
		return -EINVAL;

	if (queue_id < dev->real_num_rx_queues)
		dev->_rx[queue_id].pool = pool;
	if (queue_id < dev->real_num_tx_queues)
		dev->_tx[queue_id].pool = pool;

	return 0;
}

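/* Post one zero-copy buffer to the socket's Rx ring. On success the
 * xskb is released back to the pool; on failure the ring is left
 * untouched and rx_queue_full is bumped so that userspace can diagnose
 * the stall via the XDP_STATISTICS sockopt.
 */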
static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff_xsk *xskb, u32 len,
			u32 flags)
{
	u64 addr;
	int err;

	addr = xp_get_handle(xskb, xskb->pool);
	err = xskq_prod_reserve_desc(xs->rx, addr, len, flags);
	if (err) {
		xs->rx_queue_full++;
		return err;
	}

	xp_release(xskb);
	return 0;
}

static int xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
	u32 frags = xdp_buff_has_frags(xdp);
	struct xdp_buff_xsk *pos, *tmp;
	struct list_head *xskb_list;
	u32 contd = 0;
	int err;

	if (frags)
		contd = XDP_PKT_CONTD;

	err = __xsk_rcv_zc(xs, xskb, len, contd);
	if (err)
		goto err;
	if (likely(!frags))
		return 0;

	xskb_list = &xskb->pool->xskb_list;
	list_for_each_entry_safe(pos, tmp, xskb_list, list_node) {
		if (list_is_singular(xskb_list))
			contd = 0;
		len = pos->xdp.data_end - pos->xdp.data;
		err = __xsk_rcv_zc(xs, pos, len, contd);
		if (err)
			goto err;
		list_del(&pos->list_node);
	}

	return 0;
err:
	xsk_buff_free(xdp);
	return err;
}

static void *xsk_copy_xdp_start(struct xdp_buff *from)
{
	if (unlikely(xdp_data_meta_unsupported(from)))
		return from->data;
	else
		return from->data_meta;
}

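/* Copy up to to_len bytes into one Rx buffer, advancing through the
 * source xdp_buff and its fragments as each is exhausted. Returns the
 * number of bytes copied so that the caller can drive the copy across
 * multiple destination buffers until rem reaches zero.
 */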
static u32 xsk_copy_xdp(void *to, void **from, u32 to_len,
			u32 *from_len, skb_frag_t **frag, u32 rem)
{
	u32 copied = 0;

	while (1) {
		u32 copy_len = min_t(u32, *from_len, to_len);

		memcpy(to, *from, copy_len);
		copied += copy_len;
		if (rem == copied)
			return copied;

		if (*from_len == copy_len) {
			*from = skb_frag_address(*frag);
			*from_len = skb_frag_size((*frag)++);
		} else {
			*from += copy_len;
			*from_len -= copy_len;
		}
		if (to_len == copy_len)
			return copied;

		to_len -= copy_len;
		to += copy_len;
	}
}

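/* Copy-mode receive. A packet that fits in one frame takes the fast
 * path; otherwise it is chopped into ceil(len / frame_size) pool
 * buffers chained together with XDP_PKT_CONTD. Buffer and ring space
 * for the whole packet is checked up front so that the copy loop
 * cannot fail halfway through.
 */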
static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	u32 frame_size = xsk_pool_get_rx_frame_size(xs->pool);
	void *copy_from = xsk_copy_xdp_start(xdp), *copy_to;
	u32 from_len, meta_len, rem, num_desc;
	struct xdp_buff_xsk *xskb;
	struct xdp_buff *xsk_xdp;
	skb_frag_t *frag;

	from_len = xdp->data_end - copy_from;
	meta_len = xdp->data - copy_from;
	rem = len + meta_len;

	if (len <= frame_size && !xdp_buff_has_frags(xdp)) {
		int err;

		xsk_xdp = xsk_buff_alloc(xs->pool);
		if (!xsk_xdp) {
			xs->rx_dropped++;
			return -ENOMEM;
		}
		memcpy(xsk_xdp->data - meta_len, copy_from, rem);
		xskb = container_of(xsk_xdp, struct xdp_buff_xsk, xdp);
		err = __xsk_rcv_zc(xs, xskb, len, 0);
		if (err) {
			xsk_buff_free(xsk_xdp);
			return err;
		}

		return 0;
	}

	num_desc = (len - 1) / frame_size + 1;

	if (!xsk_buff_can_alloc(xs->pool, num_desc)) {
		xs->rx_dropped++;
		return -ENOMEM;
	}
	if (xskq_prod_nb_free(xs->rx, num_desc) < num_desc) {
		xs->rx_queue_full++;
		return -ENOBUFS;
	}

	if (xdp_buff_has_frags(xdp)) {
		struct skb_shared_info *sinfo;

		sinfo = xdp_get_shared_info_from_buff(xdp);
		frag = &sinfo->frags[0];
	}

	do {
		u32 to_len = frame_size + meta_len;
		u32 copied;

		xsk_xdp = xsk_buff_alloc(xs->pool);
		copy_to = xsk_xdp->data - meta_len;

		copied = xsk_copy_xdp(copy_to, &copy_from, to_len, &from_len, &frag, rem);
		rem -= copied;

		xskb = container_of(xsk_xdp, struct xdp_buff_xsk, xdp);
		__xsk_rcv_zc(xs, xskb, copied - meta_len, rem ? XDP_PKT_CONTD : 0);
		meta_len = 0;
	} while (rem);

	return 0;
}

static bool xsk_tx_writeable(struct xdp_sock *xs)
{
	if (xskq_cons_present_entries(xs->tx) > xs->tx->nentries / 2)
		return false;

	return true;
}

static bool xsk_is_bound(struct xdp_sock *xs)
{
	if (READ_ONCE(xs->state) == XSK_BOUND) {
		/* Matches smp_wmb() in bind(). */
		smp_rmb();
		return true;
	}
	return false;
}

static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	if (!xsk_is_bound(xs))
		return -ENXIO;

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
		return -EINVAL;

	if (len > xsk_pool_get_rx_frame_size(xs->pool) && !xs->sg) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	return 0;
}

static void xsk_flush(struct xdp_sock *xs)
{
	xskq_prod_submit(xs->rx);
	__xskq_cons_release(xs->pool->fq);
	sock_def_readable(&xs->sk);
}

int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	u32 len = xdp_get_buff_len(xdp);
	int err;

	spin_lock_bh(&xs->rx_lock);
	err = xsk_rcv_check(xs, xdp, len);
	if (!err) {
		err = __xsk_rcv(xs, xdp, len);
		xsk_flush(xs);
	}
	spin_unlock_bh(&xs->rx_lock);
	return err;
}

static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	u32 len = xdp_get_buff_len(xdp);
	int err;

	err = xsk_rcv_check(xs, xdp, len);
	if (err)
		return err;

	if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) {
		len = xdp->data_end - xdp->data;
		return xsk_rcv_zc(xs, xdp, len);
	}

	err = __xsk_rcv(xs, xdp, len);
	if (!err)
		xdp_return_buff(xdp);
	return err;
}

int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	int err;

	err = xsk_rcv(xs, xdp);
	if (err)
		return err;

	if (!xs->flush_node.prev) {
		struct list_head *flush_list = bpf_net_ctx_get_xskmap_flush_list();

		list_add(&xs->flush_node, flush_list);
	}

	return 0;
}

void __xsk_map_flush(struct list_head *flush_list)
{
	struct xdp_sock *xs, *tmp;

	list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
		xsk_flush(xs);
		__list_del_clearprev(&xs->flush_node);
	}
}

void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{
	xskq_prod_submit_n(pool->cq, nb_entries);
}
EXPORT_SYMBOL(xsk_tx_completed);

void xsk_tx_release(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		__xskq_cons_release(xs->tx);
		if (xsk_tx_writeable(xs))
			xs->sk.sk_write_space(&xs->sk);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(xsk_tx_release);

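/* Peek one Tx descriptor from the sockets sharing this pool. A
 * per-socket budget (MAX_PER_SOCKET_BUDGET) keeps one busy socket from
 * starving the others; once every socket with pending work has spent
 * its budget, all budgets are reset and the scan restarts.
 */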
bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc)
{
	bool budget_exhausted = false;
	struct xdp_sock *xs;

	rcu_read_lock();
again:
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		if (xs->tx_budget_spent >= MAX_PER_SOCKET_BUDGET) {
			budget_exhausted = true;
			continue;
		}

		if (!xskq_cons_peek_desc(xs->tx, desc, pool)) {
			if (xskq_has_descs(xs->tx))
				xskq_cons_release(xs->tx);
			continue;
		}

		xs->tx_budget_spent++;

		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		if (xskq_prod_reserve_addr(pool->cq, desc->addr))
			goto out;

		xskq_cons_release(xs->tx);
		rcu_read_unlock();
		return true;
	}

	if (budget_exhausted) {
		list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list)
			xs->tx_budget_spent = 0;

		budget_exhausted = false;
		goto again;
	}

out:
	rcu_read_unlock();
	return false;
}
EXPORT_SYMBOL(xsk_tx_peek_desc);

static u32 xsk_tx_peek_release_fallback(struct xsk_buff_pool *pool, u32 max_entries)
{
	struct xdp_desc *descs = pool->tx_descs;
	u32 nb_pkts = 0;

	while (nb_pkts < max_entries && xsk_tx_peek_desc(pool, &descs[nb_pkts]))
		nb_pkts++;

	xsk_tx_release(pool);
	return nb_pkts;
}

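/* Batched variant of xsk_tx_peek_desc(). Only usable when a single
 * socket is bound to the pool, since descriptors are read straight
 * into pool->tx_descs; with multiple sockets it falls back to peeking
 * one descriptor at a time.
 */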
u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 nb_pkts)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	if (!list_is_singular(&pool->xsk_tx_list)) {
		/* Fallback to the non-batched version */
		rcu_read_unlock();
		return xsk_tx_peek_release_fallback(pool, nb_pkts);
	}

	xs = list_first_or_null_rcu(&pool->xsk_tx_list, struct xdp_sock, tx_list);
	if (!xs) {
		nb_pkts = 0;
		goto out;
	}

	nb_pkts = xskq_cons_nb_entries(xs->tx, nb_pkts);

	/* This is the backpressure mechanism for the Tx path. Try to
	 * reserve space in the completion queue for all packets, but
	 * if there are fewer slots available, just process that many
	 * packets. This avoids having to implement any buffering in
	 * the Tx path.
	 */
	nb_pkts = xskq_prod_nb_free(pool->cq, nb_pkts);
	if (!nb_pkts)
		goto out;

	nb_pkts = xskq_cons_read_desc_batch(xs->tx, pool, nb_pkts);
	if (!nb_pkts) {
		xs->tx->queue_empty_descs++;
		goto out;
	}

	__xskq_cons_release(xs->tx);
	xskq_prod_write_addr_batch(pool->cq, pool->tx_descs, nb_pkts);
	xs->sk.sk_write_space(&xs->sk);

out:
	rcu_read_unlock();
	return nb_pkts;
}
EXPORT_SYMBOL(xsk_tx_peek_release_desc_batch);

static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
{
	struct net_device *dev = xs->dev;

	return dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
}

static int xsk_cq_reserve_addr_locked(struct xsk_buff_pool *pool, u64 addr)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pool->cq_lock, flags);
	ret = xskq_prod_reserve_addr(pool->cq, addr);
	spin_unlock_irqrestore(&pool->cq_lock, flags);

	return ret;
}

static void xsk_cq_submit_locked(struct xsk_buff_pool *pool, u32 n)
{
	unsigned long flags;

	spin_lock_irqsave(&pool->cq_lock, flags);
	xskq_prod_submit_n(pool->cq, n);
	spin_unlock_irqrestore(&pool->cq_lock, flags);
}

static void xsk_cq_cancel_locked(struct xsk_buff_pool *pool, u32 n)
{
	unsigned long flags;

	spin_lock_irqsave(&pool->cq_lock, flags);
	xskq_prod_cancel_n(pool->cq, n);
	spin_unlock_irqrestore(&pool->cq_lock, flags);
}

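/* The number of Tx descriptors backing an skb is stashed in
 * skb_shinfo(skb)->destructor_arg, reusing the pointer as a plain
 * counter. It tells the destructor how many completion queue entries
 * to submit, and how many to cancel if the skb is dropped instead.
 */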
static u32 xsk_get_num_desc(struct sk_buff *skb)
{
	return skb ? (long)skb_shinfo(skb)->destructor_arg : 0;
}

static void xsk_destruct_skb(struct sk_buff *skb)
{
	struct xsk_tx_metadata_compl *compl = &skb_shinfo(skb)->xsk_meta;

	if (compl->tx_timestamp) {
		/* sw completion timestamp, not a real one */
		*compl->tx_timestamp = ktime_get_tai_fast_ns();
	}

	xsk_cq_submit_locked(xdp_sk(skb->sk)->pool, xsk_get_num_desc(skb));
	sock_wfree(skb);
}

static void xsk_set_destructor_arg(struct sk_buff *skb)
{
	long num = xsk_get_num_desc(xdp_sk(skb->sk)->skb) + 1;

	skb_shinfo(skb)->destructor_arg = (void *)num;
}

static void xsk_consume_skb(struct sk_buff *skb)
{
	struct xdp_sock *xs = xdp_sk(skb->sk);

	skb->destructor = sock_wfree;
	xsk_cq_cancel_locked(xs->pool, xsk_get_num_desc(skb));
	/* Free skb without triggering the perf drop trace */
	consume_skb(skb);
	xs->skb = NULL;
}

static void xsk_drop_skb(struct sk_buff *skb)
{
	xdp_sk(skb->sk)->tx->invalid_descs += xsk_get_num_desc(skb);
	xsk_consume_skb(skb);
}

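/* Build an skb for a device with IFF_TX_SKB_NO_LINEAR: the umem pages
 * backing the descriptor are attached as frags with their refcounts
 * raised, so the payload is never copied. truesize is accounted as the
 * full chunk for aligned pools, since that is what the in-flight
 * descriptor effectively pins.
 */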
static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs,
					      struct xdp_desc *desc)
{
	struct xsk_buff_pool *pool = xs->pool;
	u32 hr, len, ts, offset, copy, copied;
	struct sk_buff *skb = xs->skb;
	struct page *page;
	void *buffer;
	int err, i;
	u64 addr;

	if (!skb) {
		hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(xs->dev->needed_headroom));

		skb = sock_alloc_send_skb(&xs->sk, hr, 1, &err);
		if (unlikely(!skb))
			return ERR_PTR(err);

		skb_reserve(skb, hr);
	}

	addr = desc->addr;
	len = desc->len;
	ts = pool->unaligned ? len : pool->chunk_size;

	buffer = xsk_buff_raw_get_data(pool, addr);
	offset = offset_in_page(buffer);
	addr = buffer - pool->addrs;

	for (copied = 0, i = skb_shinfo(skb)->nr_frags; copied < len; i++) {
		if (unlikely(i >= MAX_SKB_FRAGS))
			return ERR_PTR(-EOVERFLOW);

		page = pool->umem->pgs[addr >> PAGE_SHIFT];
		get_page(page);

		copy = min_t(u32, PAGE_SIZE - offset, len - copied);
		skb_fill_page_desc(skb, i, page, offset, copy);

		copied += copy;
		addr += copy;
		offset = 0;
	}

	skb->len += len;
	skb->data_len += len;
	skb->truesize += ts;

	refcount_add(ts, &xs->sk.sk_wmem_alloc);

	return skb;
}

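/* Build an skb for the generic (copy) Tx path. The first descriptor of
 * a packet becomes the linear part; subsequent frags of a multi-buffer
 * packet are copied into freshly allocated pages. Tx metadata, if
 * requested and valid, is translated into checksum offload and
 * completion settings on the skb.
 */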
static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
				     struct xdp_desc *desc)
{
	struct xsk_tx_metadata *meta = NULL;
	struct net_device *dev = xs->dev;
	struct sk_buff *skb = xs->skb;
	bool first_frag = false;
	int err;

	if (dev->priv_flags & IFF_TX_SKB_NO_LINEAR) {
		skb = xsk_build_skb_zerocopy(xs, desc);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
			goto free_err;
		}
	} else {
		u32 hr, tr, len;
		void *buffer;

		buffer = xsk_buff_raw_get_data(xs->pool, desc->addr);
		len = desc->len;

		if (!skb) {
			first_frag = true;

			hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(dev->needed_headroom));
			tr = dev->needed_tailroom;
			skb = sock_alloc_send_skb(&xs->sk, hr + len + tr, 1, &err);
			if (unlikely(!skb))
				goto free_err;

			skb_reserve(skb, hr);
			skb_put(skb, len);

			err = skb_store_bits(skb, 0, buffer, len);
			if (unlikely(err))
				goto free_err;
		} else {
			int nr_frags = skb_shinfo(skb)->nr_frags;
			struct page *page;
			u8 *vaddr;

			if (unlikely(nr_frags == (MAX_SKB_FRAGS - 1) && xp_mb_desc(desc))) {
				err = -EOVERFLOW;
				goto free_err;
			}

			page = alloc_page(xs->sk.sk_allocation);
			if (unlikely(!page)) {
				err = -EAGAIN;
				goto free_err;
			}

			vaddr = kmap_local_page(page);
			memcpy(vaddr, buffer, len);
			kunmap_local(vaddr);

			skb_add_rx_frag(skb, nr_frags, page, 0, len, PAGE_SIZE);
			refcount_add(PAGE_SIZE, &xs->sk.sk_wmem_alloc);
		}

		if (first_frag && desc->options & XDP_TX_METADATA) {
			if (unlikely(xs->pool->tx_metadata_len == 0)) {
				err = -EINVAL;
				goto free_err;
			}

			meta = buffer - xs->pool->tx_metadata_len;
			if (unlikely(!xsk_buff_valid_tx_metadata(meta))) {
				err = -EINVAL;
				goto free_err;
			}

			if (meta->flags & XDP_TXMD_FLAGS_CHECKSUM) {
				if (unlikely(meta->request.csum_start +
					     meta->request.csum_offset +
					     sizeof(__sum16) > len)) {
					err = -EINVAL;
					goto free_err;
				}

				skb->csum_start = hr + meta->request.csum_start;
				skb->csum_offset = meta->request.csum_offset;
				skb->ip_summed = CHECKSUM_PARTIAL;

				if (unlikely(xs->pool->tx_sw_csum)) {
					err = skb_checksum_help(skb);
					if (err)
						goto free_err;
				}
			}
		}
	}

	skb->dev = dev;
	skb->priority = READ_ONCE(xs->sk.sk_priority);
	skb->mark = READ_ONCE(xs->sk.sk_mark);
	skb->destructor = xsk_destruct_skb;
	xsk_tx_metadata_to_compl(meta, &skb_shinfo(skb)->xsk_meta);
	xsk_set_destructor_arg(skb);

	return skb;

free_err:
	if (first_frag && skb)
		kfree_skb(skb);

	if (err == -EOVERFLOW) {
		/* Drop the packet */
		xsk_set_destructor_arg(xs->skb);
		xsk_drop_skb(xs->skb);
		xskq_cons_release(xs->tx);
	} else {
		/* Let application retry */
		xsk_cq_cancel_locked(xs->pool, 1);
	}

	return ERR_PTR(err);
}

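/* Transmit in copy mode. Descriptors are sent one skb at a time via
 * __dev_direct_xmit(), at most TX_BATCH_SIZE per call so that one
 * socket cannot monopolize the kernel; -EAGAIN tells the caller (and
 * ultimately userspace) that there is more work to do.
 */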
static int __xsk_generic_xmit(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);
	u32 max_batch = TX_BATCH_SIZE;
	bool sent_frame = false;
	struct xdp_desc desc;
	struct sk_buff *skb;
	int err = 0;

	mutex_lock(&xs->mutex);

	/* Since we dropped the RCU read lock, the socket state might have changed. */
	if (unlikely(!xsk_is_bound(xs))) {
		err = -ENXIO;
		goto out;
	}

	if (xs->queue_id >= xs->dev->real_num_tx_queues)
		goto out;

	while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
		if (max_batch-- == 0) {
			err = -EAGAIN;
			goto out;
		}

		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		if (xsk_cq_reserve_addr_locked(xs->pool, desc.addr))
			goto out;

		skb = xsk_build_skb(xs, &desc);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
			if (err != -EOVERFLOW)
				goto out;
			err = 0;
			continue;
		}

		xskq_cons_release(xs->tx);

		if (xp_mb_desc(&desc)) {
			xs->skb = skb;
			continue;
		}

		err = __dev_direct_xmit(skb, xs->queue_id);
		if (err == NETDEV_TX_BUSY) {
			/* Tell user-space to retry the send */
			xskq_cons_cancel_n(xs->tx, xsk_get_num_desc(skb));
			xsk_consume_skb(skb);
			err = -EAGAIN;
			goto out;
		}

		/* Ignore NET_XMIT_CN as packet might have been sent */
		if (err == NET_XMIT_DROP) {
			/* SKB completed but not sent */
			err = -EBUSY;
			xs->skb = NULL;
			goto out;
		}

		sent_frame = true;
		xs->skb = NULL;
	}

	if (xskq_has_descs(xs->tx)) {
		if (xs->skb)
			xsk_drop_skb(xs->skb);
		xskq_cons_release(xs->tx);
	}

out:
	if (sent_frame)
		if (xsk_tx_writeable(xs))
			sk->sk_write_space(sk);

	mutex_unlock(&xs->mutex);
	return err;
}

static int xsk_generic_xmit(struct sock *sk)
{
	int ret;

	/* Drop the RCU lock since the SKB path might sleep. */
	rcu_read_unlock();
	ret = __xsk_generic_xmit(sk);
	/* Reacquire RCU lock before going into common code. */
	rcu_read_lock();

	return ret;
}

static bool xsk_no_wakeup(struct sock *sk)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	/* Prefer busy-polling, skip the wakeup. */
	return READ_ONCE(sk->sk_prefer_busy_poll) && READ_ONCE(sk->sk_ll_usec) &&
		READ_ONCE(sk->sk_napi_id) >= MIN_NAPI_ID;
#else
	return false;
#endif
}

static int xsk_check_common(struct xdp_sock *xs)
{
	if (unlikely(!xsk_is_bound(xs)))
		return -ENXIO;
	if (unlikely(!(xs->dev->flags & IFF_UP)))
		return -ENETDOWN;

	return 0;
}

static int __xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct xsk_buff_pool *pool;
	int err;

	err = xsk_check_common(xs);
	if (err)
		return err;
	if (unlikely(need_wait))
		return -EOPNOTSUPP;
	if (unlikely(!xs->tx))
		return -ENOBUFS;

	if (sk_can_busy_loop(sk))
		sk_busy_loop(sk, 1); /* only support non-blocking sockets */

	if (xs->zc && xsk_no_wakeup(sk))
		return 0;

	pool = xs->pool;
	if (pool->cached_need_wakeup & XDP_WAKEUP_TX) {
		if (xs->zc)
			return xsk_wakeup(xs, XDP_WAKEUP_TX);
		return xsk_generic_xmit(sk);
	}
	return 0;
}

static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	int ret;

	rcu_read_lock();
	ret = __xsk_sendmsg(sock, m, total_len);
	rcu_read_unlock();

	return ret;
}

static int __xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
{
	bool need_wait = !(flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int err;

	err = xsk_check_common(xs);
	if (err)
		return err;
	if (unlikely(!xs->rx))
		return -ENOBUFS;
	if (unlikely(need_wait))
		return -EOPNOTSUPP;

	if (sk_can_busy_loop(sk))
		sk_busy_loop(sk, 1); /* only support non-blocking sockets */

	if (xsk_no_wakeup(sk))
		return 0;

	if (xs->pool->cached_need_wakeup & XDP_WAKEUP_RX && xs->zc)
		return xsk_wakeup(xs, XDP_WAKEUP_RX);
	return 0;
}

static int xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
{
	int ret;

	rcu_read_lock();
	ret = __xsk_recvmsg(sock, m, len, flags);
	rcu_read_unlock();

	return ret;
}

static __poll_t xsk_poll(struct file *file, struct socket *sock,
			     struct poll_table_struct *wait)
{
	__poll_t mask = 0;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct xsk_buff_pool *pool;

	sock_poll_wait(file, sock, wait);

	rcu_read_lock();
	if (xsk_check_common(xs))
		goto out;

	pool = xs->pool;

	if (pool->cached_need_wakeup) {
		if (xs->zc)
			xsk_wakeup(xs, pool->cached_need_wakeup);
		else if (xs->tx)
			/* Poll needs to drive Tx also in copy mode */
			xsk_generic_xmit(sk);
	}

	if (xs->rx && !xskq_prod_is_empty(xs->rx))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (xs->tx && xsk_tx_writeable(xs))
		mask |= EPOLLOUT | EPOLLWRNORM;
out:
	rcu_read_unlock();
	return mask;
}

static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
			  bool umem_queue)
{
	struct xsk_queue *q;

	if (entries == 0 || *queue || !is_power_of_2(entries))
		return -EINVAL;

	q = xskq_create(entries, umem_queue);
	if (!q)
		return -ENOMEM;

	/* Make sure queue is ready before it can be seen by others */
	smp_wmb();
	WRITE_ONCE(*queue, q);
	return 0;
}

static void xsk_unbind_dev(struct xdp_sock *xs)
{
	struct net_device *dev = xs->dev;

	if (xs->state != XSK_BOUND)
		return;
	WRITE_ONCE(xs->state, XSK_UNBOUND);

	/* Wait for driver to stop using the xdp socket. */
	xp_del_xsk(xs->pool, xs);
	synchronize_net();
	dev_put(dev);
}

static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs,
					      struct xdp_sock __rcu ***map_entry)
{
	struct xsk_map *map = NULL;
	struct xsk_map_node *node;

	*map_entry = NULL;

	spin_lock_bh(&xs->map_list_lock);
	node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node,
					node);
	if (node) {
		bpf_map_inc(&node->map->map);
		map = node->map;
		*map_entry = node->map_entry;
	}
	spin_unlock_bh(&xs->map_list_lock);
	return map;
}

static void xsk_delete_from_maps(struct xdp_sock *xs)
{
	/* This function removes the current XDP socket from all the
	 * maps it resides in. We need to take extra care here, due to
	 * the two locks involved. Each map has a lock synchronizing
	 * updates to the entries, and each socket has a lock that
	 * synchronizes access to the list of maps (map_list). For
	 * deadlock avoidance the locks need to be taken in the order
	 * "map lock"->"socket map list lock". We start off by
	 * accessing the socket map list, and take a reference to the
	 * map to guarantee existence between the
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete()
	 * calls. Then we ask the map to remove the socket, which
	 * tries to remove the socket from the map. Note that there
	 * might be updates to the map between
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete().
	 */
	struct xdp_sock __rcu **map_entry = NULL;
	struct xsk_map *map;

	while ((map = xsk_get_map_list_entry(xs, &map_entry))) {
		xsk_map_try_sock_delete(map, xs, map_entry);
		bpf_map_put(&map->map);
	}
}

static int xsk_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net *net;

	if (!sk)
		return 0;

	net = sock_net(sk);

	if (xs->skb)
		xsk_drop_skb(xs->skb);

	mutex_lock(&net->xdp.lock);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&net->xdp.lock);

	sock_prot_inuse_add(net, sk->sk_prot, -1);

	xsk_delete_from_maps(xs);
	mutex_lock(&xs->mutex);
	xsk_unbind_dev(xs);
	mutex_unlock(&xs->mutex);

	xskq_destroy(xs->rx);
	xskq_destroy(xs->tx);
	xskq_destroy(xs->fq_tmp);
	xskq_destroy(xs->cq_tmp);

	sock_orphan(sk);
	sock->sk = NULL;

	sock_put(sk);

	return 0;
}

static struct socket *xsk_lookup_xsk_from_fd(int fd)
{
	struct socket *sock;
	int err;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	if (sock->sk->sk_family != PF_XDP) {
		sockfd_put(sock);
		return ERR_PTR(-ENOPROTOOPT);
	}

	return sock;
}

static bool xsk_validate_queues(struct xdp_sock *xs)
{
	return xs->fq_tmp && xs->cq_tmp;
}

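/* Bind the socket to a device and queue id. Depending on the flags,
 * the socket either gets its own buffer pool, shares another socket's
 * umem (XDP_SHARED_UMEM on a different queue or device), or shares
 * that socket's whole buffer pool (same queue and device).
 */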
static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev;
	int bound_dev_if;
	u32 flags, qid;
	int err = 0;

	if (addr_len < sizeof(struct sockaddr_xdp))
		return -EINVAL;
	if (sxdp->sxdp_family != AF_XDP)
		return -EINVAL;

	flags = sxdp->sxdp_flags;
	if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY |
		      XDP_USE_NEED_WAKEUP | XDP_USE_SG))
		return -EINVAL;

	bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
	if (bound_dev_if && bound_dev_if != sxdp->sxdp_ifindex)
		return -EINVAL;

	rtnl_lock();
	mutex_lock(&xs->mutex);
	if (xs->state != XSK_READY) {
		err = -EBUSY;
		goto out_release;
	}

	dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
	if (!dev) {
		err = -ENODEV;
		goto out_release;
	}

	if (!xs->rx && !xs->tx) {
		err = -EINVAL;
		goto out_unlock;
	}

	qid = sxdp->sxdp_queue_id;

	if (flags & XDP_SHARED_UMEM) {
		struct xdp_sock *umem_xs;
		struct socket *sock;

		if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY) ||
		    (flags & XDP_USE_NEED_WAKEUP) || (flags & XDP_USE_SG)) {
			/* Cannot specify flags for shared sockets. */
			err = -EINVAL;
			goto out_unlock;
		}

		if (xs->umem) {
			/* We already have our own. */
			err = -EINVAL;
			goto out_unlock;
		}

		sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
		if (IS_ERR(sock)) {
			err = PTR_ERR(sock);
			goto out_unlock;
		}

		umem_xs = xdp_sk(sock->sk);
		if (!xsk_is_bound(umem_xs)) {
			err = -EBADF;
			sockfd_put(sock);
			goto out_unlock;
		}

		if (umem_xs->queue_id != qid || umem_xs->dev != dev) {
			/* Share the umem with another socket on another qid
			 * and/or device.
			 */
			xs->pool = xp_create_and_assign_umem(xs,
							     umem_xs->umem);
			if (!xs->pool) {
				err = -ENOMEM;
				sockfd_put(sock);
				goto out_unlock;
			}

			err = xp_assign_dev_shared(xs->pool, umem_xs, dev,
						   qid);
			if (err) {
				xp_destroy(xs->pool);
				xs->pool = NULL;
				sockfd_put(sock);
				goto out_unlock;
			}
		} else {
			/* Share the buffer pool with the other socket. */
			if (xs->fq_tmp || xs->cq_tmp) {
				/* Do not allow setting your own fq or cq. */
				err = -EINVAL;
				sockfd_put(sock);
				goto out_unlock;
			}

			xp_get_pool(umem_xs->pool);
			xs->pool = umem_xs->pool;

			/* If the underlying shared umem was created without a
			 * Tx ring, allocate the Tx descs array that the Tx
			 * batching API utilizes.
			 */
			if (xs->tx && !xs->pool->tx_descs) {
				err = xp_alloc_tx_descs(xs->pool, xs);
				if (err) {
					xp_put_pool(xs->pool);
					xs->pool = NULL;
					sockfd_put(sock);
					goto out_unlock;
				}
			}
		}

		xdp_get_umem(umem_xs->umem);
		WRITE_ONCE(xs->umem, umem_xs->umem);
		sockfd_put(sock);
	} else if (!xs->umem || !xsk_validate_queues(xs)) {
		err = -EINVAL;
		goto out_unlock;
	} else {
		/* This xsk has its own umem. */
		xs->pool = xp_create_and_assign_umem(xs, xs->umem);
		if (!xs->pool) {
			err = -ENOMEM;
			goto out_unlock;
		}

		err = xp_assign_dev(xs->pool, dev, qid, flags);
		if (err) {
			xp_destroy(xs->pool);
			xs->pool = NULL;
			goto out_unlock;
		}
	}

	/* FQ and CQ are now owned by the buffer pool and cleaned up with it. */
	xs->fq_tmp = NULL;
	xs->cq_tmp = NULL;

	xs->dev = dev;
	xs->zc = xs->umem->zc;
	xs->sg = !!(xs->umem->flags & XDP_UMEM_SG_FLAG);
	xs->queue_id = qid;
	xp_add_xsk(xs->pool, xs);

	if (xs->zc && qid < dev->real_num_rx_queues) {
		struct netdev_rx_queue *rxq;

		rxq = __netif_get_rx_queue(dev, qid);
		if (rxq->napi)
			__sk_mark_napi_id_once(sk, rxq->napi->napi_id);
	}

out_unlock:
	if (err) {
		dev_put(dev);
	} else {
		/* Matches smp_rmb() in bind() for shared umem
		 * sockets, and xsk_is_bound().
		 */
		smp_wmb();
		WRITE_ONCE(xs->state, XSK_BOUND);
	}
out_release:
	mutex_unlock(&xs->mutex);
	rtnl_unlock();
	return err;
}

struct xdp_umem_reg_v1 {
	__u64 addr; /* Start of packet data area */
	__u64 len; /* Length of packet data area */
	__u32 chunk_size;
	__u32 headroom;
};

static int xsk_setsockopt(struct socket *sock, int level, int optname,
			  sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int err;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	switch (optname) {
	case XDP_RX_RING:
	case XDP_TX_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (optlen < sizeof(entries))
			return -EINVAL;
		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}
		q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
		err = xsk_init_queue(entries, q, false);
		if (!err && optname == XDP_TX_RING)
			/* Tx needs to be explicitly woken up the first time */
			xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
		mutex_unlock(&xs->mutex);
		return err;
	}
	case XDP_UMEM_REG:
	{
		size_t mr_size = sizeof(struct xdp_umem_reg);
		struct xdp_umem_reg mr = {};
		struct xdp_umem *umem;

		if (optlen < sizeof(struct xdp_umem_reg_v1))
			return -EINVAL;
		else if (optlen < sizeof(mr))
			mr_size = sizeof(struct xdp_umem_reg_v1);

		BUILD_BUG_ON(sizeof(struct xdp_umem_reg_v1) >= sizeof(struct xdp_umem_reg));

		/* Make sure the last field of the struct doesn't have
		 * uninitialized padding. All padding has to be explicit
		 * and has to be set to zero by the userspace to make
		 * struct xdp_umem_reg extensible in the future.
		 */
		BUILD_BUG_ON(offsetof(struct xdp_umem_reg, tx_metadata_len) +
			     sizeof_field(struct xdp_umem_reg, tx_metadata_len) !=
			     sizeof(struct xdp_umem_reg));

		if (copy_from_sockptr(&mr, optval, mr_size))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY || xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		umem = xdp_umem_create(&mr);
		if (IS_ERR(umem)) {
			mutex_unlock(&xs->mutex);
			return PTR_ERR(umem);
		}

		/* Make sure umem is ready before it can be seen by others */
		smp_wmb();
		WRITE_ONCE(xs->umem, umem);
		mutex_unlock(&xs->mutex);
		return 0;
	}
	case XDP_UMEM_FILL_RING:
	case XDP_UMEM_COMPLETION_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (optlen < sizeof(entries))
			return -EINVAL;
		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		q = (optname == XDP_UMEM_FILL_RING) ? &xs->fq_tmp :
			&xs->cq_tmp;
		err = xsk_init_queue(entries, q, true);
		mutex_unlock(&xs->mutex);
		return err;
	}
	default:
		break;
	}

	return -ENOPROTOOPT;
}

static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_rxtx_ring, desc);
}

static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_umem_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_umem_ring, desc);
}

struct xdp_statistics_v1 {
	__u64 rx_dropped;
	__u64 rx_invalid_descs;
	__u64 tx_invalid_descs;
};

static int xsk_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int len;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case XDP_STATISTICS:
	{
		struct xdp_statistics stats = {};
		bool extra_stats = true;
		size_t stats_size;

		if (len < sizeof(struct xdp_statistics_v1)) {
			return -EINVAL;
		} else if (len < sizeof(stats)) {
			extra_stats = false;
			stats_size = sizeof(struct xdp_statistics_v1);
		} else {
			stats_size = sizeof(stats);
		}

		mutex_lock(&xs->mutex);
		stats.rx_dropped = xs->rx_dropped;
		if (extra_stats) {
			stats.rx_ring_full = xs->rx_queue_full;
			stats.rx_fill_ring_empty_descs =
				xs->pool ? xskq_nb_queue_empty_descs(xs->pool->fq) : 0;
			stats.tx_ring_empty_descs = xskq_nb_queue_empty_descs(xs->tx);
		} else {
			stats.rx_dropped += xs->rx_queue_full;
		}
		stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
		stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
		mutex_unlock(&xs->mutex);

		if (copy_to_user(optval, &stats, stats_size))
			return -EFAULT;
		if (put_user(stats_size, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_MMAP_OFFSETS:
	{
		struct xdp_mmap_offsets off;
		struct xdp_mmap_offsets_v1 off_v1;
		bool flags_supported = true;
		void *to_copy;

		if (len < sizeof(off_v1))
			return -EINVAL;
		else if (len < sizeof(off))
			flags_supported = false;

		if (flags_supported) {
			/* xdp_ring_offset is identical to xdp_ring_offset_v1
			 * except for the flags field added to the end.
			 */
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.rx);
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.tx);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.fr);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.cr);
			off.rx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.tx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.fr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);
			off.cr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);

			len = sizeof(off);
			to_copy = &off;
		} else {
			xsk_enter_rxtx_offsets(&off_v1.rx);
			xsk_enter_rxtx_offsets(&off_v1.tx);
			xsk_enter_umem_offsets(&off_v1.fr);
			xsk_enter_umem_offsets(&off_v1.cr);

			len = sizeof(off_v1);
			to_copy = &off_v1;
		}

		if (copy_to_user(optval, to_copy, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_OPTIONS:
	{
		struct xdp_options opts = {};

		if (len < sizeof(opts))
			return -EINVAL;

		mutex_lock(&xs->mutex);
		if (xs->zc)
			opts.flags |= XDP_OPTIONS_ZEROCOPY;
		mutex_unlock(&xs->mutex);

		len = sizeof(opts);
		if (copy_to_user(optval, &opts, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	default:
		break;
	}

	return -EOPNOTSUPP;
}

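/* Map one of the four rings into userspace. The ring is selected by
 * the mmap offset (XDP_PGOFF_*); fill and completion rings live in
 * fq_tmp/cq_tmp before bind and in the buffer pool afterwards, which
 * is why the socket state picks the source.
 */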
static int xsk_mmap(struct file *file, struct socket *sock,
		    struct vm_area_struct *vma)
{
	loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct xdp_sock *xs = xdp_sk(sock->sk);
	int state = READ_ONCE(xs->state);
	struct xsk_queue *q = NULL;

	if (state != XSK_READY && state != XSK_BOUND)
		return -EBUSY;

	if (offset == XDP_PGOFF_RX_RING) {
		q = READ_ONCE(xs->rx);
	} else if (offset == XDP_PGOFF_TX_RING) {
		q = READ_ONCE(xs->tx);
	} else {
		/* Matches the smp_wmb() in XDP_UMEM_REG */
		smp_rmb();
		if (offset == XDP_UMEM_PGOFF_FILL_RING)
			q = state == XSK_READY ? READ_ONCE(xs->fq_tmp) :
						 READ_ONCE(xs->pool->fq);
		else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
			q = state == XSK_READY ? READ_ONCE(xs->cq_tmp) :
						 READ_ONCE(xs->pool->cq);
	}

	if (!q)
		return -EINVAL;

	/* Matches the smp_wmb() in xsk_init_queue */
	smp_rmb();
	if (size > q->ring_vmalloc_size)
		return -EINVAL;

	return remap_vmalloc_range(vma, q->ring, 0);
}

static int xsk_notifier(struct notifier_block *this,
			unsigned long msg, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct sock *sk;

	switch (msg) {
	case NETDEV_UNREGISTER:
		mutex_lock(&net->xdp.lock);
		sk_for_each(sk, &net->xdp.list) {
			struct xdp_sock *xs = xdp_sk(sk);

			mutex_lock(&xs->mutex);
			if (xs->dev == dev) {
				sk->sk_err = ENETDOWN;
				if (!sock_flag(sk, SOCK_DEAD))
					sk_error_report(sk);

				xsk_unbind_dev(xs);

				/* Clear device references. */
				xp_clear_dev(xs->pool);
			}
			mutex_unlock(&xs->mutex);
		}
		mutex_unlock(&net->xdp.lock);
		break;
	}
	return NOTIFY_DONE;
}

static struct proto xsk_proto = {
	.name =		"XDP",
	.owner =	THIS_MODULE,
	.obj_size =	sizeof(struct xdp_sock),
};

static const struct proto_ops xsk_proto_ops = {
	.family		= PF_XDP,
	.owner		= THIS_MODULE,
	.release	= xsk_release,
	.bind		= xsk_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.poll		= xsk_poll,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= xsk_setsockopt,
	.getsockopt	= xsk_getsockopt,
	.sendmsg	= xsk_sendmsg,
	.recvmsg	= xsk_recvmsg,
	.mmap		= xsk_mmap,
};

static void xsk_destruct(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (!sock_flag(sk, SOCK_DEAD))
		return;

	if (!xp_put_pool(xs->pool))
		xdp_put_umem(xs->umem, !xs->pool);
}

static int xsk_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct xdp_sock *xs;
	struct sock *sk;

	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (protocol)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
	if (!sk)
		return -ENOBUFS;

	sock->ops = &xsk_proto_ops;

	sock_init_data(sock, sk);

	sk->sk_family = PF_XDP;

	sk->sk_destruct = xsk_destruct;

	sock_set_flag(sk, SOCK_RCU_FREE);

	xs = xdp_sk(sk);
	xs->state = XSK_READY;
	mutex_init(&xs->mutex);
	spin_lock_init(&xs->rx_lock);

	INIT_LIST_HEAD(&xs->map_list);
	spin_lock_init(&xs->map_list_lock);

	mutex_lock(&net->xdp.lock);
	sk_add_node_rcu(sk, &net->xdp.list);
	mutex_unlock(&net->xdp.lock);

	sock_prot_inuse_add(net, &xsk_proto, 1);

	return 0;
}

static const struct net_proto_family xsk_family_ops = {
	.family = PF_XDP,
	.create = xsk_create,
	.owner	= THIS_MODULE,
};

static struct notifier_block xsk_netdev_notifier = {
	.notifier_call	= xsk_notifier,
};

static int __net_init xsk_net_init(struct net *net)
{
	mutex_init(&net->xdp.lock);
	INIT_HLIST_HEAD(&net->xdp.list);
	return 0;
}

static void __net_exit xsk_net_exit(struct net *net)
{
	WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
}

static struct pernet_operations xsk_net_ops = {
	.init = xsk_net_init,
	.exit = xsk_net_exit,
};

static int __init xsk_init(void)
{
	int err;

	err = proto_register(&xsk_proto, 0 /* no slab */);
	if (err)
		goto out;

	err = sock_register(&xsk_family_ops);
	if (err)
		goto out_proto;

	err = register_pernet_subsys(&xsk_net_ops);
	if (err)
		goto out_sk;

	err = register_netdevice_notifier(&xsk_netdev_notifier);
	if (err)
		goto out_pernet;

	return 0;

out_pernet:
	unregister_pernet_subsys(&xsk_net_ops);
out_sk:
	sock_unregister(PF_XDP);
out_proto:
	proto_unregister(&xsk_proto);
out:
	return err;
}

fs_initcall(xsk_init);