xref: /linux/net/xdp/xsk.c (revision 1f5e808aa63af61ec0d6a14909056d6668813e86)
1 // SPDX-License-Identifier: GPL-2.0
2 /* XDP sockets
3  *
4  * AF_XDP sockets provide a channel between XDP programs and userspace
5  * applications.
6  * Copyright(c) 2018 Intel Corporation.
7  *
8  * Author(s): Björn Töpel <bjorn.topel@intel.com>
9  *	      Magnus Karlsson <magnus.karlsson@intel.com>
10  */
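/* A minimal userspace setup sketch (illustrative only; the chunk size and
 * ring sizes below are assumptions) exercising the setsockopt(), mmap()
 * and bind() handlers implemented in this file:
 *
 *	int fd = socket(AF_XDP, SOCK_RAW, 0);
 *	struct xdp_umem_reg mr = {
 *		.addr = (__u64)(uintptr_t)umem_area,	// page-aligned buffer
 *		.len = umem_len,
 *		.chunk_size = 2048,
 *		.headroom = 0,
 *	};
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
 *	int entries = 2048;			// must be a power of 2
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING, &entries, sizeof(entries));
 *	setsockopt(fd, SOL_XDP, XDP_RX_RING, &entries, sizeof(entries));
 *	// The rings are then mmap()ed at XDP_PGOFF_RX_RING,
 *	// XDP_UMEM_PGOFF_FILL_RING, etc., and the socket is bound to a
 *	// device queue with a struct sockaddr_xdp (sxdp_family = AF_XDP).
 */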
11 
12 #define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__
13 
14 #include <linux/if_xdp.h>
15 #include <linux/init.h>
16 #include <linux/sched/mm.h>
17 #include <linux/sched/signal.h>
18 #include <linux/sched/task.h>
19 #include <linux/socket.h>
20 #include <linux/file.h>
21 #include <linux/uaccess.h>
22 #include <linux/net.h>
23 #include <linux/netdevice.h>
24 #include <linux/rculist.h>
25 #include <linux/vmalloc.h>
26 #include <net/xdp_sock_drv.h>
27 #include <net/busy_poll.h>
28 #include <net/netdev_lock.h>
29 #include <net/netdev_rx_queue.h>
30 #include <net/xdp.h>
31 
32 #include "xsk_queue.h"
33 #include "xdp_umem.h"
34 #include "xsk.h"
35 
36 #define TX_BATCH_SIZE 32
37 #define MAX_PER_SOCKET_BUDGET 32
38 
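/* Per-skb bookkeeping for multi-descriptor (XDP_USE_SG) copy-mode Tx:
 * records every umem address that contributed to the skb so that all of
 * them can be posted to the completion ring in xsk_cq_submit_addr_locked()
 * once the skb is freed.
 */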
39 struct xsk_addrs {
40 	u32 num_descs;
41 	u64 addrs[MAX_SKB_FRAGS + 1];
42 };
43 
44 static struct kmem_cache *xsk_tx_generic_cache;
45 
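/* Helpers for the XDP_USE_NEED_WAKEUP protocol. A driver sets
 * XDP_RING_NEED_WAKEUP on the fill/Tx rings when it needs userspace to
 * explicitly kick it (e.g. via poll() or sendto()) and clears it again
 * when it can make progress on its own; pool->cached_need_wakeup avoids
 * rewriting the ring flags when nothing has changed.
 */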
46 void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
47 {
48 	if (pool->cached_need_wakeup & XDP_WAKEUP_RX)
49 		return;
50 
51 	pool->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
52 	pool->cached_need_wakeup |= XDP_WAKEUP_RX;
53 }
54 EXPORT_SYMBOL(xsk_set_rx_need_wakeup);
55 
56 void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
57 {
58 	struct xdp_sock *xs;
59 
60 	if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
61 		return;
62 
63 	rcu_read_lock();
64 	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
65 		xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
66 	}
67 	rcu_read_unlock();
68 
69 	pool->cached_need_wakeup |= XDP_WAKEUP_TX;
70 }
71 EXPORT_SYMBOL(xsk_set_tx_need_wakeup);
72 
73 void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
74 {
75 	if (!(pool->cached_need_wakeup & XDP_WAKEUP_RX))
76 		return;
77 
78 	pool->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
79 	pool->cached_need_wakeup &= ~XDP_WAKEUP_RX;
80 }
81 EXPORT_SYMBOL(xsk_clear_rx_need_wakeup);
82 
83 void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
84 {
85 	struct xdp_sock *xs;
86 
87 	if (!(pool->cached_need_wakeup & XDP_WAKEUP_TX))
88 		return;
89 
90 	rcu_read_lock();
91 	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
92 		xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
93 	}
94 	rcu_read_unlock();
95 
96 	pool->cached_need_wakeup &= ~XDP_WAKEUP_TX;
97 }
98 EXPORT_SYMBOL(xsk_clear_tx_need_wakeup);
99 
100 bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
101 {
102 	return pool->uses_need_wakeup;
103 }
104 EXPORT_SYMBOL(xsk_uses_need_wakeup);
105 
106 struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
107 					    u16 queue_id)
108 {
109 	if (queue_id < dev->real_num_rx_queues)
110 		return dev->_rx[queue_id].pool;
111 	if (queue_id < dev->real_num_tx_queues)
112 		return dev->_tx[queue_id].pool;
113 
114 	return NULL;
115 }
116 EXPORT_SYMBOL(xsk_get_pool_from_qid);
117 
118 void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
119 {
120 	if (queue_id < dev->num_rx_queues)
121 		dev->_rx[queue_id].pool = NULL;
122 	if (queue_id < dev->num_tx_queues)
123 		dev->_tx[queue_id].pool = NULL;
124 }
125 
126 /* The buffer pool is stored both in the _rx struct and the _tx struct as we do
127  * not know if the device has more tx queues than rx, or the opposite.
128  * This might also change during run time.
129  */
130 int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
131 			u16 queue_id)
132 {
133 	if (queue_id >= max_t(unsigned int,
134 			      dev->real_num_rx_queues,
135 			      dev->real_num_tx_queues))
136 		return -EINVAL;
137 
138 	if (queue_id < dev->real_num_rx_queues)
139 		dev->_rx[queue_id].pool = pool;
140 	if (queue_id < dev->real_num_tx_queues)
141 		dev->_tx[queue_id].pool = pool;
142 
143 	return 0;
144 }
145 
146 static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff_xsk *xskb, u32 len,
147 			u32 flags)
148 {
149 	u64 addr;
150 	int err;
151 
152 	addr = xp_get_handle(xskb, xskb->pool);
153 	err = xskq_prod_reserve_desc(xs->rx, addr, len, flags);
154 	if (err) {
155 		xs->rx_queue_full++;
156 		return err;
157 	}
158 
159 	xp_release(xskb);
160 	return 0;
161 }
162 
163 static int xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
164 {
165 	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
166 	u32 frags = xdp_buff_has_frags(xdp);
167 	struct xdp_buff_xsk *pos, *tmp;
168 	struct list_head *xskb_list;
169 	u32 contd = 0;
170 	int err;
171 
172 	if (frags)
173 		contd = XDP_PKT_CONTD;
174 
175 	err = __xsk_rcv_zc(xs, xskb, len, contd);
176 	if (err)
177 		goto err;
178 	if (likely(!frags))
179 		return 0;
180 
181 	xskb_list = &xskb->pool->xskb_list;
182 	list_for_each_entry_safe(pos, tmp, xskb_list, list_node) {
183 		if (list_is_singular(xskb_list))
184 			contd = 0;
185 		len = pos->xdp.data_end - pos->xdp.data;
186 		err = __xsk_rcv_zc(xs, pos, len, contd);
187 		if (err)
188 			goto err;
189 		list_del(&pos->list_node);
190 	}
191 
192 	return 0;
193 err:
194 	xsk_buff_free(xdp);
195 	return err;
196 }
197 
198 static void *xsk_copy_xdp_start(struct xdp_buff *from)
199 {
200 	if (unlikely(xdp_data_meta_unsupported(from)))
201 		return from->data;
202 	else
203 		return from->data_meta;
204 }
205 
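/* Copy up to to_len bytes of the source packet into one destination frame,
 * advancing through the source xdp_buff's linear data and its frags as
 * each is exhausted. Returns the number of bytes copied; rem is the total
 * still outstanding for the whole packet, so the copy also stops once the
 * final byte has been written.
 */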
206 static u32 xsk_copy_xdp(void *to, void **from, u32 to_len,
207 			u32 *from_len, skb_frag_t **frag, u32 rem)
208 {
209 	u32 copied = 0;
210 
211 	while (1) {
212 		u32 copy_len = min_t(u32, *from_len, to_len);
213 
214 		memcpy(to, *from, copy_len);
215 		copied += copy_len;
216 		if (rem == copied)
217 			return copied;
218 
219 		if (*from_len == copy_len) {
220 			*from = skb_frag_address(*frag);
221 			*from_len = skb_frag_size((*frag)++);
222 		} else {
223 			*from += copy_len;
224 			*from_len -= copy_len;
225 		}
226 		if (to_len == copy_len)
227 			return copied;
228 
229 		to_len -= copy_len;
230 		to += copy_len;
231 	}
232 }
233 
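/* Copy-mode receive: the incoming xdp_buff is copied into buffers taken
 * from the socket's pool. A packet that fits in a single rx frame takes
 * the fast path; otherwise it is split across multiple descriptors, with
 * XDP_PKT_CONTD set on every descriptor except the last one.
 */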
234 static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
235 {
236 	u32 frame_size = xsk_pool_get_rx_frame_size(xs->pool);
237 	void *copy_from = xsk_copy_xdp_start(xdp), *copy_to;
238 	u32 from_len, meta_len, rem, num_desc;
239 	struct xdp_buff_xsk *xskb;
240 	struct xdp_buff *xsk_xdp;
241 	skb_frag_t *frag;
242 
243 	from_len = xdp->data_end - copy_from;
244 	meta_len = xdp->data - copy_from;
245 	rem = len + meta_len;
246 
247 	if (len <= frame_size && !xdp_buff_has_frags(xdp)) {
248 		int err;
249 
250 		xsk_xdp = xsk_buff_alloc(xs->pool);
251 		if (!xsk_xdp) {
252 			xs->rx_dropped++;
253 			return -ENOMEM;
254 		}
255 		memcpy(xsk_xdp->data - meta_len, copy_from, rem);
256 		xskb = container_of(xsk_xdp, struct xdp_buff_xsk, xdp);
257 		err = __xsk_rcv_zc(xs, xskb, len, 0);
258 		if (err) {
259 			xsk_buff_free(xsk_xdp);
260 			return err;
261 		}
262 
263 		return 0;
264 	}
265 
266 	num_desc = (len - 1) / frame_size + 1;
267 
268 	if (!xsk_buff_can_alloc(xs->pool, num_desc)) {
269 		xs->rx_dropped++;
270 		return -ENOMEM;
271 	}
272 	if (xskq_prod_nb_free(xs->rx, num_desc) < num_desc) {
273 		xs->rx_queue_full++;
274 		return -ENOBUFS;
275 	}
276 
277 	if (xdp_buff_has_frags(xdp)) {
278 		struct skb_shared_info *sinfo;
279 
280 		sinfo = xdp_get_shared_info_from_buff(xdp);
281 		frag = &sinfo->frags[0];
282 	}
283 
284 	do {
285 		u32 to_len = frame_size + meta_len;
286 		u32 copied;
287 
288 		xsk_xdp = xsk_buff_alloc(xs->pool);
289 		copy_to = xsk_xdp->data - meta_len;
290 
291 		copied = xsk_copy_xdp(copy_to, &copy_from, to_len, &from_len, &frag, rem);
292 		rem -= copied;
293 
294 		xskb = container_of(xsk_xdp, struct xdp_buff_xsk, xdp);
295 		__xsk_rcv_zc(xs, xskb, copied - meta_len, rem ? XDP_PKT_CONTD : 0);
296 		meta_len = 0;
297 	} while (rem);
298 
299 	return 0;
300 }
301 
302 static bool xsk_tx_writeable(struct xdp_sock *xs)
303 {
304 	if (xskq_cons_present_entries(xs->tx) > xs->tx->nentries / 2)
305 		return false;
306 
307 	return true;
308 }
309 
310 static void __xsk_tx_release(struct xdp_sock *xs)
311 {
312 	__xskq_cons_release(xs->tx);
313 	if (xsk_tx_writeable(xs))
314 		xs->sk.sk_write_space(&xs->sk);
315 }
316 
317 static bool xsk_is_bound(struct xdp_sock *xs)
318 {
319 	if (READ_ONCE(xs->state) == XSK_BOUND) {
320 		/* Matches smp_wmb() in bind(). */
321 		smp_rmb();
322 		return true;
323 	}
324 	return false;
325 }
326 
327 static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
328 {
329 	if (!xsk_is_bound(xs))
330 		return -ENXIO;
331 
332 	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
333 		return -EINVAL;
334 
335 	if (len > xsk_pool_get_rx_frame_size(xs->pool) && !xs->sg) {
336 		xs->rx_dropped++;
337 		return -ENOSPC;
338 	}
339 
340 	return 0;
341 }
342 
343 static void xsk_flush(struct xdp_sock *xs)
344 {
345 	xskq_prod_submit(xs->rx);
346 	__xskq_cons_release(xs->pool->fq);
347 	sock_def_readable(&xs->sk);
348 }
349 
350 int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
351 {
352 	u32 len = xdp_get_buff_len(xdp);
353 	int err;
354 
355 	err = xsk_rcv_check(xs, xdp, len);
356 	if (!err) {
357 		spin_lock_bh(&xs->pool->rx_lock);
358 		err = __xsk_rcv(xs, xdp, len);
359 		xsk_flush(xs);
360 		spin_unlock_bh(&xs->pool->rx_lock);
361 	}
362 
363 	return err;
364 }
365 
366 static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
367 {
368 	u32 len = xdp_get_buff_len(xdp);
369 	int err;
370 
371 	err = xsk_rcv_check(xs, xdp, len);
372 	if (err)
373 		return err;
374 
375 	if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) {
376 		len = xdp->data_end - xdp->data;
377 		return xsk_rcv_zc(xs, xdp, len);
378 	}
379 
380 	err = __xsk_rcv(xs, xdp, len);
381 	if (!err)
382 		xdp_return_buff(xdp);
383 	return err;
384 }
385 
386 int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
387 {
388 	int err;
389 
390 	err = xsk_rcv(xs, xdp);
391 	if (err)
392 		return err;
393 
394 	if (!xs->flush_node.prev) {
395 		struct list_head *flush_list = bpf_net_ctx_get_xskmap_flush_list();
396 
397 		list_add(&xs->flush_node, flush_list);
398 	}
399 
400 	return 0;
401 }
402 
403 void __xsk_map_flush(struct list_head *flush_list)
404 {
405 	struct xdp_sock *xs, *tmp;
406 
407 	list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
408 		xsk_flush(xs);
409 		__list_del_clearprev(&xs->flush_node);
410 	}
411 }
412 
413 void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
414 {
415 	xskq_prod_submit_n(pool->cq, nb_entries);
416 }
417 EXPORT_SYMBOL(xsk_tx_completed);
418 
419 void xsk_tx_release(struct xsk_buff_pool *pool)
420 {
421 	struct xdp_sock *xs;
422 
423 	rcu_read_lock();
424 	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list)
425 		__xsk_tx_release(xs);
426 	rcu_read_unlock();
427 }
428 EXPORT_SYMBOL(xsk_tx_release);
429 
430 bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc)
431 {
432 	bool budget_exhausted = false;
433 	struct xdp_sock *xs;
434 
435 	rcu_read_lock();
436 again:
437 	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
438 		if (xs->tx_budget_spent >= MAX_PER_SOCKET_BUDGET) {
439 			budget_exhausted = true;
440 			continue;
441 		}
442 
443 		if (!xskq_cons_peek_desc(xs->tx, desc, pool)) {
444 			if (xskq_has_descs(xs->tx))
445 				xskq_cons_release(xs->tx);
446 			continue;
447 		}
448 
449 		xs->tx_budget_spent++;
450 
451 		/* This is the backpressure mechanism for the Tx path.
452 		 * Reserve space in the completion queue and only proceed
453 		 * if there is space in it. This avoids having to implement
454 		 * any buffering in the Tx path.
455 		 */
456 		if (xskq_prod_reserve_addr(pool->cq, desc->addr))
457 			goto out;
458 
459 		xskq_cons_release(xs->tx);
460 		rcu_read_unlock();
461 		return true;
462 	}
463 
464 	if (budget_exhausted) {
465 		list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list)
466 			xs->tx_budget_spent = 0;
467 
468 		budget_exhausted = false;
469 		goto again;
470 	}
471 
472 out:
473 	rcu_read_unlock();
474 	return false;
475 }
476 EXPORT_SYMBOL(xsk_tx_peek_desc);
477 
478 static u32 xsk_tx_peek_release_fallback(struct xsk_buff_pool *pool, u32 max_entries)
479 {
480 	struct xdp_desc *descs = pool->tx_descs;
481 	u32 nb_pkts = 0;
482 
483 	while (nb_pkts < max_entries && xsk_tx_peek_desc(pool, &descs[nb_pkts]))
484 		nb_pkts++;
485 
486 	xsk_tx_release(pool);
487 	return nb_pkts;
488 }
489 
490 u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 nb_pkts)
491 {
492 	struct xdp_sock *xs;
493 
494 	rcu_read_lock();
495 	if (!list_is_singular(&pool->xsk_tx_list)) {
496 		/* Fallback to the non-batched version */
497 		rcu_read_unlock();
498 		return xsk_tx_peek_release_fallback(pool, nb_pkts);
499 	}
500 
501 	xs = list_first_or_null_rcu(&pool->xsk_tx_list, struct xdp_sock, tx_list);
502 	if (!xs) {
503 		nb_pkts = 0;
504 		goto out;
505 	}
506 
507 	nb_pkts = xskq_cons_nb_entries(xs->tx, nb_pkts);
508 
509 	/* This is the backpressure mechanism for the Tx path. Try to
510 	 * reserve space in the completion queue for all packets, but
511 	 * if there are fewer slots available, just process that many
512 	 * packets. This avoids having to implement any buffering in
513 	 * the Tx path.
514 	 */
515 	nb_pkts = xskq_prod_nb_free(pool->cq, nb_pkts);
516 	if (!nb_pkts)
517 		goto out;
518 
519 	nb_pkts = xskq_cons_read_desc_batch(xs->tx, pool, nb_pkts);
520 	if (!nb_pkts) {
521 		xs->tx->queue_empty_descs++;
522 		goto out;
523 	}
524 
525 	__xskq_cons_release(xs->tx);
526 	xskq_prod_write_addr_batch(pool->cq, pool->tx_descs, nb_pkts);
527 	xs->sk.sk_write_space(&xs->sk);
528 
529 out:
530 	rcu_read_unlock();
531 	return nb_pkts;
532 }
533 EXPORT_SYMBOL(xsk_tx_peek_release_desc_batch);
534 
535 static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
536 {
537 	struct net_device *dev = xs->dev;
538 
539 	return dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
540 }
541 
542 static int xsk_cq_reserve_locked(struct xsk_buff_pool *pool)
543 {
544 	unsigned long flags;
545 	int ret;
546 
547 	spin_lock_irqsave(&pool->cq_lock, flags);
548 	ret = xskq_prod_reserve(pool->cq);
549 	spin_unlock_irqrestore(&pool->cq_lock, flags);
550 
551 	return ret;
552 }
553 
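/* skb_shinfo(skb)->destructor_arg is overloaded for copy-mode Tx: a
 * single-descriptor skb stores the umem address itself with bit 0 set as
 * a tag, while a multi-descriptor skb stores a pointer to a struct
 * xsk_addrs allocated from xsk_tx_generic_cache (pointer-aligned, so
 * bit 0 is clear).
 */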
554 static bool xsk_skb_destructor_is_addr(struct sk_buff *skb)
555 {
556 	return (uintptr_t)skb_shinfo(skb)->destructor_arg & 0x1UL;
557 }
558 
559 static u64 xsk_skb_destructor_get_addr(struct sk_buff *skb)
560 {
561 	return (u64)((uintptr_t)skb_shinfo(skb)->destructor_arg & ~0x1UL);
562 }
563 
564 static void xsk_skb_destructor_set_addr(struct sk_buff *skb, u64 addr)
565 {
566 	skb_shinfo(skb)->destructor_arg = (void *)((uintptr_t)addr | 0x1UL);
567 }
568 
569 static void xsk_inc_num_desc(struct sk_buff *skb)
570 {
571 	struct xsk_addrs *xsk_addr;
572 
573 	if (!xsk_skb_destructor_is_addr(skb)) {
574 		xsk_addr = (struct xsk_addrs *)skb_shinfo(skb)->destructor_arg;
575 		xsk_addr->num_descs++;
576 	}
577 }
578 
579 static u32 xsk_get_num_desc(struct sk_buff *skb)
580 {
581 	struct xsk_addrs *xsk_addr;
582 
583 	if (xsk_skb_destructor_is_addr(skb))
584 		return 1;
585 
586 	xsk_addr = (struct xsk_addrs *)skb_shinfo(skb)->destructor_arg;
587 
588 	return xsk_addr->num_descs;
589 }
590 
591 static void xsk_cq_submit_addr_locked(struct xsk_buff_pool *pool,
592 				      struct sk_buff *skb)
593 {
594 	u32 num_descs = xsk_get_num_desc(skb);
595 	struct xsk_addrs *xsk_addr;
596 	u32 descs_processed = 0;
597 	unsigned long flags;
598 	u32 idx, i;
599 
600 	spin_lock_irqsave(&pool->cq_lock, flags);
601 	idx = xskq_get_prod(pool->cq);
602 
603 	if (unlikely(num_descs > 1)) {
604 		xsk_addr = (struct xsk_addrs *)skb_shinfo(skb)->destructor_arg;
605 
606 		for (i = 0; i < num_descs; i++) {
607 			xskq_prod_write_addr(pool->cq, idx + descs_processed,
608 					     xsk_addr->addrs[i]);
609 			descs_processed++;
610 		}
611 		kmem_cache_free(xsk_tx_generic_cache, xsk_addr);
612 	} else {
613 		xskq_prod_write_addr(pool->cq, idx,
614 				     xsk_skb_destructor_get_addr(skb));
615 		descs_processed++;
616 	}
617 	xskq_prod_submit_n(pool->cq, descs_processed);
618 	spin_unlock_irqrestore(&pool->cq_lock, flags);
619 }
620 
621 static void xsk_cq_cancel_locked(struct xsk_buff_pool *pool, u32 n)
622 {
623 	unsigned long flags;
624 
625 	spin_lock_irqsave(&pool->cq_lock, flags);
626 	xskq_prod_cancel_n(pool->cq, n);
627 	spin_unlock_irqrestore(&pool->cq_lock, flags);
628 }
629 
630 static void xsk_destruct_skb(struct sk_buff *skb)
631 {
632 	struct xsk_tx_metadata_compl *compl = &skb_shinfo(skb)->xsk_meta;
633 
634 	if (compl->tx_timestamp) {
635 		/* sw completion timestamp, not a real one */
636 		*compl->tx_timestamp = ktime_get_tai_fast_ns();
637 	}
638 
639 	xsk_cq_submit_addr_locked(xdp_sk(skb->sk)->pool, skb);
640 	sock_wfree(skb);
641 }
642 
643 static void xsk_skb_init_misc(struct sk_buff *skb, struct xdp_sock *xs,
644 			      u64 addr)
645 {
646 	skb->dev = xs->dev;
647 	skb->priority = READ_ONCE(xs->sk.sk_priority);
648 	skb->mark = READ_ONCE(xs->sk.sk_mark);
649 	skb->destructor = xsk_destruct_skb;
650 	xsk_skb_destructor_set_addr(skb, addr);
651 }
652 
653 static void xsk_consume_skb(struct sk_buff *skb)
654 {
655 	struct xdp_sock *xs = xdp_sk(skb->sk);
656 	u32 num_descs = xsk_get_num_desc(skb);
657 	struct xsk_addrs *xsk_addr;
658 
659 	if (unlikely(num_descs > 1)) {
660 		xsk_addr = (struct xsk_addrs *)skb_shinfo(skb)->destructor_arg;
661 		kmem_cache_free(xsk_tx_generic_cache, xsk_addr);
662 	}
663 
664 	skb->destructor = sock_wfree;
665 	xsk_cq_cancel_locked(xs->pool, num_descs);
666 	/* Free skb without triggering the perf drop trace */
667 	consume_skb(skb);
668 	xs->skb = NULL;
669 }
670 
671 static void xsk_drop_skb(struct sk_buff *skb)
672 {
673 	xdp_sk(skb->sk)->tx->invalid_descs += xsk_get_num_desc(skb);
674 	xsk_consume_skb(skb);
675 }
676 
677 static int xsk_skb_metadata(struct sk_buff *skb, void *buffer,
678 			    struct xdp_desc *desc, struct xsk_buff_pool *pool,
679 			    u32 hr)
680 {
681 	struct xsk_tx_metadata *meta = NULL;
682 
683 	if (unlikely(pool->tx_metadata_len == 0))
684 		return -EINVAL;
685 
686 	meta = buffer - pool->tx_metadata_len;
687 	if (unlikely(!xsk_buff_valid_tx_metadata(meta)))
688 		return -EINVAL;
689 
690 	if (meta->flags & XDP_TXMD_FLAGS_CHECKSUM) {
691 		if (unlikely(meta->request.csum_start +
692 			     meta->request.csum_offset +
693 			     sizeof(__sum16) > desc->len))
694 			return -EINVAL;
695 
696 		skb->csum_start = hr + meta->request.csum_start;
697 		skb->csum_offset = meta->request.csum_offset;
698 		skb->ip_summed = CHECKSUM_PARTIAL;
699 
700 		if (unlikely(pool->tx_sw_csum)) {
701 			int err;
702 
703 			err = skb_checksum_help(skb);
704 			if (err)
705 				return err;
706 		}
707 	}
708 
709 	if (meta->flags & XDP_TXMD_FLAGS_LAUNCH_TIME)
710 		skb->skb_mstamp_ns = meta->request.launch_time;
711 	xsk_tx_metadata_to_compl(meta, &skb_shinfo(skb)->xsk_meta);
712 
713 	return 0;
714 }
715 
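/* For devices flagged IFF_TX_SKB_NO_LINEAR the umem pages are attached to
 * the skb as page frags, so no payload copy is needed. Each call appends
 * one descriptor's worth of data; a multi-buffer packet therefore grows
 * the same skb (xs->skb) across several calls.
 */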
716 static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs,
717 					      struct xdp_desc *desc)
718 {
719 	struct xsk_buff_pool *pool = xs->pool;
720 	u32 hr, len, ts, offset, copy, copied;
721 	struct sk_buff *skb = xs->skb;
722 	struct page *page;
723 	void *buffer;
724 	int err, i;
725 	u64 addr;
726 
727 	addr = desc->addr;
728 	buffer = xsk_buff_raw_get_data(pool, addr);
729 
730 	if (!skb) {
731 		hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(xs->dev->needed_headroom));
732 
733 		skb = sock_alloc_send_skb(&xs->sk, hr, 1, &err);
734 		if (unlikely(!skb))
735 			return ERR_PTR(err);
736 
737 		skb_reserve(skb, hr);
738 
739 		xsk_skb_init_misc(skb, xs, desc->addr);
740 		if (desc->options & XDP_TX_METADATA) {
741 			err = xsk_skb_metadata(skb, buffer, desc, pool, hr);
742 			if (unlikely(err))
743 				return ERR_PTR(err);
744 		}
745 	} else {
746 		struct xsk_addrs *xsk_addr;
747 
748 		if (xsk_skb_destructor_is_addr(skb)) {
749 			xsk_addr = kmem_cache_zalloc(xsk_tx_generic_cache,
750 						     GFP_KERNEL);
751 			if (!xsk_addr)
752 				return ERR_PTR(-ENOMEM);
753 
754 			xsk_addr->num_descs = 1;
755 			xsk_addr->addrs[0] = xsk_skb_destructor_get_addr(skb);
756 			skb_shinfo(skb)->destructor_arg = (void *)xsk_addr;
757 		} else {
758 			xsk_addr = (struct xsk_addrs *)skb_shinfo(skb)->destructor_arg;
759 		}
760 
761 		/* In case of the -EOVERFLOW that could happen below,
762 		 * xsk_consume_skb() will release this entry, as the whole skb
763 		 * would be dropped, which implies freeing all list elements.
764 		 */
765 		xsk_addr->addrs[xsk_addr->num_descs] = desc->addr;
766 	}
767 
768 	len = desc->len;
769 	ts = pool->unaligned ? len : pool->chunk_size;
770 
771 	offset = offset_in_page(buffer);
772 	addr = buffer - pool->addrs;
773 
774 	for (copied = 0, i = skb_shinfo(skb)->nr_frags; copied < len; i++) {
775 		if (unlikely(i >= MAX_SKB_FRAGS))
776 			return ERR_PTR(-EOVERFLOW);
777 
778 		page = pool->umem->pgs[addr >> PAGE_SHIFT];
779 		get_page(page);
780 
781 		copy = min_t(u32, PAGE_SIZE - offset, len - copied);
782 		skb_fill_page_desc(skb, i, page, offset, copy);
783 
784 		copied += copy;
785 		addr += copy;
786 		offset = 0;
787 	}
788 
789 	skb->len += len;
790 	skb->data_len += len;
791 	skb->truesize += ts;
792 
793 	refcount_add(ts, &xs->sk.sk_wmem_alloc);
794 
795 	return skb;
796 }
797 
798 static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
799 				     struct xdp_desc *desc)
800 {
801 	struct net_device *dev = xs->dev;
802 	struct sk_buff *skb = xs->skb;
803 	int err;
804 
805 	if (dev->priv_flags & IFF_TX_SKB_NO_LINEAR) {
806 		skb = xsk_build_skb_zerocopy(xs, desc);
807 		if (IS_ERR(skb)) {
808 			err = PTR_ERR(skb);
809 			skb = NULL;
810 			goto free_err;
811 		}
812 	} else {
813 		u32 hr, tr, len;
814 		void *buffer;
815 
816 		buffer = xsk_buff_raw_get_data(xs->pool, desc->addr);
817 		len = desc->len;
818 
819 		if (!skb) {
820 			hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(dev->needed_headroom));
821 			tr = dev->needed_tailroom;
822 			skb = sock_alloc_send_skb(&xs->sk, hr + len + tr, 1, &err);
823 			if (unlikely(!skb))
824 				goto free_err;
825 
826 			skb_reserve(skb, hr);
827 			skb_put(skb, len);
828 
829 			err = skb_store_bits(skb, 0, buffer, len);
830 			if (unlikely(err))
831 				goto free_err;
832 
833 			xsk_skb_init_misc(skb, xs, desc->addr);
834 			if (desc->options & XDP_TX_METADATA) {
835 				err = xsk_skb_metadata(skb, buffer, desc,
836 						       xs->pool, hr);
837 				if (unlikely(err))
838 					goto free_err;
839 			}
840 		} else {
841 			int nr_frags = skb_shinfo(skb)->nr_frags;
842 			struct xsk_addrs *xsk_addr;
843 			struct page *page;
844 			u8 *vaddr;
845 
846 			if (xsk_skb_destructor_is_addr(skb)) {
847 				xsk_addr = kmem_cache_zalloc(xsk_tx_generic_cache,
848 							     GFP_KERNEL);
849 				if (!xsk_addr) {
850 					err = -ENOMEM;
851 					goto free_err;
852 				}
853 
854 				xsk_addr->num_descs = 1;
855 				xsk_addr->addrs[0] = xsk_skb_destructor_get_addr(skb);
856 				skb_shinfo(skb)->destructor_arg = (void *)xsk_addr;
857 			} else {
858 				xsk_addr = (struct xsk_addrs *)skb_shinfo(skb)->destructor_arg;
859 			}
860 
861 			if (unlikely(nr_frags == (MAX_SKB_FRAGS - 1) && xp_mb_desc(desc))) {
862 				err = -EOVERFLOW;
863 				goto free_err;
864 			}
865 
866 			page = alloc_page(xs->sk.sk_allocation);
867 			if (unlikely(!page)) {
868 				err = -EAGAIN;
869 				goto free_err;
870 			}
871 
872 			vaddr = kmap_local_page(page);
873 			memcpy(vaddr, buffer, len);
874 			kunmap_local(vaddr);
875 
876 			skb_add_rx_frag(skb, nr_frags, page, 0, len, PAGE_SIZE);
877 			refcount_add(PAGE_SIZE, &xs->sk.sk_wmem_alloc);
878 
879 			xsk_addr->addrs[xsk_addr->num_descs] = desc->addr;
880 		}
881 	}
882 
883 	xsk_inc_num_desc(skb);
884 
885 	return skb;
886 
887 free_err:
888 	if (skb && !skb_shinfo(skb)->nr_frags)
889 		kfree_skb(skb);
890 
891 	if (err == -EOVERFLOW) {
892 		/* Drop the packet */
893 		xsk_inc_num_desc(xs->skb);
894 		xsk_drop_skb(xs->skb);
895 		xskq_cons_release(xs->tx);
896 	} else {
897 		/* Let application retry */
898 		xsk_cq_cancel_locked(xs->pool, 1);
899 	}
900 
901 	return ERR_PTR(err);
902 }
903 
904 static int __xsk_generic_xmit(struct sock *sk)
905 {
906 	struct xdp_sock *xs = xdp_sk(sk);
907 	bool sent_frame = false;
908 	struct xdp_desc desc;
909 	struct sk_buff *skb;
910 	u32 max_batch;
911 	int err = 0;
912 
913 	mutex_lock(&xs->mutex);
914 
915 	/* Since we dropped the RCU read lock, the socket state might have changed. */
916 	if (unlikely(!xsk_is_bound(xs))) {
917 		err = -ENXIO;
918 		goto out;
919 	}
920 
921 	if (xs->queue_id >= xs->dev->real_num_tx_queues)
922 		goto out;
923 
924 	max_batch = READ_ONCE(xs->max_tx_budget);
925 	while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
926 		if (max_batch-- == 0) {
927 			err = -EAGAIN;
928 			goto out;
929 		}
930 
931 		/* This is the backpressure mechanism for the Tx path.
932 		 * Reserve space in the completion queue and only proceed
933 		 * if there is space in it. This avoids having to implement
934 		 * any buffering in the Tx path.
935 		 */
936 		err = xsk_cq_reserve_locked(xs->pool);
937 		if (err) {
938 			err = -EAGAIN;
939 			goto out;
940 		}
941 
942 		skb = xsk_build_skb(xs, &desc);
943 		if (IS_ERR(skb)) {
944 			err = PTR_ERR(skb);
945 			if (err != -EOVERFLOW)
946 				goto out;
947 			err = 0;
948 			continue;
949 		}
950 
951 		xskq_cons_release(xs->tx);
952 
953 		if (xp_mb_desc(&desc)) {
954 			xs->skb = skb;
955 			continue;
956 		}
957 
958 		err = __dev_direct_xmit(skb, xs->queue_id);
959 		if (err == NETDEV_TX_BUSY) {
960 			/* Tell user-space to retry the send */
961 			xskq_cons_cancel_n(xs->tx, xsk_get_num_desc(skb));
962 			xsk_consume_skb(skb);
963 			err = -EAGAIN;
964 			goto out;
965 		}
966 
967 		/* Ignore NET_XMIT_CN as packet might have been sent */
968 		if (err == NET_XMIT_DROP) {
969 			/* SKB completed but not sent */
970 			err = -EBUSY;
971 			xs->skb = NULL;
972 			goto out;
973 		}
974 
975 		sent_frame = true;
976 		xs->skb = NULL;
977 	}
978 
979 	if (xskq_has_descs(xs->tx)) {
980 		if (xs->skb)
981 			xsk_drop_skb(xs->skb);
982 		xskq_cons_release(xs->tx);
983 	}
984 
985 out:
986 	if (sent_frame)
987 		__xsk_tx_release(xs);
988 
989 	mutex_unlock(&xs->mutex);
990 	return err;
991 }
992 
993 static int xsk_generic_xmit(struct sock *sk)
994 {
995 	int ret;
996 
997 	/* Drop the RCU lock since the SKB path might sleep. */
998 	rcu_read_unlock();
999 	ret = __xsk_generic_xmit(sk);
1000 	/* Reacquire RCU lock before going into common code. */
1001 	rcu_read_lock();
1002 
1003 	return ret;
1004 }
1005 
1006 static bool xsk_no_wakeup(struct sock *sk)
1007 {
1008 #ifdef CONFIG_NET_RX_BUSY_POLL
1009 	/* Prefer busy-polling, skip the wakeup. */
1010 	return READ_ONCE(sk->sk_prefer_busy_poll) && READ_ONCE(sk->sk_ll_usec) &&
1011 		napi_id_valid(READ_ONCE(sk->sk_napi_id));
1012 #else
1013 	return false;
1014 #endif
1015 }
1016 
1017 static int xsk_check_common(struct xdp_sock *xs)
1018 {
1019 	if (unlikely(!xsk_is_bound(xs)))
1020 		return -ENXIO;
1021 	if (unlikely(!(xs->dev->flags & IFF_UP)))
1022 		return -ENETDOWN;
1023 
1024 	return 0;
1025 }
1026 
1027 static int __xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
1028 {
1029 	bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
1030 	struct sock *sk = sock->sk;
1031 	struct xdp_sock *xs = xdp_sk(sk);
1032 	struct xsk_buff_pool *pool;
1033 	int err;
1034 
1035 	err = xsk_check_common(xs);
1036 	if (err)
1037 		return err;
1038 	if (unlikely(need_wait))
1039 		return -EOPNOTSUPP;
1040 	if (unlikely(!xs->tx))
1041 		return -ENOBUFS;
1042 
1043 	if (sk_can_busy_loop(sk))
1044 		sk_busy_loop(sk, 1); /* only support non-blocking sockets */
1045 
1046 	if (xs->zc && xsk_no_wakeup(sk))
1047 		return 0;
1048 
1049 	pool = xs->pool;
1050 	if (pool->cached_need_wakeup & XDP_WAKEUP_TX) {
1051 		if (xs->zc)
1052 			return xsk_wakeup(xs, XDP_WAKEUP_TX);
1053 		return xsk_generic_xmit(sk);
1054 	}
1055 	return 0;
1056 }
1057 
1058 static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
1059 {
1060 	int ret;
1061 
1062 	rcu_read_lock();
1063 	ret = __xsk_sendmsg(sock, m, total_len);
1064 	rcu_read_unlock();
1065 
1066 	return ret;
1067 }
1068 
1069 static int __xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
1070 {
1071 	bool need_wait = !(flags & MSG_DONTWAIT);
1072 	struct sock *sk = sock->sk;
1073 	struct xdp_sock *xs = xdp_sk(sk);
1074 	int err;
1075 
1076 	err = xsk_check_common(xs);
1077 	if (err)
1078 		return err;
1079 	if (unlikely(!xs->rx))
1080 		return -ENOBUFS;
1081 	if (unlikely(need_wait))
1082 		return -EOPNOTSUPP;
1083 
1084 	if (sk_can_busy_loop(sk))
1085 		sk_busy_loop(sk, 1); /* only support non-blocking sockets */
1086 
1087 	if (xsk_no_wakeup(sk))
1088 		return 0;
1089 
1090 	if (xs->pool->cached_need_wakeup & XDP_WAKEUP_RX && xs->zc)
1091 		return xsk_wakeup(xs, XDP_WAKEUP_RX);
1092 	return 0;
1093 }
1094 
1095 static int xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
1096 {
1097 	int ret;
1098 
1099 	rcu_read_lock();
1100 	ret = __xsk_recvmsg(sock, m, len, flags);
1101 	rcu_read_unlock();
1102 
1103 	return ret;
1104 }
1105 
1106 static __poll_t xsk_poll(struct file *file, struct socket *sock,
1107 			     struct poll_table_struct *wait)
1108 {
1109 	__poll_t mask = 0;
1110 	struct sock *sk = sock->sk;
1111 	struct xdp_sock *xs = xdp_sk(sk);
1112 	struct xsk_buff_pool *pool;
1113 
1114 	sock_poll_wait(file, sock, wait);
1115 
1116 	rcu_read_lock();
1117 	if (xsk_check_common(xs))
1118 		goto out;
1119 
1120 	pool = xs->pool;
1121 
1122 	if (pool->cached_need_wakeup) {
1123 		if (xs->zc)
1124 			xsk_wakeup(xs, pool->cached_need_wakeup);
1125 		else if (xs->tx)
1126 			/* Poll needs to drive Tx also in copy mode */
1127 			xsk_generic_xmit(sk);
1128 	}
1129 
1130 	if (xs->rx && !xskq_prod_is_empty(xs->rx))
1131 		mask |= EPOLLIN | EPOLLRDNORM;
1132 	if (xs->tx && xsk_tx_writeable(xs))
1133 		mask |= EPOLLOUT | EPOLLWRNORM;
1134 out:
1135 	rcu_read_unlock();
1136 	return mask;
1137 }
1138 
1139 static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
1140 			  bool umem_queue)
1141 {
1142 	struct xsk_queue *q;
1143 
1144 	if (entries == 0 || *queue || !is_power_of_2(entries))
1145 		return -EINVAL;
1146 
1147 	q = xskq_create(entries, umem_queue);
1148 	if (!q)
1149 		return -ENOMEM;
1150 
1151 	/* Make sure queue is ready before it can be seen by others */
1152 	smp_wmb();
1153 	WRITE_ONCE(*queue, q);
1154 	return 0;
1155 }
1156 
1157 static void xsk_unbind_dev(struct xdp_sock *xs)
1158 {
1159 	struct net_device *dev = xs->dev;
1160 
1161 	if (xs->state != XSK_BOUND)
1162 		return;
1163 	WRITE_ONCE(xs->state, XSK_UNBOUND);
1164 
1165 	/* Wait for driver to stop using the xdp socket. */
1166 	xp_del_xsk(xs->pool, xs);
1167 	synchronize_net();
1168 	dev_put(dev);
1169 }
1170 
1171 static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs,
1172 					      struct xdp_sock __rcu ***map_entry)
1173 {
1174 	struct xsk_map *map = NULL;
1175 	struct xsk_map_node *node;
1176 
1177 	*map_entry = NULL;
1178 
1179 	spin_lock_bh(&xs->map_list_lock);
1180 	node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node,
1181 					node);
1182 	if (node) {
1183 		bpf_map_inc(&node->map->map);
1184 		map = node->map;
1185 		*map_entry = node->map_entry;
1186 	}
1187 	spin_unlock_bh(&xs->map_list_lock);
1188 	return map;
1189 }
1190 
1191 static void xsk_delete_from_maps(struct xdp_sock *xs)
1192 {
1193 	/* This function removes the current XDP socket from all the
1194 	 * maps it resides in. We need to take extra care here, due to
1195 	 * the two locks involved. Each map has a lock synchronizing
1196 	 * updates to the entries, and each socket has a lock that
1197 	 * synchronizes access to the list of maps (map_list). For
1198 	 * deadlock avoidance the locks need to be taken in the order
1199 	 * "map lock"->"socket map list lock". We start off by
1200 	 * accessing the socket map list, and take a reference to the
1201 	 * map to guarantee existence between the
1202 	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete()
1203 	 * calls. Then we ask the map to remove the socket, which
1204 	 * tries to remove the socket from the map. Note that there
1205 	 * might be updates to the map between
1206 	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete().
1207 	 */
1208 	struct xdp_sock __rcu **map_entry = NULL;
1209 	struct xsk_map *map;
1210 
1211 	while ((map = xsk_get_map_list_entry(xs, &map_entry))) {
1212 		xsk_map_try_sock_delete(map, xs, map_entry);
1213 		bpf_map_put(&map->map);
1214 	}
1215 }
1216 
1217 static int xsk_release(struct socket *sock)
1218 {
1219 	struct sock *sk = sock->sk;
1220 	struct xdp_sock *xs = xdp_sk(sk);
1221 	struct net *net;
1222 
1223 	if (!sk)
1224 		return 0;
1225 
1226 	net = sock_net(sk);
1227 
1228 	if (xs->skb)
1229 		xsk_drop_skb(xs->skb);
1230 
1231 	mutex_lock(&net->xdp.lock);
1232 	sk_del_node_init_rcu(sk);
1233 	mutex_unlock(&net->xdp.lock);
1234 
1235 	sock_prot_inuse_add(net, sk->sk_prot, -1);
1236 
1237 	xsk_delete_from_maps(xs);
1238 	mutex_lock(&xs->mutex);
1239 	xsk_unbind_dev(xs);
1240 	mutex_unlock(&xs->mutex);
1241 
1242 	xskq_destroy(xs->rx);
1243 	xskq_destroy(xs->tx);
1244 	xskq_destroy(xs->fq_tmp);
1245 	xskq_destroy(xs->cq_tmp);
1246 
1247 	sock_orphan(sk);
1248 	sock->sk = NULL;
1249 
1250 	sock_put(sk);
1251 
1252 	return 0;
1253 }
1254 
1255 static struct socket *xsk_lookup_xsk_from_fd(int fd)
1256 {
1257 	struct socket *sock;
1258 	int err;
1259 
1260 	sock = sockfd_lookup(fd, &err);
1261 	if (!sock)
1262 		return ERR_PTR(-ENOTSOCK);
1263 
1264 	if (sock->sk->sk_family != PF_XDP) {
1265 		sockfd_put(sock);
1266 		return ERR_PTR(-ENOPROTOOPT);
1267 	}
1268 
1269 	return sock;
1270 }
1271 
1272 static bool xsk_validate_queues(struct xdp_sock *xs)
1273 {
1274 	return xs->fq_tmp && xs->cq_tmp;
1275 }
1276 
1277 static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
1278 {
1279 	struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
1280 	struct sock *sk = sock->sk;
1281 	struct xdp_sock *xs = xdp_sk(sk);
1282 	struct net_device *dev;
1283 	int bound_dev_if;
1284 	u32 flags, qid;
1285 	int err = 0;
1286 
1287 	if (addr_len < sizeof(struct sockaddr_xdp))
1288 		return -EINVAL;
1289 	if (sxdp->sxdp_family != AF_XDP)
1290 		return -EINVAL;
1291 
1292 	flags = sxdp->sxdp_flags;
1293 	if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY |
1294 		      XDP_USE_NEED_WAKEUP | XDP_USE_SG))
1295 		return -EINVAL;
1296 
1297 	bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
1298 	if (bound_dev_if && bound_dev_if != sxdp->sxdp_ifindex)
1299 		return -EINVAL;
1300 
1301 	rtnl_lock();
1302 	mutex_lock(&xs->mutex);
1303 	if (xs->state != XSK_READY) {
1304 		err = -EBUSY;
1305 		goto out_release;
1306 	}
1307 
1308 	dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
1309 	if (!dev) {
1310 		err = -ENODEV;
1311 		goto out_release;
1312 	}
1313 
1314 	netdev_lock_ops(dev);
1315 
1316 	if (!xs->rx && !xs->tx) {
1317 		err = -EINVAL;
1318 		goto out_unlock;
1319 	}
1320 
1321 	qid = sxdp->sxdp_queue_id;
1322 
1323 	if (flags & XDP_SHARED_UMEM) {
1324 		struct xdp_sock *umem_xs;
1325 		struct socket *sock;
1326 
1327 		if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY) ||
1328 		    (flags & XDP_USE_NEED_WAKEUP) || (flags & XDP_USE_SG)) {
1329 			/* Cannot specify flags for shared sockets. */
1330 			err = -EINVAL;
1331 			goto out_unlock;
1332 		}
1333 
1334 		if (xs->umem) {
1335 			/* We already have our own. */
1336 			err = -EINVAL;
1337 			goto out_unlock;
1338 		}
1339 
1340 		sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
1341 		if (IS_ERR(sock)) {
1342 			err = PTR_ERR(sock);
1343 			goto out_unlock;
1344 		}
1345 
1346 		umem_xs = xdp_sk(sock->sk);
1347 		if (!xsk_is_bound(umem_xs)) {
1348 			err = -EBADF;
1349 			sockfd_put(sock);
1350 			goto out_unlock;
1351 		}
1352 
1353 		if (umem_xs->queue_id != qid || umem_xs->dev != dev) {
1354 			/* Share the umem with another socket on another qid
1355 			 * and/or device.
1356 			 */
1357 			xs->pool = xp_create_and_assign_umem(xs,
1358 							     umem_xs->umem);
1359 			if (!xs->pool) {
1360 				err = -ENOMEM;
1361 				sockfd_put(sock);
1362 				goto out_unlock;
1363 			}
1364 
1365 			err = xp_assign_dev_shared(xs->pool, umem_xs, dev,
1366 						   qid);
1367 			if (err) {
1368 				xp_destroy(xs->pool);
1369 				xs->pool = NULL;
1370 				sockfd_put(sock);
1371 				goto out_unlock;
1372 			}
1373 		} else {
1374 			/* Share the buffer pool with the other socket. */
1375 			if (xs->fq_tmp || xs->cq_tmp) {
1376 				/* Do not allow setting your own fq or cq. */
1377 				err = -EINVAL;
1378 				sockfd_put(sock);
1379 				goto out_unlock;
1380 			}
1381 
1382 			xp_get_pool(umem_xs->pool);
1383 			xs->pool = umem_xs->pool;
1384 
1385 			/* If the underlying shared umem was created without a Tx
1386 			 * ring, allocate the Tx descs array that the Tx batching
1387 			 * API utilizes.
1388 			 */
1389 			if (xs->tx && !xs->pool->tx_descs) {
1390 				err = xp_alloc_tx_descs(xs->pool, xs);
1391 				if (err) {
1392 					xp_put_pool(xs->pool);
1393 					xs->pool = NULL;
1394 					sockfd_put(sock);
1395 					goto out_unlock;
1396 				}
1397 			}
1398 		}
1399 
1400 		xdp_get_umem(umem_xs->umem);
1401 		WRITE_ONCE(xs->umem, umem_xs->umem);
1402 		sockfd_put(sock);
1403 	} else if (!xs->umem || !xsk_validate_queues(xs)) {
1404 		err = -EINVAL;
1405 		goto out_unlock;
1406 	} else {
1407 		/* This xsk has its own umem. */
1408 		xs->pool = xp_create_and_assign_umem(xs, xs->umem);
1409 		if (!xs->pool) {
1410 			err = -ENOMEM;
1411 			goto out_unlock;
1412 		}
1413 
1414 		err = xp_assign_dev(xs->pool, dev, qid, flags);
1415 		if (err) {
1416 			xp_destroy(xs->pool);
1417 			xs->pool = NULL;
1418 			goto out_unlock;
1419 		}
1420 	}
1421 
1422 	/* FQ and CQ are now owned by the buffer pool and cleaned up with it. */
1423 	xs->fq_tmp = NULL;
1424 	xs->cq_tmp = NULL;
1425 
1426 	xs->dev = dev;
1427 	xs->zc = xs->umem->zc;
1428 	xs->sg = !!(xs->umem->flags & XDP_UMEM_SG_FLAG);
1429 	xs->queue_id = qid;
1430 	xp_add_xsk(xs->pool, xs);
1431 
1432 	if (qid < dev->real_num_rx_queues) {
1433 		struct netdev_rx_queue *rxq;
1434 
1435 		rxq = __netif_get_rx_queue(dev, qid);
1436 		if (rxq->napi)
1437 			__sk_mark_napi_id_once(sk, rxq->napi->napi_id);
1438 	}
1439 
1440 out_unlock:
1441 	if (err) {
1442 		dev_put(dev);
1443 	} else {
1444 		/* Matches smp_rmb() in bind() for shared umem
1445 		 * sockets, and xsk_is_bound().
1446 		 */
1447 		smp_wmb();
1448 		WRITE_ONCE(xs->state, XSK_BOUND);
1449 	}
1450 	netdev_unlock_ops(dev);
1451 out_release:
1452 	mutex_unlock(&xs->mutex);
1453 	rtnl_unlock();
1454 	return err;
1455 }
1456 
1457 struct xdp_umem_reg_v1 {
1458 	__u64 addr; /* Start of packet data area */
1459 	__u64 len; /* Length of packet data area */
1460 	__u32 chunk_size;
1461 	__u32 headroom;
1462 };
1463 
1464 static int xsk_setsockopt(struct socket *sock, int level, int optname,
1465 			  sockptr_t optval, unsigned int optlen)
1466 {
1467 	struct sock *sk = sock->sk;
1468 	struct xdp_sock *xs = xdp_sk(sk);
1469 	int err;
1470 
1471 	if (level != SOL_XDP)
1472 		return -ENOPROTOOPT;
1473 
1474 	switch (optname) {
1475 	case XDP_RX_RING:
1476 	case XDP_TX_RING:
1477 	{
1478 		struct xsk_queue **q;
1479 		int entries;
1480 
1481 		if (optlen < sizeof(entries))
1482 			return -EINVAL;
1483 		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
1484 			return -EFAULT;
1485 
1486 		mutex_lock(&xs->mutex);
1487 		if (xs->state != XSK_READY) {
1488 			mutex_unlock(&xs->mutex);
1489 			return -EBUSY;
1490 		}
1491 		q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
1492 		err = xsk_init_queue(entries, q, false);
1493 		if (!err && optname == XDP_TX_RING)
1494 			/* Tx needs to be explicitly woken up the first time */
1495 			xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
1496 		mutex_unlock(&xs->mutex);
1497 		return err;
1498 	}
1499 	case XDP_UMEM_REG:
1500 	{
1501 		size_t mr_size = sizeof(struct xdp_umem_reg);
1502 		struct xdp_umem_reg mr = {};
1503 		struct xdp_umem *umem;
1504 
1505 		if (optlen < sizeof(struct xdp_umem_reg_v1))
1506 			return -EINVAL;
1507 		else if (optlen < sizeof(mr))
1508 			mr_size = sizeof(struct xdp_umem_reg_v1);
1509 
1510 		BUILD_BUG_ON(sizeof(struct xdp_umem_reg_v1) >= sizeof(struct xdp_umem_reg));
1511 
1512 		/* Make sure the last field of the struct doesn't have
1513 		 * uninitialized padding. All padding has to be explicit
1514 		 * and has to be set to zero by the userspace to make
1515 		 * struct xdp_umem_reg extensible in the future.
1516 		 */
1517 		BUILD_BUG_ON(offsetof(struct xdp_umem_reg, tx_metadata_len) +
1518 			     sizeof_field(struct xdp_umem_reg, tx_metadata_len) !=
1519 			     sizeof(struct xdp_umem_reg));
1520 
1521 		if (copy_from_sockptr(&mr, optval, mr_size))
1522 			return -EFAULT;
1523 
1524 		mutex_lock(&xs->mutex);
1525 		if (xs->state != XSK_READY || xs->umem) {
1526 			mutex_unlock(&xs->mutex);
1527 			return -EBUSY;
1528 		}
1529 
1530 		umem = xdp_umem_create(&mr);
1531 		if (IS_ERR(umem)) {
1532 			mutex_unlock(&xs->mutex);
1533 			return PTR_ERR(umem);
1534 		}
1535 
1536 		/* Make sure umem is ready before it can be seen by others */
1537 		smp_wmb();
1538 		WRITE_ONCE(xs->umem, umem);
1539 		mutex_unlock(&xs->mutex);
1540 		return 0;
1541 	}
1542 	case XDP_UMEM_FILL_RING:
1543 	case XDP_UMEM_COMPLETION_RING:
1544 	{
1545 		struct xsk_queue **q;
1546 		int entries;
1547 
1548 		if (optlen < sizeof(entries))
1549 			return -EINVAL;
1550 		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
1551 			return -EFAULT;
1552 
1553 		mutex_lock(&xs->mutex);
1554 		if (xs->state != XSK_READY) {
1555 			mutex_unlock(&xs->mutex);
1556 			return -EBUSY;
1557 		}
1558 
1559 		q = (optname == XDP_UMEM_FILL_RING) ? &xs->fq_tmp :
1560 			&xs->cq_tmp;
1561 		err = xsk_init_queue(entries, q, true);
1562 		mutex_unlock(&xs->mutex);
1563 		return err;
1564 	}
1565 	case XDP_MAX_TX_SKB_BUDGET:
1566 	{
1567 		unsigned int budget;
1568 
1569 		if (optlen != sizeof(budget))
1570 			return -EINVAL;
1571 		if (copy_from_sockptr(&budget, optval, sizeof(budget)))
1572 			return -EFAULT;
1573 		if (!xs->tx ||
1574 		    budget < TX_BATCH_SIZE || budget > xs->tx->nentries)
1575 			return -EACCES;
1576 
1577 		WRITE_ONCE(xs->max_tx_budget, budget);
1578 		return 0;
1579 	}
1580 	default:
1581 		break;
1582 	}
1583 
1584 	return -ENOPROTOOPT;
1585 }
1586 
1587 static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring)
1588 {
1589 	ring->producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
1590 	ring->consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
1591 	ring->desc = offsetof(struct xdp_rxtx_ring, desc);
1592 }
1593 
1594 static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
1595 {
1596 	ring->producer = offsetof(struct xdp_umem_ring, ptrs.producer);
1597 	ring->consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
1598 	ring->desc = offsetof(struct xdp_umem_ring, desc);
1599 }
1600 
1601 struct xdp_statistics_v1 {
1602 	__u64 rx_dropped;
1603 	__u64 rx_invalid_descs;
1604 	__u64 tx_invalid_descs;
1605 };
1606 
1607 static int xsk_getsockopt(struct socket *sock, int level, int optname,
1608 			  char __user *optval, int __user *optlen)
1609 {
1610 	struct sock *sk = sock->sk;
1611 	struct xdp_sock *xs = xdp_sk(sk);
1612 	int len;
1613 
1614 	if (level != SOL_XDP)
1615 		return -ENOPROTOOPT;
1616 
1617 	if (get_user(len, optlen))
1618 		return -EFAULT;
1619 	if (len < 0)
1620 		return -EINVAL;
1621 
1622 	switch (optname) {
1623 	case XDP_STATISTICS:
1624 	{
1625 		struct xdp_statistics stats = {};
1626 		bool extra_stats = true;
1627 		size_t stats_size;
1628 
1629 		if (len < sizeof(struct xdp_statistics_v1)) {
1630 			return -EINVAL;
1631 		} else if (len < sizeof(stats)) {
1632 			extra_stats = false;
1633 			stats_size = sizeof(struct xdp_statistics_v1);
1634 		} else {
1635 			stats_size = sizeof(stats);
1636 		}
1637 
1638 		mutex_lock(&xs->mutex);
1639 		stats.rx_dropped = xs->rx_dropped;
1640 		if (extra_stats) {
1641 			stats.rx_ring_full = xs->rx_queue_full;
1642 			stats.rx_fill_ring_empty_descs =
1643 				xs->pool ? xskq_nb_queue_empty_descs(xs->pool->fq) : 0;
1644 			stats.tx_ring_empty_descs = xskq_nb_queue_empty_descs(xs->tx);
1645 		} else {
1646 			stats.rx_dropped += xs->rx_queue_full;
1647 		}
1648 		stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
1649 		stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
1650 		mutex_unlock(&xs->mutex);
1651 
1652 		if (copy_to_user(optval, &stats, stats_size))
1653 			return -EFAULT;
1654 		if (put_user(stats_size, optlen))
1655 			return -EFAULT;
1656 
1657 		return 0;
1658 	}
1659 	case XDP_MMAP_OFFSETS:
1660 	{
1661 		struct xdp_mmap_offsets off;
1662 		struct xdp_mmap_offsets_v1 off_v1;
1663 		bool flags_supported = true;
1664 		void *to_copy;
1665 
1666 		if (len < sizeof(off_v1))
1667 			return -EINVAL;
1668 		else if (len < sizeof(off))
1669 			flags_supported = false;
1670 
1671 		if (flags_supported) {
1672 			/* xdp_ring_offset is identical to xdp_ring_offset_v1
1673 			 * except for the flags field added to the end.
1674 			 */
1675 			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
1676 					       &off.rx);
1677 			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
1678 					       &off.tx);
1679 			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
1680 					       &off.fr);
1681 			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
1682 					       &off.cr);
1683 			off.rx.flags = offsetof(struct xdp_rxtx_ring,
1684 						ptrs.flags);
1685 			off.tx.flags = offsetof(struct xdp_rxtx_ring,
1686 						ptrs.flags);
1687 			off.fr.flags = offsetof(struct xdp_umem_ring,
1688 						ptrs.flags);
1689 			off.cr.flags = offsetof(struct xdp_umem_ring,
1690 						ptrs.flags);
1691 
1692 			len = sizeof(off);
1693 			to_copy = &off;
1694 		} else {
1695 			xsk_enter_rxtx_offsets(&off_v1.rx);
1696 			xsk_enter_rxtx_offsets(&off_v1.tx);
1697 			xsk_enter_umem_offsets(&off_v1.fr);
1698 			xsk_enter_umem_offsets(&off_v1.cr);
1699 
1700 			len = sizeof(off_v1);
1701 			to_copy = &off_v1;
1702 		}
1703 
1704 		if (copy_to_user(optval, to_copy, len))
1705 			return -EFAULT;
1706 		if (put_user(len, optlen))
1707 			return -EFAULT;
1708 
1709 		return 0;
1710 	}
1711 	case XDP_OPTIONS:
1712 	{
1713 		struct xdp_options opts = {};
1714 
1715 		if (len < sizeof(opts))
1716 			return -EINVAL;
1717 
1718 		mutex_lock(&xs->mutex);
1719 		if (xs->zc)
1720 			opts.flags |= XDP_OPTIONS_ZEROCOPY;
1721 		mutex_unlock(&xs->mutex);
1722 
1723 		len = sizeof(opts);
1724 		if (copy_to_user(optval, &opts, len))
1725 			return -EFAULT;
1726 		if (put_user(len, optlen))
1727 			return -EFAULT;
1728 
1729 		return 0;
1730 	}
1731 	default:
1732 		break;
1733 	}
1734 
1735 	return -EOPNOTSUPP;
1736 }
1737 
1738 static int xsk_mmap(struct file *file, struct socket *sock,
1739 		    struct vm_area_struct *vma)
1740 {
1741 	loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
1742 	unsigned long size = vma->vm_end - vma->vm_start;
1743 	struct xdp_sock *xs = xdp_sk(sock->sk);
1744 	int state = READ_ONCE(xs->state);
1745 	struct xsk_queue *q = NULL;
1746 
1747 	if (state != XSK_READY && state != XSK_BOUND)
1748 		return -EBUSY;
1749 
1750 	if (offset == XDP_PGOFF_RX_RING) {
1751 		q = READ_ONCE(xs->rx);
1752 	} else if (offset == XDP_PGOFF_TX_RING) {
1753 		q = READ_ONCE(xs->tx);
1754 	} else {
1755 		/* Matches the smp_wmb() in XDP_UMEM_REG */
1756 		smp_rmb();
1757 		if (offset == XDP_UMEM_PGOFF_FILL_RING)
1758 			q = state == XSK_READY ? READ_ONCE(xs->fq_tmp) :
1759 						 READ_ONCE(xs->pool->fq);
1760 		else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
1761 			q = state == XSK_READY ? READ_ONCE(xs->cq_tmp) :
1762 						 READ_ONCE(xs->pool->cq);
1763 	}
1764 
1765 	if (!q)
1766 		return -EINVAL;
1767 
1768 	/* Matches the smp_wmb() in xsk_init_queue */
1769 	smp_rmb();
1770 	if (size > q->ring_vmalloc_size)
1771 		return -EINVAL;
1772 
1773 	return remap_vmalloc_range(vma, q->ring, 0);
1774 }
1775 
1776 static int xsk_notifier(struct notifier_block *this,
1777 			unsigned long msg, void *ptr)
1778 {
1779 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1780 	struct net *net = dev_net(dev);
1781 	struct sock *sk;
1782 
1783 	switch (msg) {
1784 	case NETDEV_UNREGISTER:
1785 		mutex_lock(&net->xdp.lock);
1786 		sk_for_each(sk, &net->xdp.list) {
1787 			struct xdp_sock *xs = xdp_sk(sk);
1788 
1789 			mutex_lock(&xs->mutex);
1790 			if (xs->dev == dev) {
1791 				sk->sk_err = ENETDOWN;
1792 				if (!sock_flag(sk, SOCK_DEAD))
1793 					sk_error_report(sk);
1794 
1795 				xsk_unbind_dev(xs);
1796 
1797 				/* Clear device references. */
1798 				xp_clear_dev(xs->pool);
1799 			}
1800 			mutex_unlock(&xs->mutex);
1801 		}
1802 		mutex_unlock(&net->xdp.lock);
1803 		break;
1804 	}
1805 	return NOTIFY_DONE;
1806 }
1807 
1808 static struct proto xsk_proto = {
1809 	.name =		"XDP",
1810 	.owner =	THIS_MODULE,
1811 	.obj_size =	sizeof(struct xdp_sock),
1812 };
1813 
1814 static const struct proto_ops xsk_proto_ops = {
1815 	.family		= PF_XDP,
1816 	.owner		= THIS_MODULE,
1817 	.release	= xsk_release,
1818 	.bind		= xsk_bind,
1819 	.connect	= sock_no_connect,
1820 	.socketpair	= sock_no_socketpair,
1821 	.accept		= sock_no_accept,
1822 	.getname	= sock_no_getname,
1823 	.poll		= xsk_poll,
1824 	.ioctl		= sock_no_ioctl,
1825 	.listen		= sock_no_listen,
1826 	.shutdown	= sock_no_shutdown,
1827 	.setsockopt	= xsk_setsockopt,
1828 	.getsockopt	= xsk_getsockopt,
1829 	.sendmsg	= xsk_sendmsg,
1830 	.recvmsg	= xsk_recvmsg,
1831 	.mmap		= xsk_mmap,
1832 };
1833 
1834 static void xsk_destruct(struct sock *sk)
1835 {
1836 	struct xdp_sock *xs = xdp_sk(sk);
1837 
1838 	if (!sock_flag(sk, SOCK_DEAD))
1839 		return;
1840 
1841 	if (!xp_put_pool(xs->pool))
1842 		xdp_put_umem(xs->umem, !xs->pool);
1843 }
1844 
1845 static int xsk_create(struct net *net, struct socket *sock, int protocol,
1846 		      int kern)
1847 {
1848 	struct xdp_sock *xs;
1849 	struct sock *sk;
1850 
1851 	if (!ns_capable(net->user_ns, CAP_NET_RAW))
1852 		return -EPERM;
1853 	if (sock->type != SOCK_RAW)
1854 		return -ESOCKTNOSUPPORT;
1855 
1856 	if (protocol)
1857 		return -EPROTONOSUPPORT;
1858 
1859 	sock->state = SS_UNCONNECTED;
1860 
1861 	sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
1862 	if (!sk)
1863 		return -ENOBUFS;
1864 
1865 	sock->ops = &xsk_proto_ops;
1866 
1867 	sock_init_data(sock, sk);
1868 
1869 	sk->sk_family = PF_XDP;
1870 
1871 	sk->sk_destruct = xsk_destruct;
1872 
1873 	sock_set_flag(sk, SOCK_RCU_FREE);
1874 
1875 	xs = xdp_sk(sk);
1876 	xs->state = XSK_READY;
1877 	xs->max_tx_budget = TX_BATCH_SIZE;
1878 	mutex_init(&xs->mutex);
1879 
1880 	INIT_LIST_HEAD(&xs->map_list);
1881 	spin_lock_init(&xs->map_list_lock);
1882 
1883 	mutex_lock(&net->xdp.lock);
1884 	sk_add_node_rcu(sk, &net->xdp.list);
1885 	mutex_unlock(&net->xdp.lock);
1886 
1887 	sock_prot_inuse_add(net, &xsk_proto, 1);
1888 
1889 	return 0;
1890 }
1891 
1892 static const struct net_proto_family xsk_family_ops = {
1893 	.family = PF_XDP,
1894 	.create = xsk_create,
1895 	.owner	= THIS_MODULE,
1896 };
1897 
1898 static struct notifier_block xsk_netdev_notifier = {
1899 	.notifier_call	= xsk_notifier,
1900 };
1901 
1902 static int __net_init xsk_net_init(struct net *net)
1903 {
1904 	mutex_init(&net->xdp.lock);
1905 	INIT_HLIST_HEAD(&net->xdp.list);
1906 	return 0;
1907 }
1908 
1909 static void __net_exit xsk_net_exit(struct net *net)
1910 {
1911 	WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
1912 }
1913 
1914 static struct pernet_operations xsk_net_ops = {
1915 	.init = xsk_net_init,
1916 	.exit = xsk_net_exit,
1917 };
1918 
1919 static int __init xsk_init(void)
1920 {
1921 	int err;
1922 
1923 	err = proto_register(&xsk_proto, 0 /* no slab */);
1924 	if (err)
1925 		goto out;
1926 
1927 	err = sock_register(&xsk_family_ops);
1928 	if (err)
1929 		goto out_proto;
1930 
1931 	err = register_pernet_subsys(&xsk_net_ops);
1932 	if (err)
1933 		goto out_sk;
1934 
1935 	err = register_netdevice_notifier(&xsk_netdev_notifier);
1936 	if (err)
1937 		goto out_pernet;
1938 
1939 	xsk_tx_generic_cache = kmem_cache_create("xsk_generic_xmit_cache",
1940 						 sizeof(struct xsk_addrs),
1941 						 0, SLAB_HWCACHE_ALIGN, NULL);
1942 	if (!xsk_tx_generic_cache) {
1943 		err = -ENOMEM;
1944 		goto out_unreg_notif;
1945 	}
1946 
1947 	return 0;
1948 
1949 out_unreg_notif:
1950 	unregister_netdevice_notifier(&xsk_netdev_notifier);
1951 out_pernet:
1952 	unregister_pernet_subsys(&xsk_net_ops);
1953 out_sk:
1954 	sock_unregister(PF_XDP);
1955 out_proto:
1956 	proto_unregister(&xsk_proto);
1957 out:
1958 	return err;
1959 }
1960 
1961 fs_initcall(xsk_init);
1962