1 // SPDX-License-Identifier: GPL-2.0
2 /* XDP sockets
3  *
4  * AF_XDP sockets provide a channel between XDP programs and userspace
5  * applications.
6  * Copyright(c) 2018 Intel Corporation.
7  *
8  * Author(s): Björn Töpel <bjorn.topel@intel.com>
9  *	      Magnus Karlsson <magnus.karlsson@intel.com>
10  */
11 
12 #define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__
13 
14 #include <linux/if_xdp.h>
15 #include <linux/init.h>
16 #include <linux/sched/mm.h>
17 #include <linux/sched/signal.h>
18 #include <linux/sched/task.h>
19 #include <linux/socket.h>
20 #include <linux/file.h>
21 #include <linux/uaccess.h>
22 #include <linux/net.h>
23 #include <linux/netdevice.h>
24 #include <linux/rculist.h>
25 #include <linux/vmalloc.h>
26 #include <net/xdp_sock_drv.h>
27 #include <net/busy_poll.h>
28 #include <net/netdev_lock.h>
29 #include <net/netdev_rx_queue.h>
30 #include <net/xdp.h>
31 
32 #include "xsk_queue.h"
33 #include "xdp_umem.h"
34 #include "xsk.h"
35 
36 #define TX_BATCH_SIZE 32
37 #define MAX_PER_SOCKET_BUDGET 32
38 
39 struct xsk_addrs {
40 	u32 num_descs;
41 	u64 addrs[MAX_SKB_FRAGS + 1];
42 };
43 
44 static struct kmem_cache *xsk_tx_generic_cache;
45 
46 void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
47 {
48 	if (pool->cached_need_wakeup & XDP_WAKEUP_RX)
49 		return;
50 
51 	pool->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
52 	pool->cached_need_wakeup |= XDP_WAKEUP_RX;
53 }
54 EXPORT_SYMBOL(xsk_set_rx_need_wakeup);
55 
56 void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
57 {
58 	struct xdp_sock *xs;
59 
60 	if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
61 		return;
62 
63 	rcu_read_lock();
64 	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
65 		xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
66 	}
67 	rcu_read_unlock();
68 
69 	pool->cached_need_wakeup |= XDP_WAKEUP_TX;
70 }
71 EXPORT_SYMBOL(xsk_set_tx_need_wakeup);
72 
73 void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
74 {
75 	if (!(pool->cached_need_wakeup & XDP_WAKEUP_RX))
76 		return;
77 
78 	pool->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
79 	pool->cached_need_wakeup &= ~XDP_WAKEUP_RX;
80 }
81 EXPORT_SYMBOL(xsk_clear_rx_need_wakeup);
82 
83 void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
84 {
85 	struct xdp_sock *xs;
86 
87 	if (!(pool->cached_need_wakeup & XDP_WAKEUP_TX))
88 		return;
89 
90 	rcu_read_lock();
91 	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
92 		xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
93 	}
94 	rcu_read_unlock();
95 
96 	pool->cached_need_wakeup &= ~XDP_WAKEUP_TX;
97 }
98 EXPORT_SYMBOL(xsk_clear_tx_need_wakeup);
99 
100 bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
101 {
102 	return pool->uses_need_wakeup;
103 }
104 EXPORT_SYMBOL(xsk_uses_need_wakeup);
105 
106 struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
107 					    u16 queue_id)
108 {
109 	if (queue_id < dev->real_num_rx_queues)
110 		return dev->_rx[queue_id].pool;
111 	if (queue_id < dev->real_num_tx_queues)
112 		return dev->_tx[queue_id].pool;
113 
114 	return NULL;
115 }
116 EXPORT_SYMBOL(xsk_get_pool_from_qid);
117 
118 void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
119 {
120 	if (queue_id < dev->num_rx_queues)
121 		dev->_rx[queue_id].pool = NULL;
122 	if (queue_id < dev->num_tx_queues)
123 		dev->_tx[queue_id].pool = NULL;
124 }
125 
126 /* The buffer pool is stored in both the _rx struct and the _tx struct because
127  * we do not know whether the device has more Tx queues than Rx, or the opposite.
128  * This might also change at runtime.
129  */
130 int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
131 			u16 queue_id)
132 {
133 	if (queue_id >= max_t(unsigned int,
134 			      dev->real_num_rx_queues,
135 			      dev->real_num_tx_queues))
136 		return -EINVAL;
137 
138 	if (queue_id < dev->real_num_rx_queues)
139 		dev->_rx[queue_id].pool = pool;
140 	if (queue_id < dev->real_num_tx_queues)
141 		dev->_tx[queue_id].pool = pool;
142 
143 	return 0;
144 }
145 
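/* For example, on a device with 8 real Rx queues and 4 real Tx queues,
 * registering a pool at queue_id 6 sets only dev->_rx[6].pool, while
 * queue_id 9 is rejected with -EINVAL.
 */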
146 static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff_xsk *xskb, u32 len,
147 			u32 flags)
148 {
149 	u64 addr;
150 	int err;
151 
152 	addr = xp_get_handle(xskb, xskb->pool);
153 	err = xskq_prod_reserve_desc(xs->rx, addr, len, flags);
154 	if (err) {
155 		xs->rx_queue_full++;
156 		return err;
157 	}
158 
159 	xp_release(xskb);
160 	return 0;
161 }
162 
163 static int xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
164 {
165 	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
166 	u32 frags = xdp_buff_has_frags(xdp);
167 	struct xdp_buff_xsk *pos, *tmp;
168 	struct list_head *xskb_list;
169 	u32 contd = 0;
170 	u32 num_desc;
171 	int err;
172 
173 	if (likely(!frags)) {
174 		err = __xsk_rcv_zc(xs, xskb, len, contd);
175 		if (err)
176 			goto err;
177 		return 0;
178 	}
179 
180 	contd = XDP_PKT_CONTD;
181 	num_desc = xdp_get_shared_info_from_buff(xdp)->nr_frags + 1;
182 	if (xskq_prod_nb_free(xs->rx, num_desc) < num_desc) {
183 		xs->rx_queue_full++;
184 		err = -ENOBUFS;
185 		goto err;
186 	}
187 
188 	__xsk_rcv_zc(xs, xskb, len, contd);
189 	xskb_list = &xskb->pool->xskb_list;
190 	list_for_each_entry_safe(pos, tmp, xskb_list, list_node) {
191 		if (list_is_singular(xskb_list))
192 			contd = 0;
193 		len = pos->xdp.data_end - pos->xdp.data;
194 		__xsk_rcv_zc(xs, pos, len, contd);
195 		list_del_init(&pos->list_node);
196 	}
197 
198 	return 0;
199 err:
200 	xsk_buff_free(xdp);
201 	return err;
202 }
203 
204 static void *xsk_copy_xdp_start(struct xdp_buff *from)
205 {
206 	if (unlikely(xdp_data_meta_unsupported(from)))
207 		return from->data;
208 	else
209 		return from->data_meta;
210 }
211 
212 static u32 xsk_copy_xdp(void *to, void **from, u32 to_len,
213 			u32 *from_len, skb_frag_t **frag, u32 rem)
214 {
215 	u32 copied = 0;
216 
217 	while (1) {
218 		u32 copy_len = min_t(u32, *from_len, to_len);
219 
220 		memcpy(to, *from, copy_len);
221 		copied += copy_len;
222 		if (rem == copied)
223 			return copied;
224 
225 		if (*from_len == copy_len) {
226 			*from = skb_frag_address(*frag);
227 			*from_len = skb_frag_size((*frag)++);
228 		} else {
229 			*from += copy_len;
230 			*from_len -= copy_len;
231 		}
232 		if (to_len == copy_len)
233 			return copied;
234 
235 		to_len -= copy_len;
236 		to += copy_len;
237 	}
238 }
239 
240 static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
241 {
242 	u32 frame_size = xsk_pool_get_rx_frame_size(xs->pool);
243 	void *copy_from = xsk_copy_xdp_start(xdp), *copy_to;
244 	u32 from_len, meta_len, rem, num_desc;
245 	struct xdp_buff_xsk *xskb;
246 	struct xdp_buff *xsk_xdp;
247 	skb_frag_t *frag;
248 
249 	from_len = xdp->data_end - copy_from;
250 	meta_len = xdp->data - copy_from;
251 	rem = len + meta_len;
252 
253 	if (len <= frame_size && !xdp_buff_has_frags(xdp)) {
254 		int err;
255 
256 		xsk_xdp = xsk_buff_alloc(xs->pool);
257 		if (!xsk_xdp) {
258 			xs->rx_dropped++;
259 			return -ENOMEM;
260 		}
261 		memcpy(xsk_xdp->data - meta_len, copy_from, rem);
262 		xskb = container_of(xsk_xdp, struct xdp_buff_xsk, xdp);
263 		err = __xsk_rcv_zc(xs, xskb, len, 0);
264 		if (err) {
265 			xsk_buff_free(xsk_xdp);
266 			return err;
267 		}
268 
269 		return 0;
270 	}
271 
272 	num_desc = (len - 1) / frame_size + 1;
273 
274 	if (!xsk_buff_can_alloc(xs->pool, num_desc)) {
275 		xs->rx_dropped++;
276 		return -ENOMEM;
277 	}
278 	if (xskq_prod_nb_free(xs->rx, num_desc) < num_desc) {
279 		xs->rx_queue_full++;
280 		return -ENOBUFS;
281 	}
282 
283 	if (xdp_buff_has_frags(xdp)) {
284 		struct skb_shared_info *sinfo;
285 
286 		sinfo = xdp_get_shared_info_from_buff(xdp);
287 		frag = &sinfo->frags[0];
288 	}
289 
290 	do {
291 		u32 to_len = frame_size + meta_len;
292 		u32 copied;
293 
294 		xsk_xdp = xsk_buff_alloc(xs->pool);
295 		copy_to = xsk_xdp->data - meta_len;
296 
297 		copied = xsk_copy_xdp(copy_to, &copy_from, to_len, &from_len, &frag, rem);
298 		rem -= copied;
299 
300 		xskb = container_of(xsk_xdp, struct xdp_buff_xsk, xdp);
301 		__xsk_rcv_zc(xs, xskb, copied - meta_len, rem ? XDP_PKT_CONTD : 0);
302 		meta_len = 0;
303 	} while (rem);
304 
305 	return 0;
306 }
307 
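/* Worked example for the copy path above, assuming a 4096-byte rx frame
 * size: a 9000-byte multi-buffer packet gives
 * num_desc = (9000 - 1) / 4096 + 1 = 3 descriptors. The first copy also
 * carries meta_len bytes of metadata in front of xdp->data, and every
 * descriptor except the last is flagged XDP_PKT_CONTD.
 */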
308 static bool xsk_tx_writeable(struct xdp_sock *xs)
309 {
310 	if (xskq_cons_present_entries(xs->tx) > xs->tx->nentries / 2)
311 		return false;
312 
313 	return true;
314 }
315 
316 static void __xsk_tx_release(struct xdp_sock *xs)
317 {
318 	__xskq_cons_release(xs->tx);
319 	if (xsk_tx_writeable(xs))
320 		xs->sk.sk_write_space(&xs->sk);
321 }
322 
323 static bool xsk_is_bound(struct xdp_sock *xs)
324 {
325 	if (READ_ONCE(xs->state) == XSK_BOUND) {
326 		/* Matches smp_wmb() in bind(). */
327 		smp_rmb();
328 		return true;
329 	}
330 	return false;
331 }
332 
333 static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
334 {
335 	if (!xsk_is_bound(xs))
336 		return -ENXIO;
337 
338 	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
339 		return -EINVAL;
340 
341 	if (len > xsk_pool_get_rx_frame_size(xs->pool) && !xs->sg) {
342 		xs->rx_dropped++;
343 		return -ENOSPC;
344 	}
345 
346 	return 0;
347 }
348 
349 static void xsk_flush(struct xdp_sock *xs)
350 {
351 	xskq_prod_submit(xs->rx);
352 	__xskq_cons_release(xs->pool->fq);
353 	sock_def_readable(&xs->sk);
354 }
355 
356 int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
357 {
358 	u32 len = xdp_get_buff_len(xdp);
359 	int err;
360 
361 	err = xsk_rcv_check(xs, xdp, len);
362 	if (!err) {
363 		spin_lock_bh(&xs->pool->rx_lock);
364 		err = __xsk_rcv(xs, xdp, len);
365 		xsk_flush(xs);
366 		spin_unlock_bh(&xs->pool->rx_lock);
367 	}
368 
369 	return err;
370 }
371 
372 static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
373 {
374 	u32 len = xdp_get_buff_len(xdp);
375 	int err;
376 
377 	err = xsk_rcv_check(xs, xdp, len);
378 	if (err)
379 		return err;
380 
381 	if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) {
382 		len = xdp->data_end - xdp->data;
383 		return xsk_rcv_zc(xs, xdp, len);
384 	}
385 
386 	err = __xsk_rcv(xs, xdp, len);
387 	if (!err)
388 		xdp_return_buff(xdp);
389 	return err;
390 }
391 
392 int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
393 {
394 	int err;
395 
396 	err = xsk_rcv(xs, xdp);
397 	if (err)
398 		return err;
399 
400 	if (!xs->flush_node.prev) {
401 		struct list_head *flush_list = bpf_net_ctx_get_xskmap_flush_list();
402 
403 		list_add(&xs->flush_node, flush_list);
404 	}
405 
406 	return 0;
407 }
408 
409 void __xsk_map_flush(struct list_head *flush_list)
410 {
411 	struct xdp_sock *xs, *tmp;
412 
413 	list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
414 		xsk_flush(xs);
415 		__list_del_clearprev(&xs->flush_node);
416 	}
417 }
418 
419 void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
420 {
421 	xskq_prod_submit_n(pool->cq, nb_entries);
422 }
423 EXPORT_SYMBOL(xsk_tx_completed);
424 
425 void xsk_tx_release(struct xsk_buff_pool *pool)
426 {
427 	struct xdp_sock *xs;
428 
429 	rcu_read_lock();
430 	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list)
431 		__xsk_tx_release(xs);
432 	rcu_read_unlock();
433 }
434 EXPORT_SYMBOL(xsk_tx_release);
435 
436 bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc)
437 {
438 	bool budget_exhausted = false;
439 	struct xdp_sock *xs;
440 
441 	rcu_read_lock();
442 again:
443 	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
444 		if (xs->tx_budget_spent >= MAX_PER_SOCKET_BUDGET) {
445 			budget_exhausted = true;
446 			continue;
447 		}
448 
449 		if (!xskq_cons_peek_desc(xs->tx, desc, pool)) {
450 			if (xskq_has_descs(xs->tx))
451 				xskq_cons_release(xs->tx);
452 			continue;
453 		}
454 
455 		xs->tx_budget_spent++;
456 
457 		/* This is the backpressure mechanism for the Tx path.
458 		 * Reserve space in the completion queue and only proceed
459 		 * if there is space in it. This avoids having to implement
460 		 * any buffering in the Tx path.
461 		 */
462 		if (xskq_prod_reserve_addr(pool->cq, desc->addr))
463 			goto out;
464 
465 		xskq_cons_release(xs->tx);
466 		rcu_read_unlock();
467 		return true;
468 	}
469 
470 	if (budget_exhausted) {
471 		list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list)
472 			xs->tx_budget_spent = 0;
473 
474 		budget_exhausted = false;
475 		goto again;
476 	}
477 
478 out:
479 	rcu_read_unlock();
480 	return false;
481 }
482 EXPORT_SYMBOL(xsk_tx_peek_desc);
483 
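/* Fairness note for xsk_tx_peek_desc() above: each socket sharing the pool
 * is capped at MAX_PER_SOCKET_BUDGET descriptors per scan of xsk_tx_list.
 * Only when all remaining work sits behind exhausted budgets are the
 * budgets reset and the scan restarted, so one busy socket cannot starve
 * the others.
 */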
484 static u32 xsk_tx_peek_release_fallback(struct xsk_buff_pool *pool, u32 max_entries)
485 {
486 	struct xdp_desc *descs = pool->tx_descs;
487 	u32 nb_pkts = 0;
488 
489 	while (nb_pkts < max_entries && xsk_tx_peek_desc(pool, &descs[nb_pkts]))
490 		nb_pkts++;
491 
492 	xsk_tx_release(pool);
493 	return nb_pkts;
494 }
495 
496 u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 nb_pkts)
497 {
498 	struct xdp_sock *xs;
499 
500 	rcu_read_lock();
501 	if (!list_is_singular(&pool->xsk_tx_list)) {
502 		/* Fallback to the non-batched version */
503 		rcu_read_unlock();
504 		return xsk_tx_peek_release_fallback(pool, nb_pkts);
505 	}
506 
507 	xs = list_first_or_null_rcu(&pool->xsk_tx_list, struct xdp_sock, tx_list);
508 	if (!xs) {
509 		nb_pkts = 0;
510 		goto out;
511 	}
512 
513 	nb_pkts = xskq_cons_nb_entries(xs->tx, nb_pkts);
514 
515 	/* This is the backpressure mechanism for the Tx path. Try to
516 	 * reserve space in the completion queue for all packets, but
517 	 * if there are fewer slots available, just process that many
518 	 * packets. This avoids having to implement any buffering in
519 	 * the Tx path.
520 	 */
521 	nb_pkts = xskq_prod_nb_free(pool->cq, nb_pkts);
522 	if (!nb_pkts)
523 		goto out;
524 
525 	nb_pkts = xskq_cons_read_desc_batch(xs->tx, pool, nb_pkts);
526 	if (!nb_pkts) {
527 		xs->tx->queue_empty_descs++;
528 		goto out;
529 	}
530 
531 	__xskq_cons_release(xs->tx);
532 	xskq_prod_write_addr_batch(pool->cq, pool->tx_descs, nb_pkts);
533 	xs->sk.sk_write_space(&xs->sk);
534 
535 out:
536 	rcu_read_unlock();
537 	return nb_pkts;
538 }
539 EXPORT_SYMBOL(xsk_tx_peek_release_desc_batch);
540 
541 static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
542 {
543 	struct net_device *dev = xs->dev;
544 
545 	return dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
546 }
547 
548 static int xsk_cq_reserve_locked(struct xsk_buff_pool *pool)
549 {
550 	int ret;
551 
552 	spin_lock(&pool->cq->cq_cached_prod_lock);
553 	ret = xskq_prod_reserve(pool->cq);
554 	spin_unlock(&pool->cq->cq_cached_prod_lock);
555 
556 	return ret;
557 }
558 
559 static bool xsk_skb_destructor_is_addr(struct sk_buff *skb)
560 {
561 	return (uintptr_t)skb_shinfo(skb)->destructor_arg & 0x1UL;
562 }
563 
564 static u64 xsk_skb_destructor_get_addr(struct sk_buff *skb)
565 {
566 	return (u64)((uintptr_t)skb_shinfo(skb)->destructor_arg & ~0x1UL);
567 }
568 
569 static void xsk_skb_destructor_set_addr(struct sk_buff *skb, u64 addr)
570 {
571 	skb_shinfo(skb)->destructor_arg = (void *)((uintptr_t)addr | 0x1UL);
572 }
573 
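/* Minimal sketch (illustrative values only) of the destructor_arg encoding
 * implemented by the helpers above: bit 0 discriminates between a raw umem
 * address and a pointer to a kmem_cache-allocated struct xsk_addrs, whose
 * alignment guarantees that bit 0 of a real pointer is clear.
 *
 *	shinfo->destructor_arg = (void *)(0x1000UL | 0x1UL);	// single desc, tagged addr
 *	shinfo->destructor_arg = (void *)xsk_addr;		// multi desc, plain pointer
 */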
574 static void xsk_inc_num_desc(struct sk_buff *skb)
575 {
576 	struct xsk_addrs *xsk_addr;
577 
578 	if (!xsk_skb_destructor_is_addr(skb)) {
579 		xsk_addr = (struct xsk_addrs *)skb_shinfo(skb)->destructor_arg;
580 		xsk_addr->num_descs++;
581 	}
582 }
583 
584 static u32 xsk_get_num_desc(struct sk_buff *skb)
585 {
586 	struct xsk_addrs *xsk_addr;
587 
588 	if (xsk_skb_destructor_is_addr(skb))
589 		return 1;
590 
591 	xsk_addr = (struct xsk_addrs *)skb_shinfo(skb)->destructor_arg;
592 
593 	return xsk_addr->num_descs;
594 }
595 
596 static void xsk_cq_submit_addr_locked(struct xsk_buff_pool *pool,
597 				      struct sk_buff *skb)
598 {
599 	u32 num_descs = xsk_get_num_desc(skb);
600 	struct xsk_addrs *xsk_addr;
601 	u32 descs_processed = 0;
602 	unsigned long flags;
603 	u32 idx, i;
604 
605 	spin_lock_irqsave(&pool->cq_prod_lock, flags);
606 	idx = xskq_get_prod(pool->cq);
607 
608 	if (unlikely(num_descs > 1)) {
609 		xsk_addr = (struct xsk_addrs *)skb_shinfo(skb)->destructor_arg;
610 
611 		for (i = 0; i < num_descs; i++) {
612 			xskq_prod_write_addr(pool->cq, idx + descs_processed,
613 					     xsk_addr->addrs[i]);
614 			descs_processed++;
615 		}
616 		kmem_cache_free(xsk_tx_generic_cache, xsk_addr);
617 	} else {
618 		xskq_prod_write_addr(pool->cq, idx,
619 				     xsk_skb_destructor_get_addr(skb));
620 		descs_processed++;
621 	}
622 	xskq_prod_submit_n(pool->cq, descs_processed);
623 	spin_unlock_irqrestore(&pool->cq_prod_lock, flags);
624 }
625 
626 static void xsk_cq_cancel_locked(struct xsk_buff_pool *pool, u32 n)
627 {
628 	spin_lock(&pool->cq->cq_cached_prod_lock);
629 	xskq_prod_cancel_n(pool->cq, n);
630 	spin_unlock(&pool->cq->cq_cached_prod_lock);
631 }
632 
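/* Lifecycle of a completion-queue slot in copy mode, tying together the
 * helpers above and xsk_destruct_skb() below: __xsk_generic_xmit() reserves
 * one slot per descriptor with xsk_cq_reserve_locked() before building the
 * skb; on successful transmission the skb destructor publishes the umem
 * address(es) with xsk_cq_submit_addr_locked(); on failure the reservation
 * is rolled back with xsk_cq_cancel_locked().
 */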
633 INDIRECT_CALLABLE_SCOPE
634 void xsk_destruct_skb(struct sk_buff *skb)
635 {
636 	struct xsk_tx_metadata_compl *compl = &skb_shinfo(skb)->xsk_meta;
637 
638 	if (compl->tx_timestamp) {
639 		/* sw completion timestamp, not a real one */
640 		*compl->tx_timestamp = ktime_get_tai_fast_ns();
641 	}
642 
643 	xsk_cq_submit_addr_locked(xdp_sk(skb->sk)->pool, skb);
644 	sock_wfree(skb);
645 }
646 
647 static void xsk_skb_init_misc(struct sk_buff *skb, struct xdp_sock *xs,
648 			      u64 addr)
649 {
650 	skb->dev = xs->dev;
651 	skb->priority = READ_ONCE(xs->sk.sk_priority);
652 	skb->mark = READ_ONCE(xs->sk.sk_mark);
653 	skb->destructor = xsk_destruct_skb;
654 	xsk_skb_destructor_set_addr(skb, addr);
655 }
656 
657 static void xsk_consume_skb(struct sk_buff *skb)
658 {
659 	struct xdp_sock *xs = xdp_sk(skb->sk);
660 	u32 num_descs = xsk_get_num_desc(skb);
661 	struct xsk_addrs *xsk_addr;
662 
663 	if (unlikely(num_descs > 1)) {
664 		xsk_addr = (struct xsk_addrs *)skb_shinfo(skb)->destructor_arg;
665 		kmem_cache_free(xsk_tx_generic_cache, xsk_addr);
666 	}
667 
668 	skb->destructor = sock_wfree;
669 	xsk_cq_cancel_locked(xs->pool, num_descs);
670 	/* Free skb without triggering the perf drop trace */
671 	consume_skb(skb);
672 	xs->skb = NULL;
673 }
674 
675 static void xsk_drop_skb(struct sk_buff *skb)
676 {
677 	xdp_sk(skb->sk)->tx->invalid_descs += xsk_get_num_desc(skb);
678 	xsk_consume_skb(skb);
679 }
680 
681 static int xsk_skb_metadata(struct sk_buff *skb, void *buffer,
682 			    struct xdp_desc *desc, struct xsk_buff_pool *pool,
683 			    u32 hr)
684 {
685 	struct xsk_tx_metadata *meta = NULL;
686 
687 	if (unlikely(pool->tx_metadata_len == 0))
688 		return -EINVAL;
689 
690 	meta = buffer - pool->tx_metadata_len;
691 	if (unlikely(!xsk_buff_valid_tx_metadata(meta)))
692 		return -EINVAL;
693 
694 	if (meta->flags & XDP_TXMD_FLAGS_CHECKSUM) {
695 		if (unlikely(meta->request.csum_start +
696 			     meta->request.csum_offset +
697 			     sizeof(__sum16) > desc->len))
698 			return -EINVAL;
699 
700 		skb->csum_start = hr + meta->request.csum_start;
701 		skb->csum_offset = meta->request.csum_offset;
702 		skb->ip_summed = CHECKSUM_PARTIAL;
703 
704 		if (unlikely(pool->tx_sw_csum)) {
705 			int err;
706 
707 			err = skb_checksum_help(skb);
708 			if (err)
709 				return err;
710 		}
711 	}
712 
713 	if (meta->flags & XDP_TXMD_FLAGS_LAUNCH_TIME)
714 		skb->skb_mstamp_ns = meta->request.launch_time;
715 	xsk_tx_metadata_to_compl(meta, &skb_shinfo(skb)->xsk_meta);
716 
717 	return 0;
718 }
719 
720 static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs,
721 					      struct xdp_desc *desc)
722 {
723 	struct xsk_buff_pool *pool = xs->pool;
724 	u32 hr, len, ts, offset, copy, copied;
725 	struct sk_buff *skb = xs->skb;
726 	struct page *page;
727 	void *buffer;
728 	int err, i;
729 	u64 addr;
730 
731 	addr = desc->addr;
732 	buffer = xsk_buff_raw_get_data(pool, addr);
733 
734 	if (!skb) {
735 		hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(xs->dev->needed_headroom));
736 
737 		skb = sock_alloc_send_skb(&xs->sk, hr, 1, &err);
738 		if (unlikely(!skb))
739 			return ERR_PTR(err);
740 
741 		skb_reserve(skb, hr);
742 
743 		xsk_skb_init_misc(skb, xs, desc->addr);
744 		if (desc->options & XDP_TX_METADATA) {
745 			err = xsk_skb_metadata(skb, buffer, desc, pool, hr);
746 			if (unlikely(err))
747 				return ERR_PTR(err);
748 		}
749 	} else {
750 		struct xsk_addrs *xsk_addr;
751 
752 		if (xsk_skb_destructor_is_addr(skb)) {
753 			xsk_addr = kmem_cache_zalloc(xsk_tx_generic_cache,
754 						     GFP_KERNEL);
755 			if (!xsk_addr)
756 				return ERR_PTR(-ENOMEM);
757 
758 			xsk_addr->num_descs = 1;
759 			xsk_addr->addrs[0] = xsk_skb_destructor_get_addr(skb);
760 			skb_shinfo(skb)->destructor_arg = (void *)xsk_addr;
761 		} else {
762 			xsk_addr = (struct xsk_addrs *)skb_shinfo(skb)->destructor_arg;
763 		}
764 
765 		/* In case of the -EOVERFLOW that could happen below,
766 		 * xsk_consume_skb() will release this entry, as the whole skb
767 		 * would be dropped, which implies freeing all list elements.
768 		 */
769 		xsk_addr->addrs[xsk_addr->num_descs] = desc->addr;
770 	}
771 
772 	len = desc->len;
773 	ts = pool->unaligned ? len : pool->chunk_size;
774 
775 	offset = offset_in_page(buffer);
776 	addr = buffer - pool->addrs;
777 
778 	for (copied = 0, i = skb_shinfo(skb)->nr_frags; copied < len; i++) {
779 		if (unlikely(i >= MAX_SKB_FRAGS))
780 			return ERR_PTR(-EOVERFLOW);
781 
782 		page = pool->umem->pgs[addr >> PAGE_SHIFT];
783 		get_page(page);
784 
785 		copy = min_t(u32, PAGE_SIZE - offset, len - copied);
786 		skb_fill_page_desc(skb, i, page, offset, copy);
787 
788 		copied += copy;
789 		addr += copy;
790 		offset = 0;
791 	}
792 
793 	skb->len += len;
794 	skb->data_len += len;
795 	skb->truesize += ts;
796 
797 	refcount_add(ts, &xs->sk.sk_wmem_alloc);
798 
799 	return skb;
800 }
801 
802 static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
803 				     struct xdp_desc *desc)
804 {
805 	struct net_device *dev = xs->dev;
806 	struct sk_buff *skb = xs->skb;
807 	int err;
808 
809 	if (dev->priv_flags & IFF_TX_SKB_NO_LINEAR) {
810 		skb = xsk_build_skb_zerocopy(xs, desc);
811 		if (IS_ERR(skb)) {
812 			err = PTR_ERR(skb);
813 			skb = NULL;
814 			goto free_err;
815 		}
816 	} else {
817 		u32 hr, tr, len;
818 		void *buffer;
819 
820 		buffer = xsk_buff_raw_get_data(xs->pool, desc->addr);
821 		len = desc->len;
822 
823 		if (!skb) {
824 			hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(dev->needed_headroom));
825 			tr = dev->needed_tailroom;
826 			skb = sock_alloc_send_skb(&xs->sk, hr + len + tr, 1, &err);
827 			if (unlikely(!skb))
828 				goto free_err;
829 
830 			skb_reserve(skb, hr);
831 			skb_put(skb, len);
832 
833 			err = skb_store_bits(skb, 0, buffer, len);
834 			if (unlikely(err))
835 				goto free_err;
836 
837 			xsk_skb_init_misc(skb, xs, desc->addr);
838 			if (desc->options & XDP_TX_METADATA) {
839 				err = xsk_skb_metadata(skb, buffer, desc,
840 						       xs->pool, hr);
841 				if (unlikely(err))
842 					goto free_err;
843 			}
844 		} else {
845 			int nr_frags = skb_shinfo(skb)->nr_frags;
846 			struct xsk_addrs *xsk_addr;
847 			struct page *page;
848 			u8 *vaddr;
849 
850 			if (xsk_skb_destructor_is_addr(skb)) {
851 				xsk_addr = kmem_cache_zalloc(xsk_tx_generic_cache,
852 							     GFP_KERNEL);
853 				if (!xsk_addr) {
854 					err = -ENOMEM;
855 					goto free_err;
856 				}
857 
858 				xsk_addr->num_descs = 1;
859 				xsk_addr->addrs[0] = xsk_skb_destructor_get_addr(skb);
860 				skb_shinfo(skb)->destructor_arg = (void *)xsk_addr;
861 			} else {
862 				xsk_addr = (struct xsk_addrs *)skb_shinfo(skb)->destructor_arg;
863 			}
864 
865 			if (unlikely(nr_frags == (MAX_SKB_FRAGS - 1) && xp_mb_desc(desc))) {
866 				err = -EOVERFLOW;
867 				goto free_err;
868 			}
869 
870 			page = alloc_page(xs->sk.sk_allocation);
871 			if (unlikely(!page)) {
872 				err = -EAGAIN;
873 				goto free_err;
874 			}
875 
876 			vaddr = kmap_local_page(page);
877 			memcpy(vaddr, buffer, len);
878 			kunmap_local(vaddr);
879 
880 			skb_add_rx_frag(skb, nr_frags, page, 0, len, PAGE_SIZE);
881 			refcount_add(PAGE_SIZE, &xs->sk.sk_wmem_alloc);
882 
883 			xsk_addr->addrs[xsk_addr->num_descs] = desc->addr;
884 		}
885 	}
886 
887 	xsk_inc_num_desc(skb);
888 
889 	return skb;
890 
891 free_err:
892 	if (skb && !skb_shinfo(skb)->nr_frags)
893 		kfree_skb(skb);
894 
895 	if (err == -EOVERFLOW) {
896 		/* Drop the packet */
897 		xsk_inc_num_desc(xs->skb);
898 		xsk_drop_skb(xs->skb);
899 		xskq_cons_release(xs->tx);
900 	} else {
901 		/* Let application retry */
902 		xsk_cq_cancel_locked(xs->pool, 1);
903 	}
904 
905 	return ERR_PTR(err);
906 }
907 
908 static int __xsk_generic_xmit(struct sock *sk)
909 {
910 	struct xdp_sock *xs = xdp_sk(sk);
911 	bool sent_frame = false;
912 	struct xdp_desc desc;
913 	struct sk_buff *skb;
914 	u32 max_batch;
915 	int err = 0;
916 
917 	mutex_lock(&xs->mutex);
918 
919 	/* Since we dropped the RCU read lock, the socket state might have changed. */
920 	if (unlikely(!xsk_is_bound(xs))) {
921 		err = -ENXIO;
922 		goto out;
923 	}
924 
925 	if (xs->queue_id >= xs->dev->real_num_tx_queues)
926 		goto out;
927 
928 	max_batch = READ_ONCE(xs->max_tx_budget);
929 	while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
930 		if (max_batch-- == 0) {
931 			err = -EAGAIN;
932 			goto out;
933 		}
934 
935 		/* This is the backpressure mechanism for the Tx path.
936 		 * Reserve space in the completion queue and only proceed
937 		 * if there is space in it. This avoids having to implement
938 		 * any buffering in the Tx path.
939 		 */
940 		err = xsk_cq_reserve_locked(xs->pool);
941 		if (err) {
942 			err = -EAGAIN;
943 			goto out;
944 		}
945 
946 		skb = xsk_build_skb(xs, &desc);
947 		if (IS_ERR(skb)) {
948 			err = PTR_ERR(skb);
949 			if (err != -EOVERFLOW)
950 				goto out;
951 			err = 0;
952 			continue;
953 		}
954 
955 		xskq_cons_release(xs->tx);
956 
957 		if (xp_mb_desc(&desc)) {
958 			xs->skb = skb;
959 			continue;
960 		}
961 
962 		err = __dev_direct_xmit(skb, xs->queue_id);
963 		if (err == NETDEV_TX_BUSY) {
964 			/* Tell user-space to retry the send */
965 			xskq_cons_cancel_n(xs->tx, xsk_get_num_desc(skb));
966 			xsk_consume_skb(skb);
967 			err = -EAGAIN;
968 			goto out;
969 		}
970 
971 		/* Ignore NET_XMIT_CN as packet might have been sent */
972 		if (err == NET_XMIT_DROP) {
973 			/* SKB completed but not sent */
974 			err = -EBUSY;
975 			xs->skb = NULL;
976 			goto out;
977 		}
978 
979 		sent_frame = true;
980 		xs->skb = NULL;
981 	}
982 
983 	if (xskq_has_descs(xs->tx)) {
984 		if (xs->skb)
985 			xsk_drop_skb(xs->skb);
986 		xskq_cons_release(xs->tx);
987 	}
988 
989 out:
990 	if (sent_frame)
991 		__xsk_tx_release(xs);
992 
993 	mutex_unlock(&xs->mutex);
994 	return err;
995 }
996 
997 static int xsk_generic_xmit(struct sock *sk)
998 {
999 	int ret;
1000 
1001 	/* Drop the RCU lock since the SKB path might sleep. */
1002 	rcu_read_unlock();
1003 	ret = __xsk_generic_xmit(sk);
1004 	/* Reacquire RCU lock before going into common code. */
1005 	rcu_read_lock();
1006 
1007 	return ret;
1008 }
1009 
1010 static bool xsk_no_wakeup(struct sock *sk)
1011 {
1012 #ifdef CONFIG_NET_RX_BUSY_POLL
1013 	/* Prefer busy-polling, skip the wakeup. */
1014 	return READ_ONCE(sk->sk_prefer_busy_poll) && READ_ONCE(sk->sk_ll_usec) &&
1015 		napi_id_valid(READ_ONCE(sk->sk_napi_id));
1016 #else
1017 	return false;
1018 #endif
1019 }
1020 
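/* User-space sketch (illustrative values) for opting in to the busy-poll
 * path tested above; both knobs are plain SOL_SOCKET options:
 *
 *	int one = 1, usecs = 50;
 *	setsockopt(fd, SOL_SOCKET, SO_PREFER_BUSY_POLL, &one, sizeof(one));
 *	setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL, &usecs, sizeof(usecs));
 */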
1021 static int xsk_check_common(struct xdp_sock *xs)
1022 {
1023 	if (unlikely(!xsk_is_bound(xs)))
1024 		return -ENXIO;
1025 	if (unlikely(!(xs->dev->flags & IFF_UP)))
1026 		return -ENETDOWN;
1027 
1028 	return 0;
1029 }
1030 
1031 static int __xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
1032 {
1033 	bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
1034 	struct sock *sk = sock->sk;
1035 	struct xdp_sock *xs = xdp_sk(sk);
1036 	struct xsk_buff_pool *pool;
1037 	int err;
1038 
1039 	err = xsk_check_common(xs);
1040 	if (err)
1041 		return err;
1042 	if (unlikely(need_wait))
1043 		return -EOPNOTSUPP;
1044 	if (unlikely(!xs->tx))
1045 		return -ENOBUFS;
1046 
1047 	if (sk_can_busy_loop(sk))
1048 		sk_busy_loop(sk, 1); /* only support non-blocking sockets */
1049 
1050 	if (xs->zc && xsk_no_wakeup(sk))
1051 		return 0;
1052 
1053 	pool = xs->pool;
1054 	if (pool->cached_need_wakeup & XDP_WAKEUP_TX) {
1055 		if (xs->zc)
1056 			return xsk_wakeup(xs, XDP_WAKEUP_TX);
1057 		return xsk_generic_xmit(sk);
1058 	}
1059 	return 0;
1060 }
1061 
1062 static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
1063 {
1064 	int ret;
1065 
1066 	rcu_read_lock();
1067 	ret = __xsk_sendmsg(sock, m, total_len);
1068 	rcu_read_unlock();
1069 
1070 	return ret;
1071 }
1072 
1073 static int __xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
1074 {
1075 	bool need_wait = !(flags & MSG_DONTWAIT);
1076 	struct sock *sk = sock->sk;
1077 	struct xdp_sock *xs = xdp_sk(sk);
1078 	int err;
1079 
1080 	err = xsk_check_common(xs);
1081 	if (err)
1082 		return err;
1083 	if (unlikely(!xs->rx))
1084 		return -ENOBUFS;
1085 	if (unlikely(need_wait))
1086 		return -EOPNOTSUPP;
1087 
1088 	if (sk_can_busy_loop(sk))
1089 		sk_busy_loop(sk, 1); /* only support non-blocking sockets */
1090 
1091 	if (xsk_no_wakeup(sk))
1092 		return 0;
1093 
1094 	if (xs->pool->cached_need_wakeup & XDP_WAKEUP_RX && xs->zc)
1095 		return xsk_wakeup(xs, XDP_WAKEUP_RX);
1096 	return 0;
1097 }
1098 
1099 static int xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
1100 {
1101 	int ret;
1102 
1103 	rcu_read_lock();
1104 	ret = __xsk_recvmsg(sock, m, len, flags);
1105 	rcu_read_unlock();
1106 
1107 	return ret;
1108 }
1109 
1110 static __poll_t xsk_poll(struct file *file, struct socket *sock,
1111 			     struct poll_table_struct *wait)
1112 {
1113 	__poll_t mask = 0;
1114 	struct sock *sk = sock->sk;
1115 	struct xdp_sock *xs = xdp_sk(sk);
1116 	struct xsk_buff_pool *pool;
1117 
1118 	sock_poll_wait(file, sock, wait);
1119 
1120 	rcu_read_lock();
1121 	if (xsk_check_common(xs))
1122 		goto out;
1123 
1124 	pool = xs->pool;
1125 
1126 	if (pool->cached_need_wakeup) {
1127 		if (xs->zc)
1128 			xsk_wakeup(xs, pool->cached_need_wakeup);
1129 		else if (xs->tx)
1130 			/* Poll needs to drive Tx also in copy mode */
1131 			xsk_generic_xmit(sk);
1132 	}
1133 
1134 	if (xs->rx && !xskq_prod_is_empty(xs->rx))
1135 		mask |= EPOLLIN | EPOLLRDNORM;
1136 	if (xs->tx && xsk_tx_writeable(xs))
1137 		mask |= EPOLLOUT | EPOLLWRNORM;
1138 out:
1139 	rcu_read_unlock();
1140 	return mask;
1141 }
1142 
1143 static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
1144 			  bool umem_queue)
1145 {
1146 	struct xsk_queue *q;
1147 
1148 	if (entries == 0 || *queue || !is_power_of_2(entries))
1149 		return -EINVAL;
1150 
1151 	q = xskq_create(entries, umem_queue);
1152 	if (!q)
1153 		return -ENOMEM;
1154 
1155 	/* Make sure queue is ready before it can be seen by others */
1156 	smp_wmb();
1157 	WRITE_ONCE(*queue, q);
1158 	return 0;
1159 }
1160 
1161 static void xsk_unbind_dev(struct xdp_sock *xs)
1162 {
1163 	struct net_device *dev = xs->dev;
1164 
1165 	if (xs->state != XSK_BOUND)
1166 		return;
1167 	WRITE_ONCE(xs->state, XSK_UNBOUND);
1168 
1169 	/* Wait for driver to stop using the xdp socket. */
1170 	xp_del_xsk(xs->pool, xs);
1171 	synchronize_net();
1172 	dev_put(dev);
1173 }
1174 
1175 static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs,
1176 					      struct xdp_sock __rcu ***map_entry)
1177 {
1178 	struct xsk_map *map = NULL;
1179 	struct xsk_map_node *node;
1180 
1181 	*map_entry = NULL;
1182 
1183 	spin_lock_bh(&xs->map_list_lock);
1184 	node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node,
1185 					node);
1186 	if (node) {
1187 		bpf_map_inc(&node->map->map);
1188 		map = node->map;
1189 		*map_entry = node->map_entry;
1190 	}
1191 	spin_unlock_bh(&xs->map_list_lock);
1192 	return map;
1193 }
1194 
1195 static void xsk_delete_from_maps(struct xdp_sock *xs)
1196 {
1197 	/* This function removes the current XDP socket from all the
1198 	 * maps it resides in. We need to take extra care here, due to
1199 	 * the two locks involved. Each map has a lock synchronizing
1200 	 * updates to the entries, and each socket has a lock that
1201 	 * synchronizes access to the list of maps (map_list). For
1202 	 * deadlock avoidance the locks need to be taken in the order
1203 	 * "map lock"->"socket map list lock". We start off by
1204 	 * accessing the socket map list, and take a reference to the
1205 	 * map to guarantee existence between the
1206 	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete()
1207 	 * calls. Then we ask the map to remove the socket, which
1208 	 * tries to remove the socket from the map. Note that there
1209 	 * might be updates to the map between
1210 	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete().
1211 	 */
1212 	struct xdp_sock __rcu **map_entry = NULL;
1213 	struct xsk_map *map;
1214 
1215 	while ((map = xsk_get_map_list_entry(xs, &map_entry))) {
1216 		xsk_map_try_sock_delete(map, xs, map_entry);
1217 		bpf_map_put(&map->map);
1218 	}
1219 }
1220 
1221 static int xsk_release(struct socket *sock)
1222 {
1223 	struct sock *sk = sock->sk;
1224 	struct xdp_sock *xs = xdp_sk(sk);
1225 	struct net *net;
1226 
1227 	if (!sk)
1228 		return 0;
1229 
1230 	net = sock_net(sk);
1231 
1232 	if (xs->skb)
1233 		xsk_drop_skb(xs->skb);
1234 
1235 	mutex_lock(&net->xdp.lock);
1236 	sk_del_node_init_rcu(sk);
1237 	mutex_unlock(&net->xdp.lock);
1238 
1239 	sock_prot_inuse_add(net, sk->sk_prot, -1);
1240 
1241 	xsk_delete_from_maps(xs);
1242 	mutex_lock(&xs->mutex);
1243 	xsk_unbind_dev(xs);
1244 	mutex_unlock(&xs->mutex);
1245 
1246 	xskq_destroy(xs->rx);
1247 	xskq_destroy(xs->tx);
1248 	xskq_destroy(xs->fq_tmp);
1249 	xskq_destroy(xs->cq_tmp);
1250 
1251 	sock_orphan(sk);
1252 	sock->sk = NULL;
1253 
1254 	sock_put(sk);
1255 
1256 	return 0;
1257 }
1258 
1259 static struct socket *xsk_lookup_xsk_from_fd(int fd)
1260 {
1261 	struct socket *sock;
1262 	int err;
1263 
1264 	sock = sockfd_lookup(fd, &err);
1265 	if (!sock)
1266 		return ERR_PTR(-ENOTSOCK);
1267 
1268 	if (sock->sk->sk_family != PF_XDP) {
1269 		sockfd_put(sock);
1270 		return ERR_PTR(-ENOPROTOOPT);
1271 	}
1272 
1273 	return sock;
1274 }
1275 
1276 static bool xsk_validate_queues(struct xdp_sock *xs)
1277 {
1278 	return xs->fq_tmp && xs->cq_tmp;
1279 }
1280 
1281 static int xsk_bind(struct socket *sock, struct sockaddr_unsized *addr, int addr_len)
1282 {
1283 	struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
1284 	struct sock *sk = sock->sk;
1285 	struct xdp_sock *xs = xdp_sk(sk);
1286 	struct net_device *dev;
1287 	int bound_dev_if;
1288 	u32 flags, qid;
1289 	int err = 0;
1290 
1291 	if (addr_len < sizeof(struct sockaddr_xdp))
1292 		return -EINVAL;
1293 	if (sxdp->sxdp_family != AF_XDP)
1294 		return -EINVAL;
1295 
1296 	flags = sxdp->sxdp_flags;
1297 	if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY |
1298 		      XDP_USE_NEED_WAKEUP | XDP_USE_SG))
1299 		return -EINVAL;
1300 
1301 	bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
1302 	if (bound_dev_if && bound_dev_if != sxdp->sxdp_ifindex)
1303 		return -EINVAL;
1304 
1305 	rtnl_lock();
1306 	mutex_lock(&xs->mutex);
1307 	if (xs->state != XSK_READY) {
1308 		err = -EBUSY;
1309 		goto out_release;
1310 	}
1311 
1312 	dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
1313 	if (!dev) {
1314 		err = -ENODEV;
1315 		goto out_release;
1316 	}
1317 
1318 	netdev_lock_ops(dev);
1319 
1320 	if (!xs->rx && !xs->tx) {
1321 		err = -EINVAL;
1322 		goto out_unlock;
1323 	}
1324 
1325 	qid = sxdp->sxdp_queue_id;
1326 
1327 	if (flags & XDP_SHARED_UMEM) {
1328 		struct xdp_sock *umem_xs;
1329 		struct socket *sock;
1330 
1331 		if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY) ||
1332 		    (flags & XDP_USE_NEED_WAKEUP) || (flags & XDP_USE_SG)) {
1333 			/* Cannot specify flags for shared sockets. */
1334 			err = -EINVAL;
1335 			goto out_unlock;
1336 		}
1337 
1338 		if (xs->umem) {
1339 			/* We already have our own. */
1340 			err = -EINVAL;
1341 			goto out_unlock;
1342 		}
1343 
1344 		sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
1345 		if (IS_ERR(sock)) {
1346 			err = PTR_ERR(sock);
1347 			goto out_unlock;
1348 		}
1349 
1350 		umem_xs = xdp_sk(sock->sk);
1351 		if (!xsk_is_bound(umem_xs)) {
1352 			err = -EBADF;
1353 			sockfd_put(sock);
1354 			goto out_unlock;
1355 		}
1356 
1357 		if (umem_xs->queue_id != qid || umem_xs->dev != dev) {
1358 			/* One fill and completion ring required for each queue id. */
1359 			if (!xsk_validate_queues(xs)) {
1360 				err = -EINVAL;
1361 				sockfd_put(sock);
1362 				goto out_unlock;
1363 			}
1364 
1365 			/* Share the umem with another socket on another qid
1366 			 * and/or device.
1367 			 */
1368 			xs->pool = xp_create_and_assign_umem(xs,
1369 							     umem_xs->umem);
1370 			if (!xs->pool) {
1371 				err = -ENOMEM;
1372 				sockfd_put(sock);
1373 				goto out_unlock;
1374 			}
1375 
1376 			err = xp_assign_dev_shared(xs->pool, umem_xs, dev,
1377 						   qid);
1378 			if (err) {
1379 				xp_destroy(xs->pool);
1380 				xs->pool = NULL;
1381 				sockfd_put(sock);
1382 				goto out_unlock;
1383 			}
1384 		} else {
1385 			/* Share the buffer pool with the other socket. */
1386 			if (xs->fq_tmp || xs->cq_tmp) {
1387 				/* Do not allow setting your own fq or cq. */
1388 				err = -EINVAL;
1389 				sockfd_put(sock);
1390 				goto out_unlock;
1391 			}
1392 
1393 			xp_get_pool(umem_xs->pool);
1394 			xs->pool = umem_xs->pool;
1395 
1396 			/* If the underlying shared umem was created without a Tx
1397 			 * ring, allocate the Tx descs array that the Tx batching
1398 			 * API utilizes.
1399 			 */
1400 			if (xs->tx && !xs->pool->tx_descs) {
1401 				err = xp_alloc_tx_descs(xs->pool, xs);
1402 				if (err) {
1403 					xp_put_pool(xs->pool);
1404 					xs->pool = NULL;
1405 					sockfd_put(sock);
1406 					goto out_unlock;
1407 				}
1408 			}
1409 		}
1410 
1411 		xdp_get_umem(umem_xs->umem);
1412 		WRITE_ONCE(xs->umem, umem_xs->umem);
1413 		sockfd_put(sock);
1414 	} else if (!xs->umem || !xsk_validate_queues(xs)) {
1415 		err = -EINVAL;
1416 		goto out_unlock;
1417 	} else {
1418 		/* This xsk has its own umem. */
1419 		xs->pool = xp_create_and_assign_umem(xs, xs->umem);
1420 		if (!xs->pool) {
1421 			err = -ENOMEM;
1422 			goto out_unlock;
1423 		}
1424 
1425 		err = xp_assign_dev(xs->pool, dev, qid, flags);
1426 		if (err) {
1427 			xp_destroy(xs->pool);
1428 			xs->pool = NULL;
1429 			goto out_unlock;
1430 		}
1431 	}
1432 
1433 	/* FQ and CQ are now owned by the buffer pool and cleaned up with it. */
1434 	xs->fq_tmp = NULL;
1435 	xs->cq_tmp = NULL;
1436 
1437 	xs->dev = dev;
1438 	xs->zc = xs->umem->zc;
1439 	xs->sg = !!(xs->umem->flags & XDP_UMEM_SG_FLAG);
1440 	xs->queue_id = qid;
1441 	xp_add_xsk(xs->pool, xs);
1442 
1443 	if (qid < dev->real_num_rx_queues) {
1444 		struct netdev_rx_queue *rxq;
1445 
1446 		rxq = __netif_get_rx_queue(dev, qid);
1447 		if (rxq->napi)
1448 			__sk_mark_napi_id_once(sk, rxq->napi->napi_id);
1449 	}
1450 
1451 out_unlock:
1452 	if (err) {
1453 		dev_put(dev);
1454 	} else {
1455 		/* Matches smp_rmb() in bind() for shared umem
1456 		 * sockets, and xsk_is_bound().
1457 		 */
1458 		smp_wmb();
1459 		WRITE_ONCE(xs->state, XSK_BOUND);
1460 	}
1461 	netdev_unlock_ops(dev);
1462 out_release:
1463 	mutex_unlock(&xs->mutex);
1464 	rtnl_unlock();
1465 	return err;
1466 }
1467 
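/* Minimal user-space bind sketch for the handler above (error handling
 * omitted; interface name and queue id are illustrative):
 *
 *	struct sockaddr_xdp sxdp = {
 *		.sxdp_family = AF_XDP,
 *		.sxdp_ifindex = if_nametoindex("eth0"),
 *		.sxdp_queue_id = 0,
 *		.sxdp_flags = XDP_USE_NEED_WAKEUP,
 *	};
 *	bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
 */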
1468 struct xdp_umem_reg_v1 {
1469 	__u64 addr; /* Start of packet data area */
1470 	__u64 len; /* Length of packet data area */
1471 	__u32 chunk_size;
1472 	__u32 headroom;
1473 };
1474 
1475 static int xsk_setsockopt(struct socket *sock, int level, int optname,
1476 			  sockptr_t optval, unsigned int optlen)
1477 {
1478 	struct sock *sk = sock->sk;
1479 	struct xdp_sock *xs = xdp_sk(sk);
1480 	int err;
1481 
1482 	if (level != SOL_XDP)
1483 		return -ENOPROTOOPT;
1484 
1485 	switch (optname) {
1486 	case XDP_RX_RING:
1487 	case XDP_TX_RING:
1488 	{
1489 		struct xsk_queue **q;
1490 		int entries;
1491 
1492 		if (optlen < sizeof(entries))
1493 			return -EINVAL;
1494 		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
1495 			return -EFAULT;
1496 
1497 		mutex_lock(&xs->mutex);
1498 		if (xs->state != XSK_READY) {
1499 			mutex_unlock(&xs->mutex);
1500 			return -EBUSY;
1501 		}
1502 		q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
1503 		err = xsk_init_queue(entries, q, false);
1504 		if (!err && optname == XDP_TX_RING)
1505 			/* Tx needs to be explicitly woken up the first time */
1506 			xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
1507 		mutex_unlock(&xs->mutex);
1508 		return err;
1509 	}
1510 	case XDP_UMEM_REG:
1511 	{
1512 		size_t mr_size = sizeof(struct xdp_umem_reg);
1513 		struct xdp_umem_reg mr = {};
1514 		struct xdp_umem *umem;
1515 
1516 		if (optlen < sizeof(struct xdp_umem_reg_v1))
1517 			return -EINVAL;
1518 		else if (optlen < sizeof(mr))
1519 			mr_size = sizeof(struct xdp_umem_reg_v1);
1520 
1521 		BUILD_BUG_ON(sizeof(struct xdp_umem_reg_v1) >= sizeof(struct xdp_umem_reg));
1522 
1523 		/* Make sure the last field of the struct doesn't have
1524 		 * uninitialized padding. All padding has to be explicit
1525 		 * and has to be set to zero by the userspace to make
1526 		 * struct xdp_umem_reg extensible in the future.
1527 		 */
1528 		BUILD_BUG_ON(offsetof(struct xdp_umem_reg, tx_metadata_len) +
1529 			     sizeof_field(struct xdp_umem_reg, tx_metadata_len) !=
1530 			     sizeof(struct xdp_umem_reg));
1531 
1532 		if (copy_from_sockptr(&mr, optval, mr_size))
1533 			return -EFAULT;
1534 
1535 		mutex_lock(&xs->mutex);
1536 		if (xs->state != XSK_READY || xs->umem) {
1537 			mutex_unlock(&xs->mutex);
1538 			return -EBUSY;
1539 		}
1540 
1541 		umem = xdp_umem_create(&mr);
1542 		if (IS_ERR(umem)) {
1543 			mutex_unlock(&xs->mutex);
1544 			return PTR_ERR(umem);
1545 		}
1546 
1547 		/* Make sure umem is ready before it can be seen by others */
1548 		smp_wmb();
1549 		WRITE_ONCE(xs->umem, umem);
1550 		mutex_unlock(&xs->mutex);
1551 		return 0;
1552 	}
1553 	case XDP_UMEM_FILL_RING:
1554 	case XDP_UMEM_COMPLETION_RING:
1555 	{
1556 		struct xsk_queue **q;
1557 		int entries;
1558 
1559 		if (optlen < sizeof(entries))
1560 			return -EINVAL;
1561 		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
1562 			return -EFAULT;
1563 
1564 		mutex_lock(&xs->mutex);
1565 		if (xs->state != XSK_READY) {
1566 			mutex_unlock(&xs->mutex);
1567 			return -EBUSY;
1568 		}
1569 
1570 		q = (optname == XDP_UMEM_FILL_RING) ? &xs->fq_tmp :
1571 			&xs->cq_tmp;
1572 		err = xsk_init_queue(entries, q, true);
1573 		mutex_unlock(&xs->mutex);
1574 		return err;
1575 	}
1576 	case XDP_MAX_TX_SKB_BUDGET:
1577 	{
1578 		unsigned int budget;
1579 
1580 		if (optlen != sizeof(budget))
1581 			return -EINVAL;
1582 		if (copy_from_sockptr(&budget, optval, sizeof(budget)))
1583 			return -EFAULT;
1584 		if (!xs->tx ||
1585 		    budget < TX_BATCH_SIZE || budget > xs->tx->nentries)
1586 			return -EACCES;
1587 
1588 		WRITE_ONCE(xs->max_tx_budget, budget);
1589 		return 0;
1590 	}
1591 	default:
1592 		break;
1593 	}
1594 
1595 	return -ENOPROTOOPT;
1596 }
1597 
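/* User-space sketch of the setup options handled above; sizes and the
 * page-aligned buffer "bufs" are illustrative, and ring sizes must be
 * powers of two:
 *
 *	struct xdp_umem_reg mr = {
 *		.addr = (__u64)(uintptr_t)bufs,
 *		.len = 4096ULL * 4096,
 *		.chunk_size = 4096,
 *	};
 *	int n = 2048;
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
 *	setsockopt(fd, SOL_XDP, XDP_RX_RING, &n, sizeof(n));
 *	setsockopt(fd, SOL_XDP, XDP_TX_RING, &n, sizeof(n));
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING, &n, sizeof(n));
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_COMPLETION_RING, &n, sizeof(n));
 */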
1598 static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring)
1599 {
1600 	ring->producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
1601 	ring->consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
1602 	ring->desc = offsetof(struct xdp_rxtx_ring, desc);
1603 }
1604 
1605 static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
1606 {
1607 	ring->producer = offsetof(struct xdp_umem_ring, ptrs.producer);
1608 	ring->consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
1609 	ring->desc = offsetof(struct xdp_umem_ring, desc);
1610 }
1611 
1612 struct xdp_statistics_v1 {
1613 	__u64 rx_dropped;
1614 	__u64 rx_invalid_descs;
1615 	__u64 tx_invalid_descs;
1616 };
1617 
1618 static int xsk_getsockopt(struct socket *sock, int level, int optname,
1619 			  char __user *optval, int __user *optlen)
1620 {
1621 	struct sock *sk = sock->sk;
1622 	struct xdp_sock *xs = xdp_sk(sk);
1623 	int len;
1624 
1625 	if (level != SOL_XDP)
1626 		return -ENOPROTOOPT;
1627 
1628 	if (get_user(len, optlen))
1629 		return -EFAULT;
1630 	if (len < 0)
1631 		return -EINVAL;
1632 
1633 	switch (optname) {
1634 	case XDP_STATISTICS:
1635 	{
1636 		struct xdp_statistics stats = {};
1637 		bool extra_stats = true;
1638 		size_t stats_size;
1639 
1640 		if (len < sizeof(struct xdp_statistics_v1)) {
1641 			return -EINVAL;
1642 		} else if (len < sizeof(stats)) {
1643 			extra_stats = false;
1644 			stats_size = sizeof(struct xdp_statistics_v1);
1645 		} else {
1646 			stats_size = sizeof(stats);
1647 		}
1648 
1649 		mutex_lock(&xs->mutex);
1650 		stats.rx_dropped = xs->rx_dropped;
1651 		if (extra_stats) {
1652 			stats.rx_ring_full = xs->rx_queue_full;
1653 			stats.rx_fill_ring_empty_descs =
1654 				xs->pool ? xskq_nb_queue_empty_descs(xs->pool->fq) : 0;
1655 			stats.tx_ring_empty_descs = xskq_nb_queue_empty_descs(xs->tx);
1656 		} else {
1657 			stats.rx_dropped += xs->rx_queue_full;
1658 		}
1659 		stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
1660 		stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
1661 		mutex_unlock(&xs->mutex);
1662 
1663 		if (copy_to_user(optval, &stats, stats_size))
1664 			return -EFAULT;
1665 		if (put_user(stats_size, optlen))
1666 			return -EFAULT;
1667 
1668 		return 0;
1669 	}
1670 	case XDP_MMAP_OFFSETS:
1671 	{
1672 		struct xdp_mmap_offsets off;
1673 		struct xdp_mmap_offsets_v1 off_v1;
1674 		bool flags_supported = true;
1675 		void *to_copy;
1676 
1677 		if (len < sizeof(off_v1))
1678 			return -EINVAL;
1679 		else if (len < sizeof(off))
1680 			flags_supported = false;
1681 
1682 		if (flags_supported) {
1683 			/* xdp_ring_offset is identical to xdp_ring_offset_v1
1684 			 * except for the flags field added to the end.
1685 			 */
1686 			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
1687 					       &off.rx);
1688 			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
1689 					       &off.tx);
1690 			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
1691 					       &off.fr);
1692 			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
1693 					       &off.cr);
1694 			off.rx.flags = offsetof(struct xdp_rxtx_ring,
1695 						ptrs.flags);
1696 			off.tx.flags = offsetof(struct xdp_rxtx_ring,
1697 						ptrs.flags);
1698 			off.fr.flags = offsetof(struct xdp_umem_ring,
1699 						ptrs.flags);
1700 			off.cr.flags = offsetof(struct xdp_umem_ring,
1701 						ptrs.flags);
1702 
1703 			len = sizeof(off);
1704 			to_copy = &off;
1705 		} else {
1706 			xsk_enter_rxtx_offsets(&off_v1.rx);
1707 			xsk_enter_rxtx_offsets(&off_v1.tx);
1708 			xsk_enter_umem_offsets(&off_v1.fr);
1709 			xsk_enter_umem_offsets(&off_v1.cr);
1710 
1711 			len = sizeof(off_v1);
1712 			to_copy = &off_v1;
1713 		}
1714 
1715 		if (copy_to_user(optval, to_copy, len))
1716 			return -EFAULT;
1717 		if (put_user(len, optlen))
1718 			return -EFAULT;
1719 
1720 		return 0;
1721 	}
1722 	case XDP_OPTIONS:
1723 	{
1724 		struct xdp_options opts = {};
1725 
1726 		if (len < sizeof(opts))
1727 			return -EINVAL;
1728 
1729 		mutex_lock(&xs->mutex);
1730 		if (xs->zc)
1731 			opts.flags |= XDP_OPTIONS_ZEROCOPY;
1732 		mutex_unlock(&xs->mutex);
1733 
1734 		len = sizeof(opts);
1735 		if (copy_to_user(optval, &opts, len))
1736 			return -EFAULT;
1737 		if (put_user(len, optlen))
1738 			return -EFAULT;
1739 
1740 		return 0;
1741 	}
1742 	default:
1743 		break;
1744 	}
1745 
1746 	return -EOPNOTSUPP;
1747 }
1748 
1749 static int xsk_mmap(struct file *file, struct socket *sock,
1750 		    struct vm_area_struct *vma)
1751 {
1752 	loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
1753 	unsigned long size = vma->vm_end - vma->vm_start;
1754 	struct xdp_sock *xs = xdp_sk(sock->sk);
1755 	int state = READ_ONCE(xs->state);
1756 	struct xsk_queue *q = NULL;
1757 
1758 	if (state != XSK_READY && state != XSK_BOUND)
1759 		return -EBUSY;
1760 
1761 	if (offset == XDP_PGOFF_RX_RING) {
1762 		q = READ_ONCE(xs->rx);
1763 	} else if (offset == XDP_PGOFF_TX_RING) {
1764 		q = READ_ONCE(xs->tx);
1765 	} else {
1766 		/* Matches the smp_wmb() in XDP_UMEM_REG */
1767 		smp_rmb();
1768 		if (offset == XDP_UMEM_PGOFF_FILL_RING)
1769 			q = state == XSK_READY ? READ_ONCE(xs->fq_tmp) :
1770 						 READ_ONCE(xs->pool->fq);
1771 		else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
1772 			q = state == XSK_READY ? READ_ONCE(xs->cq_tmp) :
1773 						 READ_ONCE(xs->pool->cq);
1774 	}
1775 
1776 	if (!q)
1777 		return -EINVAL;
1778 
1779 	/* Matches the smp_wmb() in xsk_init_queue */
1780 	smp_rmb();
1781 	if (size > q->ring_vmalloc_size)
1782 		return -EINVAL;
1783 
1784 	return remap_vmalloc_range(vma, q->ring, 0);
1785 }
1786 
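/* User-space sketch of mapping the Rx ring through the handler above
 * (n is the ring size requested via XDP_RX_RING; the other rings use
 * their matching XDP_*_PGOFF_* offset):
 *
 *	struct xdp_mmap_offsets off;
 *	socklen_t optlen = sizeof(off);
 *	getsockopt(fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
 *	void *rx = mmap(NULL, off.rx.desc + n * sizeof(struct xdp_desc),
 *			PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *			fd, XDP_PGOFF_RX_RING);
 */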
1787 static int xsk_notifier(struct notifier_block *this,
1788 			unsigned long msg, void *ptr)
1789 {
1790 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1791 	struct net *net = dev_net(dev);
1792 	struct sock *sk;
1793 
1794 	switch (msg) {
1795 	case NETDEV_UNREGISTER:
1796 		mutex_lock(&net->xdp.lock);
1797 		sk_for_each(sk, &net->xdp.list) {
1798 			struct xdp_sock *xs = xdp_sk(sk);
1799 
1800 			mutex_lock(&xs->mutex);
1801 			if (xs->dev == dev) {
1802 				sk->sk_err = ENETDOWN;
1803 				if (!sock_flag(sk, SOCK_DEAD))
1804 					sk_error_report(sk);
1805 
1806 				xsk_unbind_dev(xs);
1807 
1808 				/* Clear device references. */
1809 				xp_clear_dev(xs->pool);
1810 			}
1811 			mutex_unlock(&xs->mutex);
1812 		}
1813 		mutex_unlock(&net->xdp.lock);
1814 		break;
1815 	}
1816 	return NOTIFY_DONE;
1817 }
1818 
1819 static struct proto xsk_proto = {
1820 	.name =		"XDP",
1821 	.owner =	THIS_MODULE,
1822 	.obj_size =	sizeof(struct xdp_sock),
1823 };
1824 
1825 static const struct proto_ops xsk_proto_ops = {
1826 	.family		= PF_XDP,
1827 	.owner		= THIS_MODULE,
1828 	.release	= xsk_release,
1829 	.bind		= xsk_bind,
1830 	.connect	= sock_no_connect,
1831 	.socketpair	= sock_no_socketpair,
1832 	.accept		= sock_no_accept,
1833 	.getname	= sock_no_getname,
1834 	.poll		= xsk_poll,
1835 	.ioctl		= sock_no_ioctl,
1836 	.listen		= sock_no_listen,
1837 	.shutdown	= sock_no_shutdown,
1838 	.setsockopt	= xsk_setsockopt,
1839 	.getsockopt	= xsk_getsockopt,
1840 	.sendmsg	= xsk_sendmsg,
1841 	.recvmsg	= xsk_recvmsg,
1842 	.mmap		= xsk_mmap,
1843 };
1844 
1845 static void xsk_destruct(struct sock *sk)
1846 {
1847 	struct xdp_sock *xs = xdp_sk(sk);
1848 
1849 	if (!sock_flag(sk, SOCK_DEAD))
1850 		return;
1851 
1852 	if (!xp_put_pool(xs->pool))
1853 		xdp_put_umem(xs->umem, !xs->pool);
1854 }
1855 
1856 static int xsk_create(struct net *net, struct socket *sock, int protocol,
1857 		      int kern)
1858 {
1859 	struct xdp_sock *xs;
1860 	struct sock *sk;
1861 
1862 	if (!ns_capable(net->user_ns, CAP_NET_RAW))
1863 		return -EPERM;
1864 	if (sock->type != SOCK_RAW)
1865 		return -ESOCKTNOSUPPORT;
1866 
1867 	if (protocol)
1868 		return -EPROTONOSUPPORT;
1869 
1870 	sock->state = SS_UNCONNECTED;
1871 
1872 	sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
1873 	if (!sk)
1874 		return -ENOBUFS;
1875 
1876 	sock->ops = &xsk_proto_ops;
1877 
1878 	sock_init_data(sock, sk);
1879 
1880 	sk->sk_family = PF_XDP;
1881 
1882 	sk->sk_destruct = xsk_destruct;
1883 
1884 	sock_set_flag(sk, SOCK_RCU_FREE);
1885 
1886 	xs = xdp_sk(sk);
1887 	xs->state = XSK_READY;
1888 	xs->max_tx_budget = TX_BATCH_SIZE;
1889 	mutex_init(&xs->mutex);
1890 
1891 	INIT_LIST_HEAD(&xs->map_list);
1892 	spin_lock_init(&xs->map_list_lock);
1893 
1894 	mutex_lock(&net->xdp.lock);
1895 	sk_add_node_rcu(sk, &net->xdp.list);
1896 	mutex_unlock(&net->xdp.lock);
1897 
1898 	sock_prot_inuse_add(net, &xsk_proto, 1);
1899 
1900 	return 0;
1901 }
1902 
1903 static const struct net_proto_family xsk_family_ops = {
1904 	.family = PF_XDP,
1905 	.create = xsk_create,
1906 	.owner	= THIS_MODULE,
1907 };
1908 
1909 static struct notifier_block xsk_netdev_notifier = {
1910 	.notifier_call	= xsk_notifier,
1911 };
1912 
1913 static int __net_init xsk_net_init(struct net *net)
1914 {
1915 	mutex_init(&net->xdp.lock);
1916 	INIT_HLIST_HEAD(&net->xdp.list);
1917 	return 0;
1918 }
1919 
1920 static void __net_exit xsk_net_exit(struct net *net)
1921 {
1922 	WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
1923 }
1924 
1925 static struct pernet_operations xsk_net_ops = {
1926 	.init = xsk_net_init,
1927 	.exit = xsk_net_exit,
1928 };
1929 
1930 static int __init xsk_init(void)
1931 {
1932 	int err;
1933 
1934 	err = proto_register(&xsk_proto, 0 /* no slab */);
1935 	if (err)
1936 		goto out;
1937 
1938 	err = sock_register(&xsk_family_ops);
1939 	if (err)
1940 		goto out_proto;
1941 
1942 	err = register_pernet_subsys(&xsk_net_ops);
1943 	if (err)
1944 		goto out_sk;
1945 
1946 	err = register_netdevice_notifier(&xsk_netdev_notifier);
1947 	if (err)
1948 		goto out_pernet;
1949 
1950 	xsk_tx_generic_cache = kmem_cache_create("xsk_generic_xmit_cache",
1951 						 sizeof(struct xsk_addrs),
1952 						 0, SLAB_HWCACHE_ALIGN, NULL);
1953 	if (!xsk_tx_generic_cache) {
1954 		err = -ENOMEM;
1955 		goto out_unreg_notif;
1956 	}
1957 
1958 	return 0;
1959 
1960 out_unreg_notif:
1961 	unregister_netdevice_notifier(&xsk_netdev_notifier);
1962 out_pernet:
1963 	unregister_pernet_subsys(&xsk_net_ops);
1964 out_sk:
1965 	sock_unregister(PF_XDP);
1966 out_proto:
1967 	proto_unregister(&xsk_proto);
1968 out:
1969 	return err;
1970 }
1971 
1972 fs_initcall(xsk_init);
1973