// SPDX-License-Identifier: GPL-2.0-only
#include <linux/etherdevice.h>
#include <linux/if_tap.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/nsproxy.h>
#include <linux/compat.h>
#include <linux/if_tun.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/cache.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/fs.h>
#include <linux/uio.h>

#include <net/net_namespace.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <linux/virtio_net.h>
#include <linux/skb_array.h>

#define TAP_IFFEATURES (IFF_VNET_HDR | IFF_MULTI_QUEUE)

#define TAP_VNET_LE 0x80000000
#define TAP_VNET_BE 0x40000000

#ifdef CONFIG_TUN_VNET_CROSS_LE
static inline bool tap_legacy_is_little_endian(struct tap_queue *q)
{
	return q->flags & TAP_VNET_BE ? false :
		virtio_legacy_is_little_endian();
}

static long tap_get_vnet_be(struct tap_queue *q, int __user *sp)
{
	int s = !!(q->flags & TAP_VNET_BE);

	if (put_user(s, sp))
		return -EFAULT;

	return 0;
}

static long tap_set_vnet_be(struct tap_queue *q, int __user *sp)
{
	int s;

	if (get_user(s, sp))
		return -EFAULT;

	if (s)
		q->flags |= TAP_VNET_BE;
	else
		q->flags &= ~TAP_VNET_BE;

	return 0;
}
#else
static inline bool tap_legacy_is_little_endian(struct tap_queue *q)
{
	return virtio_legacy_is_little_endian();
}

static long tap_get_vnet_be(struct tap_queue *q, int __user *argp)
{
	return -EINVAL;
}

static long tap_set_vnet_be(struct tap_queue *q, int __user *argp)
{
	return -EINVAL;
}
#endif /* CONFIG_TUN_VNET_CROSS_LE */

static inline bool tap_is_little_endian(struct tap_queue *q)
{
	return q->flags & TAP_VNET_LE ||
		tap_legacy_is_little_endian(q);
}

static inline u16 tap16_to_cpu(struct tap_queue *q, __virtio16 val)
{
	return __virtio16_to_cpu(tap_is_little_endian(q), val);
}

static inline __virtio16 cpu_to_tap16(struct tap_queue *q, u16 val)
{
	return __cpu_to_virtio16(tap_is_little_endian(q), val);
}

static struct proto tap_proto = {
	.name = "tap",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct tap_queue),
};

#define TAP_NUM_DEVS (1U << MINORBITS)

static LIST_HEAD(major_list);

struct major_info {
	struct rcu_head rcu;
	dev_t major;
	struct idr minor_idr;
	spinlock_t minor_lock;
	const char *device_name;
	struct list_head next;
};

#define GOODCOPY_LEN 128

static const struct proto_ops tap_socket_ops;

#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG | NETIF_F_FRAGLIST)

static struct tap_dev *tap_dev_get_rcu(const struct net_device *dev)
{
	return rcu_dereference(dev->rx_handler_data);
}
/*
 * RCU usage:
 * The tap_queue and the macvlan_dev are loosely coupled, the
 * pointers from one to the other can only be read while rcu_read_lock
 * or rtnl is held.
 *
 * Both the file and the macvlan_dev hold a reference on the tap_queue
 * through sock_hold(&q->sk). When the macvlan_dev goes away first,
 * q->vlan becomes inaccessible. When the file gets closed,
 * tap_get_queue() fails.
 *
 * There may still be references to the struct sock inside of the
 * queue from outbound SKBs, but these never reference back to the
 * file or the dev. The data structure is freed through __sk_free
 * when both our references and any pending SKBs are gone.
 */

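/*
 * A minimal sketch of the reader pattern this implies, mirroring what
 * tap_handle_frame() and tap_get_user() below actually do: cross the
 * queue/device coupling only inside rcu_read_lock() and be prepared
 * for a NULL result, since the other side may already be gone:
 *
 *	rcu_read_lock();
 *	tap = rcu_dereference(q->tap);
 *	if (tap)
 *		... use tap->dev, it cannot be freed in this section ...
 *	rcu_read_unlock();
 */
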
static int tap_enable_queue(struct tap_dev *tap, struct file *file,
			    struct tap_queue *q)
{
	int err = -EINVAL;

	ASSERT_RTNL();

	if (q->enabled)
		goto out;

	err = 0;
	rcu_assign_pointer(tap->taps[tap->numvtaps], q);
	q->queue_index = tap->numvtaps;
	q->enabled = true;

	tap->numvtaps++;
out:
	return err;
}

/* Requires RTNL */
static int tap_set_queue(struct tap_dev *tap, struct file *file,
			 struct tap_queue *q)
{
	if (tap->numqueues == MAX_TAP_QUEUES)
		return -EBUSY;

	rcu_assign_pointer(q->tap, tap);
	rcu_assign_pointer(tap->taps[tap->numvtaps], q);
	sock_hold(&q->sk);

	q->file = file;
	q->queue_index = tap->numvtaps;
	q->enabled = true;
	file->private_data = q;
	list_add_tail(&q->next, &tap->queue_list);

	tap->numvtaps++;
	tap->numqueues++;

	return 0;
}

static int tap_disable_queue(struct tap_queue *q)
{
	struct tap_dev *tap;
	struct tap_queue *nq;

	ASSERT_RTNL();
	if (!q->enabled)
		return -EINVAL;

	tap = rtnl_dereference(q->tap);

	if (tap) {
		int index = q->queue_index;
		BUG_ON(index >= tap->numvtaps);
		nq = rtnl_dereference(tap->taps[tap->numvtaps - 1]);
		nq->queue_index = index;

		rcu_assign_pointer(tap->taps[index], nq);
		RCU_INIT_POINTER(tap->taps[tap->numvtaps - 1], NULL);
		q->enabled = false;

		tap->numvtaps--;
	}

	return 0;
}

/*
 * The file owning the queue got closed, give up both
 * the reference that the file holds as well as the
 * one from the macvlan_dev if that still exists.
 *
 * Using the spinlock makes sure that we don't get
 * to the queue again after destroying it.
 */
static void tap_put_queue(struct tap_queue *q)
{
	struct tap_dev *tap;

	rtnl_lock();
	tap = rtnl_dereference(q->tap);

	if (tap) {
		if (q->enabled)
			BUG_ON(tap_disable_queue(q));

		tap->numqueues--;
		RCU_INIT_POINTER(q->tap, NULL);
		sock_put(&q->sk);
		list_del_init(&q->next);
	}

	rtnl_unlock();

	synchronize_rcu();
	sock_put(&q->sk);
}

/*
 * Select a queue based on the flow hash of this packet. If no flow
 * hash is available, fall back to the rx queue recorded on the
 * incoming device. If all else fails, use the first queue.
 * Cache tap->numvtaps since it can become zero during the execution
 * of this function.
 */
static struct tap_queue *tap_get_queue(struct tap_dev *tap,
				       struct sk_buff *skb)
{
	struct tap_queue *queue = NULL;
	/* Access to taps array is protected by rcu, but access to numvtaps
	 * isn't. Below we use it to lookup a queue, but treat it as a hint
	 * and validate that the result isn't NULL - in case we are
	 * racing against queue removal.
	 */
	int numvtaps = READ_ONCE(tap->numvtaps);
	__u32 rxq;

	if (!numvtaps)
		goto out;

	if (numvtaps == 1)
		goto single;

	/* Check if we can use flow to select a queue */
	rxq = skb_get_hash(skb);
	if (rxq) {
		queue = rcu_dereference(tap->taps[rxq % numvtaps]);
		goto out;
	}

	if (likely(skb_rx_queue_recorded(skb))) {
		rxq = skb_get_rx_queue(skb);

		while (unlikely(rxq >= numvtaps))
			rxq -= numvtaps;

		queue = rcu_dereference(tap->taps[rxq]);
		goto out;
	}

single:
	queue = rcu_dereference(tap->taps[0]);
out:
	return queue;
}

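/*
 * Worked example of the selection above (illustrative numbers only):
 * with numvtaps == 4 and a flow hash of 0x12345678, the frame lands on
 * queue 0x12345678 % 4 == 0. Frames of one flow hash identically, so a
 * flow stays on a single queue as long as numvtaps does not change.
 */
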
/*
 * The net_device is going away, give up the reference
 * that it holds on all queues and safely set the pointer
 * from the queues to NULL.
 */
void tap_del_queues(struct tap_dev *tap)
{
	struct tap_queue *q, *tmp;

	ASSERT_RTNL();
	list_for_each_entry_safe(q, tmp, &tap->queue_list, next) {
		list_del_init(&q->next);
		RCU_INIT_POINTER(q->tap, NULL);
		if (q->enabled)
			tap->numvtaps--;
		tap->numqueues--;
		sock_put(&q->sk);
	}
	BUG_ON(tap->numvtaps);
	BUG_ON(tap->numqueues);
	/* guarantee that any future tap_set_queue will fail */
	tap->numvtaps = MAX_TAP_QUEUES;
}
EXPORT_SYMBOL_GPL(tap_del_queues);

rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct tap_dev *tap;
	struct tap_queue *q;
	netdev_features_t features = TAP_FEATURES;
	enum skb_drop_reason drop_reason;

	tap = tap_dev_get_rcu(dev);
	if (!tap)
		return RX_HANDLER_PASS;

	q = tap_get_queue(tap, skb);
	if (!q)
		return RX_HANDLER_PASS;

	skb_push(skb, ETH_HLEN);

	/* Apply the forward feature mask so that we perform segmentation
	 * according to the user's wishes.  This only works if VNET_HDR is
	 * enabled.
	 */
	if (q->flags & IFF_VNET_HDR)
		features |= tap->tap_features;
	if (netif_needs_gso(skb, features)) {
		struct sk_buff *segs = __skb_gso_segment(skb, features, false);
		struct sk_buff *next;

		if (IS_ERR(segs)) {
			drop_reason = SKB_DROP_REASON_SKB_GSO_SEG;
			goto drop;
		}

		if (!segs) {
			if (ptr_ring_produce(&q->ring, skb)) {
				drop_reason = SKB_DROP_REASON_FULL_RING;
				goto drop;
			}
			goto wake_up;
		}

		consume_skb(skb);
		skb_list_walk_safe(segs, skb, next) {
			skb_mark_not_on_list(skb);
			if (ptr_ring_produce(&q->ring, skb)) {
				drop_reason = SKB_DROP_REASON_FULL_RING;
				kfree_skb_reason(skb, drop_reason);
				kfree_skb_list_reason(next, drop_reason);
				break;
			}
		}
	} else {
		/* If we receive a partial checksum and the tap side
		 * doesn't support checksum offload, compute the checksum.
		 * Note: it doesn't matter which checksum feature to
		 * check, we either support them all or none.
		 */
		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    !(features & NETIF_F_CSUM_MASK) &&
		    skb_checksum_help(skb)) {
			drop_reason = SKB_DROP_REASON_SKB_CSUM;
			goto drop;
		}
		if (ptr_ring_produce(&q->ring, skb)) {
			drop_reason = SKB_DROP_REASON_FULL_RING;
			goto drop;
		}
	}

wake_up:
	wake_up_interruptible_poll(sk_sleep(&q->sk), EPOLLIN | EPOLLRDNORM | EPOLLRDBAND);
	return RX_HANDLER_CONSUMED;

drop:
	/* Count errors/drops only here, thus don't care about args. */
	if (tap->count_rx_dropped)
		tap->count_rx_dropped(tap);
	kfree_skb_reason(skb, drop_reason);
	return RX_HANDLER_CONSUMED;
}
EXPORT_SYMBOL_GPL(tap_handle_frame);

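/*
 * Hedged wiring note: a frontend is expected to attach the handler
 * above to its net_device roughly via
 *
 *	netdev_rx_handler_register(dev, tap_handle_frame, tap);
 *
 * which is also what makes tap_dev_get_rcu() at the top of this file
 * return the tap_dev, since that helper reads dev->rx_handler_data.
 */
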
static struct major_info *tap_get_major(int major)
{
	struct major_info *tap_major;

	list_for_each_entry_rcu(tap_major, &major_list, next) {
		if (tap_major->major == major)
			return tap_major;
	}

	return NULL;
}

int tap_get_minor(dev_t major, struct tap_dev *tap)
{
	int retval = -ENOMEM;
	struct major_info *tap_major;

	rcu_read_lock();
	tap_major = tap_get_major(MAJOR(major));
	if (!tap_major) {
		retval = -EINVAL;
		goto unlock;
	}

	spin_lock(&tap_major->minor_lock);
	retval = idr_alloc(&tap_major->minor_idr, tap, 1, TAP_NUM_DEVS, GFP_ATOMIC);
	if (retval >= 0) {
		tap->minor = retval;
	} else if (retval == -ENOSPC) {
		netdev_err(tap->dev, "Too many tap devices\n");
		retval = -EINVAL;
	}
	spin_unlock(&tap_major->minor_lock);

unlock:
	rcu_read_unlock();
	return retval < 0 ? retval : 0;
}
EXPORT_SYMBOL_GPL(tap_get_minor);

void tap_free_minor(dev_t major, struct tap_dev *tap)
{
	struct major_info *tap_major;

	rcu_read_lock();
	tap_major = tap_get_major(MAJOR(major));
	if (!tap_major)
		goto unlock;

	spin_lock(&tap_major->minor_lock);
	if (tap->minor) {
		idr_remove(&tap_major->minor_idr, tap->minor);
		tap->minor = 0;
	}
	spin_unlock(&tap_major->minor_lock);

unlock:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(tap_free_minor);

static struct tap_dev *dev_get_by_tap_file(int major, int minor)
{
	struct net_device *dev = NULL;
	struct tap_dev *tap;
	struct major_info *tap_major;

	rcu_read_lock();
	tap_major = tap_get_major(major);
	if (!tap_major) {
		tap = NULL;
		goto unlock;
	}

	spin_lock(&tap_major->minor_lock);
	tap = idr_find(&tap_major->minor_idr, minor);
	if (tap) {
		dev = tap->dev;
		dev_hold(dev);
	}
	spin_unlock(&tap_major->minor_lock);

unlock:
	rcu_read_unlock();
	return tap;
}

static void tap_sock_write_space(struct sock *sk)
{
	wait_queue_head_t *wqueue;

	if (!sock_writeable(sk) ||
	    !test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
		return;

	wqueue = sk_sleep(sk);
	if (wqueue && waitqueue_active(wqueue))
		wake_up_interruptible_poll(wqueue, EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND);
}

static void tap_sock_destruct(struct sock *sk)
{
	struct tap_queue *q = container_of(sk, struct tap_queue, sk);

	ptr_ring_cleanup(&q->ring, __skb_array_destroy_skb);
}

static int tap_open(struct inode *inode, struct file *file)
{
	struct net *net = current->nsproxy->net_ns;
	struct tap_dev *tap;
	struct tap_queue *q;
	int err = -ENODEV;

	rtnl_lock();
	tap = dev_get_by_tap_file(imajor(inode), iminor(inode));
	if (!tap)
		goto err;

	err = -ENOMEM;
	q = (struct tap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
					 &tap_proto, 0);
	if (!q)
		goto err;
	if (ptr_ring_init(&q->ring, tap->dev->tx_queue_len, GFP_KERNEL)) {
		sk_free(&q->sk);
		goto err;
	}

	init_waitqueue_head(&q->sock.wq.wait);
	q->sock.type = SOCK_RAW;
	q->sock.state = SS_CONNECTED;
	q->sock.file = file;
	q->sock.ops = &tap_socket_ops;
	sock_init_data(&q->sock, &q->sk);
	q->sk.sk_write_space = tap_sock_write_space;
	q->sk.sk_destruct = tap_sock_destruct;
	q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
	q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);

	/*
	 * So far only KVM virtio_net uses tap; enable zero copy between
	 * guest kernel and host kernel when the lower device supports
	 * zerocopy.
	 *
	 * Macvlan supports zerocopy iff the lower device supports zero
	 * copy, so we don't have to look at the lower device directly.
	 */
	if ((tap->dev->features & NETIF_F_HIGHDMA) && (tap->dev->features & NETIF_F_SG))
		sock_set_flag(&q->sk, SOCK_ZEROCOPY);

	err = tap_set_queue(tap, file, q);
	if (err) {
		/* tap_sock_destruct() will take care of freeing ptr_ring */
		goto err_put;
	}

	dev_put(tap->dev);

	rtnl_unlock();
	return err;

err_put:
	sock_put(&q->sk);
err:
	if (tap)
		dev_put(tap->dev);

	rtnl_unlock();
	return err;
}

static int tap_release(struct inode *inode, struct file *file)
{
	struct tap_queue *q = file->private_data;
	tap_put_queue(q);
	return 0;
}

static __poll_t tap_poll(struct file *file, poll_table *wait)
{
	struct tap_queue *q = file->private_data;
	__poll_t mask = EPOLLERR;

	if (!q)
		goto out;

	mask = 0;
	poll_wait(file, &q->sock.wq.wait, wait);

	if (!ptr_ring_empty(&q->ring))
		mask |= EPOLLIN | EPOLLRDNORM;

	if (sock_writeable(&q->sk) ||
	    (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &q->sock.flags) &&
	     sock_writeable(&q->sk)))
		mask |= EPOLLOUT | EPOLLWRNORM;

out:
	return mask;
}

static inline struct sk_buff *tap_alloc_skb(struct sock *sk, size_t prepad,
					    size_t len, size_t linear,
					    int noblock, int *err)
{
	struct sk_buff *skb;

	/* Under a page?  Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)
		linear = len;

	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
				   err, 0);
	if (!skb)
		return NULL;

	skb_reserve(skb, prepad);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;

	return skb;
}

/* Neighbour code has some assumptions on HH_DATA_MOD alignment */
#define TAP_RESERVE HH_DATA_OFF(ETH_HLEN)

/* Get packet from user space buffer */
static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
			    struct iov_iter *from, int noblock)
{
	int good_linear = SKB_MAX_HEAD(TAP_RESERVE);
	struct sk_buff *skb;
	struct tap_dev *tap;
	unsigned long total_len = iov_iter_count(from);
	unsigned long len = total_len;
	int err;
	struct virtio_net_hdr vnet_hdr = { 0 };
	int vnet_hdr_len = 0;
	int copylen = 0;
	int depth;
	bool zerocopy = false;
	size_t linear;
	enum skb_drop_reason drop_reason;

	if (q->flags & IFF_VNET_HDR) {
		vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);

		err = -EINVAL;
		if (len < vnet_hdr_len)
			goto err;
		len -= vnet_hdr_len;

		err = -EFAULT;
		if (!copy_from_iter_full(&vnet_hdr, sizeof(vnet_hdr), from))
			goto err;
		iov_iter_advance(from, vnet_hdr_len - sizeof(vnet_hdr));
		if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		     tap16_to_cpu(q, vnet_hdr.csum_start) +
		     tap16_to_cpu(q, vnet_hdr.csum_offset) + 2 >
			     tap16_to_cpu(q, vnet_hdr.hdr_len))
			vnet_hdr.hdr_len = cpu_to_tap16(q,
				 tap16_to_cpu(q, vnet_hdr.csum_start) +
				 tap16_to_cpu(q, vnet_hdr.csum_offset) + 2);
		err = -EINVAL;
		if (tap16_to_cpu(q, vnet_hdr.hdr_len) > len)
			goto err;
	}

	err = -EINVAL;
	if (unlikely(len < ETH_HLEN))
		goto err;

	if (msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
		struct iov_iter i;

		copylen = vnet_hdr.hdr_len ?
			tap16_to_cpu(q, vnet_hdr.hdr_len) : GOODCOPY_LEN;
		if (copylen > good_linear)
			copylen = good_linear;
		else if (copylen < ETH_HLEN)
			copylen = ETH_HLEN;
		linear = copylen;
		i = *from;
		iov_iter_advance(&i, copylen);
		if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
			zerocopy = true;
	}

	if (!zerocopy) {
		copylen = len;
		linear = tap16_to_cpu(q, vnet_hdr.hdr_len);
		if (linear > good_linear)
			linear = good_linear;
		else if (linear < ETH_HLEN)
			linear = ETH_HLEN;
	}

	skb = tap_alloc_skb(&q->sk, TAP_RESERVE, copylen,
			    linear, noblock, &err);
	if (!skb)
		goto err;

	if (zerocopy)
		err = zerocopy_sg_from_iter(skb, from);
	else
		err = skb_copy_datagram_from_iter(skb, 0, from, len);

	if (err) {
		drop_reason = SKB_DROP_REASON_SKB_UCOPY_FAULT;
		goto err_kfree;
	}

	skb_set_network_header(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;

	if (vnet_hdr_len) {
		err = virtio_net_hdr_to_skb(skb, &vnet_hdr,
					    tap_is_little_endian(q));
		if (err) {
			drop_reason = SKB_DROP_REASON_DEV_HDR;
			goto err_kfree;
		}
	}

	skb_probe_transport_header(skb);

	/* Move network header to the right position for VLAN tagged packets */
	if (eth_type_vlan(skb->protocol) &&
	    __vlan_get_protocol(skb, skb->protocol, &depth) != 0)
		skb_set_network_header(skb, depth);

	rcu_read_lock();
	tap = rcu_dereference(q->tap);
	/* copy skb_ubuf_info for callback when skb has no error */
	if (zerocopy) {
		skb_zcopy_init(skb, msg_control);
	} else if (msg_control) {
		struct ubuf_info *uarg = msg_control;
		uarg->callback(NULL, uarg, false);
	}

	if (tap) {
		skb->dev = tap->dev;
		dev_queue_xmit(skb);
	} else {
		kfree_skb(skb);
	}
	rcu_read_unlock();

	return total_len;

err_kfree:
	kfree_skb_reason(skb, drop_reason);

err:
	rcu_read_lock();
	tap = rcu_dereference(q->tap);
	if (tap && tap->count_tx_dropped)
		tap->count_tx_dropped(tap);
	rcu_read_unlock();

	return err;
}

static ssize_t tap_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct tap_queue *q = file->private_data;

	return tap_get_user(q, NULL, from, file->f_flags & O_NONBLOCK);
}

/* Put packet to the user space buffer */
static ssize_t tap_put_user(struct tap_queue *q,
			    const struct sk_buff *skb,
			    struct iov_iter *iter)
{
	int ret;
	int vnet_hdr_len = 0;
	int vlan_offset = 0;
	int total;

	if (q->flags & IFF_VNET_HDR) {
		int vlan_hlen = skb_vlan_tag_present(skb) ? VLAN_HLEN : 0;
		struct virtio_net_hdr vnet_hdr;

		vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
		if (iov_iter_count(iter) < vnet_hdr_len)
			return -EINVAL;

		if (virtio_net_hdr_from_skb(skb, &vnet_hdr,
					    tap_is_little_endian(q), true,
					    vlan_hlen))
			BUG();

		if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) !=
		    sizeof(vnet_hdr))
			return -EFAULT;

		iov_iter_advance(iter, vnet_hdr_len - sizeof(vnet_hdr));
	}
	total = vnet_hdr_len;
	total += skb->len;

	if (skb_vlan_tag_present(skb)) {
		struct {
			__be16 h_vlan_proto;
			__be16 h_vlan_TCI;
		} veth;
		veth.h_vlan_proto = skb->vlan_proto;
		veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));

		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
		total += VLAN_HLEN;

		ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
		if (ret || !iov_iter_count(iter))
			goto done;

		ret = copy_to_iter(&veth, sizeof(veth), iter);
		if (ret != sizeof(veth) || !iov_iter_count(iter))
			goto done;
	}

	ret = skb_copy_datagram_iter(skb, vlan_offset, iter,
				     skb->len - vlan_offset);

done:
	return ret ? ret : total;
}

static ssize_t tap_do_read(struct tap_queue *q,
			   struct iov_iter *to,
			   int noblock, struct sk_buff *skb)
{
	DEFINE_WAIT(wait);
	ssize_t ret = 0;

	if (!iov_iter_count(to)) {
		kfree_skb(skb);
		return 0;
	}

	if (skb)
		goto put;

	while (1) {
		if (!noblock)
			prepare_to_wait(sk_sleep(&q->sk), &wait,
					TASK_INTERRUPTIBLE);

		/* Read frames from the queue */
		skb = ptr_ring_consume(&q->ring);
		if (skb)
			break;
		if (noblock) {
			ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		/* Nothing to read, let's sleep */
		schedule();
	}
	if (!noblock)
		finish_wait(sk_sleep(&q->sk), &wait);

put:
	if (skb) {
		ret = tap_put_user(q, skb, to);
		if (unlikely(ret < 0))
			kfree_skb(skb);
		else
			consume_skb(skb);
	}
	return ret;
}

static ssize_t tap_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct tap_queue *q = file->private_data;
	ssize_t len = iov_iter_count(to), ret;

	ret = tap_do_read(q, to, file->f_flags & O_NONBLOCK, NULL);
	ret = min_t(ssize_t, ret, len);
	if (ret > 0)
		iocb->ki_pos = ret;
	return ret;
}

static struct tap_dev *tap_get_tap_dev(struct tap_queue *q)
{
	struct tap_dev *tap;

	ASSERT_RTNL();
	tap = rtnl_dereference(q->tap);
	if (tap)
		dev_hold(tap->dev);

	return tap;
}

static void tap_put_tap_dev(struct tap_dev *tap)
{
	dev_put(tap->dev);
}

static int tap_ioctl_set_queue(struct file *file, unsigned int flags)
{
	struct tap_queue *q = file->private_data;
	struct tap_dev *tap;
	int ret;

	tap = tap_get_tap_dev(q);
	if (!tap)
		return -EINVAL;

	if (flags & IFF_ATTACH_QUEUE)
		ret = tap_enable_queue(tap, file, q);
	else if (flags & IFF_DETACH_QUEUE)
		ret = tap_disable_queue(q);
	else
		ret = -EINVAL;

	tap_put_tap_dev(tap);
	return ret;
}

static int set_offload(struct tap_queue *q, unsigned long arg)
{
	struct tap_dev *tap;
	netdev_features_t features;
	netdev_features_t feature_mask = 0;

	tap = rtnl_dereference(q->tap);
	if (!tap)
		return -ENOLINK;

	features = tap->dev->features;

	if (arg & TUN_F_CSUM) {
		feature_mask = NETIF_F_HW_CSUM;

		if (arg & (TUN_F_TSO4 | TUN_F_TSO6)) {
			if (arg & TUN_F_TSO_ECN)
				feature_mask |= NETIF_F_TSO_ECN;
			if (arg & TUN_F_TSO4)
				feature_mask |= NETIF_F_TSO;
			if (arg & TUN_F_TSO6)
				feature_mask |= NETIF_F_TSO6;
		}
	}

	/* The tun/tap driver inverts the usage of the TSO offload bits:
	 * setting a TSO bit means that user space is willing to accept
	 * TSO frames, and clearing it means that user space does not
	 * support TSO.
	 * Tap follows the same convention: when user space turns off
	 * TSO, we turn off GSO/LRO on the lower device so that user
	 * space will not receive TSO frames.
	 */
	if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6))
		features |= RX_OFFLOADS;
	else
		features &= ~RX_OFFLOADS;

	/* tap_features are the same as features on tun/tap and
	 * reflect user expectations.
	 */
	tap->tap_features = feature_mask;
	if (tap->update_features)
		tap->update_features(tap, features);

	return 0;
}

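/*
 * A hedged userspace sketch of driving set_offload() above through the
 * TUNSETOFFLOAD case below; "fd" is assumed to be an open tap character
 * device. Note the flags are passed by value, not by pointer:
 *
 *	unsigned long offloads = TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6;
 *
 *	if (ioctl(fd, TUNSETOFFLOAD, offloads) < 0)
 *		perror("TUNSETOFFLOAD");
 *
 * Any bit outside TUN_F_CSUM/TSO4/TSO6/TSO_ECN/UFO is rejected with
 * -EINVAL, so callers can probe for future flags.
 */
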
/*
 * provide compatibility with generic tun/tap interface
 */
static long tap_ioctl(struct file *file, unsigned int cmd,
		      unsigned long arg)
{
	struct tap_queue *q = file->private_data;
	struct tap_dev *tap;
	void __user *argp = (void __user *)arg;
	struct ifreq __user *ifr = argp;
	unsigned int __user *up = argp;
	unsigned short u;
	int __user *sp = argp;
	struct sockaddr sa;
	int s;
	int ret;

	switch (cmd) {
	case TUNSETIFF:
		/* ignore the name, just look at flags */
		if (get_user(u, &ifr->ifr_flags))
			return -EFAULT;

		ret = 0;
		if ((u & ~TAP_IFFEATURES) != (IFF_NO_PI | IFF_TAP))
			ret = -EINVAL;
		else
			q->flags = (q->flags & ~TAP_IFFEATURES) | u;

		return ret;

	case TUNGETIFF:
		rtnl_lock();
		tap = tap_get_tap_dev(q);
		if (!tap) {
			rtnl_unlock();
			return -ENOLINK;
		}

		ret = 0;
		u = q->flags;
		if (copy_to_user(&ifr->ifr_name, tap->dev->name, IFNAMSIZ) ||
		    put_user(u, &ifr->ifr_flags))
			ret = -EFAULT;
		tap_put_tap_dev(tap);
		rtnl_unlock();
		return ret;

	case TUNSETQUEUE:
		if (get_user(u, &ifr->ifr_flags))
			return -EFAULT;
		rtnl_lock();
		ret = tap_ioctl_set_queue(file, u);
		rtnl_unlock();
		return ret;

	case TUNGETFEATURES:
		if (put_user(IFF_TAP | IFF_NO_PI | TAP_IFFEATURES, up))
			return -EFAULT;
		return 0;

	case TUNSETSNDBUF:
		if (get_user(s, sp))
			return -EFAULT;
		if (s <= 0)
			return -EINVAL;

		q->sk.sk_sndbuf = s;
		return 0;

	case TUNGETVNETHDRSZ:
		s = q->vnet_hdr_sz;
		if (put_user(s, sp))
			return -EFAULT;
		return 0;

	case TUNSETVNETHDRSZ:
		if (get_user(s, sp))
			return -EFAULT;
		if (s < (int)sizeof(struct virtio_net_hdr))
			return -EINVAL;

		q->vnet_hdr_sz = s;
		return 0;

	case TUNGETVNETLE:
		s = !!(q->flags & TAP_VNET_LE);
		if (put_user(s, sp))
			return -EFAULT;
		return 0;

	case TUNSETVNETLE:
		if (get_user(s, sp))
			return -EFAULT;
		if (s)
			q->flags |= TAP_VNET_LE;
		else
			q->flags &= ~TAP_VNET_LE;
		return 0;

	case TUNGETVNETBE:
		return tap_get_vnet_be(q, sp);

	case TUNSETVNETBE:
		return tap_set_vnet_be(q, sp);

	case TUNSETOFFLOAD:
		/* let the user check for future flags */
		if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
			    TUN_F_TSO_ECN | TUN_F_UFO))
			return -EINVAL;

		rtnl_lock();
		ret = set_offload(q, arg);
		rtnl_unlock();
		return ret;

	case SIOCGIFHWADDR:
		rtnl_lock();
		tap = tap_get_tap_dev(q);
		if (!tap) {
			rtnl_unlock();
			return -ENOLINK;
		}
		ret = 0;
		dev_get_mac_address(&sa, dev_net(tap->dev), tap->dev->name);
		if (copy_to_user(&ifr->ifr_name, tap->dev->name, IFNAMSIZ) ||
		    copy_to_user(&ifr->ifr_hwaddr, &sa, sizeof(sa)))
			ret = -EFAULT;
		tap_put_tap_dev(tap);
		rtnl_unlock();
		return ret;

	case SIOCSIFHWADDR:
		if (copy_from_user(&sa, &ifr->ifr_hwaddr, sizeof(sa)))
			return -EFAULT;
		rtnl_lock();
		tap = tap_get_tap_dev(q);
		if (!tap) {
			rtnl_unlock();
			return -ENOLINK;
		}
		ret = dev_set_mac_address_user(tap->dev, &sa, NULL);
		tap_put_tap_dev(tap);
		rtnl_unlock();
		return ret;

	default:
		return -EINVAL;
	}
}

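/*
 * A minimal, hedged userspace sketch of the compatibility interface
 * above. Unlike tun, the interface name in the ifreq is ignored (the
 * identity comes from the character device node itself), so only the
 * flags matter; "fd" is assumed to be an open tap character device:
 *
 *	struct ifreq ifr = { 0 };
 *
 *	ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_VNET_HDR;
 *	if (ioctl(fd, TUNSETIFF, &ifr) < 0)
 *		perror("TUNSETIFF");
 *
 * Anything other than IFF_TAP | IFF_NO_PI plus bits from TAP_IFFEATURES
 * is rejected with -EINVAL.
 */
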
static const struct file_operations tap_fops = {
	.owner		= THIS_MODULE,
	.open		= tap_open,
	.release	= tap_release,
	.read_iter	= tap_read_iter,
	.write_iter	= tap_write_iter,
	.poll		= tap_poll,
	.llseek		= no_llseek,
	.unlocked_ioctl	= tap_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
};

static int tap_get_user_xdp(struct tap_queue *q, struct xdp_buff *xdp)
{
	struct tun_xdp_hdr *hdr = xdp->data_hard_start;
	struct virtio_net_hdr *gso = &hdr->gso;
	int buflen = hdr->buflen;
	int vnet_hdr_len = 0;
	struct tap_dev *tap;
	struct sk_buff *skb;
	int err, depth;

	if (q->flags & IFF_VNET_HDR)
		vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);

	skb = build_skb(xdp->data_hard_start, buflen);
	if (!skb) {
		err = -ENOMEM;
		goto err;
	}

	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	skb_put(skb, xdp->data_end - xdp->data);

	skb_set_network_header(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;

	if (vnet_hdr_len) {
		err = virtio_net_hdr_to_skb(skb, gso, tap_is_little_endian(q));
		if (err)
			goto err_kfree;
	}

	/* Move network header to the right position for VLAN tagged packets */
	if (eth_type_vlan(skb->protocol) &&
	    __vlan_get_protocol(skb, skb->protocol, &depth) != 0)
		skb_set_network_header(skb, depth);

	rcu_read_lock();
	tap = rcu_dereference(q->tap);
	if (tap) {
		skb->dev = tap->dev;
		skb_probe_transport_header(skb);
		dev_queue_xmit(skb);
	} else {
		kfree_skb(skb);
	}
	rcu_read_unlock();

	return 0;

err_kfree:
	kfree_skb(skb);
err:
	rcu_read_lock();
	tap = rcu_dereference(q->tap);
	if (tap && tap->count_tx_dropped)
		tap->count_tx_dropped(tap);
	rcu_read_unlock();
	return err;
}

static int tap_sendmsg(struct socket *sock, struct msghdr *m,
		       size_t total_len)
{
	struct tap_queue *q = container_of(sock, struct tap_queue, sock);
	struct tun_msg_ctl *ctl = m->msg_control;
	struct xdp_buff *xdp;
	int i;

	if (m->msg_controllen == sizeof(struct tun_msg_ctl) &&
	    ctl && ctl->type == TUN_MSG_PTR) {
		for (i = 0; i < ctl->num; i++) {
			xdp = &((struct xdp_buff *)ctl->ptr)[i];
			tap_get_user_xdp(q, xdp);
		}
		return 0;
	}

	return tap_get_user(q, ctl ? ctl->ptr : NULL, &m->msg_iter,
			    m->msg_flags & MSG_DONTWAIT);
}

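/*
 * Hedged usage note: the TUN_MSG_PTR branch above is the XDP batching
 * path used by in-kernel callers such as vhost-net, which passes an
 * array of struct xdp_buff behind ctl->ptr. A sketch of the control
 * layout that branch expects ("batch_size" and "xdp_array" are
 * illustrative names):
 *
 *	struct tun_msg_ctl ctl = {
 *		.type = TUN_MSG_PTR,
 *		.num  = batch_size,
 *		.ptr  = xdp_array,	(an array of struct xdp_buff)
 *	};
 *	m->msg_control = &ctl;
 *	m->msg_controllen = sizeof(ctl);
 */
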
static int tap_recvmsg(struct socket *sock, struct msghdr *m,
		       size_t total_len, int flags)
{
	struct tap_queue *q = container_of(sock, struct tap_queue, sock);
	struct sk_buff *skb = m->msg_control;
	int ret;

	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC)) {
		kfree_skb(skb);
		return -EINVAL;
	}
	ret = tap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT, skb);
	if (ret > total_len) {
		m->msg_flags |= MSG_TRUNC;
		ret = flags & MSG_TRUNC ? ret : total_len;
	}
	return ret;
}

static int tap_peek_len(struct socket *sock)
{
	struct tap_queue *q = container_of(sock, struct tap_queue,
					       sock);
	return PTR_RING_PEEK_CALL(&q->ring, __skb_array_len_with_tag);
}

/* Ops structure to mimic raw sockets with tun */
static const struct proto_ops tap_socket_ops = {
	.sendmsg = tap_sendmsg,
	.recvmsg = tap_recvmsg,
	.peek_len = tap_peek_len,
};

/* Get an underlying socket object from a tap file.  Returns error unless file
 * is attached to a device.  The returned object works like a packet socket, it
 * can be used for sock_sendmsg/sock_recvmsg.  The caller is responsible for
 * holding a reference to the file for as long as the socket is in use. */
struct socket *tap_get_socket(struct file *file)
{
	struct tap_queue *q;

	if (file->f_op != &tap_fops)
		return ERR_PTR(-EINVAL);
	q = file->private_data;
	if (!q)
		return ERR_PTR(-EBADFD);
	return &q->sock;
}
EXPORT_SYMBOL_GPL(tap_get_socket);

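/*
 * Hedged caller sketch: an in-kernel user such as vhost-net resolves a
 * queue fd handed in from userspace roughly like this (error handling
 * elided), keeping its own reference on the file as required above:
 *
 *	struct file *file = fget(fd);
 *	struct socket *sock = tap_get_socket(file);
 *
 *	if (IS_ERR(sock))
 *		fput(file);
 *	... sock_sendmsg()/sock_recvmsg() on sock ...
 */
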
struct ptr_ring *tap_get_ptr_ring(struct file *file)
{
	struct tap_queue *q;

	if (file->f_op != &tap_fops)
		return ERR_PTR(-EINVAL);
	q = file->private_data;
	if (!q)
		return ERR_PTR(-EBADFD);
	return &q->ring;
}
EXPORT_SYMBOL_GPL(tap_get_ptr_ring);

int tap_queue_resize(struct tap_dev *tap)
{
	struct net_device *dev = tap->dev;
	struct tap_queue *q;
	struct ptr_ring **rings;
	int n = tap->numqueues;
	int ret, i = 0;

	rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL);
	if (!rings)
		return -ENOMEM;

	list_for_each_entry(q, &tap->queue_list, next)
		rings[i++] = &q->ring;

	ret = ptr_ring_resize_multiple(rings, n,
				       dev->tx_queue_len, GFP_KERNEL,
				       __skb_array_destroy_skb);

	kfree(rings);
	return ret;
}
EXPORT_SYMBOL_GPL(tap_queue_resize);

static int tap_list_add(dev_t major, const char *device_name)
{
	struct major_info *tap_major;

	tap_major = kzalloc(sizeof(*tap_major), GFP_ATOMIC);
	if (!tap_major)
		return -ENOMEM;

	tap_major->major = MAJOR(major);

	idr_init(&tap_major->minor_idr);
	spin_lock_init(&tap_major->minor_lock);

	tap_major->device_name = device_name;

	list_add_tail_rcu(&tap_major->next, &major_list);
	return 0;
}

int tap_create_cdev(struct cdev *tap_cdev, dev_t *tap_major,
		    const char *device_name, struct module *module)
{
	int err;

	err = alloc_chrdev_region(tap_major, 0, TAP_NUM_DEVS, device_name);
	if (err)
		goto out1;

	cdev_init(tap_cdev, &tap_fops);
	tap_cdev->owner = module;
	err = cdev_add(tap_cdev, *tap_major, TAP_NUM_DEVS);
	if (err)
		goto out2;

	err = tap_list_add(*tap_major, device_name);
	if (err)
		goto out3;

	return 0;

out3:
	cdev_del(tap_cdev);
out2:
	unregister_chrdev_region(*tap_major, TAP_NUM_DEVS);
out1:
	return err;
}
EXPORT_SYMBOL_GPL(tap_create_cdev);

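/*
 * A hedged sketch of how a tap frontend wires this up at module init
 * (this mirrors what the macvtap driver does):
 *
 *	static struct cdev macvtap_cdev;
 *	static dev_t macvtap_major;
 *
 *	err = tap_create_cdev(&macvtap_cdev, &macvtap_major, "macvtap",
 *			      THIS_MODULE);
 *
 * with the matching teardown at module exit being
 *
 *	tap_destroy_cdev(macvtap_major, &macvtap_cdev);
 */
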
void tap_destroy_cdev(dev_t major, struct cdev *tap_cdev)
{
	struct major_info *tap_major, *tmp;

	cdev_del(tap_cdev);
	unregister_chrdev_region(major, TAP_NUM_DEVS);
	list_for_each_entry_safe(tap_major, tmp, &major_list, next) {
		if (tap_major->major == MAJOR(major)) {
			idr_destroy(&tap_major->minor_idr);
			list_del_rcu(&tap_major->next);
			kfree_rcu(tap_major, rcu);
		}
	}
}
EXPORT_SYMBOL_GPL(tap_destroy_cdev);

MODULE_AUTHOR("Arnd Bergmann <arnd@arndb.de>");
MODULE_AUTHOR("Sainath Grandhi <sainath.grandhi@intel.com>");
MODULE_LICENSE("GPL");