xref: /linux/drivers/net/macvtap.c (revision 2dbf708448c836754d25fe6108c5bfe1f5697c95)
1 #include <linux/etherdevice.h>
2 #include <linux/if_macvlan.h>
3 #include <linux/interrupt.h>
4 #include <linux/nsproxy.h>
5 #include <linux/compat.h>
6 #include <linux/if_tun.h>
7 #include <linux/module.h>
8 #include <linux/skbuff.h>
9 #include <linux/cache.h>
10 #include <linux/sched.h>
11 #include <linux/types.h>
12 #include <linux/slab.h>
13 #include <linux/init.h>
14 #include <linux/wait.h>
15 #include <linux/cdev.h>
16 #include <linux/idr.h>
17 #include <linux/fs.h>
18 
19 #include <net/net_namespace.h>
20 #include <net/rtnetlink.h>
21 #include <net/sock.h>
22 #include <linux/virtio_net.h>
23 
24 /*
25  * A macvtap queue is the central object of this driver, it connects
26  * an open character device to a macvlan interface. There can be
27  * multiple queues on one interface, which map back to queues
28  * implemented in hardware on the underlying device.
29  *
30  * macvtap_proto is used to allocate queues through the sock allocation
31  * mechanism.
32  *
33  * TODO: multiqueue support is currently not implemented, even though
34  * macvtap is basically prepared for that. We will need to add this
35  * here as well as in virtio-net and qemu to get line rate on 10gbit
36  * adapters from a guest.
37  */
38 struct macvtap_queue {
39 	struct sock sk;
40 	struct socket sock;
41 	struct socket_wq wq;
42 	int vnet_hdr_sz;
43 	struct macvlan_dev __rcu *vlan;
44 	struct file *file;
45 	unsigned int flags;
46 };
47 
48 static struct proto macvtap_proto = {
49 	.name = "macvtap",
50 	.owner = THIS_MODULE,
51 	.obj_size = sizeof (struct macvtap_queue),
52 };
53 
54 /*
55  * Variables for dealing with macvtaps device numbers.
56  */
57 static dev_t macvtap_major;
58 #define MACVTAP_NUM_DEVS (1U << MINORBITS)
59 static DEFINE_MUTEX(minor_lock);
60 static DEFINE_IDR(minor_idr);
61 
62 #define GOODCOPY_LEN 128
63 static struct class *macvtap_class;
64 static struct cdev macvtap_cdev;
65 
66 static const struct proto_ops macvtap_socket_ops;
67 
68 /*
69  * RCU usage:
70  * The macvtap_queue and the macvlan_dev are loosely coupled, the
71  * pointers from one to the other can only be read while rcu_read_lock
72  * or macvtap_lock is held.
73  *
74  * Both the file and the macvlan_dev hold a reference on the macvtap_queue
75  * through sock_hold(&q->sk). When the macvlan_dev goes away first,
76  * q->vlan becomes inaccessible. When the files gets closed,
77  * macvtap_get_queue() fails.
78  *
79  * There may still be references to the struct sock inside of the
80  * queue from outbound SKBs, but these never reference back to the
81  * file or the dev. The data structure is freed through __sk_free
82  * when both our references and any pending SKBs are gone.
83  */
84 static DEFINE_SPINLOCK(macvtap_lock);
85 
86 /*
87  * get_slot: return a [unused/occupied] slot in vlan->taps[]:
88  *	- if 'q' is NULL, return the first empty slot;
89  *	- otherwise, return the slot this pointer occupies.
90  */
91 static int get_slot(struct macvlan_dev *vlan, struct macvtap_queue *q)
92 {
93 	int i;
94 
95 	for (i = 0; i < MAX_MACVTAP_QUEUES; i++) {
96 		if (rcu_dereference(vlan->taps[i]) == q)
97 			return i;
98 	}
99 
100 	/* Should never happen */
101 	BUG_ON(1);
102 }
103 
104 static int macvtap_set_queue(struct net_device *dev, struct file *file,
105 				struct macvtap_queue *q)
106 {
107 	struct macvlan_dev *vlan = netdev_priv(dev);
108 	int index;
109 	int err = -EBUSY;
110 
111 	spin_lock(&macvtap_lock);
112 	if (vlan->numvtaps == MAX_MACVTAP_QUEUES)
113 		goto out;
114 
115 	err = 0;
116 	index = get_slot(vlan, NULL);
117 	rcu_assign_pointer(q->vlan, vlan);
118 	rcu_assign_pointer(vlan->taps[index], q);
119 	sock_hold(&q->sk);
120 
121 	q->file = file;
122 	file->private_data = q;
123 
124 	vlan->numvtaps++;
125 
126 out:
127 	spin_unlock(&macvtap_lock);
128 	return err;
129 }
130 
131 /*
132  * The file owning the queue got closed, give up both
133  * the reference that the files holds as well as the
134  * one from the macvlan_dev if that still exists.
135  *
136  * Using the spinlock makes sure that we don't get
137  * to the queue again after destroying it.
138  */
139 static void macvtap_put_queue(struct macvtap_queue *q)
140 {
141 	struct macvlan_dev *vlan;
142 
143 	spin_lock(&macvtap_lock);
144 	vlan = rcu_dereference_protected(q->vlan,
145 					 lockdep_is_held(&macvtap_lock));
146 	if (vlan) {
147 		int index = get_slot(vlan, q);
148 
149 		RCU_INIT_POINTER(vlan->taps[index], NULL);
150 		RCU_INIT_POINTER(q->vlan, NULL);
151 		sock_put(&q->sk);
152 		--vlan->numvtaps;
153 	}
154 
155 	spin_unlock(&macvtap_lock);
156 
157 	synchronize_rcu();
158 	sock_put(&q->sk);
159 }
160 
161 /*
162  * Select a queue based on the rxq of the device on which this packet
163  * arrived. If the incoming device is not mq, calculate a flow hash
164  * to select a queue. If all fails, find the first available queue.
165  * Cache vlan->numvtaps since it can become zero during the execution
166  * of this function.
167  */
168 static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
169 					       struct sk_buff *skb)
170 {
171 	struct macvlan_dev *vlan = netdev_priv(dev);
172 	struct macvtap_queue *tap = NULL;
173 	int numvtaps = vlan->numvtaps;
174 	__u32 rxq;
175 
176 	if (!numvtaps)
177 		goto out;
178 
179 	/* Check if we can use flow to select a queue */
180 	rxq = skb_get_rxhash(skb);
181 	if (rxq) {
182 		tap = rcu_dereference(vlan->taps[rxq % numvtaps]);
183 		if (tap)
184 			goto out;
185 	}
186 
187 	if (likely(skb_rx_queue_recorded(skb))) {
188 		rxq = skb_get_rx_queue(skb);
189 
190 		while (unlikely(rxq >= numvtaps))
191 			rxq -= numvtaps;
192 
193 		tap = rcu_dereference(vlan->taps[rxq]);
194 		if (tap)
195 			goto out;
196 	}
197 
198 	/* Everything failed - find first available queue */
199 	for (rxq = 0; rxq < MAX_MACVTAP_QUEUES; rxq++) {
200 		tap = rcu_dereference(vlan->taps[rxq]);
201 		if (tap)
202 			break;
203 	}
204 
205 out:
206 	return tap;
207 }
208 
209 /*
210  * The net_device is going away, give up the reference
211  * that it holds on all queues and safely set the pointer
212  * from the queues to NULL.
213  */
214 static void macvtap_del_queues(struct net_device *dev)
215 {
216 	struct macvlan_dev *vlan = netdev_priv(dev);
217 	struct macvtap_queue *q, *qlist[MAX_MACVTAP_QUEUES];
218 	int i, j = 0;
219 
220 	/* macvtap_put_queue can free some slots, so go through all slots */
221 	spin_lock(&macvtap_lock);
222 	for (i = 0; i < MAX_MACVTAP_QUEUES && vlan->numvtaps; i++) {
223 		q = rcu_dereference_protected(vlan->taps[i],
224 					      lockdep_is_held(&macvtap_lock));
225 		if (q) {
226 			qlist[j++] = q;
227 			RCU_INIT_POINTER(vlan->taps[i], NULL);
228 			RCU_INIT_POINTER(q->vlan, NULL);
229 			vlan->numvtaps--;
230 		}
231 	}
232 	BUG_ON(vlan->numvtaps != 0);
233 	/* guarantee that any future macvtap_set_queue will fail */
234 	vlan->numvtaps = MAX_MACVTAP_QUEUES;
235 	spin_unlock(&macvtap_lock);
236 
237 	synchronize_rcu();
238 
239 	for (--j; j >= 0; j--)
240 		sock_put(&qlist[j]->sk);
241 }
242 
243 /*
244  * Forward happens for data that gets sent from one macvlan
245  * endpoint to another one in bridge mode. We just take
246  * the skb and put it into the receive queue.
247  */
248 static int macvtap_forward(struct net_device *dev, struct sk_buff *skb)
249 {
250 	struct macvtap_queue *q = macvtap_get_queue(dev, skb);
251 	if (!q)
252 		goto drop;
253 
254 	if (skb_queue_len(&q->sk.sk_receive_queue) >= dev->tx_queue_len)
255 		goto drop;
256 
257 	skb_queue_tail(&q->sk.sk_receive_queue, skb);
258 	wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND);
259 	return NET_RX_SUCCESS;
260 
261 drop:
262 	kfree_skb(skb);
263 	return NET_RX_DROP;
264 }
265 
266 /*
267  * Receive is for data from the external interface (lowerdev),
268  * in case of macvtap, we can treat that the same way as
269  * forward, which macvlan cannot.
270  */
271 static int macvtap_receive(struct sk_buff *skb)
272 {
273 	skb_push(skb, ETH_HLEN);
274 	return macvtap_forward(skb->dev, skb);
275 }
276 
277 static int macvtap_get_minor(struct macvlan_dev *vlan)
278 {
279 	int retval = -ENOMEM;
280 	int id;
281 
282 	mutex_lock(&minor_lock);
283 	if (idr_pre_get(&minor_idr, GFP_KERNEL) == 0)
284 		goto exit;
285 
286 	retval = idr_get_new_above(&minor_idr, vlan, 1, &id);
287 	if (retval < 0) {
288 		if (retval == -EAGAIN)
289 			retval = -ENOMEM;
290 		goto exit;
291 	}
292 	if (id < MACVTAP_NUM_DEVS) {
293 		vlan->minor = id;
294 	} else {
295 		printk(KERN_ERR "too many macvtap devices\n");
296 		retval = -EINVAL;
297 		idr_remove(&minor_idr, id);
298 	}
299 exit:
300 	mutex_unlock(&minor_lock);
301 	return retval;
302 }
303 
304 static void macvtap_free_minor(struct macvlan_dev *vlan)
305 {
306 	mutex_lock(&minor_lock);
307 	if (vlan->minor) {
308 		idr_remove(&minor_idr, vlan->minor);
309 		vlan->minor = 0;
310 	}
311 	mutex_unlock(&minor_lock);
312 }
313 
314 static struct net_device *dev_get_by_macvtap_minor(int minor)
315 {
316 	struct net_device *dev = NULL;
317 	struct macvlan_dev *vlan;
318 
319 	mutex_lock(&minor_lock);
320 	vlan = idr_find(&minor_idr, minor);
321 	if (vlan) {
322 		dev = vlan->dev;
323 		dev_hold(dev);
324 	}
325 	mutex_unlock(&minor_lock);
326 	return dev;
327 }
328 
329 static int macvtap_newlink(struct net *src_net,
330 			   struct net_device *dev,
331 			   struct nlattr *tb[],
332 			   struct nlattr *data[])
333 {
334 	/* Don't put anything that may fail after macvlan_common_newlink
335 	 * because we can't undo what it does.
336 	 */
337 	return macvlan_common_newlink(src_net, dev, tb, data,
338 				      macvtap_receive, macvtap_forward);
339 }
340 
341 static void macvtap_dellink(struct net_device *dev,
342 			    struct list_head *head)
343 {
344 	macvtap_del_queues(dev);
345 	macvlan_dellink(dev, head);
346 }
347 
348 static void macvtap_setup(struct net_device *dev)
349 {
350 	macvlan_common_setup(dev);
351 	dev->tx_queue_len = TUN_READQ_SIZE;
352 }
353 
354 static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
355 	.kind		= "macvtap",
356 	.setup		= macvtap_setup,
357 	.newlink	= macvtap_newlink,
358 	.dellink	= macvtap_dellink,
359 };
360 
361 
362 static void macvtap_sock_write_space(struct sock *sk)
363 {
364 	wait_queue_head_t *wqueue;
365 
366 	if (!sock_writeable(sk) ||
367 	    !test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
368 		return;
369 
370 	wqueue = sk_sleep(sk);
371 	if (wqueue && waitqueue_active(wqueue))
372 		wake_up_interruptible_poll(wqueue, POLLOUT | POLLWRNORM | POLLWRBAND);
373 }
374 
375 static void macvtap_sock_destruct(struct sock *sk)
376 {
377 	skb_queue_purge(&sk->sk_receive_queue);
378 }
379 
380 static int macvtap_open(struct inode *inode, struct file *file)
381 {
382 	struct net *net = current->nsproxy->net_ns;
383 	struct net_device *dev = dev_get_by_macvtap_minor(iminor(inode));
384 	struct macvtap_queue *q;
385 	int err;
386 
387 	err = -ENODEV;
388 	if (!dev)
389 		goto out;
390 
391 	err = -ENOMEM;
392 	q = (struct macvtap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
393 					     &macvtap_proto);
394 	if (!q)
395 		goto out;
396 
397 	q->sock.wq = &q->wq;
398 	init_waitqueue_head(&q->wq.wait);
399 	q->sock.type = SOCK_RAW;
400 	q->sock.state = SS_CONNECTED;
401 	q->sock.file = file;
402 	q->sock.ops = &macvtap_socket_ops;
403 	sock_init_data(&q->sock, &q->sk);
404 	q->sk.sk_write_space = macvtap_sock_write_space;
405 	q->sk.sk_destruct = macvtap_sock_destruct;
406 	q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
407 	q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
408 
409 	/*
410 	 * so far only KVM virtio_net uses macvtap, enable zero copy between
411 	 * guest kernel and host kernel when lower device supports zerocopy
412 	 *
413 	 * The macvlan supports zerocopy iff the lower device supports zero
414 	 * copy so we don't have to look at the lower device directly.
415 	 */
416 	if ((dev->features & NETIF_F_HIGHDMA) && (dev->features & NETIF_F_SG))
417 		sock_set_flag(&q->sk, SOCK_ZEROCOPY);
418 
419 	err = macvtap_set_queue(dev, file, q);
420 	if (err)
421 		sock_put(&q->sk);
422 
423 out:
424 	if (dev)
425 		dev_put(dev);
426 
427 	return err;
428 }
429 
430 static int macvtap_release(struct inode *inode, struct file *file)
431 {
432 	struct macvtap_queue *q = file->private_data;
433 	macvtap_put_queue(q);
434 	return 0;
435 }
436 
437 static unsigned int macvtap_poll(struct file *file, poll_table * wait)
438 {
439 	struct macvtap_queue *q = file->private_data;
440 	unsigned int mask = POLLERR;
441 
442 	if (!q)
443 		goto out;
444 
445 	mask = 0;
446 	poll_wait(file, &q->wq.wait, wait);
447 
448 	if (!skb_queue_empty(&q->sk.sk_receive_queue))
449 		mask |= POLLIN | POLLRDNORM;
450 
451 	if (sock_writeable(&q->sk) ||
452 	    (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &q->sock.flags) &&
453 	     sock_writeable(&q->sk)))
454 		mask |= POLLOUT | POLLWRNORM;
455 
456 out:
457 	return mask;
458 }
459 
460 static inline struct sk_buff *macvtap_alloc_skb(struct sock *sk, size_t prepad,
461 						size_t len, size_t linear,
462 						int noblock, int *err)
463 {
464 	struct sk_buff *skb;
465 
466 	/* Under a page?  Don't bother with paged skb. */
467 	if (prepad + len < PAGE_SIZE || !linear)
468 		linear = len;
469 
470 	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
471 				   err);
472 	if (!skb)
473 		return NULL;
474 
475 	skb_reserve(skb, prepad);
476 	skb_put(skb, linear);
477 	skb->data_len = len - linear;
478 	skb->len += len - linear;
479 
480 	return skb;
481 }
482 
483 /* set skb frags from iovec, this can move to core network code for reuse */
484 static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
485 				  int offset, size_t count)
486 {
487 	int len = iov_length(from, count) - offset;
488 	int copy = skb_headlen(skb);
489 	int size, offset1 = 0;
490 	int i = 0;
491 
492 	/* Skip over from offset */
493 	while (count && (offset >= from->iov_len)) {
494 		offset -= from->iov_len;
495 		++from;
496 		--count;
497 	}
498 
499 	/* copy up to skb headlen */
500 	while (count && (copy > 0)) {
501 		size = min_t(unsigned int, copy, from->iov_len - offset);
502 		if (copy_from_user(skb->data + offset1, from->iov_base + offset,
503 				   size))
504 			return -EFAULT;
505 		if (copy > size) {
506 			++from;
507 			--count;
508 		}
509 		copy -= size;
510 		offset1 += size;
511 		offset = 0;
512 	}
513 
514 	if (len == offset1)
515 		return 0;
516 
517 	while (count--) {
518 		struct page *page[MAX_SKB_FRAGS];
519 		int num_pages;
520 		unsigned long base;
521 
522 		len = from->iov_len - offset1;
523 		if (!len) {
524 			offset1 = 0;
525 			++from;
526 			continue;
527 		}
528 		base = (unsigned long)from->iov_base + offset1;
529 		size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
530 		num_pages = get_user_pages_fast(base, size, 0, &page[i]);
531 		if ((num_pages != size) ||
532 		    (num_pages > MAX_SKB_FRAGS - skb_shinfo(skb)->nr_frags))
533 			/* put_page is in skb free */
534 			return -EFAULT;
535 		skb->data_len += len;
536 		skb->len += len;
537 		skb->truesize += len;
538 		atomic_add(len, &skb->sk->sk_wmem_alloc);
539 		while (len) {
540 			int off = base & ~PAGE_MASK;
541 			int size = min_t(int, len, PAGE_SIZE - off);
542 			__skb_fill_page_desc(skb, i, page[i], off, size);
543 			skb_shinfo(skb)->nr_frags++;
544 			/* increase sk_wmem_alloc */
545 			base += size;
546 			len -= size;
547 			i++;
548 		}
549 		offset1 = 0;
550 		++from;
551 	}
552 	return 0;
553 }
554 
555 /*
556  * macvtap_skb_from_vnet_hdr and macvtap_skb_to_vnet_hdr should
557  * be shared with the tun/tap driver.
558  */
559 static int macvtap_skb_from_vnet_hdr(struct sk_buff *skb,
560 				     struct virtio_net_hdr *vnet_hdr)
561 {
562 	unsigned short gso_type = 0;
563 	if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
564 		switch (vnet_hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
565 		case VIRTIO_NET_HDR_GSO_TCPV4:
566 			gso_type = SKB_GSO_TCPV4;
567 			break;
568 		case VIRTIO_NET_HDR_GSO_TCPV6:
569 			gso_type = SKB_GSO_TCPV6;
570 			break;
571 		case VIRTIO_NET_HDR_GSO_UDP:
572 			gso_type = SKB_GSO_UDP;
573 			break;
574 		default:
575 			return -EINVAL;
576 		}
577 
578 		if (vnet_hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN)
579 			gso_type |= SKB_GSO_TCP_ECN;
580 
581 		if (vnet_hdr->gso_size == 0)
582 			return -EINVAL;
583 	}
584 
585 	if (vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
586 		if (!skb_partial_csum_set(skb, vnet_hdr->csum_start,
587 					  vnet_hdr->csum_offset))
588 			return -EINVAL;
589 	}
590 
591 	if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
592 		skb_shinfo(skb)->gso_size = vnet_hdr->gso_size;
593 		skb_shinfo(skb)->gso_type = gso_type;
594 
595 		/* Header must be checked, and gso_segs computed. */
596 		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
597 		skb_shinfo(skb)->gso_segs = 0;
598 	}
599 	return 0;
600 }
601 
602 static int macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
603 				   struct virtio_net_hdr *vnet_hdr)
604 {
605 	memset(vnet_hdr, 0, sizeof(*vnet_hdr));
606 
607 	if (skb_is_gso(skb)) {
608 		struct skb_shared_info *sinfo = skb_shinfo(skb);
609 
610 		/* This is a hint as to how much should be linear. */
611 		vnet_hdr->hdr_len = skb_headlen(skb);
612 		vnet_hdr->gso_size = sinfo->gso_size;
613 		if (sinfo->gso_type & SKB_GSO_TCPV4)
614 			vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
615 		else if (sinfo->gso_type & SKB_GSO_TCPV6)
616 			vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
617 		else if (sinfo->gso_type & SKB_GSO_UDP)
618 			vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
619 		else
620 			BUG();
621 		if (sinfo->gso_type & SKB_GSO_TCP_ECN)
622 			vnet_hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
623 	} else
624 		vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;
625 
626 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
627 		vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
628 		vnet_hdr->csum_start = skb_checksum_start_offset(skb);
629 		vnet_hdr->csum_offset = skb->csum_offset;
630 	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
631 		vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
632 	} /* else everything is zero */
633 
634 	return 0;
635 }
636 
637 
638 /* Get packet from user space buffer */
639 static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
640 				const struct iovec *iv, unsigned long total_len,
641 				size_t count, int noblock)
642 {
643 	struct sk_buff *skb;
644 	struct macvlan_dev *vlan;
645 	unsigned long len = total_len;
646 	int err;
647 	struct virtio_net_hdr vnet_hdr = { 0 };
648 	int vnet_hdr_len = 0;
649 	int copylen;
650 	bool zerocopy = false;
651 
652 	if (q->flags & IFF_VNET_HDR) {
653 		vnet_hdr_len = q->vnet_hdr_sz;
654 
655 		err = -EINVAL;
656 		if (len < vnet_hdr_len)
657 			goto err;
658 		len -= vnet_hdr_len;
659 
660 		err = memcpy_fromiovecend((void *)&vnet_hdr, iv, 0,
661 					   sizeof(vnet_hdr));
662 		if (err < 0)
663 			goto err;
664 		if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
665 		     vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 >
666 							vnet_hdr.hdr_len)
667 			vnet_hdr.hdr_len = vnet_hdr.csum_start +
668 						vnet_hdr.csum_offset + 2;
669 		err = -EINVAL;
670 		if (vnet_hdr.hdr_len > len)
671 			goto err;
672 	}
673 
674 	err = -EINVAL;
675 	if (unlikely(len < ETH_HLEN))
676 		goto err;
677 
678 	if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY))
679 		zerocopy = true;
680 
681 	if (zerocopy) {
682 		/* There are 256 bytes to be copied in skb, so there is enough
683 		 * room for skb expand head in case it is used.
684 		 * The rest buffer is mapped from userspace.
685 		 */
686 		copylen = vnet_hdr.hdr_len;
687 		if (!copylen)
688 			copylen = GOODCOPY_LEN;
689 	} else
690 		copylen = len;
691 
692 	skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen,
693 				vnet_hdr.hdr_len, noblock, &err);
694 	if (!skb)
695 		goto err;
696 
697 	if (zerocopy) {
698 		err = zerocopy_sg_from_iovec(skb, iv, vnet_hdr_len, count);
699 		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
700 	} else
701 		err = skb_copy_datagram_from_iovec(skb, 0, iv, vnet_hdr_len,
702 						   len);
703 	if (err)
704 		goto err_kfree;
705 
706 	skb_set_network_header(skb, ETH_HLEN);
707 	skb_reset_mac_header(skb);
708 	skb->protocol = eth_hdr(skb)->h_proto;
709 
710 	if (vnet_hdr_len) {
711 		err = macvtap_skb_from_vnet_hdr(skb, &vnet_hdr);
712 		if (err)
713 			goto err_kfree;
714 	}
715 
716 	rcu_read_lock_bh();
717 	vlan = rcu_dereference_bh(q->vlan);
718 	/* copy skb_ubuf_info for callback when skb has no error */
719 	if (zerocopy)
720 		skb_shinfo(skb)->destructor_arg = m->msg_control;
721 	if (vlan)
722 		macvlan_start_xmit(skb, vlan->dev);
723 	else
724 		kfree_skb(skb);
725 	rcu_read_unlock_bh();
726 
727 	return total_len;
728 
729 err_kfree:
730 	kfree_skb(skb);
731 
732 err:
733 	rcu_read_lock_bh();
734 	vlan = rcu_dereference_bh(q->vlan);
735 	if (vlan)
736 		vlan->dev->stats.tx_dropped++;
737 	rcu_read_unlock_bh();
738 
739 	return err;
740 }
741 
742 static ssize_t macvtap_aio_write(struct kiocb *iocb, const struct iovec *iv,
743 				 unsigned long count, loff_t pos)
744 {
745 	struct file *file = iocb->ki_filp;
746 	ssize_t result = -ENOLINK;
747 	struct macvtap_queue *q = file->private_data;
748 
749 	result = macvtap_get_user(q, NULL, iv, iov_length(iv, count), count,
750 				  file->f_flags & O_NONBLOCK);
751 	return result;
752 }
753 
754 /* Put packet to the user space buffer */
755 static ssize_t macvtap_put_user(struct macvtap_queue *q,
756 				const struct sk_buff *skb,
757 				const struct iovec *iv, int len)
758 {
759 	struct macvlan_dev *vlan;
760 	int ret;
761 	int vnet_hdr_len = 0;
762 
763 	if (q->flags & IFF_VNET_HDR) {
764 		struct virtio_net_hdr vnet_hdr;
765 		vnet_hdr_len = q->vnet_hdr_sz;
766 		if ((len -= vnet_hdr_len) < 0)
767 			return -EINVAL;
768 
769 		ret = macvtap_skb_to_vnet_hdr(skb, &vnet_hdr);
770 		if (ret)
771 			return ret;
772 
773 		if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, sizeof(vnet_hdr)))
774 			return -EFAULT;
775 	}
776 
777 	len = min_t(int, skb->len, len);
778 
779 	ret = skb_copy_datagram_const_iovec(skb, 0, iv, vnet_hdr_len, len);
780 
781 	rcu_read_lock_bh();
782 	vlan = rcu_dereference_bh(q->vlan);
783 	if (vlan)
784 		macvlan_count_rx(vlan, len, ret == 0, 0);
785 	rcu_read_unlock_bh();
786 
787 	return ret ? ret : (len + vnet_hdr_len);
788 }
789 
790 static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb,
791 			       const struct iovec *iv, unsigned long len,
792 			       int noblock)
793 {
794 	DECLARE_WAITQUEUE(wait, current);
795 	struct sk_buff *skb;
796 	ssize_t ret = 0;
797 
798 	add_wait_queue(sk_sleep(&q->sk), &wait);
799 	while (len) {
800 		current->state = TASK_INTERRUPTIBLE;
801 
802 		/* Read frames from the queue */
803 		skb = skb_dequeue(&q->sk.sk_receive_queue);
804 		if (!skb) {
805 			if (noblock) {
806 				ret = -EAGAIN;
807 				break;
808 			}
809 			if (signal_pending(current)) {
810 				ret = -ERESTARTSYS;
811 				break;
812 			}
813 			/* Nothing to read, let's sleep */
814 			schedule();
815 			continue;
816 		}
817 		ret = macvtap_put_user(q, skb, iv, len);
818 		kfree_skb(skb);
819 		break;
820 	}
821 
822 	current->state = TASK_RUNNING;
823 	remove_wait_queue(sk_sleep(&q->sk), &wait);
824 	return ret;
825 }
826 
827 static ssize_t macvtap_aio_read(struct kiocb *iocb, const struct iovec *iv,
828 				unsigned long count, loff_t pos)
829 {
830 	struct file *file = iocb->ki_filp;
831 	struct macvtap_queue *q = file->private_data;
832 	ssize_t len, ret = 0;
833 
834 	len = iov_length(iv, count);
835 	if (len < 0) {
836 		ret = -EINVAL;
837 		goto out;
838 	}
839 
840 	ret = macvtap_do_read(q, iocb, iv, len, file->f_flags & O_NONBLOCK);
841 	ret = min_t(ssize_t, ret, len); /* XXX copied from tun.c. Why? */
842 out:
843 	return ret;
844 }
845 
846 /*
847  * provide compatibility with generic tun/tap interface
848  */
849 static long macvtap_ioctl(struct file *file, unsigned int cmd,
850 			  unsigned long arg)
851 {
852 	struct macvtap_queue *q = file->private_data;
853 	struct macvlan_dev *vlan;
854 	void __user *argp = (void __user *)arg;
855 	struct ifreq __user *ifr = argp;
856 	unsigned int __user *up = argp;
857 	unsigned int u;
858 	int __user *sp = argp;
859 	int s;
860 	int ret;
861 
862 	switch (cmd) {
863 	case TUNSETIFF:
864 		/* ignore the name, just look at flags */
865 		if (get_user(u, &ifr->ifr_flags))
866 			return -EFAULT;
867 
868 		ret = 0;
869 		if ((u & ~IFF_VNET_HDR) != (IFF_NO_PI | IFF_TAP))
870 			ret = -EINVAL;
871 		else
872 			q->flags = u;
873 
874 		return ret;
875 
876 	case TUNGETIFF:
877 		rcu_read_lock_bh();
878 		vlan = rcu_dereference_bh(q->vlan);
879 		if (vlan)
880 			dev_hold(vlan->dev);
881 		rcu_read_unlock_bh();
882 
883 		if (!vlan)
884 			return -ENOLINK;
885 
886 		ret = 0;
887 		if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
888 		    put_user(q->flags, &ifr->ifr_flags))
889 			ret = -EFAULT;
890 		dev_put(vlan->dev);
891 		return ret;
892 
893 	case TUNGETFEATURES:
894 		if (put_user(IFF_TAP | IFF_NO_PI | IFF_VNET_HDR, up))
895 			return -EFAULT;
896 		return 0;
897 
898 	case TUNSETSNDBUF:
899 		if (get_user(u, up))
900 			return -EFAULT;
901 
902 		q->sk.sk_sndbuf = u;
903 		return 0;
904 
905 	case TUNGETVNETHDRSZ:
906 		s = q->vnet_hdr_sz;
907 		if (put_user(s, sp))
908 			return -EFAULT;
909 		return 0;
910 
911 	case TUNSETVNETHDRSZ:
912 		if (get_user(s, sp))
913 			return -EFAULT;
914 		if (s < (int)sizeof(struct virtio_net_hdr))
915 			return -EINVAL;
916 
917 		q->vnet_hdr_sz = s;
918 		return 0;
919 
920 	case TUNSETOFFLOAD:
921 		/* let the user check for future flags */
922 		if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
923 			    TUN_F_TSO_ECN | TUN_F_UFO))
924 			return -EINVAL;
925 
926 		/* TODO: only accept frames with the features that
927 			 got enabled for forwarded frames */
928 		if (!(q->flags & IFF_VNET_HDR))
929 			return  -EINVAL;
930 		return 0;
931 
932 	default:
933 		return -EINVAL;
934 	}
935 }
936 
937 #ifdef CONFIG_COMPAT
938 static long macvtap_compat_ioctl(struct file *file, unsigned int cmd,
939 				 unsigned long arg)
940 {
941 	return macvtap_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
942 }
943 #endif
944 
945 static const struct file_operations macvtap_fops = {
946 	.owner		= THIS_MODULE,
947 	.open		= macvtap_open,
948 	.release	= macvtap_release,
949 	.aio_read	= macvtap_aio_read,
950 	.aio_write	= macvtap_aio_write,
951 	.poll		= macvtap_poll,
952 	.llseek		= no_llseek,
953 	.unlocked_ioctl	= macvtap_ioctl,
954 #ifdef CONFIG_COMPAT
955 	.compat_ioctl	= macvtap_compat_ioctl,
956 #endif
957 };
958 
959 static int macvtap_sendmsg(struct kiocb *iocb, struct socket *sock,
960 			   struct msghdr *m, size_t total_len)
961 {
962 	struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);
963 	return macvtap_get_user(q, m, m->msg_iov, total_len, m->msg_iovlen,
964 			    m->msg_flags & MSG_DONTWAIT);
965 }
966 
967 static int macvtap_recvmsg(struct kiocb *iocb, struct socket *sock,
968 			   struct msghdr *m, size_t total_len,
969 			   int flags)
970 {
971 	struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);
972 	int ret;
973 	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
974 		return -EINVAL;
975 	ret = macvtap_do_read(q, iocb, m->msg_iov, total_len,
976 			  flags & MSG_DONTWAIT);
977 	if (ret > total_len) {
978 		m->msg_flags |= MSG_TRUNC;
979 		ret = flags & MSG_TRUNC ? ret : total_len;
980 	}
981 	return ret;
982 }
983 
984 /* Ops structure to mimic raw sockets with tun */
985 static const struct proto_ops macvtap_socket_ops = {
986 	.sendmsg = macvtap_sendmsg,
987 	.recvmsg = macvtap_recvmsg,
988 };
989 
990 /* Get an underlying socket object from tun file.  Returns error unless file is
991  * attached to a device.  The returned object works like a packet socket, it
992  * can be used for sock_sendmsg/sock_recvmsg.  The caller is responsible for
993  * holding a reference to the file for as long as the socket is in use. */
994 struct socket *macvtap_get_socket(struct file *file)
995 {
996 	struct macvtap_queue *q;
997 	if (file->f_op != &macvtap_fops)
998 		return ERR_PTR(-EINVAL);
999 	q = file->private_data;
1000 	if (!q)
1001 		return ERR_PTR(-EBADFD);
1002 	return &q->sock;
1003 }
1004 EXPORT_SYMBOL_GPL(macvtap_get_socket);
1005 
1006 static int macvtap_device_event(struct notifier_block *unused,
1007 				unsigned long event, void *ptr)
1008 {
1009 	struct net_device *dev = ptr;
1010 	struct macvlan_dev *vlan;
1011 	struct device *classdev;
1012 	dev_t devt;
1013 	int err;
1014 
1015 	if (dev->rtnl_link_ops != &macvtap_link_ops)
1016 		return NOTIFY_DONE;
1017 
1018 	vlan = netdev_priv(dev);
1019 
1020 	switch (event) {
1021 	case NETDEV_REGISTER:
1022 		/* Create the device node here after the network device has
1023 		 * been registered but before register_netdevice has
1024 		 * finished running.
1025 		 */
1026 		err = macvtap_get_minor(vlan);
1027 		if (err)
1028 			return notifier_from_errno(err);
1029 
1030 		devt = MKDEV(MAJOR(macvtap_major), vlan->minor);
1031 		classdev = device_create(macvtap_class, &dev->dev, devt,
1032 					 dev, "tap%d", dev->ifindex);
1033 		if (IS_ERR(classdev)) {
1034 			macvtap_free_minor(vlan);
1035 			return notifier_from_errno(PTR_ERR(classdev));
1036 		}
1037 		break;
1038 	case NETDEV_UNREGISTER:
1039 		devt = MKDEV(MAJOR(macvtap_major), vlan->minor);
1040 		device_destroy(macvtap_class, devt);
1041 		macvtap_free_minor(vlan);
1042 		break;
1043 	}
1044 
1045 	return NOTIFY_DONE;
1046 }
1047 
1048 static struct notifier_block macvtap_notifier_block __read_mostly = {
1049 	.notifier_call	= macvtap_device_event,
1050 };
1051 
1052 static int macvtap_init(void)
1053 {
1054 	int err;
1055 
1056 	err = alloc_chrdev_region(&macvtap_major, 0,
1057 				MACVTAP_NUM_DEVS, "macvtap");
1058 	if (err)
1059 		goto out1;
1060 
1061 	cdev_init(&macvtap_cdev, &macvtap_fops);
1062 	err = cdev_add(&macvtap_cdev, macvtap_major, MACVTAP_NUM_DEVS);
1063 	if (err)
1064 		goto out2;
1065 
1066 	macvtap_class = class_create(THIS_MODULE, "macvtap");
1067 	if (IS_ERR(macvtap_class)) {
1068 		err = PTR_ERR(macvtap_class);
1069 		goto out3;
1070 	}
1071 
1072 	err = register_netdevice_notifier(&macvtap_notifier_block);
1073 	if (err)
1074 		goto out4;
1075 
1076 	err = macvlan_link_register(&macvtap_link_ops);
1077 	if (err)
1078 		goto out5;
1079 
1080 	return 0;
1081 
1082 out5:
1083 	unregister_netdevice_notifier(&macvtap_notifier_block);
1084 out4:
1085 	class_unregister(macvtap_class);
1086 out3:
1087 	cdev_del(&macvtap_cdev);
1088 out2:
1089 	unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
1090 out1:
1091 	return err;
1092 }
1093 module_init(macvtap_init);
1094 
1095 static void macvtap_exit(void)
1096 {
1097 	rtnl_link_unregister(&macvtap_link_ops);
1098 	unregister_netdevice_notifier(&macvtap_notifier_block);
1099 	class_unregister(macvtap_class);
1100 	cdev_del(&macvtap_cdev);
1101 	unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
1102 }
1103 module_exit(macvtap_exit);
1104 
1105 MODULE_ALIAS_RTNL_LINK("macvtap");
1106 MODULE_AUTHOR("Arnd Bergmann <arnd@arndb.de>");
1107 MODULE_LICENSE("GPL");
1108