1 #include <linux/etherdevice.h> 2 #include <linux/if_macvlan.h> 3 #include <linux/if_vlan.h> 4 #include <linux/interrupt.h> 5 #include <linux/nsproxy.h> 6 #include <linux/compat.h> 7 #include <linux/if_tun.h> 8 #include <linux/module.h> 9 #include <linux/skbuff.h> 10 #include <linux/cache.h> 11 #include <linux/sched.h> 12 #include <linux/types.h> 13 #include <linux/slab.h> 14 #include <linux/wait.h> 15 #include <linux/cdev.h> 16 #include <linux/idr.h> 17 #include <linux/fs.h> 18 19 #include <net/ipv6.h> 20 #include <net/net_namespace.h> 21 #include <net/rtnetlink.h> 22 #include <net/sock.h> 23 #include <linux/virtio_net.h> 24 25 /* 26 * A macvtap queue is the central object of this driver, it connects 27 * an open character device to a macvlan interface. There can be 28 * multiple queues on one interface, which map back to queues 29 * implemented in hardware on the underlying device. 30 * 31 * macvtap_proto is used to allocate queues through the sock allocation 32 * mechanism. 33 * 34 */ 35 struct macvtap_queue { 36 struct sock sk; 37 struct socket sock; 38 struct socket_wq wq; 39 int vnet_hdr_sz; 40 struct macvlan_dev __rcu *vlan; 41 struct file *file; 42 unsigned int flags; 43 u16 queue_index; 44 bool enabled; 45 struct list_head next; 46 }; 47 48 #define MACVTAP_FEATURES (IFF_VNET_HDR | IFF_VNET_LE | IFF_MULTI_QUEUE) 49 50 static inline u16 macvtap16_to_cpu(struct macvtap_queue *q, __virtio16 val) 51 { 52 return __virtio16_to_cpu(q->flags & IFF_VNET_LE, val); 53 } 54 55 static inline __virtio16 cpu_to_macvtap16(struct macvtap_queue *q, u16 val) 56 { 57 return __cpu_to_virtio16(q->flags & IFF_VNET_LE, val); 58 } 59 60 static struct proto macvtap_proto = { 61 .name = "macvtap", 62 .owner = THIS_MODULE, 63 .obj_size = sizeof (struct macvtap_queue), 64 }; 65 66 /* 67 * Variables for dealing with macvtaps device numbers. 68 */ 69 static dev_t macvtap_major; 70 #define MACVTAP_NUM_DEVS (1U << MINORBITS) 71 static DEFINE_MUTEX(minor_lock); 72 static DEFINE_IDR(minor_idr); 73 74 #define GOODCOPY_LEN 128 75 static struct class *macvtap_class; 76 static struct cdev macvtap_cdev; 77 78 static const struct proto_ops macvtap_socket_ops; 79 80 #define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \ 81 NETIF_F_TSO6) 82 #define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO) 83 #define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG) 84 85 static struct macvlan_dev *macvtap_get_vlan_rcu(const struct net_device *dev) 86 { 87 return rcu_dereference(dev->rx_handler_data); 88 } 89 90 /* 91 * RCU usage: 92 * The macvtap_queue and the macvlan_dev are loosely coupled, the 93 * pointers from one to the other can only be read while rcu_read_lock 94 * or rtnl is held. 95 * 96 * Both the file and the macvlan_dev hold a reference on the macvtap_queue 97 * through sock_hold(&q->sk). When the macvlan_dev goes away first, 98 * q->vlan becomes inaccessible. When the files gets closed, 99 * macvtap_get_queue() fails. 100 * 101 * There may still be references to the struct sock inside of the 102 * queue from outbound SKBs, but these never reference back to the 103 * file or the dev. The data structure is freed through __sk_free 104 * when both our references and any pending SKBs are gone. 105 */ 106 107 static int macvtap_enable_queue(struct net_device *dev, struct file *file, 108 struct macvtap_queue *q) 109 { 110 struct macvlan_dev *vlan = netdev_priv(dev); 111 int err = -EINVAL; 112 113 ASSERT_RTNL(); 114 115 if (q->enabled) 116 goto out; 117 118 err = 0; 119 rcu_assign_pointer(vlan->taps[vlan->numvtaps], q); 120 q->queue_index = vlan->numvtaps; 121 q->enabled = true; 122 123 vlan->numvtaps++; 124 out: 125 return err; 126 } 127 128 /* Requires RTNL */ 129 static int macvtap_set_queue(struct net_device *dev, struct file *file, 130 struct macvtap_queue *q) 131 { 132 struct macvlan_dev *vlan = netdev_priv(dev); 133 134 if (vlan->numqueues == MAX_MACVTAP_QUEUES) 135 return -EBUSY; 136 137 rcu_assign_pointer(q->vlan, vlan); 138 rcu_assign_pointer(vlan->taps[vlan->numvtaps], q); 139 sock_hold(&q->sk); 140 141 q->file = file; 142 q->queue_index = vlan->numvtaps; 143 q->enabled = true; 144 file->private_data = q; 145 list_add_tail(&q->next, &vlan->queue_list); 146 147 vlan->numvtaps++; 148 vlan->numqueues++; 149 150 return 0; 151 } 152 153 static int macvtap_disable_queue(struct macvtap_queue *q) 154 { 155 struct macvlan_dev *vlan; 156 struct macvtap_queue *nq; 157 158 ASSERT_RTNL(); 159 if (!q->enabled) 160 return -EINVAL; 161 162 vlan = rtnl_dereference(q->vlan); 163 164 if (vlan) { 165 int index = q->queue_index; 166 BUG_ON(index >= vlan->numvtaps); 167 nq = rtnl_dereference(vlan->taps[vlan->numvtaps - 1]); 168 nq->queue_index = index; 169 170 rcu_assign_pointer(vlan->taps[index], nq); 171 RCU_INIT_POINTER(vlan->taps[vlan->numvtaps - 1], NULL); 172 q->enabled = false; 173 174 vlan->numvtaps--; 175 } 176 177 return 0; 178 } 179 180 /* 181 * The file owning the queue got closed, give up both 182 * the reference that the files holds as well as the 183 * one from the macvlan_dev if that still exists. 184 * 185 * Using the spinlock makes sure that we don't get 186 * to the queue again after destroying it. 187 */ 188 static void macvtap_put_queue(struct macvtap_queue *q) 189 { 190 struct macvlan_dev *vlan; 191 192 rtnl_lock(); 193 vlan = rtnl_dereference(q->vlan); 194 195 if (vlan) { 196 if (q->enabled) 197 BUG_ON(macvtap_disable_queue(q)); 198 199 vlan->numqueues--; 200 RCU_INIT_POINTER(q->vlan, NULL); 201 sock_put(&q->sk); 202 list_del_init(&q->next); 203 } 204 205 rtnl_unlock(); 206 207 synchronize_rcu(); 208 sock_put(&q->sk); 209 } 210 211 /* 212 * Select a queue based on the rxq of the device on which this packet 213 * arrived. If the incoming device is not mq, calculate a flow hash 214 * to select a queue. If all fails, find the first available queue. 215 * Cache vlan->numvtaps since it can become zero during the execution 216 * of this function. 217 */ 218 static struct macvtap_queue *macvtap_get_queue(struct net_device *dev, 219 struct sk_buff *skb) 220 { 221 struct macvlan_dev *vlan = netdev_priv(dev); 222 struct macvtap_queue *tap = NULL; 223 /* Access to taps array is protected by rcu, but access to numvtaps 224 * isn't. Below we use it to lookup a queue, but treat it as a hint 225 * and validate that the result isn't NULL - in case we are 226 * racing against queue removal. 227 */ 228 int numvtaps = ACCESS_ONCE(vlan->numvtaps); 229 __u32 rxq; 230 231 if (!numvtaps) 232 goto out; 233 234 /* Check if we can use flow to select a queue */ 235 rxq = skb_get_hash(skb); 236 if (rxq) { 237 tap = rcu_dereference(vlan->taps[rxq % numvtaps]); 238 goto out; 239 } 240 241 if (likely(skb_rx_queue_recorded(skb))) { 242 rxq = skb_get_rx_queue(skb); 243 244 while (unlikely(rxq >= numvtaps)) 245 rxq -= numvtaps; 246 247 tap = rcu_dereference(vlan->taps[rxq]); 248 goto out; 249 } 250 251 tap = rcu_dereference(vlan->taps[0]); 252 out: 253 return tap; 254 } 255 256 /* 257 * The net_device is going away, give up the reference 258 * that it holds on all queues and safely set the pointer 259 * from the queues to NULL. 260 */ 261 static void macvtap_del_queues(struct net_device *dev) 262 { 263 struct macvlan_dev *vlan = netdev_priv(dev); 264 struct macvtap_queue *q, *tmp, *qlist[MAX_MACVTAP_QUEUES]; 265 int i, j = 0; 266 267 ASSERT_RTNL(); 268 list_for_each_entry_safe(q, tmp, &vlan->queue_list, next) { 269 list_del_init(&q->next); 270 qlist[j++] = q; 271 RCU_INIT_POINTER(q->vlan, NULL); 272 if (q->enabled) 273 vlan->numvtaps--; 274 vlan->numqueues--; 275 } 276 for (i = 0; i < vlan->numvtaps; i++) 277 RCU_INIT_POINTER(vlan->taps[i], NULL); 278 BUG_ON(vlan->numvtaps); 279 BUG_ON(vlan->numqueues); 280 /* guarantee that any future macvtap_set_queue will fail */ 281 vlan->numvtaps = MAX_MACVTAP_QUEUES; 282 283 for (--j; j >= 0; j--) 284 sock_put(&qlist[j]->sk); 285 } 286 287 static rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb) 288 { 289 struct sk_buff *skb = *pskb; 290 struct net_device *dev = skb->dev; 291 struct macvlan_dev *vlan; 292 struct macvtap_queue *q; 293 netdev_features_t features = TAP_FEATURES; 294 295 vlan = macvtap_get_vlan_rcu(dev); 296 if (!vlan) 297 return RX_HANDLER_PASS; 298 299 q = macvtap_get_queue(dev, skb); 300 if (!q) 301 return RX_HANDLER_PASS; 302 303 if (skb_queue_len(&q->sk.sk_receive_queue) >= dev->tx_queue_len) 304 goto drop; 305 306 skb_push(skb, ETH_HLEN); 307 308 /* Apply the forward feature mask so that we perform segmentation 309 * according to users wishes. This only works if VNET_HDR is 310 * enabled. 311 */ 312 if (q->flags & IFF_VNET_HDR) 313 features |= vlan->tap_features; 314 if (netif_needs_gso(dev, skb, features)) { 315 struct sk_buff *segs = __skb_gso_segment(skb, features, false); 316 317 if (IS_ERR(segs)) 318 goto drop; 319 320 if (!segs) { 321 skb_queue_tail(&q->sk.sk_receive_queue, skb); 322 goto wake_up; 323 } 324 325 kfree_skb(skb); 326 while (segs) { 327 struct sk_buff *nskb = segs->next; 328 329 segs->next = NULL; 330 skb_queue_tail(&q->sk.sk_receive_queue, segs); 331 segs = nskb; 332 } 333 } else { 334 /* If we receive a partial checksum and the tap side 335 * doesn't support checksum offload, compute the checksum. 336 * Note: it doesn't matter which checksum feature to 337 * check, we either support them all or none. 338 */ 339 if (skb->ip_summed == CHECKSUM_PARTIAL && 340 !(features & NETIF_F_ALL_CSUM) && 341 skb_checksum_help(skb)) 342 goto drop; 343 skb_queue_tail(&q->sk.sk_receive_queue, skb); 344 } 345 346 wake_up: 347 wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND); 348 return RX_HANDLER_CONSUMED; 349 350 drop: 351 /* Count errors/drops only here, thus don't care about args. */ 352 macvlan_count_rx(vlan, 0, 0, 0); 353 kfree_skb(skb); 354 return RX_HANDLER_CONSUMED; 355 } 356 357 static int macvtap_get_minor(struct macvlan_dev *vlan) 358 { 359 int retval = -ENOMEM; 360 361 mutex_lock(&minor_lock); 362 retval = idr_alloc(&minor_idr, vlan, 1, MACVTAP_NUM_DEVS, GFP_KERNEL); 363 if (retval >= 0) { 364 vlan->minor = retval; 365 } else if (retval == -ENOSPC) { 366 printk(KERN_ERR "too many macvtap devices\n"); 367 retval = -EINVAL; 368 } 369 mutex_unlock(&minor_lock); 370 return retval < 0 ? retval : 0; 371 } 372 373 static void macvtap_free_minor(struct macvlan_dev *vlan) 374 { 375 mutex_lock(&minor_lock); 376 if (vlan->minor) { 377 idr_remove(&minor_idr, vlan->minor); 378 vlan->minor = 0; 379 } 380 mutex_unlock(&minor_lock); 381 } 382 383 static struct net_device *dev_get_by_macvtap_minor(int minor) 384 { 385 struct net_device *dev = NULL; 386 struct macvlan_dev *vlan; 387 388 mutex_lock(&minor_lock); 389 vlan = idr_find(&minor_idr, minor); 390 if (vlan) { 391 dev = vlan->dev; 392 dev_hold(dev); 393 } 394 mutex_unlock(&minor_lock); 395 return dev; 396 } 397 398 static int macvtap_newlink(struct net *src_net, 399 struct net_device *dev, 400 struct nlattr *tb[], 401 struct nlattr *data[]) 402 { 403 struct macvlan_dev *vlan = netdev_priv(dev); 404 int err; 405 406 INIT_LIST_HEAD(&vlan->queue_list); 407 408 /* Since macvlan supports all offloads by default, make 409 * tap support all offloads also. 410 */ 411 vlan->tap_features = TUN_OFFLOADS; 412 413 err = netdev_rx_handler_register(dev, macvtap_handle_frame, vlan); 414 if (err) 415 return err; 416 417 /* Don't put anything that may fail after macvlan_common_newlink 418 * because we can't undo what it does. 419 */ 420 return macvlan_common_newlink(src_net, dev, tb, data); 421 } 422 423 static void macvtap_dellink(struct net_device *dev, 424 struct list_head *head) 425 { 426 netdev_rx_handler_unregister(dev); 427 macvtap_del_queues(dev); 428 macvlan_dellink(dev, head); 429 } 430 431 static void macvtap_setup(struct net_device *dev) 432 { 433 macvlan_common_setup(dev); 434 dev->tx_queue_len = TUN_READQ_SIZE; 435 } 436 437 static struct rtnl_link_ops macvtap_link_ops __read_mostly = { 438 .kind = "macvtap", 439 .setup = macvtap_setup, 440 .newlink = macvtap_newlink, 441 .dellink = macvtap_dellink, 442 }; 443 444 445 static void macvtap_sock_write_space(struct sock *sk) 446 { 447 wait_queue_head_t *wqueue; 448 449 if (!sock_writeable(sk) || 450 !test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags)) 451 return; 452 453 wqueue = sk_sleep(sk); 454 if (wqueue && waitqueue_active(wqueue)) 455 wake_up_interruptible_poll(wqueue, POLLOUT | POLLWRNORM | POLLWRBAND); 456 } 457 458 static void macvtap_sock_destruct(struct sock *sk) 459 { 460 skb_queue_purge(&sk->sk_receive_queue); 461 } 462 463 static int macvtap_open(struct inode *inode, struct file *file) 464 { 465 struct net *net = current->nsproxy->net_ns; 466 struct net_device *dev; 467 struct macvtap_queue *q; 468 int err = -ENODEV; 469 470 rtnl_lock(); 471 dev = dev_get_by_macvtap_minor(iminor(inode)); 472 if (!dev) 473 goto out; 474 475 err = -ENOMEM; 476 q = (struct macvtap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL, 477 &macvtap_proto); 478 if (!q) 479 goto out; 480 481 RCU_INIT_POINTER(q->sock.wq, &q->wq); 482 init_waitqueue_head(&q->wq.wait); 483 q->sock.type = SOCK_RAW; 484 q->sock.state = SS_CONNECTED; 485 q->sock.file = file; 486 q->sock.ops = &macvtap_socket_ops; 487 sock_init_data(&q->sock, &q->sk); 488 q->sk.sk_write_space = macvtap_sock_write_space; 489 q->sk.sk_destruct = macvtap_sock_destruct; 490 q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP; 491 q->vnet_hdr_sz = sizeof(struct virtio_net_hdr); 492 493 /* 494 * so far only KVM virtio_net uses macvtap, enable zero copy between 495 * guest kernel and host kernel when lower device supports zerocopy 496 * 497 * The macvlan supports zerocopy iff the lower device supports zero 498 * copy so we don't have to look at the lower device directly. 499 */ 500 if ((dev->features & NETIF_F_HIGHDMA) && (dev->features & NETIF_F_SG)) 501 sock_set_flag(&q->sk, SOCK_ZEROCOPY); 502 503 err = macvtap_set_queue(dev, file, q); 504 if (err) 505 sock_put(&q->sk); 506 507 out: 508 if (dev) 509 dev_put(dev); 510 511 rtnl_unlock(); 512 return err; 513 } 514 515 static int macvtap_release(struct inode *inode, struct file *file) 516 { 517 struct macvtap_queue *q = file->private_data; 518 macvtap_put_queue(q); 519 return 0; 520 } 521 522 static unsigned int macvtap_poll(struct file *file, poll_table * wait) 523 { 524 struct macvtap_queue *q = file->private_data; 525 unsigned int mask = POLLERR; 526 527 if (!q) 528 goto out; 529 530 mask = 0; 531 poll_wait(file, &q->wq.wait, wait); 532 533 if (!skb_queue_empty(&q->sk.sk_receive_queue)) 534 mask |= POLLIN | POLLRDNORM; 535 536 if (sock_writeable(&q->sk) || 537 (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &q->sock.flags) && 538 sock_writeable(&q->sk))) 539 mask |= POLLOUT | POLLWRNORM; 540 541 out: 542 return mask; 543 } 544 545 static inline struct sk_buff *macvtap_alloc_skb(struct sock *sk, size_t prepad, 546 size_t len, size_t linear, 547 int noblock, int *err) 548 { 549 struct sk_buff *skb; 550 551 /* Under a page? Don't bother with paged skb. */ 552 if (prepad + len < PAGE_SIZE || !linear) 553 linear = len; 554 555 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, 556 err, 0); 557 if (!skb) 558 return NULL; 559 560 skb_reserve(skb, prepad); 561 skb_put(skb, linear); 562 skb->data_len = len - linear; 563 skb->len += len - linear; 564 565 return skb; 566 } 567 568 /* 569 * macvtap_skb_from_vnet_hdr and macvtap_skb_to_vnet_hdr should 570 * be shared with the tun/tap driver. 571 */ 572 static int macvtap_skb_from_vnet_hdr(struct macvtap_queue *q, 573 struct sk_buff *skb, 574 struct virtio_net_hdr *vnet_hdr) 575 { 576 unsigned short gso_type = 0; 577 if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { 578 switch (vnet_hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { 579 case VIRTIO_NET_HDR_GSO_TCPV4: 580 gso_type = SKB_GSO_TCPV4; 581 break; 582 case VIRTIO_NET_HDR_GSO_TCPV6: 583 gso_type = SKB_GSO_TCPV6; 584 break; 585 case VIRTIO_NET_HDR_GSO_UDP: 586 pr_warn_once("macvtap: %s: using disabled UFO feature; please fix this program\n", 587 current->comm); 588 gso_type = SKB_GSO_UDP; 589 if (skb->protocol == htons(ETH_P_IPV6)) 590 ipv6_proxy_select_ident(skb); 591 break; 592 default: 593 return -EINVAL; 594 } 595 596 if (vnet_hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN) 597 gso_type |= SKB_GSO_TCP_ECN; 598 599 if (vnet_hdr->gso_size == 0) 600 return -EINVAL; 601 } 602 603 if (vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { 604 if (!skb_partial_csum_set(skb, macvtap16_to_cpu(q, vnet_hdr->csum_start), 605 macvtap16_to_cpu(q, vnet_hdr->csum_offset))) 606 return -EINVAL; 607 } 608 609 if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { 610 skb_shinfo(skb)->gso_size = macvtap16_to_cpu(q, vnet_hdr->gso_size); 611 skb_shinfo(skb)->gso_type = gso_type; 612 613 /* Header must be checked, and gso_segs computed. */ 614 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; 615 skb_shinfo(skb)->gso_segs = 0; 616 } 617 return 0; 618 } 619 620 static void macvtap_skb_to_vnet_hdr(struct macvtap_queue *q, 621 const struct sk_buff *skb, 622 struct virtio_net_hdr *vnet_hdr) 623 { 624 memset(vnet_hdr, 0, sizeof(*vnet_hdr)); 625 626 if (skb_is_gso(skb)) { 627 struct skb_shared_info *sinfo = skb_shinfo(skb); 628 629 /* This is a hint as to how much should be linear. */ 630 vnet_hdr->hdr_len = cpu_to_macvtap16(q, skb_headlen(skb)); 631 vnet_hdr->gso_size = cpu_to_macvtap16(q, sinfo->gso_size); 632 if (sinfo->gso_type & SKB_GSO_TCPV4) 633 vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4; 634 else if (sinfo->gso_type & SKB_GSO_TCPV6) 635 vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6; 636 else 637 BUG(); 638 if (sinfo->gso_type & SKB_GSO_TCP_ECN) 639 vnet_hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN; 640 } else 641 vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE; 642 643 if (skb->ip_summed == CHECKSUM_PARTIAL) { 644 vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; 645 if (vlan_tx_tag_present(skb)) 646 vnet_hdr->csum_start = cpu_to_macvtap16(q, 647 skb_checksum_start_offset(skb) + VLAN_HLEN); 648 else 649 vnet_hdr->csum_start = cpu_to_macvtap16(q, 650 skb_checksum_start_offset(skb)); 651 vnet_hdr->csum_offset = cpu_to_macvtap16(q, skb->csum_offset); 652 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) { 653 vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID; 654 } /* else everything is zero */ 655 } 656 657 /* Get packet from user space buffer */ 658 static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, 659 const struct iovec *iv, unsigned long total_len, 660 size_t count, int noblock) 661 { 662 int good_linear = SKB_MAX_HEAD(NET_IP_ALIGN); 663 struct sk_buff *skb; 664 struct macvlan_dev *vlan; 665 unsigned long len = total_len; 666 int err; 667 struct virtio_net_hdr vnet_hdr = { 0 }; 668 int vnet_hdr_len = 0; 669 int copylen = 0; 670 bool zerocopy = false; 671 size_t linear; 672 673 if (q->flags & IFF_VNET_HDR) { 674 vnet_hdr_len = q->vnet_hdr_sz; 675 676 err = -EINVAL; 677 if (len < vnet_hdr_len) 678 goto err; 679 len -= vnet_hdr_len; 680 681 err = memcpy_fromiovecend((void *)&vnet_hdr, iv, 0, 682 sizeof(vnet_hdr)); 683 if (err < 0) 684 goto err; 685 if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && 686 macvtap16_to_cpu(q, vnet_hdr.csum_start) + 687 macvtap16_to_cpu(q, vnet_hdr.csum_offset) + 2 > 688 macvtap16_to_cpu(q, vnet_hdr.hdr_len)) 689 vnet_hdr.hdr_len = cpu_to_macvtap16(q, 690 macvtap16_to_cpu(q, vnet_hdr.csum_start) + 691 macvtap16_to_cpu(q, vnet_hdr.csum_offset) + 2); 692 err = -EINVAL; 693 if (macvtap16_to_cpu(q, vnet_hdr.hdr_len) > len) 694 goto err; 695 } 696 697 err = -EINVAL; 698 if (unlikely(len < ETH_HLEN)) 699 goto err; 700 701 err = -EMSGSIZE; 702 if (unlikely(count > UIO_MAXIOV)) 703 goto err; 704 705 if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) { 706 copylen = vnet_hdr.hdr_len ? 707 macvtap16_to_cpu(q, vnet_hdr.hdr_len) : GOODCOPY_LEN; 708 if (copylen > good_linear) 709 copylen = good_linear; 710 linear = copylen; 711 if (iov_pages(iv, vnet_hdr_len + copylen, count) 712 <= MAX_SKB_FRAGS) 713 zerocopy = true; 714 } 715 716 if (!zerocopy) { 717 copylen = len; 718 if (macvtap16_to_cpu(q, vnet_hdr.hdr_len) > good_linear) 719 linear = good_linear; 720 else 721 linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len); 722 } 723 724 skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen, 725 linear, noblock, &err); 726 if (!skb) 727 goto err; 728 729 if (zerocopy) 730 err = zerocopy_sg_from_iovec(skb, iv, vnet_hdr_len, count); 731 else { 732 err = skb_copy_datagram_from_iovec(skb, 0, iv, vnet_hdr_len, 733 len); 734 if (!err && m && m->msg_control) { 735 struct ubuf_info *uarg = m->msg_control; 736 uarg->callback(uarg, false); 737 } 738 } 739 740 if (err) 741 goto err_kfree; 742 743 skb_set_network_header(skb, ETH_HLEN); 744 skb_reset_mac_header(skb); 745 skb->protocol = eth_hdr(skb)->h_proto; 746 747 if (vnet_hdr_len) { 748 err = macvtap_skb_from_vnet_hdr(q, skb, &vnet_hdr); 749 if (err) 750 goto err_kfree; 751 } 752 753 skb_probe_transport_header(skb, ETH_HLEN); 754 755 rcu_read_lock(); 756 vlan = rcu_dereference(q->vlan); 757 /* copy skb_ubuf_info for callback when skb has no error */ 758 if (zerocopy) { 759 skb_shinfo(skb)->destructor_arg = m->msg_control; 760 skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; 761 skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; 762 } 763 if (vlan) { 764 skb->dev = vlan->dev; 765 dev_queue_xmit(skb); 766 } else { 767 kfree_skb(skb); 768 } 769 rcu_read_unlock(); 770 771 return total_len; 772 773 err_kfree: 774 kfree_skb(skb); 775 776 err: 777 rcu_read_lock(); 778 vlan = rcu_dereference(q->vlan); 779 if (vlan) 780 this_cpu_inc(vlan->pcpu_stats->tx_dropped); 781 rcu_read_unlock(); 782 783 return err; 784 } 785 786 static ssize_t macvtap_aio_write(struct kiocb *iocb, const struct iovec *iv, 787 unsigned long count, loff_t pos) 788 { 789 struct file *file = iocb->ki_filp; 790 ssize_t result = -ENOLINK; 791 struct macvtap_queue *q = file->private_data; 792 793 result = macvtap_get_user(q, NULL, iv, iov_length(iv, count), count, 794 file->f_flags & O_NONBLOCK); 795 return result; 796 } 797 798 /* Put packet to the user space buffer */ 799 static ssize_t macvtap_put_user(struct macvtap_queue *q, 800 const struct sk_buff *skb, 801 const struct iovec *iv, int len) 802 { 803 int ret; 804 int vnet_hdr_len = 0; 805 int vlan_offset = 0; 806 int copied, total; 807 808 if (q->flags & IFF_VNET_HDR) { 809 struct virtio_net_hdr vnet_hdr; 810 vnet_hdr_len = q->vnet_hdr_sz; 811 if ((len -= vnet_hdr_len) < 0) 812 return -EINVAL; 813 814 macvtap_skb_to_vnet_hdr(q, skb, &vnet_hdr); 815 816 if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, sizeof(vnet_hdr))) 817 return -EFAULT; 818 } 819 total = copied = vnet_hdr_len; 820 total += skb->len; 821 822 if (!vlan_tx_tag_present(skb)) 823 len = min_t(int, skb->len, len); 824 else { 825 int copy; 826 struct { 827 __be16 h_vlan_proto; 828 __be16 h_vlan_TCI; 829 } veth; 830 veth.h_vlan_proto = skb->vlan_proto; 831 veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb)); 832 833 vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto); 834 len = min_t(int, skb->len + VLAN_HLEN, len); 835 total += VLAN_HLEN; 836 837 copy = min_t(int, vlan_offset, len); 838 ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy); 839 len -= copy; 840 copied += copy; 841 if (ret || !len) 842 goto done; 843 844 copy = min_t(int, sizeof(veth), len); 845 ret = memcpy_toiovecend(iv, (void *)&veth, copied, copy); 846 len -= copy; 847 copied += copy; 848 if (ret || !len) 849 goto done; 850 } 851 852 ret = skb_copy_datagram_const_iovec(skb, vlan_offset, iv, copied, len); 853 854 done: 855 return ret ? ret : total; 856 } 857 858 static ssize_t macvtap_do_read(struct macvtap_queue *q, 859 const struct iovec *iv, unsigned long len, 860 int noblock) 861 { 862 DEFINE_WAIT(wait); 863 struct sk_buff *skb; 864 ssize_t ret = 0; 865 866 while (len) { 867 if (!noblock) 868 prepare_to_wait(sk_sleep(&q->sk), &wait, 869 TASK_INTERRUPTIBLE); 870 871 /* Read frames from the queue */ 872 skb = skb_dequeue(&q->sk.sk_receive_queue); 873 if (!skb) { 874 if (noblock) { 875 ret = -EAGAIN; 876 break; 877 } 878 if (signal_pending(current)) { 879 ret = -ERESTARTSYS; 880 break; 881 } 882 /* Nothing to read, let's sleep */ 883 schedule(); 884 continue; 885 } 886 ret = macvtap_put_user(q, skb, iv, len); 887 kfree_skb(skb); 888 break; 889 } 890 891 if (!noblock) 892 finish_wait(sk_sleep(&q->sk), &wait); 893 return ret; 894 } 895 896 static ssize_t macvtap_aio_read(struct kiocb *iocb, const struct iovec *iv, 897 unsigned long count, loff_t pos) 898 { 899 struct file *file = iocb->ki_filp; 900 struct macvtap_queue *q = file->private_data; 901 ssize_t len, ret = 0; 902 903 len = iov_length(iv, count); 904 if (len < 0) { 905 ret = -EINVAL; 906 goto out; 907 } 908 909 ret = macvtap_do_read(q, iv, len, file->f_flags & O_NONBLOCK); 910 ret = min_t(ssize_t, ret, len); 911 if (ret > 0) 912 iocb->ki_pos = ret; 913 out: 914 return ret; 915 } 916 917 static struct macvlan_dev *macvtap_get_vlan(struct macvtap_queue *q) 918 { 919 struct macvlan_dev *vlan; 920 921 ASSERT_RTNL(); 922 vlan = rtnl_dereference(q->vlan); 923 if (vlan) 924 dev_hold(vlan->dev); 925 926 return vlan; 927 } 928 929 static void macvtap_put_vlan(struct macvlan_dev *vlan) 930 { 931 dev_put(vlan->dev); 932 } 933 934 static int macvtap_ioctl_set_queue(struct file *file, unsigned int flags) 935 { 936 struct macvtap_queue *q = file->private_data; 937 struct macvlan_dev *vlan; 938 int ret; 939 940 vlan = macvtap_get_vlan(q); 941 if (!vlan) 942 return -EINVAL; 943 944 if (flags & IFF_ATTACH_QUEUE) 945 ret = macvtap_enable_queue(vlan->dev, file, q); 946 else if (flags & IFF_DETACH_QUEUE) 947 ret = macvtap_disable_queue(q); 948 else 949 ret = -EINVAL; 950 951 macvtap_put_vlan(vlan); 952 return ret; 953 } 954 955 static int set_offload(struct macvtap_queue *q, unsigned long arg) 956 { 957 struct macvlan_dev *vlan; 958 netdev_features_t features; 959 netdev_features_t feature_mask = 0; 960 961 vlan = rtnl_dereference(q->vlan); 962 if (!vlan) 963 return -ENOLINK; 964 965 features = vlan->dev->features; 966 967 if (arg & TUN_F_CSUM) { 968 feature_mask = NETIF_F_HW_CSUM; 969 970 if (arg & (TUN_F_TSO4 | TUN_F_TSO6)) { 971 if (arg & TUN_F_TSO_ECN) 972 feature_mask |= NETIF_F_TSO_ECN; 973 if (arg & TUN_F_TSO4) 974 feature_mask |= NETIF_F_TSO; 975 if (arg & TUN_F_TSO6) 976 feature_mask |= NETIF_F_TSO6; 977 } 978 } 979 980 /* tun/tap driver inverts the usage for TSO offloads, where 981 * setting the TSO bit means that the userspace wants to 982 * accept TSO frames and turning it off means that user space 983 * does not support TSO. 984 * For macvtap, we have to invert it to mean the same thing. 985 * When user space turns off TSO, we turn off GSO/LRO so that 986 * user-space will not receive TSO frames. 987 */ 988 if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6)) 989 features |= RX_OFFLOADS; 990 else 991 features &= ~RX_OFFLOADS; 992 993 /* tap_features are the same as features on tun/tap and 994 * reflect user expectations. 995 */ 996 vlan->tap_features = feature_mask; 997 vlan->set_features = features; 998 netdev_update_features(vlan->dev); 999 1000 return 0; 1001 } 1002 1003 /* 1004 * provide compatibility with generic tun/tap interface 1005 */ 1006 static long macvtap_ioctl(struct file *file, unsigned int cmd, 1007 unsigned long arg) 1008 { 1009 struct macvtap_queue *q = file->private_data; 1010 struct macvlan_dev *vlan; 1011 void __user *argp = (void __user *)arg; 1012 struct ifreq __user *ifr = argp; 1013 unsigned int __user *up = argp; 1014 unsigned int u; 1015 int __user *sp = argp; 1016 int s; 1017 int ret; 1018 1019 switch (cmd) { 1020 case TUNSETIFF: 1021 /* ignore the name, just look at flags */ 1022 if (get_user(u, &ifr->ifr_flags)) 1023 return -EFAULT; 1024 1025 ret = 0; 1026 if ((u & ~MACVTAP_FEATURES) != (IFF_NO_PI | IFF_TAP)) 1027 ret = -EINVAL; 1028 else 1029 q->flags = u; 1030 1031 return ret; 1032 1033 case TUNGETIFF: 1034 rtnl_lock(); 1035 vlan = macvtap_get_vlan(q); 1036 if (!vlan) { 1037 rtnl_unlock(); 1038 return -ENOLINK; 1039 } 1040 1041 ret = 0; 1042 if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) || 1043 put_user(q->flags, &ifr->ifr_flags)) 1044 ret = -EFAULT; 1045 macvtap_put_vlan(vlan); 1046 rtnl_unlock(); 1047 return ret; 1048 1049 case TUNSETQUEUE: 1050 if (get_user(u, &ifr->ifr_flags)) 1051 return -EFAULT; 1052 rtnl_lock(); 1053 ret = macvtap_ioctl_set_queue(file, u); 1054 rtnl_unlock(); 1055 return ret; 1056 1057 case TUNGETFEATURES: 1058 if (put_user(IFF_TAP | IFF_NO_PI | MACVTAP_FEATURES, up)) 1059 return -EFAULT; 1060 return 0; 1061 1062 case TUNSETSNDBUF: 1063 if (get_user(u, up)) 1064 return -EFAULT; 1065 1066 q->sk.sk_sndbuf = u; 1067 return 0; 1068 1069 case TUNGETVNETHDRSZ: 1070 s = q->vnet_hdr_sz; 1071 if (put_user(s, sp)) 1072 return -EFAULT; 1073 return 0; 1074 1075 case TUNSETVNETHDRSZ: 1076 if (get_user(s, sp)) 1077 return -EFAULT; 1078 if (s < (int)sizeof(struct virtio_net_hdr)) 1079 return -EINVAL; 1080 1081 q->vnet_hdr_sz = s; 1082 return 0; 1083 1084 case TUNSETOFFLOAD: 1085 /* let the user check for future flags */ 1086 if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 | 1087 TUN_F_TSO_ECN)) 1088 return -EINVAL; 1089 1090 rtnl_lock(); 1091 ret = set_offload(q, arg); 1092 rtnl_unlock(); 1093 return ret; 1094 1095 default: 1096 return -EINVAL; 1097 } 1098 } 1099 1100 #ifdef CONFIG_COMPAT 1101 static long macvtap_compat_ioctl(struct file *file, unsigned int cmd, 1102 unsigned long arg) 1103 { 1104 return macvtap_ioctl(file, cmd, (unsigned long)compat_ptr(arg)); 1105 } 1106 #endif 1107 1108 static const struct file_operations macvtap_fops = { 1109 .owner = THIS_MODULE, 1110 .open = macvtap_open, 1111 .release = macvtap_release, 1112 .aio_read = macvtap_aio_read, 1113 .aio_write = macvtap_aio_write, 1114 .poll = macvtap_poll, 1115 .llseek = no_llseek, 1116 .unlocked_ioctl = macvtap_ioctl, 1117 #ifdef CONFIG_COMPAT 1118 .compat_ioctl = macvtap_compat_ioctl, 1119 #endif 1120 }; 1121 1122 static int macvtap_sendmsg(struct kiocb *iocb, struct socket *sock, 1123 struct msghdr *m, size_t total_len) 1124 { 1125 struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock); 1126 return macvtap_get_user(q, m, m->msg_iov, total_len, m->msg_iovlen, 1127 m->msg_flags & MSG_DONTWAIT); 1128 } 1129 1130 static int macvtap_recvmsg(struct kiocb *iocb, struct socket *sock, 1131 struct msghdr *m, size_t total_len, 1132 int flags) 1133 { 1134 struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock); 1135 int ret; 1136 if (flags & ~(MSG_DONTWAIT|MSG_TRUNC)) 1137 return -EINVAL; 1138 ret = macvtap_do_read(q, m->msg_iov, total_len, 1139 flags & MSG_DONTWAIT); 1140 if (ret > total_len) { 1141 m->msg_flags |= MSG_TRUNC; 1142 ret = flags & MSG_TRUNC ? ret : total_len; 1143 } 1144 return ret; 1145 } 1146 1147 /* Ops structure to mimic raw sockets with tun */ 1148 static const struct proto_ops macvtap_socket_ops = { 1149 .sendmsg = macvtap_sendmsg, 1150 .recvmsg = macvtap_recvmsg, 1151 }; 1152 1153 /* Get an underlying socket object from tun file. Returns error unless file is 1154 * attached to a device. The returned object works like a packet socket, it 1155 * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for 1156 * holding a reference to the file for as long as the socket is in use. */ 1157 struct socket *macvtap_get_socket(struct file *file) 1158 { 1159 struct macvtap_queue *q; 1160 if (file->f_op != &macvtap_fops) 1161 return ERR_PTR(-EINVAL); 1162 q = file->private_data; 1163 if (!q) 1164 return ERR_PTR(-EBADFD); 1165 return &q->sock; 1166 } 1167 EXPORT_SYMBOL_GPL(macvtap_get_socket); 1168 1169 static int macvtap_device_event(struct notifier_block *unused, 1170 unsigned long event, void *ptr) 1171 { 1172 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 1173 struct macvlan_dev *vlan; 1174 struct device *classdev; 1175 dev_t devt; 1176 int err; 1177 1178 if (dev->rtnl_link_ops != &macvtap_link_ops) 1179 return NOTIFY_DONE; 1180 1181 vlan = netdev_priv(dev); 1182 1183 switch (event) { 1184 case NETDEV_REGISTER: 1185 /* Create the device node here after the network device has 1186 * been registered but before register_netdevice has 1187 * finished running. 1188 */ 1189 err = macvtap_get_minor(vlan); 1190 if (err) 1191 return notifier_from_errno(err); 1192 1193 devt = MKDEV(MAJOR(macvtap_major), vlan->minor); 1194 classdev = device_create(macvtap_class, &dev->dev, devt, 1195 dev, "tap%d", dev->ifindex); 1196 if (IS_ERR(classdev)) { 1197 macvtap_free_minor(vlan); 1198 return notifier_from_errno(PTR_ERR(classdev)); 1199 } 1200 break; 1201 case NETDEV_UNREGISTER: 1202 devt = MKDEV(MAJOR(macvtap_major), vlan->minor); 1203 device_destroy(macvtap_class, devt); 1204 macvtap_free_minor(vlan); 1205 break; 1206 } 1207 1208 return NOTIFY_DONE; 1209 } 1210 1211 static struct notifier_block macvtap_notifier_block __read_mostly = { 1212 .notifier_call = macvtap_device_event, 1213 }; 1214 1215 static int macvtap_init(void) 1216 { 1217 int err; 1218 1219 err = alloc_chrdev_region(&macvtap_major, 0, 1220 MACVTAP_NUM_DEVS, "macvtap"); 1221 if (err) 1222 goto out1; 1223 1224 cdev_init(&macvtap_cdev, &macvtap_fops); 1225 err = cdev_add(&macvtap_cdev, macvtap_major, MACVTAP_NUM_DEVS); 1226 if (err) 1227 goto out2; 1228 1229 macvtap_class = class_create(THIS_MODULE, "macvtap"); 1230 if (IS_ERR(macvtap_class)) { 1231 err = PTR_ERR(macvtap_class); 1232 goto out3; 1233 } 1234 1235 err = register_netdevice_notifier(&macvtap_notifier_block); 1236 if (err) 1237 goto out4; 1238 1239 err = macvlan_link_register(&macvtap_link_ops); 1240 if (err) 1241 goto out5; 1242 1243 return 0; 1244 1245 out5: 1246 unregister_netdevice_notifier(&macvtap_notifier_block); 1247 out4: 1248 class_unregister(macvtap_class); 1249 out3: 1250 cdev_del(&macvtap_cdev); 1251 out2: 1252 unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS); 1253 out1: 1254 return err; 1255 } 1256 module_init(macvtap_init); 1257 1258 static void macvtap_exit(void) 1259 { 1260 rtnl_link_unregister(&macvtap_link_ops); 1261 unregister_netdevice_notifier(&macvtap_notifier_block); 1262 class_unregister(macvtap_class); 1263 cdev_del(&macvtap_cdev); 1264 unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS); 1265 } 1266 module_exit(macvtap_exit); 1267 1268 MODULE_ALIAS_RTNL_LINK("macvtap"); 1269 MODULE_AUTHOR("Arnd Bergmann <arnd@arndb.de>"); 1270 MODULE_LICENSE("GPL"); 1271