#include <linux/etherdevice.h>
#include <linux/if_macvlan.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/nsproxy.h>
#include <linux/compat.h>
#include <linux/if_tun.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/cache.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/fs.h>
#include <linux/uio.h>

#include <net/ipv6.h>
#include <net/net_namespace.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <linux/virtio_net.h>

/*
 * A macvtap queue is the central object of this driver; it connects
 * an open character device to a macvlan interface. There can be
 * multiple queues on one interface, which map back to queues
 * implemented in hardware on the underlying device.
 *
 * macvtap_proto is used to allocate queues through the sock allocation
 * mechanism.
 */
struct macvtap_queue {
	struct sock sk;
	struct socket sock;
	struct socket_wq wq;
	int vnet_hdr_sz;
	struct macvlan_dev __rcu *vlan;
	struct file *file;
	unsigned int flags;
	u16 queue_index;
	bool enabled;
	struct list_head next;
};

#define MACVTAP_FEATURES (IFF_VNET_HDR | IFF_VNET_LE | IFF_MULTI_QUEUE)

static inline u16 macvtap16_to_cpu(struct macvtap_queue *q, __virtio16 val)
{
	return __virtio16_to_cpu(q->flags & IFF_VNET_LE, val);
}

static inline __virtio16 cpu_to_macvtap16(struct macvtap_queue *q, u16 val)
{
	return __cpu_to_virtio16(q->flags & IFF_VNET_LE, val);
}

static struct proto macvtap_proto = {
	.name = "macvtap",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct macvtap_queue),
};

/*
 * Variables for dealing with macvtap device numbers.
 */
static dev_t macvtap_major;
#define MACVTAP_NUM_DEVS (1U << MINORBITS)
static DEFINE_MUTEX(minor_lock);
static DEFINE_IDR(minor_idr);

#define GOODCOPY_LEN 128
static struct class *macvtap_class;
static struct cdev macvtap_cdev;

static const struct proto_ops macvtap_socket_ops;

#define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
		      NETIF_F_TSO6)
#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG)

static struct macvlan_dev *macvtap_get_vlan_rcu(const struct net_device *dev)
{
	return rcu_dereference(dev->rx_handler_data);
}

/*
 * RCU usage:
 * The macvtap_queue and the macvlan_dev are loosely coupled, the
 * pointers from one to the other can only be read while rcu_read_lock
 * or rtnl is held.
 *
 * Both the file and the macvlan_dev hold a reference on the macvtap_queue
 * through sock_hold(&q->sk). When the macvlan_dev goes away first,
 * q->vlan becomes inaccessible. When the file gets closed,
 * macvtap_get_queue() fails.
 *
 * There may still be references to the struct sock inside of the
 * queue from outbound SKBs, but these never reference back to the
 * file or the dev. The data structure is freed through __sk_free
 * when both our references and any pending SKBs are gone.
 */
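
/*
 * Illustrative example (not part of the driver): under the rules above,
 * a reader outside of RTNL must dereference q->vlan inside an RCU
 * read-side critical section, and must pin the device before leaving it
 * if the device is to be used afterwards:
 *
 *	rcu_read_lock();
 *	vlan = rcu_dereference(q->vlan);
 *	if (vlan)
 *		dev_hold(vlan->dev);
 *	rcu_read_unlock();
 *
 * macvtap_get_vlan() later in this file does the equivalent under RTNL.
 */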

static int macvtap_enable_queue(struct net_device *dev, struct file *file,
				struct macvtap_queue *q)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	int err = -EINVAL;

	ASSERT_RTNL();

	if (q->enabled)
		goto out;

	err = 0;
	rcu_assign_pointer(vlan->taps[vlan->numvtaps], q);
	q->queue_index = vlan->numvtaps;
	q->enabled = true;

	vlan->numvtaps++;
out:
	return err;
}

/* Requires RTNL */
static int macvtap_set_queue(struct net_device *dev, struct file *file,
			     struct macvtap_queue *q)
{
	struct macvlan_dev *vlan = netdev_priv(dev);

	if (vlan->numqueues == MAX_MACVTAP_QUEUES)
		return -EBUSY;

	rcu_assign_pointer(q->vlan, vlan);
	rcu_assign_pointer(vlan->taps[vlan->numvtaps], q);
	sock_hold(&q->sk);

	q->file = file;
	q->queue_index = vlan->numvtaps;
	q->enabled = true;
	file->private_data = q;
	list_add_tail(&q->next, &vlan->queue_list);

	vlan->numvtaps++;
	vlan->numqueues++;

	return 0;
}

static int macvtap_disable_queue(struct macvtap_queue *q)
{
	struct macvlan_dev *vlan;
	struct macvtap_queue *nq;

	ASSERT_RTNL();
	if (!q->enabled)
		return -EINVAL;

	vlan = rtnl_dereference(q->vlan);

	if (vlan) {
		int index = q->queue_index;
		BUG_ON(index >= vlan->numvtaps);
		nq = rtnl_dereference(vlan->taps[vlan->numvtaps - 1]);
		nq->queue_index = index;

		rcu_assign_pointer(vlan->taps[index], nq);
		RCU_INIT_POINTER(vlan->taps[vlan->numvtaps - 1], NULL);
		q->enabled = false;

		vlan->numvtaps--;
	}

	return 0;
}

/*
 * The file owning the queue got closed, give up both
 * the reference that the file holds as well as the
 * one from the macvlan_dev if that still exists.
 *
 * Holding RTNL makes sure that we don't get
 * to the queue again after destroying it.
 */
static void macvtap_put_queue(struct macvtap_queue *q)
{
	struct macvlan_dev *vlan;

	rtnl_lock();
	vlan = rtnl_dereference(q->vlan);

	if (vlan) {
		if (q->enabled)
			BUG_ON(macvtap_disable_queue(q));

		vlan->numqueues--;
		RCU_INIT_POINTER(q->vlan, NULL);
		sock_put(&q->sk);
		list_del_init(&q->next);
	}

	rtnl_unlock();

	synchronize_rcu();
	sock_put(&q->sk);
}
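
/*
 * Illustrative user space sketch (not part of the driver): a queue can
 * be parked and revived through TUNSETQUEUE, which lands in
 * macvtap_ioctl_set_queue() later in this file and from there in the
 * enable/disable helpers above. Assuming tapfd is an already attached
 * queue:
 *
 *	struct ifreq ifr = { .ifr_flags = IFF_DETACH_QUEUE };
 *
 *	ioctl(tapfd, TUNSETQUEUE, &ifr);
 *	ifr.ifr_flags = IFF_ATTACH_QUEUE;
 *	ioctl(tapfd, TUNSETQUEUE, &ifr);
 *
 * A detached queue keeps its file and its reference on the sock, it
 * just stops taking part in macvtap_get_queue() steering.
 */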

/*
 * Select a queue based on the rxq of the device on which this packet
 * arrived. If the incoming device is not mq, calculate a flow hash
 * to select a queue. If all fails, find the first available queue.
 * Cache vlan->numvtaps since it can become zero during the execution
 * of this function.
 */
static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
					       struct sk_buff *skb)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct macvtap_queue *tap = NULL;
	/* Access to taps array is protected by rcu, but access to numvtaps
	 * isn't. Below we use it to lookup a queue, but treat it as a hint
	 * and validate that the result isn't NULL - in case we are
	 * racing against queue removal.
	 */
	int numvtaps = ACCESS_ONCE(vlan->numvtaps);
	__u32 rxq;

	if (!numvtaps)
		goto out;

	/* Check if we can use flow to select a queue */
	rxq = skb_get_hash(skb);
	if (rxq) {
		tap = rcu_dereference(vlan->taps[rxq % numvtaps]);
		goto out;
	}

	if (likely(skb_rx_queue_recorded(skb))) {
		rxq = skb_get_rx_queue(skb);

		while (unlikely(rxq >= numvtaps))
			rxq -= numvtaps;

		tap = rcu_dereference(vlan->taps[rxq]);
		goto out;
	}

	tap = rcu_dereference(vlan->taps[0]);
out:
	return tap;
}

/*
 * The net_device is going away, give up the reference
 * that it holds on all queues and safely set the pointer
 * from the queues to NULL.
 */
static void macvtap_del_queues(struct net_device *dev)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct macvtap_queue *q, *tmp, *qlist[MAX_MACVTAP_QUEUES];
	int i, j = 0;

	ASSERT_RTNL();
	list_for_each_entry_safe(q, tmp, &vlan->queue_list, next) {
		list_del_init(&q->next);
		qlist[j++] = q;
		RCU_INIT_POINTER(q->vlan, NULL);
		if (q->enabled)
			vlan->numvtaps--;
		vlan->numqueues--;
	}
	for (i = 0; i < vlan->numvtaps; i++)
		RCU_INIT_POINTER(vlan->taps[i], NULL);
	BUG_ON(vlan->numvtaps);
	BUG_ON(vlan->numqueues);
	/* guarantee that any future macvtap_set_queue will fail */
	vlan->numvtaps = MAX_MACVTAP_QUEUES;

	for (--j; j >= 0; j--)
		sock_put(&qlist[j]->sk);
}

static rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct macvlan_dev *vlan;
	struct macvtap_queue *q;
	netdev_features_t features = TAP_FEATURES;

	vlan = macvtap_get_vlan_rcu(dev);
	if (!vlan)
		return RX_HANDLER_PASS;

	q = macvtap_get_queue(dev, skb);
	if (!q)
		return RX_HANDLER_PASS;

	if (skb_queue_len(&q->sk.sk_receive_queue) >= dev->tx_queue_len)
		goto drop;

	skb_push(skb, ETH_HLEN);

	/* Apply the forward feature mask so that we perform segmentation
	 * according to the user's wishes. This only works if VNET_HDR is
	 * enabled.
	 */
	if (q->flags & IFF_VNET_HDR)
		features |= vlan->tap_features;
	if (netif_needs_gso(dev, skb, features)) {
		struct sk_buff *segs = __skb_gso_segment(skb, features, false);

		if (IS_ERR(segs))
			goto drop;

		if (!segs) {
			skb_queue_tail(&q->sk.sk_receive_queue, skb);
			goto wake_up;
		}

		kfree_skb(skb);
		while (segs) {
			struct sk_buff *nskb = segs->next;

			segs->next = NULL;
			skb_queue_tail(&q->sk.sk_receive_queue, segs);
			segs = nskb;
		}
	} else {
		/* If we receive a partial checksum and the tap side
		 * doesn't support checksum offload, compute the checksum.
		 * Note: it doesn't matter which checksum feature to
		 * check, we either support them all or none.
		 */
		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    !(features & NETIF_F_ALL_CSUM) &&
		    skb_checksum_help(skb))
			goto drop;
		skb_queue_tail(&q->sk.sk_receive_queue, skb);
	}

wake_up:
	wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND);
	return RX_HANDLER_CONSUMED;

drop:
	/* Count errors/drops only here, thus don't care about args. */
	macvlan_count_rx(vlan, 0, 0, 0);
	kfree_skb(skb);
	return RX_HANDLER_CONSUMED;
}
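
/*
 * Worked example for macvtap_get_queue() (illustrative values): with
 * numvtaps == 4, a packet whose flow hash is 0x9e3779b9 is steered to
 * taps[0x9e3779b9 % 4] == taps[1], so all packets of one flow keep
 * hitting the same queue. Only when no hash is available does the
 * recorded rx queue, and finally taps[0], get used.
 */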

static int macvtap_get_minor(struct macvlan_dev *vlan)
{
	int retval = -ENOMEM;

	mutex_lock(&minor_lock);
	retval = idr_alloc(&minor_idr, vlan, 1, MACVTAP_NUM_DEVS, GFP_KERNEL);
	if (retval >= 0) {
		vlan->minor = retval;
	} else if (retval == -ENOSPC) {
		printk(KERN_ERR "too many macvtap devices\n");
		retval = -EINVAL;
	}
	mutex_unlock(&minor_lock);
	return retval < 0 ? retval : 0;
}

static void macvtap_free_minor(struct macvlan_dev *vlan)
{
	mutex_lock(&minor_lock);
	if (vlan->minor) {
		idr_remove(&minor_idr, vlan->minor);
		vlan->minor = 0;
	}
	mutex_unlock(&minor_lock);
}

static struct net_device *dev_get_by_macvtap_minor(int minor)
{
	struct net_device *dev = NULL;
	struct macvlan_dev *vlan;

	mutex_lock(&minor_lock);
	vlan = idr_find(&minor_idr, minor);
	if (vlan) {
		dev = vlan->dev;
		dev_hold(dev);
	}
	mutex_unlock(&minor_lock);
	return dev;
}

static int macvtap_newlink(struct net *src_net,
			   struct net_device *dev,
			   struct nlattr *tb[],
			   struct nlattr *data[])
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	int err;

	INIT_LIST_HEAD(&vlan->queue_list);

	/* Since macvlan supports all offloads by default, make
	 * tap support all offloads also.
	 */
	vlan->tap_features = TUN_OFFLOADS;

	err = netdev_rx_handler_register(dev, macvtap_handle_frame, vlan);
	if (err)
		return err;

	/* Don't put anything that may fail after macvlan_common_newlink
	 * because we can't undo what it does.
	 */
	return macvlan_common_newlink(src_net, dev, tb, data);
}

static void macvtap_dellink(struct net_device *dev,
			    struct list_head *head)
{
	netdev_rx_handler_unregister(dev);
	macvtap_del_queues(dev);
	macvlan_dellink(dev, head);
}

static void macvtap_setup(struct net_device *dev)
{
	macvlan_common_setup(dev);
	dev->tx_queue_len = TUN_READQ_SIZE;
}

static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
	.kind = "macvtap",
	.setup = macvtap_setup,
	.newlink = macvtap_newlink,
	.dellink = macvtap_dellink,
};

static void macvtap_sock_write_space(struct sock *sk)
{
	wait_queue_head_t *wqueue;

	if (!sock_writeable(sk) ||
	    !test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
		return;

	wqueue = sk_sleep(sk);
	if (wqueue && waitqueue_active(wqueue))
		wake_up_interruptible_poll(wqueue, POLLOUT | POLLWRNORM | POLLWRBAND);
}

static void macvtap_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
}
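
/*
 * Illustrative usage (not part of the driver): a macvtap instance is
 * created over a lower device via rtnetlink, e.g. with iproute2
 * (interface names are example values):
 *
 *	ip link add link eth0 name macvtap0 type macvtap
 *
 * The NETDEV_REGISTER notifier later in this file then creates a
 * character device node named after the new interface's ifindex, so
 * the queue can be opened as:
 *
 *	tapfd = open("/dev/tap5", O_RDWR);
 *
 * where 5 stands for whatever /sys/class/net/macvtap0/ifindex reports.
 */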

static int macvtap_open(struct inode *inode, struct file *file)
{
	struct net *net = current->nsproxy->net_ns;
	struct net_device *dev;
	struct macvtap_queue *q;
	int err = -ENODEV;

	rtnl_lock();
	dev = dev_get_by_macvtap_minor(iminor(inode));
	if (!dev)
		goto out;

	err = -ENOMEM;
	q = (struct macvtap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
					     &macvtap_proto);
	if (!q)
		goto out;

	RCU_INIT_POINTER(q->sock.wq, &q->wq);
	init_waitqueue_head(&q->wq.wait);
	q->sock.type = SOCK_RAW;
	q->sock.state = SS_CONNECTED;
	q->sock.file = file;
	q->sock.ops = &macvtap_socket_ops;
	sock_init_data(&q->sock, &q->sk);
	q->sk.sk_write_space = macvtap_sock_write_space;
	q->sk.sk_destruct = macvtap_sock_destruct;
	q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
	q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);

	/*
	 * So far only KVM virtio_net uses macvtap; enable zero copy between
	 * guest kernel and host kernel when the lower device supports it.
	 *
	 * The macvlan supports zerocopy iff the lower device supports zero
	 * copy, so we don't have to look at the lower device directly.
	 */
	if ((dev->features & NETIF_F_HIGHDMA) && (dev->features & NETIF_F_SG))
		sock_set_flag(&q->sk, SOCK_ZEROCOPY);

	err = macvtap_set_queue(dev, file, q);
	if (err)
		sock_put(&q->sk);

out:
	if (dev)
		dev_put(dev);

	rtnl_unlock();
	return err;
}

static int macvtap_release(struct inode *inode, struct file *file)
{
	struct macvtap_queue *q = file->private_data;
	macvtap_put_queue(q);
	return 0;
}

static unsigned int macvtap_poll(struct file *file, poll_table *wait)
{
	struct macvtap_queue *q = file->private_data;
	unsigned int mask = POLLERR;

	if (!q)
		goto out;

	mask = 0;
	poll_wait(file, &q->wq.wait, wait);

	if (!skb_queue_empty(&q->sk.sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	if (sock_writeable(&q->sk) ||
	    (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &q->sock.flags) &&
	     sock_writeable(&q->sk)))
		mask |= POLLOUT | POLLWRNORM;

out:
	return mask;
}

static inline struct sk_buff *macvtap_alloc_skb(struct sock *sk, size_t prepad,
						size_t len, size_t linear,
						int noblock, int *err)
{
	struct sk_buff *skb;

	/* Under a page? Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)
		linear = len;

	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
				   err, 0);
	if (!skb)
		return NULL;

	skb_reserve(skb, prepad);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;

	return skb;
}
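
/*
 * Worked example for macvtap_alloc_skb() (illustrative numbers): for a
 * 200 byte frame with prepad = NET_IP_ALIGN, prepad + len is well under
 * PAGE_SIZE, so linear is bumped to len and the skb ends up fully
 * linear. For a 64KB GSO frame with linear = 128 (GOODCOPY_LEN), only
 * the first 128 bytes land in the linear area and the rest is attached
 * as paged fragments.
 */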

/*
 * macvtap_skb_from_vnet_hdr and macvtap_skb_to_vnet_hdr should
 * be shared with the tun/tap driver.
 */
static int macvtap_skb_from_vnet_hdr(struct macvtap_queue *q,
				     struct sk_buff *skb,
				     struct virtio_net_hdr *vnet_hdr)
{
	unsigned short gso_type = 0;

	if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		switch (vnet_hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
			gso_type = SKB_GSO_TCPV4;
			break;
		case VIRTIO_NET_HDR_GSO_TCPV6:
			gso_type = SKB_GSO_TCPV6;
			break;
		case VIRTIO_NET_HDR_GSO_UDP:
			pr_warn_once("macvtap: %s: using disabled UFO feature; please fix this program\n",
				     current->comm);
			gso_type = SKB_GSO_UDP;
			if (skb->protocol == htons(ETH_P_IPV6))
				ipv6_proxy_select_ident(skb);
			break;
		default:
			return -EINVAL;
		}

		if (vnet_hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN)
			gso_type |= SKB_GSO_TCP_ECN;

		if (vnet_hdr->gso_size == 0)
			return -EINVAL;
	}

	if (vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		if (!skb_partial_csum_set(skb, macvtap16_to_cpu(q, vnet_hdr->csum_start),
					  macvtap16_to_cpu(q, vnet_hdr->csum_offset)))
			return -EINVAL;
	}

	if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		skb_shinfo(skb)->gso_size = macvtap16_to_cpu(q, vnet_hdr->gso_size);
		skb_shinfo(skb)->gso_type = gso_type;

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;
	}
	return 0;
}

static void macvtap_skb_to_vnet_hdr(struct macvtap_queue *q,
				    const struct sk_buff *skb,
				    struct virtio_net_hdr *vnet_hdr)
{
	memset(vnet_hdr, 0, sizeof(*vnet_hdr));

	if (skb_is_gso(skb)) {
		struct skb_shared_info *sinfo = skb_shinfo(skb);

		/* This is a hint as to how much should be linear. */
		vnet_hdr->hdr_len = cpu_to_macvtap16(q, skb_headlen(skb));
		vnet_hdr->gso_size = cpu_to_macvtap16(q, sinfo->gso_size);
		if (sinfo->gso_type & SKB_GSO_TCPV4)
			vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		else if (sinfo->gso_type & SKB_GSO_TCPV6)
			vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		else
			BUG();
		if (sinfo->gso_type & SKB_GSO_TCP_ECN)
			vnet_hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
	} else
		vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
		if (vlan_tx_tag_present(skb))
			vnet_hdr->csum_start = cpu_to_macvtap16(q,
				skb_checksum_start_offset(skb) + VLAN_HLEN);
		else
			vnet_hdr->csum_start = cpu_to_macvtap16(q,
				skb_checksum_start_offset(skb));
		vnet_hdr->csum_offset = cpu_to_macvtap16(q, skb->csum_offset);
	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
	} /* else everything is zero */
}
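
/*
 * Illustrative user space sketch (not part of the driver): with
 * IFF_VNET_HDR set (the default after open), every write() must carry
 * vnet_hdr_sz bytes of struct virtio_net_hdr in front of the Ethernet
 * frame. A plain frame needing no offloads could be sent as follows;
 * frame/frame_len are placeholder names:
 *
 *	struct virtio_net_hdr hdr = { .gso_type = VIRTIO_NET_HDR_GSO_NONE };
 *	struct iovec iov[2] = {
 *		{ &hdr, sizeof(hdr) },
 *		{ frame, frame_len },
 *	};
 *
 *	writev(tapfd, iov, 2);
 *
 * This assumes the default vnet_hdr_sz of sizeof(struct virtio_net_hdr);
 * see TUNGETVNETHDRSZ/TUNSETVNETHDRSZ below.
 */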

/* Get packet from user space buffer */
static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
				struct iov_iter *from, int noblock)
{
	int good_linear = SKB_MAX_HEAD(NET_IP_ALIGN);
	struct sk_buff *skb;
	struct macvlan_dev *vlan;
	unsigned long total_len = iov_iter_count(from);
	unsigned long len = total_len;
	int err;
	struct virtio_net_hdr vnet_hdr = { 0 };
	int vnet_hdr_len = 0;
	int copylen = 0;
	bool zerocopy = false;
	size_t linear;
	ssize_t n;

	if (q->flags & IFF_VNET_HDR) {
		vnet_hdr_len = q->vnet_hdr_sz;

		err = -EINVAL;
		if (len < vnet_hdr_len)
			goto err;
		len -= vnet_hdr_len;

		err = -EFAULT;
		n = copy_from_iter(&vnet_hdr, sizeof(vnet_hdr), from);
		if (n != sizeof(vnet_hdr))
			goto err;
		iov_iter_advance(from, vnet_hdr_len - sizeof(vnet_hdr));
		if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		    macvtap16_to_cpu(q, vnet_hdr.csum_start) +
		    macvtap16_to_cpu(q, vnet_hdr.csum_offset) + 2 >
			    macvtap16_to_cpu(q, vnet_hdr.hdr_len))
			vnet_hdr.hdr_len = cpu_to_macvtap16(q,
				macvtap16_to_cpu(q, vnet_hdr.csum_start) +
				macvtap16_to_cpu(q, vnet_hdr.csum_offset) + 2);
		err = -EINVAL;
		if (macvtap16_to_cpu(q, vnet_hdr.hdr_len) > len)
			goto err;
	}

	err = -EINVAL;
	if (unlikely(len < ETH_HLEN))
		goto err;

	if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
		struct iov_iter i;

		copylen = vnet_hdr.hdr_len ?
			macvtap16_to_cpu(q, vnet_hdr.hdr_len) : GOODCOPY_LEN;
		if (copylen > good_linear)
			copylen = good_linear;
		linear = copylen;
		i = *from;
		iov_iter_advance(&i, copylen);
		if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
			zerocopy = true;
	}

	if (!zerocopy) {
		copylen = len;
		if (macvtap16_to_cpu(q, vnet_hdr.hdr_len) > good_linear)
			linear = good_linear;
		else
			linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len);
	}

	skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen,
				linear, noblock, &err);
	if (!skb)
		goto err;

	if (zerocopy)
		err = zerocopy_sg_from_iter(skb, from);
	else {
		err = skb_copy_datagram_from_iter(skb, 0, from, len);
		if (!err && m && m->msg_control) {
			struct ubuf_info *uarg = m->msg_control;
			uarg->callback(uarg, false);
		}
	}

	if (err)
		goto err_kfree;

	skb_set_network_header(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;

	if (vnet_hdr_len) {
		err = macvtap_skb_from_vnet_hdr(q, skb, &vnet_hdr);
		if (err)
			goto err_kfree;
	}

	skb_probe_transport_header(skb, ETH_HLEN);

	rcu_read_lock();
	vlan = rcu_dereference(q->vlan);
	/* copy skb_ubuf_info for callback when skb has no error */
	if (zerocopy) {
		skb_shinfo(skb)->destructor_arg = m->msg_control;
		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
	}
	if (vlan) {
		skb->dev = vlan->dev;
		dev_queue_xmit(skb);
	} else {
		kfree_skb(skb);
	}
	rcu_read_unlock();

	return total_len;

err_kfree:
	kfree_skb(skb);

err:
	rcu_read_lock();
	vlan = rcu_dereference(q->vlan);
	if (vlan)
		this_cpu_inc(vlan->pcpu_stats->tx_dropped);
	rcu_read_unlock();

	return err;
}

static ssize_t macvtap_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct macvtap_queue *q = file->private_data;

	return macvtap_get_user(q, NULL, from, file->f_flags & O_NONBLOCK);
}

/* Put packet to the user space buffer */
static ssize_t macvtap_put_user(struct macvtap_queue *q,
				const struct sk_buff *skb,
				struct iov_iter *iter)
{
	int ret;
	int vnet_hdr_len = 0;
	int vlan_offset = 0;
	int total;

	if (q->flags & IFF_VNET_HDR) {
		struct virtio_net_hdr vnet_hdr;
		vnet_hdr_len = q->vnet_hdr_sz;
		if (iov_iter_count(iter) < vnet_hdr_len)
			return -EINVAL;

		macvtap_skb_to_vnet_hdr(q, skb, &vnet_hdr);

		if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) !=
		    sizeof(vnet_hdr))
			return -EFAULT;

		iov_iter_advance(iter, vnet_hdr_len - sizeof(vnet_hdr));
	}
	total = vnet_hdr_len;
	total += skb->len;

	if (vlan_tx_tag_present(skb)) {
		struct {
			__be16 h_vlan_proto;
			__be16 h_vlan_TCI;
		} veth;
		veth.h_vlan_proto = skb->vlan_proto;
		veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb));

		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
		total += VLAN_HLEN;

		ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
		if (ret || !iov_iter_count(iter))
			goto done;

		ret = copy_to_iter(&veth, sizeof(veth), iter);
		if (ret != sizeof(veth) || !iov_iter_count(iter))
			goto done;
	}

	ret = skb_copy_datagram_iter(skb, vlan_offset, iter,
				     skb->len - vlan_offset);

done:
	return ret ? ret : total;
}
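
/*
 * Illustrative user space sketch (not part of the driver): on the read
 * side the same framing applies in reverse; each read() yields one
 * frame, prefixed by the vnet header when IFF_VNET_HDR is set, with a
 * VLAN tag reinserted into the byte stream by macvtap_put_user() if one
 * was present. Since a TSO-capable queue may receive frames of up to
 * 64KiB, the buffer should be sized accordingly:
 *
 *	char buf[sizeof(struct virtio_net_hdr) + 65536];
 *	ssize_t n = read(tapfd, buf, sizeof(buf));
 *	struct virtio_net_hdr *hdr = (struct virtio_net_hdr *)buf;
 *	char *frame = buf + sizeof(*hdr);
 *
 * A hdr->flags of VIRTIO_NET_HDR_F_NEEDS_CSUM means the checksum at
 * csum_start/csum_offset still has to be computed by the consumer.
 */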

static ssize_t macvtap_do_read(struct macvtap_queue *q,
			       struct iov_iter *to,
			       int noblock)
{
	DEFINE_WAIT(wait);
	struct sk_buff *skb;
	ssize_t ret = 0;

	if (!iov_iter_count(to))
		return 0;

	while (1) {
		if (!noblock)
			prepare_to_wait(sk_sleep(&q->sk), &wait,
					TASK_INTERRUPTIBLE);

		/* Read frames from the queue */
		skb = skb_dequeue(&q->sk.sk_receive_queue);
		if (skb)
			break;
		if (noblock) {
			ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		/* Nothing to read, let's sleep */
		schedule();
	}
	if (skb) {
		ret = macvtap_put_user(q, skb, to);
		if (unlikely(ret < 0))
			kfree_skb(skb);
		else
			consume_skb(skb);
	}
	if (!noblock)
		finish_wait(sk_sleep(&q->sk), &wait);
	return ret;
}

static ssize_t macvtap_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct macvtap_queue *q = file->private_data;
	ssize_t len = iov_iter_count(to), ret;

	ret = macvtap_do_read(q, to, file->f_flags & O_NONBLOCK);
	ret = min_t(ssize_t, ret, len);
	if (ret > 0)
		iocb->ki_pos = ret;
	return ret;
}

static struct macvlan_dev *macvtap_get_vlan(struct macvtap_queue *q)
{
	struct macvlan_dev *vlan;

	ASSERT_RTNL();
	vlan = rtnl_dereference(q->vlan);
	if (vlan)
		dev_hold(vlan->dev);

	return vlan;
}

static void macvtap_put_vlan(struct macvlan_dev *vlan)
{
	dev_put(vlan->dev);
}

static int macvtap_ioctl_set_queue(struct file *file, unsigned int flags)
{
	struct macvtap_queue *q = file->private_data;
	struct macvlan_dev *vlan;
	int ret;

	vlan = macvtap_get_vlan(q);
	if (!vlan)
		return -EINVAL;

	if (flags & IFF_ATTACH_QUEUE)
		ret = macvtap_enable_queue(vlan->dev, file, q);
	else if (flags & IFF_DETACH_QUEUE)
		ret = macvtap_disable_queue(q);
	else
		ret = -EINVAL;

	macvtap_put_vlan(vlan);
	return ret;
}

static int set_offload(struct macvtap_queue *q, unsigned long arg)
{
	struct macvlan_dev *vlan;
	netdev_features_t features;
	netdev_features_t feature_mask = 0;

	vlan = rtnl_dereference(q->vlan);
	if (!vlan)
		return -ENOLINK;

	features = vlan->dev->features;

	if (arg & TUN_F_CSUM) {
		feature_mask = NETIF_F_HW_CSUM;

		if (arg & (TUN_F_TSO4 | TUN_F_TSO6)) {
			if (arg & TUN_F_TSO_ECN)
				feature_mask |= NETIF_F_TSO_ECN;
			if (arg & TUN_F_TSO4)
				feature_mask |= NETIF_F_TSO;
			if (arg & TUN_F_TSO6)
				feature_mask |= NETIF_F_TSO6;
		}
	}

	/* The tun/tap driver inverts the usage for TSO offloads, where
	 * setting the TSO bit means that the userspace wants to
	 * accept TSO frames and turning it off means that user space
	 * does not support TSO.
	 * For macvtap, we have to invert it to mean the same thing.
	 * When user space turns off TSO, we turn off GSO/LRO so that
	 * user-space will not receive TSO frames.
	 */
	if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6))
		features |= RX_OFFLOADS;
	else
		features &= ~RX_OFFLOADS;

	/* tap_features are the same as features on tun/tap and
	 * reflect user expectations.
	 */
	vlan->tap_features = feature_mask;
	vlan->set_features = features;
	netdev_update_features(vlan->dev);

	return 0;
}
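
/*
 * Illustrative user space sketch (not part of the driver): the offload
 * negotiation above is driven through TUNSETOFFLOAD. Announcing TSO
 * support tells the kernel it may hand large, unsegmented TCP frames
 * to this queue:
 *
 *	unsigned long offloads = TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6;
 *
 *	ioctl(tapfd, TUNSETOFFLOAD, offloads);
 *
 * Passing only TUN_F_CSUM (or 0) instead makes set_offload() clear
 * GRO/LRO on the macvlan device, so the queue only ever sees
 * MTU-sized frames.
 */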

/*
 * provide compatibility with generic tun/tap interface
 */
static long macvtap_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct macvtap_queue *q = file->private_data;
	struct macvlan_dev *vlan;
	void __user *argp = (void __user *)arg;
	struct ifreq __user *ifr = argp;
	unsigned int __user *up = argp;
	unsigned int u;
	int __user *sp = argp;
	int s;
	int ret;

	switch (cmd) {
	case TUNSETIFF:
		/* ignore the name, just look at flags */
		if (get_user(u, &ifr->ifr_flags))
			return -EFAULT;

		ret = 0;
		if ((u & ~MACVTAP_FEATURES) != (IFF_NO_PI | IFF_TAP))
			ret = -EINVAL;
		else
			q->flags = u;

		return ret;

	case TUNGETIFF:
		rtnl_lock();
		vlan = macvtap_get_vlan(q);
		if (!vlan) {
			rtnl_unlock();
			return -ENOLINK;
		}

		ret = 0;
		if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
		    put_user(q->flags, &ifr->ifr_flags))
			ret = -EFAULT;
		macvtap_put_vlan(vlan);
		rtnl_unlock();
		return ret;

	case TUNSETQUEUE:
		if (get_user(u, &ifr->ifr_flags))
			return -EFAULT;
		rtnl_lock();
		ret = macvtap_ioctl_set_queue(file, u);
		rtnl_unlock();
		return ret;

	case TUNGETFEATURES:
		if (put_user(IFF_TAP | IFF_NO_PI | MACVTAP_FEATURES, up))
			return -EFAULT;
		return 0;

	case TUNSETSNDBUF:
		if (get_user(u, up))
			return -EFAULT;

		q->sk.sk_sndbuf = u;
		return 0;

	case TUNGETVNETHDRSZ:
		s = q->vnet_hdr_sz;
		if (put_user(s, sp))
			return -EFAULT;
		return 0;

	case TUNSETVNETHDRSZ:
		if (get_user(s, sp))
			return -EFAULT;
		if (s < (int)sizeof(struct virtio_net_hdr))
			return -EINVAL;

		q->vnet_hdr_sz = s;
		return 0;

	case TUNSETOFFLOAD:
		/* let the user check for future flags */
		if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
			    TUN_F_TSO_ECN))
			return -EINVAL;

		rtnl_lock();
		ret = set_offload(q, arg);
		rtnl_unlock();
		return ret;

	default:
		return -EINVAL;
	}
}

#ifdef CONFIG_COMPAT
static long macvtap_compat_ioctl(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	return macvtap_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif
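
/*
 * Illustrative user space sketch (not part of the driver): unlike tun,
 * TUNSETIFF cannot create or rename the interface here; it only
 * validates and stores the flags. A typical setup sequence is:
 *
 *	unsigned int features;
 *	struct ifreq ifr = {
 *		.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_VNET_HDR,
 *	};
 *
 *	ioctl(tapfd, TUNGETFEATURES, &features);
 *	ioctl(tapfd, TUNSETIFF, &ifr);
 *
 * Any flag outside IFF_TAP | IFF_NO_PI | MACVTAP_FEATURES is rejected
 * with -EINVAL by the TUNSETIFF case above.
 */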

static const struct file_operations macvtap_fops = {
	.owner = THIS_MODULE,
	.open = macvtap_open,
	.release = macvtap_release,
	.read = new_sync_read,
	.write = new_sync_write,
	.read_iter = macvtap_read_iter,
	.write_iter = macvtap_write_iter,
	.poll = macvtap_poll,
	.llseek = no_llseek,
	.unlocked_ioctl = macvtap_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = macvtap_compat_ioctl,
#endif
};

static int macvtap_sendmsg(struct kiocb *iocb, struct socket *sock,
			   struct msghdr *m, size_t total_len)
{
	struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);
	return macvtap_get_user(q, m, &m->msg_iter, m->msg_flags & MSG_DONTWAIT);
}

static int macvtap_recvmsg(struct kiocb *iocb, struct socket *sock,
			   struct msghdr *m, size_t total_len,
			   int flags)
{
	struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);
	int ret;

	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
		return -EINVAL;
	ret = macvtap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT);
	if (ret > total_len) {
		m->msg_flags |= MSG_TRUNC;
		ret = flags & MSG_TRUNC ? ret : total_len;
	}
	return ret;
}

/* Ops structure to mimic raw sockets with tun */
static const struct proto_ops macvtap_socket_ops = {
	.sendmsg = macvtap_sendmsg,
	.recvmsg = macvtap_recvmsg,
};

/* Get an underlying socket object from tun file. Returns error unless file is
 * attached to a device. The returned object works like a packet socket; it
 * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for
 * holding a reference to the file for as long as the socket is in use.
 */
struct socket *macvtap_get_socket(struct file *file)
{
	struct macvtap_queue *q;

	if (file->f_op != &macvtap_fops)
		return ERR_PTR(-EINVAL);
	q = file->private_data;
	if (!q)
		return ERR_PTR(-EBADFD);
	return &q->sock;
}
EXPORT_SYMBOL_GPL(macvtap_get_socket);
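
/*
 * Illustrative kernel-side sketch (not part of the driver): an in-kernel
 * consumer such as vhost-net obtains the queue's socket from a tap file
 * descriptor and then drives it through the proto_ops above, passing a
 * NULL kiocb:
 *
 *	struct socket *sock = macvtap_get_socket(file);
 *
 *	if (!IS_ERR(sock))
 *		err = sock->ops->sendmsg(NULL, sock, &msg, total_len);
 *
 * Per the comment on macvtap_get_socket(), the caller must keep its
 * reference on the file for as long as it uses the socket.
 */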

static int macvtap_device_event(struct notifier_block *unused,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct macvlan_dev *vlan;
	struct device *classdev;
	dev_t devt;
	int err;

	if (dev->rtnl_link_ops != &macvtap_link_ops)
		return NOTIFY_DONE;

	vlan = netdev_priv(dev);

	switch (event) {
	case NETDEV_REGISTER:
		/* Create the device node here after the network device has
		 * been registered but before register_netdevice has
		 * finished running.
		 */
		err = macvtap_get_minor(vlan);
		if (err)
			return notifier_from_errno(err);

		devt = MKDEV(MAJOR(macvtap_major), vlan->minor);
		classdev = device_create(macvtap_class, &dev->dev, devt,
					 dev, "tap%d", dev->ifindex);
		if (IS_ERR(classdev)) {
			macvtap_free_minor(vlan);
			return notifier_from_errno(PTR_ERR(classdev));
		}
		break;
	case NETDEV_UNREGISTER:
		devt = MKDEV(MAJOR(macvtap_major), vlan->minor);
		device_destroy(macvtap_class, devt);
		macvtap_free_minor(vlan);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block macvtap_notifier_block __read_mostly = {
	.notifier_call = macvtap_device_event,
};

static int macvtap_init(void)
{
	int err;

	err = alloc_chrdev_region(&macvtap_major, 0,
				  MACVTAP_NUM_DEVS, "macvtap");
	if (err)
		goto out1;

	cdev_init(&macvtap_cdev, &macvtap_fops);
	err = cdev_add(&macvtap_cdev, macvtap_major, MACVTAP_NUM_DEVS);
	if (err)
		goto out2;

	macvtap_class = class_create(THIS_MODULE, "macvtap");
	if (IS_ERR(macvtap_class)) {
		err = PTR_ERR(macvtap_class);
		goto out3;
	}

	err = register_netdevice_notifier(&macvtap_notifier_block);
	if (err)
		goto out4;

	err = macvlan_link_register(&macvtap_link_ops);
	if (err)
		goto out5;

	return 0;

out5:
	unregister_netdevice_notifier(&macvtap_notifier_block);
out4:
	class_unregister(macvtap_class);
out3:
	cdev_del(&macvtap_cdev);
out2:
	unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
out1:
	return err;
}
module_init(macvtap_init);

static void macvtap_exit(void)
{
	rtnl_link_unregister(&macvtap_link_ops);
	unregister_netdevice_notifier(&macvtap_notifier_block);
	class_unregister(macvtap_class);
	cdev_del(&macvtap_cdev);
	unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
}
module_exit(macvtap_exit);

MODULE_ALIAS_RTNL_LINK("macvtap");
MODULE_AUTHOR("Arnd Bergmann <arnd@arndb.de>");
MODULE_LICENSE("GPL");