// SPDX-License-Identifier: GPL-2.0-only
#include <linux/etherdevice.h>
#include <linux/if_tap.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/nsproxy.h>
#include <linux/compat.h>
#include <linux/if_tun.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/cache.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/fs.h>
#include <linux/uio.h>

#include <net/gso.h>
#include <net/net_namespace.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <net/xdp.h>
#include <linux/virtio_net.h>
#include <linux/skb_array.h>

#include "tun_vnet.h"

#define TAP_IFFEATURES (IFF_VNET_HDR | IFF_MULTI_QUEUE)

static struct proto tap_proto = {
	.name = "tap",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct tap_queue),
};

#define TAP_NUM_DEVS (1U << MINORBITS)

static LIST_HEAD(major_list);

struct major_info {
	struct rcu_head rcu;
	dev_t major;
	struct idr minor_idr;
	spinlock_t minor_lock;
	const char *device_name;
	struct list_head next;
};

#define GOODCOPY_LEN 128

static const struct proto_ops tap_socket_ops;

#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG | NETIF_F_FRAGLIST)

static struct tap_dev *tap_dev_get_rcu(const struct net_device *dev)
{
	return rcu_dereference(dev->rx_handler_data);
}

/*
 * RCU usage:
 * The tap_queue and the macvlan_dev are loosely coupled, the
 * pointers from one to the other can only be read while rcu_read_lock
 * or rtnl is held.
 *
 * Both the file and the macvlan_dev hold a reference on the tap_queue
 * through sock_hold(&q->sk). When the macvlan_dev goes away first,
 * q->vlan becomes inaccessible. When the file gets closed,
 * tap_get_queue() fails.
 *
 * There may still be references to the struct sock inside of the
 * queue from outbound SKBs, but these never reference back to the
 * file or the macvlan_dev. The data structure is freed through
 * __sk_free when both our references and any pending SKBs are gone.
 */
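/*
 * A sketch of how the two references above pair up in practice
 * (informational, assuming a macvtap-style consumer): tap_set_queue()
 * takes the device side's sock_hold(&q->sk) when the fd is attached,
 * and tap_put_queue() drops that reference and then the file's own one
 * once the fd is closed.
 */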
static int tap_enable_queue(struct tap_dev *tap, struct file *file,
			    struct tap_queue *q)
{
	int err = -EINVAL;

	ASSERT_RTNL();

	if (q->enabled)
		goto out;

	err = 0;
	rcu_assign_pointer(tap->taps[tap->numvtaps], q);
	q->queue_index = tap->numvtaps;
	q->enabled = true;

	tap->numvtaps++;
out:
	return err;
}

/* Requires RTNL */
static int tap_set_queue(struct tap_dev *tap, struct file *file,
			 struct tap_queue *q)
{
	if (tap->numqueues == MAX_TAP_QUEUES)
		return -EBUSY;

	rcu_assign_pointer(q->tap, tap);
	rcu_assign_pointer(tap->taps[tap->numvtaps], q);
	sock_hold(&q->sk);

	q->file = file;
	q->queue_index = tap->numvtaps;
	q->enabled = true;
	file->private_data = q;
	list_add_tail(&q->next, &tap->queue_list);

	tap->numvtaps++;
	tap->numqueues++;

	return 0;
}

static int tap_disable_queue(struct tap_queue *q)
{
	struct tap_dev *tap;
	struct tap_queue *nq;

	ASSERT_RTNL();
	if (!q->enabled)
		return -EINVAL;

	tap = rtnl_dereference(q->tap);

	if (tap) {
		int index = q->queue_index;

		BUG_ON(index >= tap->numvtaps);
		nq = rtnl_dereference(tap->taps[tap->numvtaps - 1]);
		nq->queue_index = index;

		rcu_assign_pointer(tap->taps[index], nq);
		RCU_INIT_POINTER(tap->taps[tap->numvtaps - 1], NULL);
		q->enabled = false;

		tap->numvtaps--;
	}

	return 0;
}

/*
 * The file owning the queue got closed, give up both
 * the reference that the file holds as well as the
 * one from the macvlan_dev if that still exists.
 *
 * Using the spinlock makes sure that we don't get
 * to the queue again after destroying it.
 */
static void tap_put_queue(struct tap_queue *q)
{
	struct tap_dev *tap;

	rtnl_lock();
	tap = rtnl_dereference(q->tap);

	if (tap) {
		if (q->enabled)
			BUG_ON(tap_disable_queue(q));

		tap->numqueues--;
		RCU_INIT_POINTER(q->tap, NULL);
		sock_put(&q->sk);
		list_del_init(&q->next);
	}

	rtnl_unlock();

	synchronize_rcu();
	sock_put(&q->sk);
}

/*
 * Select a queue based on the rxq of the device on which this packet
 * arrived. If the incoming device is not mq, calculate a flow hash
 * to select a queue. If all fails, find the first available queue.
 * Cache tap->numvtaps since it can become zero during the execution
 * of this function.
 */
static struct tap_queue *tap_get_queue(struct tap_dev *tap,
				       struct sk_buff *skb)
{
	struct tap_queue *queue = NULL;
	/* Access to taps array is protected by rcu, but access to numvtaps
	 * isn't. Below we use it to lookup a queue, but treat it as a hint
	 * and validate that the result isn't NULL - in case we are
	 * racing against queue removal.
	 */
	int numvtaps = READ_ONCE(tap->numvtaps);
	__u32 rxq;

	if (!numvtaps)
		goto out;

	if (numvtaps == 1)
		goto single;

	/* Check if we can use flow to select a queue */
	rxq = skb_get_hash(skb);
	if (rxq) {
		queue = rcu_dereference(tap->taps[rxq % numvtaps]);
		goto out;
	}

	if (likely(skb_rx_queue_recorded(skb))) {
		rxq = skb_get_rx_queue(skb);

		while (unlikely(rxq >= numvtaps))
			rxq -= numvtaps;

		queue = rcu_dereference(tap->taps[rxq]);
		goto out;
	}

single:
	queue = rcu_dereference(tap->taps[0]);
out:
	return queue;
}
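/*
 * Worked example for the selection above (illustrative numbers): with
 * numvtaps == 4 and skb_get_hash() returning 0x1d, the packet lands on
 * queue 0x1d % 4 == 1; a zero hash falls back to the recorded rx queue,
 * reduced modulo numvtaps by repeated subtraction.
 */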
/*
 * The net_device is going away, give up the reference
 * that it holds on all queues and safely set the pointer
 * from the queues to NULL.
 */
void tap_del_queues(struct tap_dev *tap)
{
	struct tap_queue *q, *tmp;

	ASSERT_RTNL();
	list_for_each_entry_safe(q, tmp, &tap->queue_list, next) {
		list_del_init(&q->next);
		RCU_INIT_POINTER(q->tap, NULL);
		if (q->enabled)
			tap->numvtaps--;
		tap->numqueues--;
		sock_put(&q->sk);
	}
	BUG_ON(tap->numvtaps);
	BUG_ON(tap->numqueues);
	/* guarantee that any future tap_set_queue will fail */
	tap->numvtaps = MAX_TAP_QUEUES;
}
EXPORT_SYMBOL_GPL(tap_del_queues);

rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct tap_dev *tap;
	struct tap_queue *q;
	netdev_features_t features = TAP_FEATURES;
	enum skb_drop_reason drop_reason;

	tap = tap_dev_get_rcu(dev);
	if (!tap)
		return RX_HANDLER_PASS;

	q = tap_get_queue(tap, skb);
	if (!q)
		return RX_HANDLER_PASS;

	skb_push(skb, ETH_HLEN);

	/* Apply the forward feature mask so that we perform segmentation
	 * according to the user's wishes. This only works if VNET_HDR is
	 * enabled.
	 */
	if (q->flags & IFF_VNET_HDR)
		features |= tap->tap_features;
	if (netif_needs_gso(skb, features)) {
		struct sk_buff *segs = __skb_gso_segment(skb, features, false);
		struct sk_buff *next;

		if (IS_ERR(segs)) {
			drop_reason = SKB_DROP_REASON_SKB_GSO_SEG;
			goto drop;
		}

		if (!segs) {
			if (ptr_ring_produce(&q->ring, skb)) {
				drop_reason = SKB_DROP_REASON_FULL_RING;
				goto drop;
			}
			goto wake_up;
		}

		consume_skb(skb);
		skb_list_walk_safe(segs, skb, next) {
			skb_mark_not_on_list(skb);
			if (ptr_ring_produce(&q->ring, skb)) {
				drop_reason = SKB_DROP_REASON_FULL_RING;
				kfree_skb_reason(skb, drop_reason);
				kfree_skb_list_reason(next, drop_reason);
				break;
			}
		}
	} else {
		/* If we receive a partial checksum and the tap side
		 * doesn't support checksum offload, compute the checksum.
		 * Note: it doesn't matter which checksum feature to
		 *       check, we either support them all or none.
		 */
		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    !(features & NETIF_F_CSUM_MASK) &&
		    skb_checksum_help(skb)) {
			drop_reason = SKB_DROP_REASON_SKB_CSUM;
			goto drop;
		}
		if (ptr_ring_produce(&q->ring, skb)) {
			drop_reason = SKB_DROP_REASON_FULL_RING;
			goto drop;
		}
	}

wake_up:
	wake_up_interruptible_poll(sk_sleep(&q->sk), EPOLLIN | EPOLLRDNORM | EPOLLRDBAND);
	return RX_HANDLER_CONSUMED;

drop:
	/* Count errors/drops only here, thus don't care about args. */
	if (tap->count_rx_dropped)
		tap->count_rx_dropped(tap);
	kfree_skb_reason(skb, drop_reason);
	return RX_HANDLER_CONSUMED;
}
EXPORT_SYMBOL_GPL(tap_handle_frame);
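/*
 * tap_handle_frame() is meant to be installed as the lower device's
 * rx_handler by the consuming driver (macvtap/ipvtap style); a minimal
 * sketch, assuming "tap" embeds the per-device state:
 *
 *	err = netdev_rx_handler_register(dev, tap_handle_frame, tap);
 */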
static struct major_info *tap_get_major(int major)
{
	struct major_info *tap_major;

	list_for_each_entry_rcu(tap_major, &major_list, next) {
		if (tap_major->major == major)
			return tap_major;
	}

	return NULL;
}

int tap_get_minor(dev_t major, struct tap_dev *tap)
{
	int retval = -ENOMEM;
	struct major_info *tap_major;

	rcu_read_lock();
	tap_major = tap_get_major(MAJOR(major));
	if (!tap_major) {
		retval = -EINVAL;
		goto unlock;
	}

	spin_lock(&tap_major->minor_lock);
	retval = idr_alloc(&tap_major->minor_idr, tap, 1, TAP_NUM_DEVS, GFP_ATOMIC);
	if (retval >= 0) {
		tap->minor = retval;
	} else if (retval == -ENOSPC) {
		netdev_err(tap->dev, "Too many tap devices\n");
		retval = -EINVAL;
	}
	spin_unlock(&tap_major->minor_lock);

unlock:
	rcu_read_unlock();
	return retval < 0 ? retval : 0;
}
EXPORT_SYMBOL_GPL(tap_get_minor);

void tap_free_minor(dev_t major, struct tap_dev *tap)
{
	struct major_info *tap_major;

	rcu_read_lock();
	tap_major = tap_get_major(MAJOR(major));
	if (!tap_major)
		goto unlock;

	spin_lock(&tap_major->minor_lock);
	if (tap->minor) {
		idr_remove(&tap_major->minor_idr, tap->minor);
		tap->minor = 0;
	}
	spin_unlock(&tap_major->minor_lock);

unlock:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(tap_free_minor);

static struct tap_dev *dev_get_by_tap_file(int major, int minor)
{
	struct net_device *dev = NULL;
	struct tap_dev *tap;
	struct major_info *tap_major;

	rcu_read_lock();
	tap_major = tap_get_major(major);
	if (!tap_major) {
		tap = NULL;
		goto unlock;
	}

	spin_lock(&tap_major->minor_lock);
	tap = idr_find(&tap_major->minor_idr, minor);
	if (tap) {
		dev = tap->dev;
		dev_hold(dev);
	}
	spin_unlock(&tap_major->minor_lock);

unlock:
	rcu_read_unlock();
	return tap;
}

static void tap_sock_write_space(struct sock *sk)
{
	wait_queue_head_t *wqueue;

	if (!sock_writeable(sk) ||
	    !test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
		return;

	wqueue = sk_sleep(sk);
	if (wqueue && waitqueue_active(wqueue))
		wake_up_interruptible_poll(wqueue, EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND);
}

static void tap_sock_destruct(struct sock *sk)
{
	struct tap_queue *q = container_of(sk, struct tap_queue, sk);

	ptr_ring_cleanup(&q->ring, __skb_array_destroy_skb);
}
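/*
 * open() on the character device: look up the tap_dev behind the inode's
 * major/minor, allocate a tap_queue (a pseudo socket plus skb ring) and
 * attach it as a new queue of the device.
 */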
static int tap_open(struct inode *inode, struct file *file)
{
	struct net *net = current->nsproxy->net_ns;
	struct tap_dev *tap;
	struct tap_queue *q;
	int err = -ENODEV;

	rtnl_lock();
	tap = dev_get_by_tap_file(imajor(inode), iminor(inode));
	if (!tap)
		goto err;

	err = -ENOMEM;
	q = (struct tap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
					 &tap_proto, 0);
	if (!q)
		goto err;
	if (ptr_ring_init(&q->ring, tap->dev->tx_queue_len, GFP_KERNEL)) {
		sk_free(&q->sk);
		goto err;
	}

	init_waitqueue_head(&q->sock.wq.wait);
	q->sock.type = SOCK_RAW;
	q->sock.state = SS_CONNECTED;
	q->sock.file = file;
	q->sock.ops = &tap_socket_ops;
	sock_init_data_uid(&q->sock, &q->sk, current_fsuid());
	q->sk.sk_write_space = tap_sock_write_space;
	q->sk.sk_destruct = tap_sock_destruct;
	q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
	q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);

	/*
	 * So far only KVM virtio_net uses tap; enable zero copy between
	 * guest kernel and host kernel when the lower device supports
	 * zerocopy.
	 *
	 * The macvlan supports zerocopy iff the lower device supports zero
	 * copy, so we don't have to look at the lower device directly.
	 */
	if ((tap->dev->features & NETIF_F_HIGHDMA) && (tap->dev->features & NETIF_F_SG))
		sock_set_flag(&q->sk, SOCK_ZEROCOPY);

	err = tap_set_queue(tap, file, q);
	if (err) {
		/* tap_sock_destruct() will take care of freeing ptr_ring */
		goto err_put;
	}

	/* tap groks IOCB_NOWAIT just fine, mark it as such */
	file->f_mode |= FMODE_NOWAIT;

	dev_put(tap->dev);

	rtnl_unlock();
	return err;

err_put:
	sock_put(&q->sk);
err:
	if (tap)
		dev_put(tap->dev);

	rtnl_unlock();
	return err;
}

static int tap_release(struct inode *inode, struct file *file)
{
	struct tap_queue *q = file->private_data;

	tap_put_queue(q);
	return 0;
}

static __poll_t tap_poll(struct file *file, poll_table *wait)
{
	struct tap_queue *q = file->private_data;
	__poll_t mask = EPOLLERR;

	if (!q)
		goto out;

	mask = 0;
	poll_wait(file, &q->sock.wq.wait, wait);

	if (!ptr_ring_empty(&q->ring))
		mask |= EPOLLIN | EPOLLRDNORM;

	if (sock_writeable(&q->sk) ||
	    (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &q->sock.flags) &&
	     sock_writeable(&q->sk)))
		mask |= EPOLLOUT | EPOLLWRNORM;

out:
	return mask;
}
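/*
 * Allocate an skb for the write path: up to @linear bytes live in the
 * linear head, the remainder of @len is attached as paged fragments so
 * large writes don't require one large contiguous allocation.
 */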
static inline struct sk_buff *tap_alloc_skb(struct sock *sk, size_t prepad,
					    size_t len, size_t linear,
					    int noblock, int *err)
{
	struct sk_buff *skb;

	/* Under a page? Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)
		linear = len;

	if (len - linear > MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
		linear = len - MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER);
	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
				   err, PAGE_ALLOC_COSTLY_ORDER);
	if (!skb)
		return NULL;

	skb_reserve(skb, prepad);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;

	return skb;
}

/* Neighbour code has some assumptions on HH_DATA_MOD alignment */
#define TAP_RESERVE HH_DATA_OFF(ETH_HLEN)

/* Get packet from user space buffer */
static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
			    struct iov_iter *from, int noblock)
{
	int good_linear = SKB_MAX_HEAD(TAP_RESERVE);
	struct sk_buff *skb;
	struct tap_dev *tap;
	unsigned long total_len = iov_iter_count(from);
	unsigned long len = total_len;
	int err;
	struct virtio_net_hdr vnet_hdr = { 0 };
	int vnet_hdr_len = 0;
	int hdr_len = 0;
	int copylen = 0;
	int depth;
	bool zerocopy = false;
	size_t linear;
	enum skb_drop_reason drop_reason;

	if (q->flags & IFF_VNET_HDR) {
		vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);

		hdr_len = tun_vnet_hdr_get(vnet_hdr_len, q->flags, from, &vnet_hdr);
		if (hdr_len < 0) {
			err = hdr_len;
			goto err;
		}

		len -= vnet_hdr_len;
	}

	err = -EINVAL;
	if (unlikely(len < ETH_HLEN))
		goto err;

	if (msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
		struct iov_iter i;

		copylen = clamp(hdr_len ?: GOODCOPY_LEN, ETH_HLEN, good_linear);
		linear = copylen;
		i = *from;
		iov_iter_advance(&i, copylen);
		if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
			zerocopy = true;
	}

	if (!zerocopy) {
		copylen = len;
		linear = clamp(hdr_len, ETH_HLEN, good_linear);
	}

	skb = tap_alloc_skb(&q->sk, TAP_RESERVE, copylen,
			    linear, noblock, &err);
	if (!skb)
		goto err;

	if (zerocopy)
		err = zerocopy_sg_from_iter(skb, from);
	else
		err = skb_copy_datagram_from_iter(skb, 0, from, len);

	if (err) {
		drop_reason = SKB_DROP_REASON_SKB_UCOPY_FAULT;
		goto err_kfree;
	}

	skb_set_network_header(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;

	rcu_read_lock();
	tap = rcu_dereference(q->tap);
	if (!tap) {
		kfree_skb(skb);
		rcu_read_unlock();
		return total_len;
	}
	skb->dev = tap->dev;

	if (vnet_hdr_len) {
		err = tun_vnet_hdr_to_skb(q->flags, skb, &vnet_hdr);
		if (err) {
			rcu_read_unlock();
			drop_reason = SKB_DROP_REASON_DEV_HDR;
			goto err_kfree;
		}
	}

	skb_probe_transport_header(skb);

	/* Move network header to the right position for VLAN tagged packets */
	if (eth_type_vlan(skb->protocol) &&
	    vlan_get_protocol_and_depth(skb, skb->protocol, &depth) != 0)
		skb_set_network_header(skb, depth);

	/* copy skb_ubuf_info for callback when skb has no error */
	if (zerocopy) {
		skb_zcopy_init(skb, msg_control);
	} else if (msg_control) {
		struct ubuf_info *uarg = msg_control;

		uarg->ops->complete(NULL, uarg, false);
	}

	dev_queue_xmit(skb);
	rcu_read_unlock();
	return total_len;

err_kfree:
	kfree_skb_reason(skb, drop_reason);

err:
	rcu_read_lock();
	tap = rcu_dereference(q->tap);
	if (tap && tap->count_tx_dropped)
		tap->count_tx_dropped(tap);
	rcu_read_unlock();

	return err;
}
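/*
 * Framing expected from userspace on write(): with IFF_VNET_HDR set, each
 * write carries a struct virtio_net_hdr (q->vnet_hdr_sz bytes, possibly
 * padded) followed by the complete Ethernet frame; without it, the raw
 * frame alone.
 */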
static ssize_t tap_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct tap_queue *q = file->private_data;
	int noblock = 0;

	if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT))
		noblock = 1;

	return tap_get_user(q, NULL, from, noblock);
}

/* Put packet to the user space buffer */
static ssize_t tap_put_user(struct tap_queue *q,
			    const struct sk_buff *skb,
			    struct iov_iter *iter)
{
	int ret;
	int vnet_hdr_len = 0;
	int vlan_offset = 0;
	int total;

	if (q->flags & IFF_VNET_HDR) {
		struct virtio_net_hdr vnet_hdr;

		vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);

		ret = tun_vnet_hdr_from_skb(q->flags, NULL, skb, &vnet_hdr);
		if (ret)
			return ret;

		ret = tun_vnet_hdr_put(vnet_hdr_len, iter, &vnet_hdr);
		if (ret)
			return ret;
	}
	total = vnet_hdr_len;
	total += skb->len;

	if (skb_vlan_tag_present(skb)) {
		struct {
			__be16 h_vlan_proto;
			__be16 h_vlan_TCI;
		} veth;

		veth.h_vlan_proto = skb->vlan_proto;
		veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));

		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
		total += VLAN_HLEN;

		ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
		if (ret || !iov_iter_count(iter))
			goto done;

		ret = copy_to_iter(&veth, sizeof(veth), iter);
		if (ret != sizeof(veth) || !iov_iter_count(iter))
			goto done;
	}

	ret = skb_copy_datagram_iter(skb, vlan_offset, iter,
				     skb->len - vlan_offset);

done:
	return ret ? ret : total;
}
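/*
 * Mirror image of the write-path framing: reads yield an optional
 * virtio_net_hdr followed by the frame, with a VLAN tag carried in skb
 * metadata re-inserted inline after the MAC addresses (hence the copy
 * split at vlan_offset above).
 */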
static ssize_t tap_do_read(struct tap_queue *q,
			   struct iov_iter *to,
			   int noblock, struct sk_buff *skb)
{
	DEFINE_WAIT(wait);
	ssize_t ret = 0;

	if (!iov_iter_count(to)) {
		kfree_skb(skb);
		return 0;
	}

	if (skb)
		goto put;

	while (1) {
		if (!noblock)
			prepare_to_wait(sk_sleep(&q->sk), &wait,
					TASK_INTERRUPTIBLE);

		/* Read frames from the queue */
		skb = ptr_ring_consume(&q->ring);
		if (skb)
			break;
		if (noblock) {
			ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		/* Nothing to read, let's sleep */
		schedule();
	}
	if (!noblock)
		finish_wait(sk_sleep(&q->sk), &wait);

put:
	if (skb) {
		ret = tap_put_user(q, skb, to);
		if (unlikely(ret < 0))
			kfree_skb(skb);
		else
			consume_skb(skb);
	}
	return ret;
}

static ssize_t tap_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct tap_queue *q = file->private_data;
	ssize_t len = iov_iter_count(to), ret;
	int noblock = 0;

	if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT))
		noblock = 1;

	ret = tap_do_read(q, to, noblock, NULL);
	ret = min_t(ssize_t, ret, len);
	if (ret > 0)
		iocb->ki_pos = ret;
	return ret;
}

static struct tap_dev *tap_get_tap_dev(struct tap_queue *q)
{
	struct tap_dev *tap;

	ASSERT_RTNL();
	tap = rtnl_dereference(q->tap);
	if (tap)
		dev_hold(tap->dev);

	return tap;
}

static void tap_put_tap_dev(struct tap_dev *tap)
{
	dev_put(tap->dev);
}

static int tap_ioctl_set_queue(struct file *file, unsigned int flags)
{
	struct tap_queue *q = file->private_data;
	struct tap_dev *tap;
	int ret;

	tap = tap_get_tap_dev(q);
	if (!tap)
		return -EINVAL;

	if (flags & IFF_ATTACH_QUEUE)
		ret = tap_enable_queue(tap, file, q);
	else if (flags & IFF_DETACH_QUEUE)
		ret = tap_disable_queue(q);
	else
		ret = -EINVAL;

	tap_put_tap_dev(tap);
	return ret;
}

static int set_offload(struct tap_queue *q, unsigned long arg)
{
	struct tap_dev *tap;
	netdev_features_t features;
	netdev_features_t feature_mask = 0;

	tap = rtnl_dereference(q->tap);
	if (!tap)
		return -ENOLINK;

	features = tap->dev->features;

	if (arg & TUN_F_CSUM) {
		feature_mask = NETIF_F_HW_CSUM;

		if (arg & (TUN_F_TSO4 | TUN_F_TSO6)) {
			if (arg & TUN_F_TSO_ECN)
				feature_mask |= NETIF_F_TSO_ECN;
			if (arg & TUN_F_TSO4)
				feature_mask |= NETIF_F_TSO;
			if (arg & TUN_F_TSO6)
				feature_mask |= NETIF_F_TSO6;
		}

		/* TODO: for now USO4 and USO6 should work simultaneously */
		if ((arg & (TUN_F_USO4 | TUN_F_USO6)) == (TUN_F_USO4 | TUN_F_USO6))
			features |= NETIF_F_GSO_UDP_L4;
	}

	/* tun/tap driver inverts the usage for TSO offloads, where
	 * setting the TSO bit means that the userspace wants to
	 * accept TSO frames and turning it off means that user space
	 * does not support TSO.
	 * For tap, we have to invert it to mean the same thing.
	 * When user space turns off TSO, we turn off GSO/LRO so that
	 * user-space will not receive TSO frames.
	 */
	if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6) ||
	    (feature_mask & (TUN_F_USO4 | TUN_F_USO6)) == (TUN_F_USO4 | TUN_F_USO6))
		features |= RX_OFFLOADS;
	else
		features &= ~RX_OFFLOADS;

	/* tap_features are the same as features on tun/tap and
	 * reflect user expectations.
	 */
	tap->tap_features = feature_mask;
	if (tap->update_features)
		tap->update_features(tap, features);

	return 0;
}
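/*
 * Example (illustrative): TUNSETOFFLOAD with TUN_F_CSUM | TUN_F_TSO4 yields
 * feature_mask = NETIF_F_HW_CSUM | NETIF_F_TSO, so GRO/LRO stay enabled on
 * the lower device and userspace may be handed TSO frames; dropping
 * TUN_F_TSO4 again clears RX_OFFLOADS.
 */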
/*
 * Provide compatibility with the generic tun/tap interface.
 */
static long tap_ioctl(struct file *file, unsigned int cmd,
		      unsigned long arg)
{
	struct tap_queue *q = file->private_data;
	struct tap_dev *tap;
	void __user *argp = (void __user *)arg;
	struct ifreq __user *ifr = argp;
	unsigned int __user *up = argp;
	unsigned short u;
	int __user *sp = argp;
	struct sockaddr sa;
	int s;
	int ret;

	switch (cmd) {
	case TUNSETIFF:
		/* ignore the name, just look at flags */
		if (get_user(u, &ifr->ifr_flags))
			return -EFAULT;

		ret = 0;
		if ((u & ~TAP_IFFEATURES) != (IFF_NO_PI | IFF_TAP))
			ret = -EINVAL;
		else
			q->flags = (q->flags & ~TAP_IFFEATURES) | u;

		return ret;

	case TUNGETIFF:
		rtnl_lock();
		tap = tap_get_tap_dev(q);
		if (!tap) {
			rtnl_unlock();
			return -ENOLINK;
		}

		ret = 0;
		u = q->flags;
		if (copy_to_user(&ifr->ifr_name, tap->dev->name, IFNAMSIZ) ||
		    put_user(u, &ifr->ifr_flags))
			ret = -EFAULT;
		tap_put_tap_dev(tap);
		rtnl_unlock();
		return ret;

	case TUNSETQUEUE:
		if (get_user(u, &ifr->ifr_flags))
			return -EFAULT;
		rtnl_lock();
		ret = tap_ioctl_set_queue(file, u);
		rtnl_unlock();
		return ret;

	case TUNGETFEATURES:
		if (put_user(IFF_TAP | IFF_NO_PI | TAP_IFFEATURES, up))
			return -EFAULT;
		return 0;

	case TUNSETSNDBUF:
		if (get_user(s, sp))
			return -EFAULT;
		if (s <= 0)
			return -EINVAL;

		q->sk.sk_sndbuf = s;
		return 0;

	case TUNSETOFFLOAD:
		/* let the user check for future flags */
		if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
			    TUN_F_TSO_ECN | TUN_F_UFO |
			    TUN_F_USO4 | TUN_F_USO6))
			return -EINVAL;

		rtnl_lock();
		ret = set_offload(q, arg);
		rtnl_unlock();
		return ret;

	case SIOCGIFHWADDR:
		rtnl_lock();
		tap = tap_get_tap_dev(q);
		if (!tap) {
			rtnl_unlock();
			return -ENOLINK;
		}
		ret = 0;
		dev_get_mac_address(&sa, dev_net(tap->dev), tap->dev->name);
		if (copy_to_user(&ifr->ifr_name, tap->dev->name, IFNAMSIZ) ||
		    copy_to_user(&ifr->ifr_hwaddr, &sa, sizeof(sa)))
			ret = -EFAULT;
		tap_put_tap_dev(tap);
		rtnl_unlock();
		return ret;

	case SIOCSIFHWADDR:
		if (copy_from_user(&sa, &ifr->ifr_hwaddr, sizeof(sa)))
			return -EFAULT;
		rtnl_lock();
		tap = tap_get_tap_dev(q);
		if (!tap) {
			rtnl_unlock();
			return -ENOLINK;
		}
		ret = dev_set_mac_address_user(tap->dev, &sa, NULL);
		tap_put_tap_dev(tap);
		rtnl_unlock();
		return ret;

	default:
		return tun_vnet_ioctl(&q->vnet_hdr_sz, &q->flags, cmd, sp);
	}
}

static const struct file_operations tap_fops = {
	.owner		= THIS_MODULE,
	.open		= tap_open,
	.release	= tap_release,
	.read_iter	= tap_read_iter,
	.write_iter	= tap_write_iter,
	.poll		= tap_poll,
	.unlocked_ioctl	= tap_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
};
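/*
 * Batched transmit path: a vhost-net style caller passes an array of
 * xdp_buff pointers through sendmsg() control data (TUN_MSG_PTR) and each
 * buffer is turned into an skb here over its existing pages, without an
 * extra copy.
 */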
static int tap_get_user_xdp(struct tap_queue *q, struct xdp_buff *xdp)
{
	struct tun_xdp_hdr *hdr = xdp->data_hard_start;
	struct virtio_net_hdr *gso = &hdr->gso;
	int buflen = hdr->buflen;
	int vnet_hdr_len = 0;
	struct tap_dev *tap;
	struct sk_buff *skb;
	int err, depth;

	if (unlikely(xdp->data_end - xdp->data < ETH_HLEN)) {
		err = -EINVAL;
		goto err;
	}

	if (q->flags & IFF_VNET_HDR)
		vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);

	skb = build_skb(xdp->data_hard_start, buflen);
	if (!skb) {
		err = -ENOMEM;
		goto err;
	}

	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	skb_put(skb, xdp->data_end - xdp->data);

	skb_set_network_header(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;

	if (vnet_hdr_len) {
		err = tun_vnet_hdr_to_skb(q->flags, skb, gso);
		if (err)
			goto err_kfree;
	}

	/* Move network header to the right position for VLAN tagged packets */
	if (eth_type_vlan(skb->protocol) &&
	    vlan_get_protocol_and_depth(skb, skb->protocol, &depth) != 0)
		skb_set_network_header(skb, depth);

	rcu_read_lock();
	tap = rcu_dereference(q->tap);
	if (tap) {
		skb->dev = tap->dev;
		skb_probe_transport_header(skb);
		dev_queue_xmit(skb);
	} else {
		kfree_skb(skb);
	}
	rcu_read_unlock();

	return 0;

err_kfree:
	kfree_skb(skb);
err:
	rcu_read_lock();
	tap = rcu_dereference(q->tap);
	if (tap && tap->count_tx_dropped)
		tap->count_tx_dropped(tap);
	rcu_read_unlock();
	return err;
}

static int tap_sendmsg(struct socket *sock, struct msghdr *m,
		       size_t total_len)
{
	struct tap_queue *q = container_of(sock, struct tap_queue, sock);
	struct tun_msg_ctl *ctl = m->msg_control;
	struct xdp_buff *xdp;
	int i;

	if (m->msg_controllen == sizeof(struct tun_msg_ctl) &&
	    ctl && ctl->type == TUN_MSG_PTR) {
		for (i = 0; i < ctl->num; i++) {
			xdp = &((struct xdp_buff *)ctl->ptr)[i];
			tap_get_user_xdp(q, xdp);
		}
		return 0;
	}

	return tap_get_user(q, ctl ? ctl->ptr : NULL, &m->msg_iter,
			    m->msg_flags & MSG_DONTWAIT);
}

static int tap_recvmsg(struct socket *sock, struct msghdr *m,
		       size_t total_len, int flags)
{
	struct tap_queue *q = container_of(sock, struct tap_queue, sock);
	struct sk_buff *skb = m->msg_control;
	int ret;

	if (flags & ~(MSG_DONTWAIT | MSG_TRUNC)) {
		kfree_skb(skb);
		return -EINVAL;
	}
	ret = tap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT, skb);
	if (ret > total_len) {
		m->msg_flags |= MSG_TRUNC;
		ret = flags & MSG_TRUNC ? ret : total_len;
	}
	return ret;
}
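/*
 * peek_len lets a consumer (vhost-net uses this hook) size the next
 * datagram, including any VLAN tag that will be re-inserted on read,
 * without consuming it from the ring.
 */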
static int tap_peek_len(struct socket *sock)
{
	struct tap_queue *q = container_of(sock, struct tap_queue,
					   sock);
	return PTR_RING_PEEK_CALL(&q->ring, __skb_array_len_with_tag);
}

/* Ops structure to mimic raw sockets with tun */
static const struct proto_ops tap_socket_ops = {
	.sendmsg = tap_sendmsg,
	.recvmsg = tap_recvmsg,
	.peek_len = tap_peek_len,
};

/* Get an underlying socket object from a tap file. Returns error unless file
 * is attached to a device. The returned object works like a packet socket, it
 * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for
 * holding a reference to the file for as long as the socket is in use.
 */
struct socket *tap_get_socket(struct file *file)
{
	struct tap_queue *q;

	if (file->f_op != &tap_fops)
		return ERR_PTR(-EINVAL);
	q = file->private_data;
	if (!q)
		return ERR_PTR(-EBADFD);
	return &q->sock;
}
EXPORT_SYMBOL_GPL(tap_get_socket);

struct ptr_ring *tap_get_ptr_ring(struct file *file)
{
	struct tap_queue *q;

	if (file->f_op != &tap_fops)
		return ERR_PTR(-EINVAL);
	q = file->private_data;
	if (!q)
		return ERR_PTR(-EBADFD);
	return &q->ring;
}
EXPORT_SYMBOL_GPL(tap_get_ptr_ring);

int tap_queue_resize(struct tap_dev *tap)
{
	struct net_device *dev = tap->dev;
	struct tap_queue *q;
	struct ptr_ring **rings;
	int n = tap->numqueues;
	int ret, i = 0;

	rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL);
	if (!rings)
		return -ENOMEM;

	list_for_each_entry(q, &tap->queue_list, next)
		rings[i++] = &q->ring;

	ret = ptr_ring_resize_multiple_bh(rings, n,
					  dev->tx_queue_len, GFP_KERNEL,
					  __skb_array_destroy_skb);

	kfree(rings);
	return ret;
}
EXPORT_SYMBOL_GPL(tap_queue_resize);

static int tap_list_add(dev_t major, const char *device_name)
{
	struct major_info *tap_major;

	tap_major = kzalloc(sizeof(*tap_major), GFP_ATOMIC);
	if (!tap_major)
		return -ENOMEM;

	tap_major->major = MAJOR(major);

	idr_init(&tap_major->minor_idr);
	spin_lock_init(&tap_major->minor_lock);

	tap_major->device_name = device_name;

	list_add_tail_rcu(&tap_major->next, &major_list);
	return 0;
}

int tap_create_cdev(struct cdev *tap_cdev, dev_t *tap_major,
		    const char *device_name, struct module *module)
{
	int err;

	err = alloc_chrdev_region(tap_major, 0, TAP_NUM_DEVS, device_name);
	if (err)
		goto out1;

	cdev_init(tap_cdev, &tap_fops);
	tap_cdev->owner = module;
	err = cdev_add(tap_cdev, *tap_major, TAP_NUM_DEVS);
	if (err)
		goto out2;

	err = tap_list_add(*tap_major, device_name);
	if (err)
		goto out3;

	return 0;

out3:
	cdev_del(tap_cdev);
out2:
	unregister_chrdev_region(*tap_major, TAP_NUM_DEVS);
out1:
	return err;
}
EXPORT_SYMBOL_GPL(tap_create_cdev);
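/*
 * Typical consumer wiring, sketched after macvtap (the "foo" names are
 * hypothetical):
 *
 *	static struct cdev foo_cdev;
 *	static dev_t foo_major;
 *
 *	static int __init foo_init(void)
 *	{
 *		return tap_create_cdev(&foo_cdev, &foo_major, "footap",
 *				       THIS_MODULE);
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		tap_destroy_cdev(foo_major, &foo_cdev);
 *	}
 */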
void tap_destroy_cdev(dev_t major, struct cdev *tap_cdev)
{
	struct major_info *tap_major, *tmp;

	cdev_del(tap_cdev);
	unregister_chrdev_region(major, TAP_NUM_DEVS);
	list_for_each_entry_safe(tap_major, tmp, &major_list, next) {
		if (tap_major->major == MAJOR(major)) {
			idr_destroy(&tap_major->minor_idr);
			list_del_rcu(&tap_major->next);
			kfree_rcu(tap_major, rcu);
		}
	}
}
EXPORT_SYMBOL_GPL(tap_destroy_cdev);

MODULE_DESCRIPTION("Common library for drivers implementing the TAP interface");
MODULE_AUTHOR("Arnd Bergmann <arnd@arndb.de>");
MODULE_AUTHOR("Sainath Grandhi <sainath.grandhi@intel.com>");
MODULE_LICENSE("GPL");