1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * TUN - Universal TUN/TAP device driver. 4 * Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com> 5 * 6 * $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $ 7 */ 8 9 /* 10 * Changes: 11 * 12 * Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14 13 * Add TUNSETLINK ioctl to set the link encapsulation 14 * 15 * Mark Smith <markzzzsmith@yahoo.com.au> 16 * Use eth_random_addr() for tap MAC address. 17 * 18 * Harald Roelle <harald.roelle@ifi.lmu.de> 2004/04/20 19 * Fixes in packet dropping, queue length setting and queue wakeup. 20 * Increased default tx queue length. 21 * Added ethtool API. 22 * Minor cleanups 23 * 24 * Daniel Podlejski <underley@underley.eu.org> 25 * Modifications for 2.3.99-pre5 kernel. 26 */ 27 28 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 29 30 #define DRV_NAME "tun" 31 #define DRV_VERSION "1.6" 32 #define DRV_DESCRIPTION "Universal TUN/TAP device driver" 33 #define DRV_COPYRIGHT "(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>" 34 35 #include <linux/module.h> 36 #include <linux/errno.h> 37 #include <linux/kernel.h> 38 #include <linux/sched/signal.h> 39 #include <linux/major.h> 40 #include <linux/slab.h> 41 #include <linux/poll.h> 42 #include <linux/fcntl.h> 43 #include <linux/init.h> 44 #include <linux/skbuff.h> 45 #include <linux/netdevice.h> 46 #include <linux/etherdevice.h> 47 #include <linux/miscdevice.h> 48 #include <linux/ethtool.h> 49 #include <linux/rtnetlink.h> 50 #include <linux/compat.h> 51 #include <linux/if.h> 52 #include <linux/if_arp.h> 53 #include <linux/if_ether.h> 54 #include <linux/if_tun.h> 55 #include <linux/if_vlan.h> 56 #include <linux/crc32.h> 57 #include <linux/math.h> 58 #include <linux/nsproxy.h> 59 #include <linux/virtio_net.h> 60 #include <linux/rcupdate.h> 61 #include <net/net_namespace.h> 62 #include <net/netns/generic.h> 63 #include <net/rtnetlink.h> 64 #include <net/sock.h> 65 #include <net/xdp.h> 66 #include <net/ip_tunnels.h> 67 #include <linux/seq_file.h> 68 #include <linux/uio.h> 69 #include <linux/skb_array.h> 70 #include <linux/bpf.h> 71 #include <linux/bpf_trace.h> 72 #include <linux/mutex.h> 73 #include <linux/ieee802154.h> 74 #include <uapi/linux/if_ltalk.h> 75 #include <uapi/linux/if_fddi.h> 76 #include <uapi/linux/if_hippi.h> 77 #include <uapi/linux/if_fc.h> 78 #include <net/ax25.h> 79 #include <net/rose.h> 80 #include <net/6lowpan.h> 81 #include <net/rps.h> 82 83 #include <linux/uaccess.h> 84 #include <linux/proc_fs.h> 85 86 #include "tun_vnet.h" 87 88 static void tun_default_link_ksettings(struct net_device *dev, 89 struct ethtool_link_ksettings *cmd); 90 91 #define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD) 92 93 /* TUN device flags */ 94 95 /* IFF_ATTACH_QUEUE is never stored in device flags, 96 * overload it to mean fasync when stored there. 97 */ 98 #define TUN_FASYNC IFF_ATTACH_QUEUE 99 100 #define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \ 101 IFF_MULTI_QUEUE | IFF_NAPI | IFF_NAPI_FRAGS) 102 103 #define GOODCOPY_LEN 128 104 105 #define FLT_EXACT_COUNT 8 106 struct tap_filter { 107 unsigned int count; /* Number of addrs. Zero means disabled */ 108 u32 mask[2]; /* Mask of the hashed addrs */ 109 unsigned char addr[FLT_EXACT_COUNT][ETH_ALEN]; 110 }; 111 112 /* MAX_TAP_QUEUES 256 is chosen to allow rx/tx queues to be equal 113 * to max number of VCPUs in guest. */ 114 #define MAX_TAP_QUEUES 256 115 #define MAX_TAP_FLOWS 4096 116 117 #define TUN_FLOW_EXPIRE (3 * HZ) 118 119 /* A tun_file connects an open character device to a tuntap netdevice. 
 * It also contains all socket related structures (except sock_fprog and
 * tap_filter) to serve as one transmit queue for the tuntap device. The
 * sock_fprog and tap_filter were kept in tun_struct since they are used to
 * filter for the netdevice, not for a specific queue (at least I didn't see
 * the requirement for this).
 *
 * RCU usage:
 * The tun_file and tun_struct are loosely coupled, the pointer from one to the
 * other can only be read while rcu_read_lock or rtnl_lock is held.
 */
struct tun_file {
	struct sock sk;
	struct socket socket;
	struct tun_struct __rcu *tun;
	struct fasync_struct *fasync;
	/* only used for fasync */
	unsigned int flags;
	union {
		u16 queue_index;
		unsigned int ifindex;
	};
	struct napi_struct napi;
	bool napi_enabled;
	bool napi_frags_enabled;
	struct mutex napi_mutex;	/* Protects access to the above napi */
	struct list_head next;
	struct tun_struct *detached;
	struct ptr_ring tx_ring;
	struct xdp_rxq_info xdp_rxq;
};

struct tun_page {
	struct page *page;
	int count;
};

struct tun_flow_entry {
	struct hlist_node hash_link;
	struct rcu_head rcu;
	struct tun_struct *tun;

	u32 rxhash;
	u32 rps_rxhash;
	int queue_index;
	unsigned long updated ____cacheline_aligned_in_smp;
};

#define TUN_NUM_FLOW_ENTRIES 1024
#define TUN_MASK_FLOW_ENTRIES (TUN_NUM_FLOW_ENTRIES - 1)

struct tun_prog {
	struct rcu_head rcu;
	struct bpf_prog *prog;
};

/* Since the socket was moved into tun_file, the socket filter, sndbuf and
 * vnet header size are restored when a file is attached to a persistent
 * device, to preserve that device's behavior.
 */
struct tun_struct {
	struct tun_file __rcu *tfiles[MAX_TAP_QUEUES];
	unsigned int numqueues;
	unsigned int flags;
	kuid_t owner;
	kgid_t group;

	struct net_device *dev;
	netdev_features_t set_features;
#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
			   NETIF_F_TSO6 | NETIF_F_GSO_UDP_L4 | \
			   NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM)

	int align;
	int vnet_hdr_sz;
	int sndbuf;
	struct tap_filter txflt;
	struct sock_fprog fprog;
	/* protected by rtnl lock */
	bool filter_attached;
	u32 msg_enable;
	spinlock_t lock;
	struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
	struct timer_list flow_gc_timer;
	unsigned long ageing_time;
	unsigned int numdisabled;
	struct list_head disabled;
	void *security;
	u32 flow_count;
	u32 rx_batched;
	atomic_long_t rx_frame_errors;
	struct bpf_prog __rcu *xdp_prog;
	struct tun_prog __rcu *steering_prog;
	struct tun_prog __rcu *filter_prog;
	struct ethtool_link_ksettings link_ksettings;
	/* init args */
	struct file *file;
	struct ifreq *ifr;
};

struct veth {
	__be16 h_vlan_proto;
	__be16 h_vlan_TCI;
};

static void tun_flow_init(struct tun_struct *tun);
static void tun_flow_uninit(struct tun_struct *tun);

static int tun_napi_receive(struct napi_struct *napi, int budget)
{
	struct tun_file *tfile = container_of(napi, struct tun_file, napi);
	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
	struct sk_buff_head process_queue;
	struct sk_buff *skb;
	int received = 0;

	__skb_queue_head_init(&process_queue);

	spin_lock(&queue->lock);
	skb_queue_splice_tail_init(queue, &process_queue);
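	/* The pending backlog is detached in one shot under the queue lock
	 * and GRO-processed lock-free below; whatever exceeds the NAPI
	 * budget is spliced back onto sk_write_queue afterwards.
	 */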
spin_unlock(&queue->lock); 240 241 while (received < budget && (skb = __skb_dequeue(&process_queue))) { 242 napi_gro_receive(napi, skb); 243 ++received; 244 } 245 246 if (!skb_queue_empty(&process_queue)) { 247 spin_lock(&queue->lock); 248 skb_queue_splice(&process_queue, queue); 249 spin_unlock(&queue->lock); 250 } 251 252 return received; 253 } 254 255 static int tun_napi_poll(struct napi_struct *napi, int budget) 256 { 257 unsigned int received; 258 259 received = tun_napi_receive(napi, budget); 260 261 if (received < budget) 262 napi_complete_done(napi, received); 263 264 return received; 265 } 266 267 static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile, 268 bool napi_en, bool napi_frags) 269 { 270 tfile->napi_enabled = napi_en; 271 tfile->napi_frags_enabled = napi_en && napi_frags; 272 if (napi_en) { 273 netif_napi_add_tx(tun->dev, &tfile->napi, tun_napi_poll); 274 napi_enable(&tfile->napi); 275 } 276 } 277 278 static void tun_napi_enable(struct tun_file *tfile) 279 { 280 if (tfile->napi_enabled) 281 napi_enable(&tfile->napi); 282 } 283 284 static void tun_napi_disable(struct tun_file *tfile) 285 { 286 if (tfile->napi_enabled) 287 napi_disable(&tfile->napi); 288 } 289 290 static void tun_napi_del(struct tun_file *tfile) 291 { 292 if (tfile->napi_enabled) 293 netif_napi_del(&tfile->napi); 294 } 295 296 static bool tun_napi_frags_enabled(const struct tun_file *tfile) 297 { 298 return tfile->napi_frags_enabled; 299 } 300 301 static inline u32 tun_hashfn(u32 rxhash) 302 { 303 return rxhash & TUN_MASK_FLOW_ENTRIES; 304 } 305 306 static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash) 307 { 308 struct tun_flow_entry *e; 309 310 hlist_for_each_entry_rcu(e, head, hash_link) { 311 if (e->rxhash == rxhash) 312 return e; 313 } 314 return NULL; 315 } 316 317 static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun, 318 struct hlist_head *head, 319 u32 rxhash, u16 queue_index) 320 { 321 struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC); 322 323 if (e) { 324 netif_info(tun, tx_queued, tun->dev, 325 "create flow: hash %u index %u\n", 326 rxhash, queue_index); 327 e->updated = jiffies; 328 e->rxhash = rxhash; 329 e->rps_rxhash = 0; 330 e->queue_index = queue_index; 331 e->tun = tun; 332 hlist_add_head_rcu(&e->hash_link, head); 333 ++tun->flow_count; 334 } 335 return e; 336 } 337 338 static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e) 339 { 340 netif_info(tun, tx_queued, tun->dev, "delete flow: hash %u index %u\n", 341 e->rxhash, e->queue_index); 342 hlist_del_rcu(&e->hash_link); 343 kfree_rcu(e, rcu); 344 --tun->flow_count; 345 } 346 347 static void tun_flow_flush(struct tun_struct *tun) 348 { 349 int i; 350 351 spin_lock_bh(&tun->lock); 352 for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) { 353 struct tun_flow_entry *e; 354 struct hlist_node *n; 355 356 hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) 357 tun_flow_delete(tun, e); 358 } 359 spin_unlock_bh(&tun->lock); 360 } 361 362 static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index) 363 { 364 int i; 365 366 spin_lock_bh(&tun->lock); 367 for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) { 368 struct tun_flow_entry *e; 369 struct hlist_node *n; 370 371 hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) { 372 if (e->queue_index == queue_index) 373 tun_flow_delete(tun, e); 374 } 375 } 376 spin_unlock_bh(&tun->lock); 377 } 378 379 static void tun_flow_cleanup(struct timer_list *t) 380 { 381 struct tun_struct *tun = 
timer_container_of(tun, t, flow_gc_timer); 382 unsigned long delay = tun->ageing_time; 383 unsigned long next_timer = jiffies + delay; 384 unsigned long count = 0; 385 int i; 386 387 spin_lock(&tun->lock); 388 for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) { 389 struct tun_flow_entry *e; 390 struct hlist_node *n; 391 392 hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) { 393 unsigned long this_timer; 394 395 this_timer = e->updated + delay; 396 if (time_before_eq(this_timer, jiffies)) { 397 tun_flow_delete(tun, e); 398 continue; 399 } 400 count++; 401 if (time_before(this_timer, next_timer)) 402 next_timer = this_timer; 403 } 404 } 405 406 if (count) 407 mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer)); 408 spin_unlock(&tun->lock); 409 } 410 411 static void tun_flow_update(struct tun_struct *tun, u32 rxhash, 412 struct tun_file *tfile) 413 { 414 struct hlist_head *head; 415 struct tun_flow_entry *e; 416 unsigned long delay = tun->ageing_time; 417 u16 queue_index = tfile->queue_index; 418 419 head = &tun->flows[tun_hashfn(rxhash)]; 420 421 rcu_read_lock(); 422 423 e = tun_flow_find(head, rxhash); 424 if (likely(e)) { 425 /* TODO: keep queueing to old queue until it's empty? */ 426 if (READ_ONCE(e->queue_index) != queue_index) 427 WRITE_ONCE(e->queue_index, queue_index); 428 if (e->updated != jiffies) 429 e->updated = jiffies; 430 sock_rps_record_flow_hash(e->rps_rxhash); 431 } else { 432 spin_lock_bh(&tun->lock); 433 if (!tun_flow_find(head, rxhash) && 434 tun->flow_count < MAX_TAP_FLOWS) 435 tun_flow_create(tun, head, rxhash, queue_index); 436 437 if (!timer_pending(&tun->flow_gc_timer)) 438 mod_timer(&tun->flow_gc_timer, 439 round_jiffies_up(jiffies + delay)); 440 spin_unlock_bh(&tun->lock); 441 } 442 443 rcu_read_unlock(); 444 } 445 446 /* Save the hash received in the stack receive path and update the 447 * flow_hash table accordingly. 448 */ 449 static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash) 450 { 451 if (unlikely(e->rps_rxhash != hash)) 452 e->rps_rxhash = hash; 453 } 454 455 /* We try to identify a flow through its rxhash. The reason that 456 * we do not check rxq no. is because some cards(e.g 82599), chooses 457 * the rxq based on the txq where the last packet of the flow comes. As 458 * the userspace application move between processors, we may get a 459 * different rxq no. here. 
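 *
 * Roughly: a flow already in the table keeps following the queue recorded
 * in e->queue_index, while an unknown hash is spread over the active
 * queues with reciprocal_scale(), e.g. with 4 queues a hash of 0x80000001
 * maps to ((u64)0x80000001 * 4) >> 32 == queue 2.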
460 */ 461 static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb) 462 { 463 struct tun_flow_entry *e; 464 u32 txq, numqueues; 465 466 numqueues = READ_ONCE(tun->numqueues); 467 468 txq = __skb_get_hash_symmetric(skb); 469 e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq); 470 if (e) { 471 tun_flow_save_rps_rxhash(e, txq); 472 txq = e->queue_index; 473 } else { 474 txq = reciprocal_scale(txq, numqueues); 475 } 476 477 return txq; 478 } 479 480 static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb) 481 { 482 struct tun_prog *prog; 483 u32 numqueues; 484 u16 ret = 0; 485 486 numqueues = READ_ONCE(tun->numqueues); 487 if (!numqueues) 488 return 0; 489 490 prog = rcu_dereference(tun->steering_prog); 491 if (prog) 492 ret = bpf_prog_run_clear_cb(prog->prog, skb); 493 494 return ret % numqueues; 495 } 496 497 static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb, 498 struct net_device *sb_dev) 499 { 500 struct tun_struct *tun = netdev_priv(dev); 501 u16 ret; 502 503 rcu_read_lock(); 504 if (rcu_dereference(tun->steering_prog)) 505 ret = tun_ebpf_select_queue(tun, skb); 506 else 507 ret = tun_automq_select_queue(tun, skb); 508 rcu_read_unlock(); 509 510 return ret; 511 } 512 513 static inline bool tun_not_capable(struct tun_struct *tun) 514 { 515 const struct cred *cred = current_cred(); 516 struct net *net = dev_net(tun->dev); 517 518 return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) || 519 (gid_valid(tun->group) && !in_egroup_p(tun->group))) && 520 !ns_capable(net->user_ns, CAP_NET_ADMIN); 521 } 522 523 static void tun_set_real_num_queues(struct tun_struct *tun) 524 { 525 netif_set_real_num_tx_queues(tun->dev, tun->numqueues); 526 netif_set_real_num_rx_queues(tun->dev, tun->numqueues); 527 } 528 529 static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile) 530 { 531 tfile->detached = tun; 532 list_add_tail(&tfile->next, &tun->disabled); 533 ++tun->numdisabled; 534 } 535 536 static struct tun_struct *tun_enable_queue(struct tun_file *tfile) 537 { 538 struct tun_struct *tun = tfile->detached; 539 540 tfile->detached = NULL; 541 list_del_init(&tfile->next); 542 --tun->numdisabled; 543 return tun; 544 } 545 546 void tun_ptr_free(void *ptr) 547 { 548 if (!ptr) 549 return; 550 if (tun_is_xdp_frame(ptr)) { 551 struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr); 552 553 xdp_return_frame(xdpf); 554 } else { 555 __skb_array_destroy_skb(ptr); 556 } 557 } 558 EXPORT_SYMBOL_GPL(tun_ptr_free); 559 560 static void tun_queue_purge(struct tun_file *tfile) 561 { 562 void *ptr; 563 564 while ((ptr = ptr_ring_consume(&tfile->tx_ring)) != NULL) 565 tun_ptr_free(ptr); 566 567 skb_queue_purge(&tfile->sk.sk_write_queue); 568 skb_queue_purge(&tfile->sk.sk_error_queue); 569 } 570 571 static void __tun_detach(struct tun_file *tfile, bool clean) 572 { 573 struct tun_file *ntfile; 574 struct tun_struct *tun; 575 576 tun = rtnl_dereference(tfile->tun); 577 578 if (tun && clean) { 579 if (!tfile->detached) 580 tun_napi_disable(tfile); 581 tun_napi_del(tfile); 582 } 583 584 if (tun && !tfile->detached) { 585 u16 index = tfile->queue_index; 586 BUG_ON(index >= tun->numqueues); 587 588 rcu_assign_pointer(tun->tfiles[index], 589 tun->tfiles[tun->numqueues - 1]); 590 ntfile = rtnl_dereference(tun->tfiles[index]); 591 ntfile->queue_index = index; 592 ntfile->xdp_rxq.queue_index = index; 593 rcu_assign_pointer(tun->tfiles[tun->numqueues - 1], 594 NULL); 595 596 --tun->numqueues; 597 if (clean) { 598 RCU_INIT_POINTER(tfile->tun, NULL); 
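			/* Drop the reference taken by sock_hold() at attach
			 * time; the file's own reference is released later
			 * in tun_detach().
			 */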
599 sock_put(&tfile->sk); 600 } else { 601 tun_disable_queue(tun, tfile); 602 tun_napi_disable(tfile); 603 } 604 605 synchronize_net(); 606 tun_flow_delete_by_queue(tun, tun->numqueues + 1); 607 /* Drop read queue */ 608 tun_queue_purge(tfile); 609 tun_set_real_num_queues(tun); 610 } else if (tfile->detached && clean) { 611 tun = tun_enable_queue(tfile); 612 sock_put(&tfile->sk); 613 } 614 615 if (clean) { 616 if (tun && tun->numqueues == 0 && tun->numdisabled == 0) { 617 netif_carrier_off(tun->dev); 618 619 if (!(tun->flags & IFF_PERSIST) && 620 tun->dev->reg_state == NETREG_REGISTERED) 621 unregister_netdevice(tun->dev); 622 } 623 if (tun) 624 xdp_rxq_info_unreg(&tfile->xdp_rxq); 625 ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free); 626 } 627 } 628 629 static void tun_detach(struct tun_file *tfile, bool clean) 630 { 631 struct tun_struct *tun; 632 struct net_device *dev; 633 634 rtnl_lock(); 635 tun = rtnl_dereference(tfile->tun); 636 dev = tun ? tun->dev : NULL; 637 __tun_detach(tfile, clean); 638 if (dev) 639 netdev_state_change(dev); 640 rtnl_unlock(); 641 642 if (clean) 643 sock_put(&tfile->sk); 644 } 645 646 static void tun_detach_all(struct net_device *dev) 647 { 648 struct tun_struct *tun = netdev_priv(dev); 649 struct tun_file *tfile, *tmp; 650 int i, n = tun->numqueues; 651 652 for (i = 0; i < n; i++) { 653 tfile = rtnl_dereference(tun->tfiles[i]); 654 BUG_ON(!tfile); 655 tun_napi_disable(tfile); 656 tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN; 657 tfile->socket.sk->sk_data_ready(tfile->socket.sk); 658 RCU_INIT_POINTER(tfile->tun, NULL); 659 --tun->numqueues; 660 } 661 list_for_each_entry(tfile, &tun->disabled, next) { 662 tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN; 663 tfile->socket.sk->sk_data_ready(tfile->socket.sk); 664 RCU_INIT_POINTER(tfile->tun, NULL); 665 } 666 BUG_ON(tun->numqueues != 0); 667 668 synchronize_net(); 669 for (i = 0; i < n; i++) { 670 tfile = rtnl_dereference(tun->tfiles[i]); 671 tun_napi_del(tfile); 672 /* Drop read queue */ 673 tun_queue_purge(tfile); 674 xdp_rxq_info_unreg(&tfile->xdp_rxq); 675 sock_put(&tfile->sk); 676 } 677 list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) { 678 tun_napi_del(tfile); 679 tun_enable_queue(tfile); 680 tun_queue_purge(tfile); 681 xdp_rxq_info_unreg(&tfile->xdp_rxq); 682 sock_put(&tfile->sk); 683 } 684 BUG_ON(tun->numdisabled != 0); 685 686 if (tun->flags & IFF_PERSIST) 687 module_put(THIS_MODULE); 688 } 689 690 static int tun_attach(struct tun_struct *tun, struct file *file, 691 bool skip_filter, bool napi, bool napi_frags, 692 bool publish_tun) 693 { 694 struct tun_file *tfile = file->private_data; 695 struct net_device *dev = tun->dev; 696 int err; 697 698 err = security_tun_dev_attach(tfile->socket.sk, tun->security); 699 if (err < 0) 700 goto out; 701 702 err = -EINVAL; 703 if (rtnl_dereference(tfile->tun) && !tfile->detached) 704 goto out; 705 706 err = -EBUSY; 707 if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1) 708 goto out; 709 710 err = -E2BIG; 711 if (!tfile->detached && 712 tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES) 713 goto out; 714 715 err = 0; 716 717 /* Re-attach the filter to persist device */ 718 if (!skip_filter && (tun->filter_attached == true)) { 719 lock_sock(tfile->socket.sk); 720 err = sk_attach_filter(&tun->fprog, tfile->socket.sk); 721 release_sock(tfile->socket.sk); 722 if (!err) 723 goto out; 724 } 725 726 if (!tfile->detached && 727 ptr_ring_resize(&tfile->tx_ring, dev->tx_queue_len, 728 GFP_KERNEL, tun_ptr_free)) { 729 err = -ENOMEM; 730 goto out; 731 } 732 733 
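	/* From here on the queue is wired up: it takes the next free slot and
	 * any RCV_SHUTDOWN left behind by a previous detach is cleared so
	 * reads on this fd work again.
	 */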
tfile->queue_index = tun->numqueues; 734 tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN; 735 736 if (tfile->detached) { 737 /* Re-attach detached tfile, updating XDP queue_index */ 738 WARN_ON(!xdp_rxq_info_is_reg(&tfile->xdp_rxq)); 739 740 if (tfile->xdp_rxq.queue_index != tfile->queue_index) 741 tfile->xdp_rxq.queue_index = tfile->queue_index; 742 } else { 743 /* Setup XDP RX-queue info, for new tfile getting attached */ 744 err = xdp_rxq_info_reg(&tfile->xdp_rxq, 745 tun->dev, tfile->queue_index, 0); 746 if (err < 0) 747 goto out; 748 err = xdp_rxq_info_reg_mem_model(&tfile->xdp_rxq, 749 MEM_TYPE_PAGE_SHARED, NULL); 750 if (err < 0) { 751 xdp_rxq_info_unreg(&tfile->xdp_rxq); 752 goto out; 753 } 754 err = 0; 755 } 756 757 if (tfile->detached) { 758 tun_enable_queue(tfile); 759 tun_napi_enable(tfile); 760 } else { 761 sock_hold(&tfile->sk); 762 tun_napi_init(tun, tfile, napi, napi_frags); 763 } 764 765 if (rtnl_dereference(tun->xdp_prog)) 766 sock_set_flag(&tfile->sk, SOCK_XDP); 767 768 /* device is allowed to go away first, so no need to hold extra 769 * refcnt. 770 */ 771 772 /* Publish tfile->tun and tun->tfiles only after we've fully 773 * initialized tfile; otherwise we risk using half-initialized 774 * object. 775 */ 776 if (publish_tun) 777 rcu_assign_pointer(tfile->tun, tun); 778 rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile); 779 tun->numqueues++; 780 tun_set_real_num_queues(tun); 781 out: 782 return err; 783 } 784 785 static struct tun_struct *tun_get(struct tun_file *tfile) 786 { 787 struct tun_struct *tun; 788 789 rcu_read_lock(); 790 tun = rcu_dereference(tfile->tun); 791 if (tun) 792 dev_hold(tun->dev); 793 rcu_read_unlock(); 794 795 return tun; 796 } 797 798 static void tun_put(struct tun_struct *tun) 799 { 800 dev_put(tun->dev); 801 } 802 803 /* TAP filtering */ 804 static void addr_hash_set(u32 *mask, const u8 *addr) 805 { 806 int n = ether_crc(ETH_ALEN, addr) >> 26; 807 mask[n >> 5] |= (1 << (n & 31)); 808 } 809 810 static unsigned int addr_hash_test(const u32 *mask, const u8 *addr) 811 { 812 int n = ether_crc(ETH_ALEN, addr) >> 26; 813 return mask[n >> 5] & (1 << (n & 31)); 814 } 815 816 static int update_filter(struct tap_filter *filter, void __user *arg) 817 { 818 struct { u8 u[ETH_ALEN]; } *addr; 819 struct tun_filter uf; 820 int err, alen, n, nexact; 821 822 if (copy_from_user(&uf, arg, sizeof(uf))) 823 return -EFAULT; 824 825 if (!uf.count) { 826 /* Disabled */ 827 filter->count = 0; 828 return 0; 829 } 830 831 alen = ETH_ALEN * uf.count; 832 addr = memdup_user(arg + sizeof(uf), alen); 833 if (IS_ERR(addr)) 834 return PTR_ERR(addr); 835 836 /* The filter is updated without holding any locks. Which is 837 * perfectly safe. We disable it first and in the worst 838 * case we'll accept a few undesired packets. */ 839 filter->count = 0; 840 wmb(); 841 842 /* Use first set of addresses as an exact filter */ 843 for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++) 844 memcpy(filter->addr[n], addr[n].u, ETH_ALEN); 845 846 nexact = n; 847 848 /* Remaining multicast addresses are hashed, 849 * unicast will leave the filter disabled. */ 850 memset(filter->mask, 0, sizeof(filter->mask)); 851 for (; n < uf.count; n++) { 852 if (!is_multicast_ether_addr(addr[n].u)) { 853 err = 0; /* no filter */ 854 goto free_addr; 855 } 856 addr_hash_set(filter->mask, addr[n].u); 857 } 858 859 /* For ALLMULTI just set the mask to all ones. 860 * This overrides the mask populated above. 
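 * (Each address hashes to a 6-bit bucket, n = ether_crc() >> 26; bit
 * (n & 31) of mask[n >> 5] marks that bucket, so writing ~0 simply
 * marks all 64 buckets at once.)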
*/ 861 if ((uf.flags & TUN_FLT_ALLMULTI)) 862 memset(filter->mask, ~0, sizeof(filter->mask)); 863 864 /* Now enable the filter */ 865 wmb(); 866 filter->count = nexact; 867 868 /* Return the number of exact filters */ 869 err = nexact; 870 free_addr: 871 kfree(addr); 872 return err; 873 } 874 875 /* Returns: 0 - drop, !=0 - accept */ 876 static int run_filter(struct tap_filter *filter, const struct sk_buff *skb) 877 { 878 /* Cannot use eth_hdr(skb) here because skb_mac_hdr() is incorrect 879 * at this point. */ 880 struct ethhdr *eh = (struct ethhdr *) skb->data; 881 int i; 882 883 /* Exact match */ 884 for (i = 0; i < filter->count; i++) 885 if (ether_addr_equal(eh->h_dest, filter->addr[i])) 886 return 1; 887 888 /* Inexact match (multicast only) */ 889 if (is_multicast_ether_addr(eh->h_dest)) 890 return addr_hash_test(filter->mask, eh->h_dest); 891 892 return 0; 893 } 894 895 /* 896 * Checks whether the packet is accepted or not. 897 * Returns: 0 - drop, !=0 - accept 898 */ 899 static int check_filter(struct tap_filter *filter, const struct sk_buff *skb) 900 { 901 if (!filter->count) 902 return 1; 903 904 return run_filter(filter, skb); 905 } 906 907 /* Network device part of the driver */ 908 909 static const struct ethtool_ops tun_ethtool_ops; 910 911 static int tun_net_init(struct net_device *dev) 912 { 913 struct tun_struct *tun = netdev_priv(dev); 914 struct ifreq *ifr = tun->ifr; 915 int err; 916 917 spin_lock_init(&tun->lock); 918 919 err = security_tun_dev_alloc_security(&tun->security); 920 if (err < 0) 921 return err; 922 923 tun_flow_init(tun); 924 925 dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS; 926 dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | 927 TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX | 928 NETIF_F_HW_VLAN_STAG_TX; 929 dev->hw_enc_features = dev->hw_features; 930 dev->features = dev->hw_features; 931 dev->vlan_features = dev->features & 932 ~(NETIF_F_HW_VLAN_CTAG_TX | 933 NETIF_F_HW_VLAN_STAG_TX); 934 dev->lltx = true; 935 936 tun->flags = (tun->flags & ~TUN_FEATURES) | 937 (ifr->ifr_flags & TUN_FEATURES); 938 939 INIT_LIST_HEAD(&tun->disabled); 940 err = tun_attach(tun, tun->file, false, ifr->ifr_flags & IFF_NAPI, 941 ifr->ifr_flags & IFF_NAPI_FRAGS, false); 942 if (err < 0) { 943 tun_flow_uninit(tun); 944 security_tun_dev_free_security(tun->security); 945 return err; 946 } 947 return 0; 948 } 949 950 /* Net device detach from fd. */ 951 static void tun_net_uninit(struct net_device *dev) 952 { 953 tun_detach_all(dev); 954 } 955 956 /* Net device open. */ 957 static int tun_net_open(struct net_device *dev) 958 { 959 netif_tx_start_all_queues(dev); 960 961 return 0; 962 } 963 964 /* Net device close. */ 965 static int tun_net_close(struct net_device *dev) 966 { 967 netif_tx_stop_all_queues(dev); 968 return 0; 969 } 970 971 /* Net device start xmit */ 972 static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb) 973 { 974 #ifdef CONFIG_RPS 975 if (tun->numqueues == 1 && static_branch_unlikely(&rps_needed)) { 976 /* Select queue was not called for the skbuff, so we extract the 977 * RPS hash and save it into the flow_table here. 
978 */ 979 struct tun_flow_entry *e; 980 __u32 rxhash; 981 982 rxhash = __skb_get_hash_symmetric(skb); 983 e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)], rxhash); 984 if (e) 985 tun_flow_save_rps_rxhash(e, rxhash); 986 } 987 #endif 988 } 989 990 static unsigned int run_ebpf_filter(struct tun_struct *tun, 991 struct sk_buff *skb, 992 int len) 993 { 994 struct tun_prog *prog = rcu_dereference(tun->filter_prog); 995 996 if (prog) 997 len = bpf_prog_run_clear_cb(prog->prog, skb); 998 999 return len; 1000 } 1001 1002 /* Net device start xmit */ 1003 static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) 1004 { 1005 struct tun_struct *tun = netdev_priv(dev); 1006 enum skb_drop_reason drop_reason; 1007 int txq = skb->queue_mapping; 1008 struct netdev_queue *queue; 1009 struct tun_file *tfile; 1010 int len = skb->len; 1011 1012 rcu_read_lock(); 1013 tfile = rcu_dereference(tun->tfiles[txq]); 1014 1015 /* Drop packet if interface is not attached */ 1016 if (!tfile) { 1017 drop_reason = SKB_DROP_REASON_DEV_READY; 1018 goto drop; 1019 } 1020 1021 if (!rcu_dereference(tun->steering_prog)) 1022 tun_automq_xmit(tun, skb); 1023 1024 netif_info(tun, tx_queued, tun->dev, "%s %d\n", __func__, skb->len); 1025 1026 /* Drop if the filter does not like it. 1027 * This is a noop if the filter is disabled. 1028 * Filter can be enabled only for the TAP devices. */ 1029 if (!check_filter(&tun->txflt, skb)) { 1030 drop_reason = SKB_DROP_REASON_TAP_TXFILTER; 1031 goto drop; 1032 } 1033 1034 if (tfile->socket.sk->sk_filter && 1035 sk_filter(tfile->socket.sk, skb)) { 1036 drop_reason = SKB_DROP_REASON_SOCKET_FILTER; 1037 goto drop; 1038 } 1039 1040 len = run_ebpf_filter(tun, skb, len); 1041 if (len == 0) { 1042 drop_reason = SKB_DROP_REASON_TAP_FILTER; 1043 goto drop; 1044 } 1045 1046 if (pskb_trim(skb, len)) { 1047 drop_reason = SKB_DROP_REASON_NOMEM; 1048 goto drop; 1049 } 1050 1051 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) { 1052 drop_reason = SKB_DROP_REASON_SKB_UCOPY_FAULT; 1053 goto drop; 1054 } 1055 1056 skb_tx_timestamp(skb); 1057 1058 /* Orphan the skb - required as we might hang on to it 1059 * for indefinite time. 1060 */ 1061 skb_orphan(skb); 1062 1063 nf_reset_ct(skb); 1064 1065 if (ptr_ring_produce(&tfile->tx_ring, skb)) { 1066 drop_reason = SKB_DROP_REASON_FULL_RING; 1067 goto drop; 1068 } 1069 1070 /* dev->lltx requires to do our own update of trans_start */ 1071 queue = netdev_get_tx_queue(dev, txq); 1072 txq_trans_cond_update(queue); 1073 1074 /* Notify and wake up reader process */ 1075 if (tfile->flags & TUN_FASYNC) 1076 kill_fasync(&tfile->fasync, SIGIO, POLL_IN); 1077 tfile->socket.sk->sk_data_ready(tfile->socket.sk); 1078 1079 rcu_read_unlock(); 1080 return NETDEV_TX_OK; 1081 1082 drop: 1083 dev_core_stats_tx_dropped_inc(dev); 1084 skb_tx_error(skb); 1085 kfree_skb_reason(skb, drop_reason); 1086 rcu_read_unlock(); 1087 return NET_XMIT_DROP; 1088 } 1089 1090 static void tun_net_mclist(struct net_device *dev) 1091 { 1092 /* 1093 * This callback is supposed to deal with mc filter in 1094 * _rx_ path and has nothing to do with the _tx_ path. 1095 * In rx path we always accept everything userspace gives us. 
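 * The only MAC filtering tun does is on the _tx_ path, via check_filter()
 * and the TUNSETTXFILTER ioctl that fills in tun->txflt.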
1096 */ 1097 } 1098 1099 static netdev_features_t tun_net_fix_features(struct net_device *dev, 1100 netdev_features_t features) 1101 { 1102 struct tun_struct *tun = netdev_priv(dev); 1103 1104 return (features & tun->set_features) | (features & ~TUN_USER_FEATURES); 1105 } 1106 1107 static void tun_set_headroom(struct net_device *dev, int new_hr) 1108 { 1109 struct tun_struct *tun = netdev_priv(dev); 1110 1111 if (new_hr < NET_SKB_PAD) 1112 new_hr = NET_SKB_PAD; 1113 1114 tun->align = new_hr; 1115 } 1116 1117 static void 1118 tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) 1119 { 1120 struct tun_struct *tun = netdev_priv(dev); 1121 1122 dev_get_tstats64(dev, stats); 1123 1124 stats->rx_frame_errors += 1125 (unsigned long)atomic_long_read(&tun->rx_frame_errors); 1126 } 1127 1128 static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog, 1129 struct netlink_ext_ack *extack) 1130 { 1131 struct tun_struct *tun = netdev_priv(dev); 1132 struct tun_file *tfile; 1133 struct bpf_prog *old_prog; 1134 int i; 1135 1136 old_prog = rtnl_dereference(tun->xdp_prog); 1137 rcu_assign_pointer(tun->xdp_prog, prog); 1138 if (old_prog) 1139 bpf_prog_put(old_prog); 1140 1141 for (i = 0; i < tun->numqueues; i++) { 1142 tfile = rtnl_dereference(tun->tfiles[i]); 1143 if (prog) 1144 sock_set_flag(&tfile->sk, SOCK_XDP); 1145 else 1146 sock_reset_flag(&tfile->sk, SOCK_XDP); 1147 } 1148 list_for_each_entry(tfile, &tun->disabled, next) { 1149 if (prog) 1150 sock_set_flag(&tfile->sk, SOCK_XDP); 1151 else 1152 sock_reset_flag(&tfile->sk, SOCK_XDP); 1153 } 1154 1155 return 0; 1156 } 1157 1158 static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp) 1159 { 1160 switch (xdp->command) { 1161 case XDP_SETUP_PROG: 1162 return tun_xdp_set(dev, xdp->prog, xdp->extack); 1163 default: 1164 return -EINVAL; 1165 } 1166 } 1167 1168 static int tun_net_change_carrier(struct net_device *dev, bool new_carrier) 1169 { 1170 if (new_carrier) { 1171 struct tun_struct *tun = netdev_priv(dev); 1172 1173 if (!tun->numqueues) 1174 return -EPERM; 1175 1176 netif_carrier_on(dev); 1177 } else { 1178 netif_carrier_off(dev); 1179 } 1180 return 0; 1181 } 1182 1183 static const struct net_device_ops tun_netdev_ops = { 1184 .ndo_init = tun_net_init, 1185 .ndo_uninit = tun_net_uninit, 1186 .ndo_open = tun_net_open, 1187 .ndo_stop = tun_net_close, 1188 .ndo_start_xmit = tun_net_xmit, 1189 .ndo_fix_features = tun_net_fix_features, 1190 .ndo_select_queue = tun_select_queue, 1191 .ndo_set_rx_headroom = tun_set_headroom, 1192 .ndo_get_stats64 = tun_net_get_stats64, 1193 .ndo_change_carrier = tun_net_change_carrier, 1194 }; 1195 1196 static void __tun_xdp_flush_tfile(struct tun_file *tfile) 1197 { 1198 /* Notify and wake up reader process */ 1199 if (tfile->flags & TUN_FASYNC) 1200 kill_fasync(&tfile->fasync, SIGIO, POLL_IN); 1201 tfile->socket.sk->sk_data_ready(tfile->socket.sk); 1202 } 1203 1204 static int tun_xdp_xmit(struct net_device *dev, int n, 1205 struct xdp_frame **frames, u32 flags) 1206 { 1207 struct tun_struct *tun = netdev_priv(dev); 1208 struct tun_file *tfile; 1209 u32 numqueues; 1210 int nxmit = 0; 1211 int i; 1212 1213 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) 1214 return -EINVAL; 1215 1216 rcu_read_lock(); 1217 1218 resample: 1219 numqueues = READ_ONCE(tun->numqueues); 1220 if (!numqueues) { 1221 rcu_read_unlock(); 1222 return -ENXIO; /* Caller will free/return all frames */ 1223 } 1224 1225 tfile = rcu_dereference(tun->tfiles[smp_processor_id() % 1226 numqueues]); 1227 if (unlikely(!tfile)) 
1228 goto resample; 1229 1230 spin_lock(&tfile->tx_ring.producer_lock); 1231 for (i = 0; i < n; i++) { 1232 struct xdp_frame *xdp = frames[i]; 1233 /* Encode the XDP flag into lowest bit for consumer to differ 1234 * XDP buffer from sk_buff. 1235 */ 1236 void *frame = tun_xdp_to_ptr(xdp); 1237 1238 if (__ptr_ring_produce(&tfile->tx_ring, frame)) { 1239 dev_core_stats_tx_dropped_inc(dev); 1240 break; 1241 } 1242 nxmit++; 1243 } 1244 spin_unlock(&tfile->tx_ring.producer_lock); 1245 1246 if (flags & XDP_XMIT_FLUSH) 1247 __tun_xdp_flush_tfile(tfile); 1248 1249 rcu_read_unlock(); 1250 return nxmit; 1251 } 1252 1253 static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp) 1254 { 1255 struct xdp_frame *frame = xdp_convert_buff_to_frame(xdp); 1256 int nxmit; 1257 1258 if (unlikely(!frame)) 1259 return -EOVERFLOW; 1260 1261 nxmit = tun_xdp_xmit(dev, 1, &frame, XDP_XMIT_FLUSH); 1262 if (!nxmit) 1263 xdp_return_frame_rx_napi(frame); 1264 return nxmit; 1265 } 1266 1267 static const struct net_device_ops tap_netdev_ops = { 1268 .ndo_init = tun_net_init, 1269 .ndo_uninit = tun_net_uninit, 1270 .ndo_open = tun_net_open, 1271 .ndo_stop = tun_net_close, 1272 .ndo_start_xmit = tun_net_xmit, 1273 .ndo_fix_features = tun_net_fix_features, 1274 .ndo_set_rx_mode = tun_net_mclist, 1275 .ndo_set_mac_address = eth_mac_addr, 1276 .ndo_validate_addr = eth_validate_addr, 1277 .ndo_select_queue = tun_select_queue, 1278 .ndo_features_check = passthru_features_check, 1279 .ndo_set_rx_headroom = tun_set_headroom, 1280 .ndo_bpf = tun_xdp, 1281 .ndo_xdp_xmit = tun_xdp_xmit, 1282 .ndo_change_carrier = tun_net_change_carrier, 1283 }; 1284 1285 static void tun_flow_init(struct tun_struct *tun) 1286 { 1287 int i; 1288 1289 for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) 1290 INIT_HLIST_HEAD(&tun->flows[i]); 1291 1292 tun->ageing_time = TUN_FLOW_EXPIRE; 1293 timer_setup(&tun->flow_gc_timer, tun_flow_cleanup, 0); 1294 mod_timer(&tun->flow_gc_timer, 1295 round_jiffies_up(jiffies + tun->ageing_time)); 1296 } 1297 1298 static void tun_flow_uninit(struct tun_struct *tun) 1299 { 1300 timer_delete_sync(&tun->flow_gc_timer); 1301 tun_flow_flush(tun); 1302 } 1303 1304 #define MIN_MTU 68 1305 #define MAX_MTU 65535 1306 1307 /* Initialize net device. */ 1308 static void tun_net_initialize(struct net_device *dev) 1309 { 1310 struct tun_struct *tun = netdev_priv(dev); 1311 1312 switch (tun->flags & TUN_TYPE_MASK) { 1313 case IFF_TUN: 1314 dev->netdev_ops = &tun_netdev_ops; 1315 dev->header_ops = &ip_tunnel_header_ops; 1316 1317 /* Point-to-Point TUN Device */ 1318 dev->hard_header_len = 0; 1319 dev->addr_len = 0; 1320 dev->mtu = 1500; 1321 1322 /* Zero header length */ 1323 dev->type = ARPHRD_NONE; 1324 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; 1325 break; 1326 1327 case IFF_TAP: 1328 dev->netdev_ops = &tap_netdev_ops; 1329 /* Ethernet TAP Device */ 1330 ether_setup(dev); 1331 dev->priv_flags &= ~IFF_TX_SKB_SHARING; 1332 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 1333 1334 eth_hw_addr_random(dev); 1335 1336 /* Currently tun does not support XDP, only tap does. 
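 * The bits advertised below mean: a native program may return the basic
 * verdicts (PASS/DROP/ABORTED/TX), XDP_REDIRECT is supported, and the
 * device can be the target of another device's ndo_xdp_xmit().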
*/ 1337 dev->xdp_features = NETDEV_XDP_ACT_BASIC | 1338 NETDEV_XDP_ACT_REDIRECT | 1339 NETDEV_XDP_ACT_NDO_XMIT; 1340 1341 break; 1342 } 1343 1344 dev->min_mtu = MIN_MTU; 1345 dev->max_mtu = MAX_MTU - dev->hard_header_len; 1346 } 1347 1348 static bool tun_sock_writeable(struct tun_struct *tun, struct tun_file *tfile) 1349 { 1350 struct sock *sk = tfile->socket.sk; 1351 1352 return (tun->dev->flags & IFF_UP) && sock_writeable(sk); 1353 } 1354 1355 /* Character device part */ 1356 1357 /* Poll */ 1358 static __poll_t tun_chr_poll(struct file *file, poll_table *wait) 1359 { 1360 struct tun_file *tfile = file->private_data; 1361 struct tun_struct *tun = tun_get(tfile); 1362 struct sock *sk; 1363 __poll_t mask = 0; 1364 1365 if (!tun) 1366 return EPOLLERR; 1367 1368 sk = tfile->socket.sk; 1369 1370 poll_wait(file, sk_sleep(sk), wait); 1371 1372 if (!ptr_ring_empty(&tfile->tx_ring)) 1373 mask |= EPOLLIN | EPOLLRDNORM; 1374 1375 /* Make sure SOCKWQ_ASYNC_NOSPACE is set if not writable to 1376 * guarantee EPOLLOUT to be raised by either here or 1377 * tun_sock_write_space(). Then process could get notification 1378 * after it writes to a down device and meets -EIO. 1379 */ 1380 if (tun_sock_writeable(tun, tfile) || 1381 (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) && 1382 tun_sock_writeable(tun, tfile))) 1383 mask |= EPOLLOUT | EPOLLWRNORM; 1384 1385 if (tun->dev->reg_state != NETREG_REGISTERED) 1386 mask = EPOLLERR; 1387 1388 tun_put(tun); 1389 return mask; 1390 } 1391 1392 static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile, 1393 size_t len, 1394 const struct iov_iter *it) 1395 { 1396 struct sk_buff *skb; 1397 size_t linear; 1398 int err; 1399 int i; 1400 1401 if (it->nr_segs > MAX_SKB_FRAGS + 1 || 1402 len > (ETH_MAX_MTU - NET_SKB_PAD - NET_IP_ALIGN)) 1403 return ERR_PTR(-EMSGSIZE); 1404 1405 local_bh_disable(); 1406 skb = napi_get_frags(&tfile->napi); 1407 local_bh_enable(); 1408 if (!skb) 1409 return ERR_PTR(-ENOMEM); 1410 1411 linear = iov_iter_single_seg_count(it); 1412 err = __skb_grow(skb, linear); 1413 if (err) 1414 goto free; 1415 1416 skb->len = len; 1417 skb->data_len = len - linear; 1418 skb->truesize += skb->data_len; 1419 1420 for (i = 1; i < it->nr_segs; i++) { 1421 const struct iovec *iov = iter_iov(it) + i; 1422 size_t fragsz = iov->iov_len; 1423 struct page *page; 1424 void *frag; 1425 1426 if (fragsz == 0 || fragsz > PAGE_SIZE) { 1427 err = -EINVAL; 1428 goto free; 1429 } 1430 frag = netdev_alloc_frag(fragsz); 1431 if (!frag) { 1432 err = -ENOMEM; 1433 goto free; 1434 } 1435 page = virt_to_head_page(frag); 1436 skb_fill_page_desc(skb, i - 1, page, 1437 frag - page_address(page), fragsz); 1438 } 1439 1440 return skb; 1441 free: 1442 /* frees skb and all frags allocated with napi_alloc_frag() */ 1443 napi_free_frags(&tfile->napi); 1444 return ERR_PTR(err); 1445 } 1446 1447 /* prepad is the amount to reserve at front. len is length after that. 1448 * linear is a hint as to how much to copy (usually headers). */ 1449 static struct sk_buff *tun_alloc_skb(struct tun_file *tfile, 1450 size_t prepad, size_t len, 1451 size_t linear, int noblock) 1452 { 1453 struct sock *sk = tfile->socket.sk; 1454 struct sk_buff *skb; 1455 int err; 1456 1457 /* Under a page? Don't bother with paged skb. 
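 * (i.e. put everything in the linear area); otherwise linear is bumped
 * up just enough that the paged remainder fits in MAX_SKB_FRAGS frags
 * of order PAGE_ALLOC_COSTLY_ORDER.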
*/ 1458 if (prepad + len < PAGE_SIZE) 1459 linear = len; 1460 1461 if (len - linear > MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) 1462 linear = len - MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER); 1463 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, 1464 &err, PAGE_ALLOC_COSTLY_ORDER); 1465 if (!skb) 1466 return ERR_PTR(err); 1467 1468 skb_reserve(skb, prepad); 1469 skb_put(skb, linear); 1470 skb->data_len = len - linear; 1471 skb->len += len - linear; 1472 1473 return skb; 1474 } 1475 1476 static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile, 1477 struct sk_buff *skb, int more) 1478 { 1479 struct sk_buff_head *queue = &tfile->sk.sk_write_queue; 1480 struct sk_buff_head process_queue; 1481 u32 rx_batched = tun->rx_batched; 1482 bool rcv = false; 1483 1484 if (!rx_batched || (!more && skb_queue_empty(queue))) { 1485 local_bh_disable(); 1486 skb_record_rx_queue(skb, tfile->queue_index); 1487 netif_receive_skb(skb); 1488 local_bh_enable(); 1489 return; 1490 } 1491 1492 spin_lock(&queue->lock); 1493 if (!more || skb_queue_len(queue) == rx_batched) { 1494 __skb_queue_head_init(&process_queue); 1495 skb_queue_splice_tail_init(queue, &process_queue); 1496 rcv = true; 1497 } else { 1498 __skb_queue_tail(queue, skb); 1499 } 1500 spin_unlock(&queue->lock); 1501 1502 if (rcv) { 1503 struct sk_buff *nskb; 1504 1505 local_bh_disable(); 1506 while ((nskb = __skb_dequeue(&process_queue))) { 1507 skb_record_rx_queue(nskb, tfile->queue_index); 1508 netif_receive_skb(nskb); 1509 } 1510 skb_record_rx_queue(skb, tfile->queue_index); 1511 netif_receive_skb(skb); 1512 local_bh_enable(); 1513 } 1514 } 1515 1516 static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile, 1517 int len, int noblock, bool zerocopy) 1518 { 1519 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 1520 return false; 1521 1522 if (tfile->socket.sk->sk_sndbuf != INT_MAX) 1523 return false; 1524 1525 if (!noblock) 1526 return false; 1527 1528 if (zerocopy) 1529 return false; 1530 1531 if (SKB_DATA_ALIGN(len + TUN_RX_PAD + XDP_PACKET_HEADROOM) + 1532 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE) 1533 return false; 1534 1535 return true; 1536 } 1537 1538 static struct sk_buff *__tun_build_skb(struct tun_file *tfile, 1539 struct page_frag *alloc_frag, char *buf, 1540 int buflen, int len, int pad, 1541 int metasize) 1542 { 1543 struct sk_buff *skb = build_skb(buf, buflen); 1544 1545 if (!skb) 1546 return ERR_PTR(-ENOMEM); 1547 1548 skb_reserve(skb, pad); 1549 skb_put(skb, len); 1550 if (metasize) 1551 skb_metadata_set(skb, metasize); 1552 skb_set_owner_w(skb, tfile->socket.sk); 1553 1554 get_page(alloc_frag->page); 1555 alloc_frag->offset += buflen; 1556 1557 return skb; 1558 } 1559 1560 static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog, 1561 struct xdp_buff *xdp, u32 act) 1562 { 1563 int err; 1564 1565 switch (act) { 1566 case XDP_REDIRECT: 1567 err = xdp_do_redirect(tun->dev, xdp, xdp_prog); 1568 if (err) { 1569 dev_core_stats_rx_dropped_inc(tun->dev); 1570 return err; 1571 } 1572 dev_sw_netstats_rx_add(tun->dev, xdp->data_end - xdp->data); 1573 break; 1574 case XDP_TX: 1575 err = tun_xdp_tx(tun->dev, xdp); 1576 if (err < 0) { 1577 dev_core_stats_rx_dropped_inc(tun->dev); 1578 return err; 1579 } 1580 dev_sw_netstats_rx_add(tun->dev, xdp->data_end - xdp->data); 1581 break; 1582 case XDP_PASS: 1583 break; 1584 default: 1585 bpf_warn_invalid_xdp_action(tun->dev, xdp_prog, act); 1586 fallthrough; 1587 case XDP_ABORTED: 1588 
trace_xdp_exception(tun->dev, xdp_prog, act); 1589 fallthrough; 1590 case XDP_DROP: 1591 dev_core_stats_rx_dropped_inc(tun->dev); 1592 break; 1593 } 1594 1595 return act; 1596 } 1597 1598 static struct sk_buff *tun_build_skb(struct tun_struct *tun, 1599 struct tun_file *tfile, 1600 struct iov_iter *from, 1601 struct virtio_net_hdr *hdr, 1602 int len, int *skb_xdp) 1603 { 1604 struct page_frag *alloc_frag = ¤t->task_frag; 1605 struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx; 1606 struct bpf_prog *xdp_prog; 1607 int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 1608 char *buf; 1609 size_t copied; 1610 int pad = TUN_RX_PAD; 1611 int metasize = 0; 1612 int err = 0; 1613 1614 rcu_read_lock(); 1615 xdp_prog = rcu_dereference(tun->xdp_prog); 1616 if (xdp_prog) 1617 pad += XDP_PACKET_HEADROOM; 1618 buflen += SKB_DATA_ALIGN(len + pad); 1619 rcu_read_unlock(); 1620 1621 alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES); 1622 if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL))) 1623 return ERR_PTR(-ENOMEM); 1624 1625 buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset; 1626 copied = copy_page_from_iter(alloc_frag->page, 1627 alloc_frag->offset + pad, 1628 len, from); 1629 if (copied != len) 1630 return ERR_PTR(-EFAULT); 1631 1632 /* There's a small window that XDP may be set after the check 1633 * of xdp_prog above, this should be rare and for simplicity 1634 * we do XDP on skb in case the headroom is not enough. 1635 */ 1636 if (hdr->gso_type || !xdp_prog) { 1637 *skb_xdp = 1; 1638 return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, 1639 pad, metasize); 1640 } 1641 1642 *skb_xdp = 0; 1643 1644 local_bh_disable(); 1645 rcu_read_lock(); 1646 bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx); 1647 xdp_prog = rcu_dereference(tun->xdp_prog); 1648 if (xdp_prog) { 1649 struct xdp_buff xdp; 1650 u32 act; 1651 1652 xdp_init_buff(&xdp, buflen, &tfile->xdp_rxq); 1653 xdp_prepare_buff(&xdp, buf, pad, len, true); 1654 1655 act = bpf_prog_run_xdp(xdp_prog, &xdp); 1656 if (act == XDP_REDIRECT || act == XDP_TX) { 1657 get_page(alloc_frag->page); 1658 alloc_frag->offset += buflen; 1659 } 1660 err = tun_xdp_act(tun, xdp_prog, &xdp, act); 1661 if (err < 0) { 1662 if (act == XDP_REDIRECT || act == XDP_TX) 1663 put_page(alloc_frag->page); 1664 goto out; 1665 } 1666 1667 if (err == XDP_REDIRECT) 1668 xdp_do_flush(); 1669 if (err != XDP_PASS) 1670 goto out; 1671 1672 pad = xdp.data - xdp.data_hard_start; 1673 len = xdp.data_end - xdp.data; 1674 1675 /* It is known that the xdp_buff was prepared with metadata 1676 * support, so the metasize will never be negative. 
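 * A program that never called bpf_xdp_adjust_meta() leaves data_meta
 * equal to data, so metasize ends up as 0 and skb_metadata_set() is
 * skipped in __tun_build_skb().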
1677 */ 1678 metasize = xdp.data - xdp.data_meta; 1679 } 1680 bpf_net_ctx_clear(bpf_net_ctx); 1681 rcu_read_unlock(); 1682 local_bh_enable(); 1683 1684 return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, pad, 1685 metasize); 1686 1687 out: 1688 bpf_net_ctx_clear(bpf_net_ctx); 1689 rcu_read_unlock(); 1690 local_bh_enable(); 1691 return NULL; 1692 } 1693 1694 /* Get packet from user space buffer */ 1695 static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, 1696 void *msg_control, struct iov_iter *from, 1697 int noblock, bool more) 1698 { 1699 struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) }; 1700 struct sk_buff *skb; 1701 size_t total_len = iov_iter_count(from); 1702 size_t len = total_len, align = tun->align, linear; 1703 struct virtio_net_hdr_v1_hash_tunnel hdr; 1704 struct virtio_net_hdr *gso; 1705 int good_linear; 1706 int copylen; 1707 int hdr_len = 0; 1708 bool zerocopy = false; 1709 int err; 1710 u32 rxhash = 0; 1711 int skb_xdp = 1; 1712 bool frags = tun_napi_frags_enabled(tfile); 1713 enum skb_drop_reason drop_reason = SKB_DROP_REASON_NOT_SPECIFIED; 1714 netdev_features_t features = 0; 1715 1716 /* 1717 * Keep it easy and always zero the whole buffer, even if the 1718 * tunnel-related field will be touched only when the feature 1719 * is enabled and the hdr size id compatible. 1720 */ 1721 memset(&hdr, 0, sizeof(hdr)); 1722 gso = (struct virtio_net_hdr *)&hdr; 1723 1724 if (!(tun->flags & IFF_NO_PI)) { 1725 if (len < sizeof(pi)) 1726 return -EINVAL; 1727 len -= sizeof(pi); 1728 1729 if (!copy_from_iter_full(&pi, sizeof(pi), from)) 1730 return -EFAULT; 1731 } 1732 1733 if (tun->flags & IFF_VNET_HDR) { 1734 int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); 1735 1736 features = tun_vnet_hdr_guest_features(vnet_hdr_sz); 1737 hdr_len = __tun_vnet_hdr_get(vnet_hdr_sz, tun->flags, 1738 features, from, gso); 1739 if (hdr_len < 0) 1740 return hdr_len; 1741 1742 len -= vnet_hdr_sz; 1743 } 1744 1745 if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) { 1746 align += NET_IP_ALIGN; 1747 if (unlikely(len < ETH_HLEN || (hdr_len && hdr_len < ETH_HLEN))) 1748 return -EINVAL; 1749 } 1750 1751 good_linear = SKB_MAX_HEAD(align); 1752 1753 if (msg_control) { 1754 struct iov_iter i = *from; 1755 1756 /* There are 256 bytes to be copied in skb, so there is 1757 * enough room for skb expand head in case it is used. 1758 * The rest of the buffer is mapped from userspace. 1759 */ 1760 copylen = min(hdr_len ? hdr_len : GOODCOPY_LEN, good_linear); 1761 linear = copylen; 1762 iov_iter_advance(&i, copylen); 1763 if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS) 1764 zerocopy = true; 1765 } 1766 1767 if (!frags && tun_can_build_skb(tun, tfile, len, noblock, zerocopy)) { 1768 /* For the packet that is not easy to be processed 1769 * (e.g gso or jumbo packet), we will do it at after 1770 * skb was created with generic XDP routine. 1771 */ 1772 skb = tun_build_skb(tun, tfile, from, gso, len, &skb_xdp); 1773 err = PTR_ERR_OR_ZERO(skb); 1774 if (err) 1775 goto drop; 1776 if (!skb) 1777 return total_len; 1778 } else { 1779 if (!zerocopy) { 1780 copylen = len; 1781 linear = min(hdr_len, good_linear); 1782 } 1783 1784 if (frags) { 1785 mutex_lock(&tfile->napi_mutex); 1786 skb = tun_napi_alloc_frags(tfile, copylen, from); 1787 /* tun_napi_alloc_frags() enforces a layout for the skb. 1788 * If zerocopy is enabled, then this layout will be 1789 * overwritten by zerocopy_sg_from_iter(). 
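 * Hence zerocopy is forced off here and the plain
 * skb_copy_datagram_from_iter() path further down is used instead.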
1790 */ 1791 zerocopy = false; 1792 } else { 1793 if (!linear) 1794 linear = min_t(size_t, good_linear, copylen); 1795 1796 skb = tun_alloc_skb(tfile, align, copylen, linear, 1797 noblock); 1798 } 1799 1800 err = PTR_ERR_OR_ZERO(skb); 1801 if (err) 1802 goto drop; 1803 1804 if (zerocopy) 1805 err = zerocopy_sg_from_iter(skb, from); 1806 else 1807 err = skb_copy_datagram_from_iter(skb, 0, from, len); 1808 1809 if (err) { 1810 err = -EFAULT; 1811 drop_reason = SKB_DROP_REASON_SKB_UCOPY_FAULT; 1812 goto drop; 1813 } 1814 } 1815 1816 if (tun_vnet_hdr_tnl_to_skb(tun->flags, features, skb, &hdr)) { 1817 atomic_long_inc(&tun->rx_frame_errors); 1818 err = -EINVAL; 1819 goto free_skb; 1820 } 1821 1822 switch (tun->flags & TUN_TYPE_MASK) { 1823 case IFF_TUN: 1824 if (tun->flags & IFF_NO_PI) { 1825 u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0; 1826 1827 switch (ip_version) { 1828 case 4: 1829 pi.proto = htons(ETH_P_IP); 1830 break; 1831 case 6: 1832 pi.proto = htons(ETH_P_IPV6); 1833 break; 1834 default: 1835 err = -EINVAL; 1836 goto drop; 1837 } 1838 } 1839 1840 skb_reset_mac_header(skb); 1841 skb->protocol = pi.proto; 1842 skb->dev = tun->dev; 1843 break; 1844 case IFF_TAP: 1845 if (frags && !pskb_may_pull(skb, ETH_HLEN)) { 1846 err = -ENOMEM; 1847 drop_reason = SKB_DROP_REASON_HDR_TRUNC; 1848 goto drop; 1849 } 1850 skb->protocol = eth_type_trans(skb, tun->dev); 1851 break; 1852 } 1853 1854 /* copy skb_ubuf_info for callback when skb has no error */ 1855 if (zerocopy) { 1856 skb_zcopy_init(skb, msg_control); 1857 } else if (msg_control) { 1858 struct ubuf_info *uarg = msg_control; 1859 uarg->ops->complete(NULL, uarg, false); 1860 } 1861 1862 skb_reset_network_header(skb); 1863 skb_probe_transport_header(skb); 1864 skb_record_rx_queue(skb, tfile->queue_index); 1865 1866 if (skb_xdp) { 1867 struct bpf_prog *xdp_prog; 1868 int ret; 1869 1870 local_bh_disable(); 1871 rcu_read_lock(); 1872 xdp_prog = rcu_dereference(tun->xdp_prog); 1873 if (xdp_prog) { 1874 ret = do_xdp_generic(xdp_prog, &skb); 1875 if (ret != XDP_PASS) { 1876 rcu_read_unlock(); 1877 local_bh_enable(); 1878 goto unlock_frags; 1879 } 1880 } 1881 rcu_read_unlock(); 1882 local_bh_enable(); 1883 } 1884 1885 /* Compute the costly rx hash only if needed for flow updates. 1886 * We may get a very small possibility of OOO during switching, not 1887 * worth to optimize. 1888 */ 1889 if (!rcu_access_pointer(tun->steering_prog) && tun->numqueues > 1 && 1890 !tfile->detached) 1891 rxhash = __skb_get_hash_symmetric(skb); 1892 1893 rcu_read_lock(); 1894 if (unlikely(!(tun->dev->flags & IFF_UP))) { 1895 err = -EIO; 1896 rcu_read_unlock(); 1897 drop_reason = SKB_DROP_REASON_DEV_READY; 1898 goto drop; 1899 } 1900 1901 if (frags) { 1902 u32 headlen; 1903 1904 /* Exercise flow dissector code path. 
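 * eth_get_headlen() runs the flow dissector over the user-supplied
 * header; its result is bounded by skb_headlen(), so the check below is
 * purely defensive.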
*/ 1905 skb_push(skb, ETH_HLEN); 1906 headlen = eth_get_headlen(tun->dev, skb->data, 1907 skb_headlen(skb)); 1908 1909 if (unlikely(headlen > skb_headlen(skb))) { 1910 WARN_ON_ONCE(1); 1911 err = -ENOMEM; 1912 dev_core_stats_rx_dropped_inc(tun->dev); 1913 napi_busy: 1914 napi_free_frags(&tfile->napi); 1915 rcu_read_unlock(); 1916 mutex_unlock(&tfile->napi_mutex); 1917 return err; 1918 } 1919 1920 if (likely(napi_schedule_prep(&tfile->napi))) { 1921 local_bh_disable(); 1922 napi_gro_frags(&tfile->napi); 1923 napi_complete(&tfile->napi); 1924 local_bh_enable(); 1925 } else { 1926 err = -EBUSY; 1927 goto napi_busy; 1928 } 1929 mutex_unlock(&tfile->napi_mutex); 1930 } else if (tfile->napi_enabled) { 1931 struct sk_buff_head *queue = &tfile->sk.sk_write_queue; 1932 int queue_len; 1933 1934 spin_lock_bh(&queue->lock); 1935 1936 if (unlikely(tfile->detached)) { 1937 spin_unlock_bh(&queue->lock); 1938 rcu_read_unlock(); 1939 err = -EBUSY; 1940 goto free_skb; 1941 } 1942 1943 __skb_queue_tail(queue, skb); 1944 queue_len = skb_queue_len(queue); 1945 spin_unlock(&queue->lock); 1946 1947 if (!more || queue_len > NAPI_POLL_WEIGHT) 1948 napi_schedule(&tfile->napi); 1949 1950 local_bh_enable(); 1951 } else if (!IS_ENABLED(CONFIG_4KSTACKS)) { 1952 tun_rx_batched(tun, tfile, skb, more); 1953 } else { 1954 netif_rx(skb); 1955 } 1956 rcu_read_unlock(); 1957 1958 preempt_disable(); 1959 dev_sw_netstats_rx_add(tun->dev, len); 1960 preempt_enable(); 1961 1962 if (rxhash) 1963 tun_flow_update(tun, rxhash, tfile); 1964 1965 return total_len; 1966 1967 drop: 1968 if (err != -EAGAIN) 1969 dev_core_stats_rx_dropped_inc(tun->dev); 1970 1971 free_skb: 1972 if (!IS_ERR_OR_NULL(skb)) 1973 kfree_skb_reason(skb, drop_reason); 1974 1975 unlock_frags: 1976 if (frags) { 1977 tfile->napi.skb = NULL; 1978 mutex_unlock(&tfile->napi_mutex); 1979 } 1980 1981 return err ?: total_len; 1982 } 1983 1984 static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from) 1985 { 1986 struct file *file = iocb->ki_filp; 1987 struct tun_file *tfile = file->private_data; 1988 struct tun_struct *tun = tun_get(tfile); 1989 ssize_t result; 1990 int noblock = 0; 1991 1992 if (!tun) 1993 return -EBADFD; 1994 1995 if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT)) 1996 noblock = 1; 1997 1998 result = tun_get_user(tun, tfile, NULL, from, noblock, false); 1999 2000 tun_put(tun); 2001 return result; 2002 } 2003 2004 static ssize_t tun_put_user_xdp(struct tun_struct *tun, 2005 struct tun_file *tfile, 2006 struct xdp_frame *xdp_frame, 2007 struct iov_iter *iter) 2008 { 2009 int vnet_hdr_sz = 0; 2010 size_t size = xdp_frame->len; 2011 ssize_t ret; 2012 2013 if (tun->flags & IFF_VNET_HDR) { 2014 struct virtio_net_hdr gso = { 0 }; 2015 2016 vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); 2017 ret = tun_vnet_hdr_put(vnet_hdr_sz, iter, &gso); 2018 if (ret) 2019 return ret; 2020 } 2021 2022 ret = copy_to_iter(xdp_frame->data, size, iter) + vnet_hdr_sz; 2023 2024 preempt_disable(); 2025 dev_sw_netstats_tx_add(tun->dev, 1, ret); 2026 preempt_enable(); 2027 2028 return ret; 2029 } 2030 2031 /* Put packet to the user space buffer */ 2032 static ssize_t tun_put_user(struct tun_struct *tun, 2033 struct tun_file *tfile, 2034 struct sk_buff *skb, 2035 struct iov_iter *iter) 2036 { 2037 struct tun_pi pi = { 0, skb->protocol }; 2038 ssize_t total; 2039 int vlan_offset = 0; 2040 int vlan_hlen = 0; 2041 int vnet_hdr_sz = 0; 2042 int ret; 2043 2044 if (skb_vlan_tag_present(skb)) 2045 vlan_hlen = VLAN_HLEN; 2046 2047 if (tun->flags & IFF_VNET_HDR) 
2048 vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); 2049 2050 total = skb->len + vlan_hlen + vnet_hdr_sz; 2051 2052 if (!(tun->flags & IFF_NO_PI)) { 2053 if (iov_iter_count(iter) < sizeof(pi)) 2054 return -EINVAL; 2055 2056 total += sizeof(pi); 2057 if (iov_iter_count(iter) < total) { 2058 /* Packet will be striped */ 2059 pi.flags |= TUN_PKT_STRIP; 2060 } 2061 2062 if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi)) 2063 return -EFAULT; 2064 } 2065 2066 if (vnet_hdr_sz) { 2067 struct virtio_net_hdr_v1_hash_tunnel hdr; 2068 struct virtio_net_hdr *gso; 2069 2070 ret = tun_vnet_hdr_tnl_from_skb(tun->flags, tun->dev, skb, 2071 &hdr); 2072 if (ret) 2073 return ret; 2074 2075 /* 2076 * Drop the packet if the configured header size is too small 2077 * WRT the enabled offloads. 2078 */ 2079 gso = (struct virtio_net_hdr *)&hdr; 2080 ret = __tun_vnet_hdr_put(vnet_hdr_sz, tun->dev->features, 2081 iter, gso); 2082 if (ret) 2083 return ret; 2084 } 2085 2086 if (vlan_hlen) { 2087 int ret; 2088 struct veth veth; 2089 2090 veth.h_vlan_proto = skb->vlan_proto; 2091 veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb)); 2092 2093 vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto); 2094 2095 ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset); 2096 if (ret || !iov_iter_count(iter)) 2097 goto done; 2098 2099 ret = copy_to_iter(&veth, sizeof(veth), iter); 2100 if (ret != sizeof(veth) || !iov_iter_count(iter)) 2101 goto done; 2102 } 2103 2104 skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset); 2105 2106 done: 2107 /* caller is in process context, */ 2108 preempt_disable(); 2109 dev_sw_netstats_tx_add(tun->dev, 1, skb->len + vlan_hlen); 2110 preempt_enable(); 2111 2112 return total; 2113 } 2114 2115 static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err) 2116 { 2117 DECLARE_WAITQUEUE(wait, current); 2118 void *ptr = NULL; 2119 int error = 0; 2120 2121 ptr = ptr_ring_consume(&tfile->tx_ring); 2122 if (ptr) 2123 goto out; 2124 if (noblock) { 2125 error = -EAGAIN; 2126 goto out; 2127 } 2128 2129 add_wait_queue(&tfile->socket.wq.wait, &wait); 2130 2131 while (1) { 2132 set_current_state(TASK_INTERRUPTIBLE); 2133 ptr = ptr_ring_consume(&tfile->tx_ring); 2134 if (ptr) 2135 break; 2136 if (signal_pending(current)) { 2137 error = -ERESTARTSYS; 2138 break; 2139 } 2140 if (tfile->socket.sk->sk_shutdown & RCV_SHUTDOWN) { 2141 error = -EFAULT; 2142 break; 2143 } 2144 2145 schedule(); 2146 } 2147 2148 __set_current_state(TASK_RUNNING); 2149 remove_wait_queue(&tfile->socket.wq.wait, &wait); 2150 2151 out: 2152 *err = error; 2153 return ptr; 2154 } 2155 2156 static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile, 2157 struct iov_iter *to, 2158 int noblock, void *ptr) 2159 { 2160 ssize_t ret; 2161 int err; 2162 2163 if (!iov_iter_count(to)) { 2164 tun_ptr_free(ptr); 2165 return 0; 2166 } 2167 2168 if (!ptr) { 2169 /* Read frames from ring */ 2170 ptr = tun_ring_recv(tfile, noblock, &err); 2171 if (!ptr) 2172 return err; 2173 } 2174 2175 if (tun_is_xdp_frame(ptr)) { 2176 struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr); 2177 2178 ret = tun_put_user_xdp(tun, tfile, xdpf, to); 2179 xdp_return_frame(xdpf); 2180 } else { 2181 struct sk_buff *skb = ptr; 2182 2183 ret = tun_put_user(tun, tfile, skb, to); 2184 if (unlikely(ret < 0)) 2185 kfree_skb(skb); 2186 else 2187 consume_skb(skb); 2188 } 2189 2190 return ret; 2191 } 2192 2193 static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to) 2194 { 2195 struct file *file = iocb->ki_filp; 2196 struct tun_file *tfile = 
file->private_data; 2197 struct tun_struct *tun = tun_get(tfile); 2198 ssize_t len = iov_iter_count(to), ret; 2199 int noblock = 0; 2200 2201 if (!tun) 2202 return -EBADFD; 2203 2204 if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT)) 2205 noblock = 1; 2206 2207 ret = tun_do_read(tun, tfile, to, noblock, NULL); 2208 ret = min_t(ssize_t, ret, len); 2209 if (ret > 0) 2210 iocb->ki_pos = ret; 2211 tun_put(tun); 2212 return ret; 2213 } 2214 2215 static void tun_prog_free(struct rcu_head *rcu) 2216 { 2217 struct tun_prog *prog = container_of(rcu, struct tun_prog, rcu); 2218 2219 bpf_prog_destroy(prog->prog); 2220 kfree(prog); 2221 } 2222 2223 static int __tun_set_ebpf(struct tun_struct *tun, 2224 struct tun_prog __rcu **prog_p, 2225 struct bpf_prog *prog) 2226 { 2227 struct tun_prog *old, *new = NULL; 2228 2229 if (prog) { 2230 new = kmalloc(sizeof(*new), GFP_KERNEL); 2231 if (!new) 2232 return -ENOMEM; 2233 new->prog = prog; 2234 } 2235 2236 spin_lock_bh(&tun->lock); 2237 old = rcu_dereference_protected(*prog_p, 2238 lockdep_is_held(&tun->lock)); 2239 rcu_assign_pointer(*prog_p, new); 2240 spin_unlock_bh(&tun->lock); 2241 2242 if (old) 2243 call_rcu(&old->rcu, tun_prog_free); 2244 2245 return 0; 2246 } 2247 2248 static void tun_free_netdev(struct net_device *dev) 2249 { 2250 struct tun_struct *tun = netdev_priv(dev); 2251 2252 BUG_ON(!(list_empty(&tun->disabled))); 2253 2254 tun_flow_uninit(tun); 2255 security_tun_dev_free_security(tun->security); 2256 __tun_set_ebpf(tun, &tun->steering_prog, NULL); 2257 __tun_set_ebpf(tun, &tun->filter_prog, NULL); 2258 } 2259 2260 static void tun_setup(struct net_device *dev) 2261 { 2262 struct tun_struct *tun = netdev_priv(dev); 2263 2264 tun->owner = INVALID_UID; 2265 tun->group = INVALID_GID; 2266 tun_default_link_ksettings(dev, &tun->link_ksettings); 2267 2268 dev->ethtool_ops = &tun_ethtool_ops; 2269 dev->needs_free_netdev = true; 2270 dev->priv_destructor = tun_free_netdev; 2271 /* We prefer our own queue length */ 2272 dev->tx_queue_len = TUN_READQ_SIZE; 2273 } 2274 2275 /* Trivial set of netlink ops to allow deleting tun or tap 2276 * device with netlink. 
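 * Creation via rtnetlink is rejected in tun_validate() below, so devices
 * are still created through the TUNSETIFF ioctl; a persistent device can
 * then be removed with an ordinary rtnetlink delete (for example
 * "ip link delete <name>").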
2277 */ 2278 static int tun_validate(struct nlattr *tb[], struct nlattr *data[], 2279 struct netlink_ext_ack *extack) 2280 { 2281 NL_SET_ERR_MSG(extack, 2282 "tun/tap creation via rtnetlink is not supported."); 2283 return -EOPNOTSUPP; 2284 } 2285 2286 static size_t tun_get_size(const struct net_device *dev) 2287 { 2288 BUILD_BUG_ON(sizeof(u32) != sizeof(uid_t)); 2289 BUILD_BUG_ON(sizeof(u32) != sizeof(gid_t)); 2290 2291 return nla_total_size(sizeof(uid_t)) + /* OWNER */ 2292 nla_total_size(sizeof(gid_t)) + /* GROUP */ 2293 nla_total_size(sizeof(u8)) + /* TYPE */ 2294 nla_total_size(sizeof(u8)) + /* PI */ 2295 nla_total_size(sizeof(u8)) + /* VNET_HDR */ 2296 nla_total_size(sizeof(u8)) + /* PERSIST */ 2297 nla_total_size(sizeof(u8)) + /* MULTI_QUEUE */ 2298 nla_total_size(sizeof(u32)) + /* NUM_QUEUES */ 2299 nla_total_size(sizeof(u32)) + /* NUM_DISABLED_QUEUES */ 2300 0; 2301 } 2302 2303 static int tun_fill_info(struct sk_buff *skb, const struct net_device *dev) 2304 { 2305 struct tun_struct *tun = netdev_priv(dev); 2306 2307 if (nla_put_u8(skb, IFLA_TUN_TYPE, tun->flags & TUN_TYPE_MASK)) 2308 goto nla_put_failure; 2309 if (uid_valid(tun->owner) && 2310 nla_put_u32(skb, IFLA_TUN_OWNER, 2311 from_kuid_munged(current_user_ns(), tun->owner))) 2312 goto nla_put_failure; 2313 if (gid_valid(tun->group) && 2314 nla_put_u32(skb, IFLA_TUN_GROUP, 2315 from_kgid_munged(current_user_ns(), tun->group))) 2316 goto nla_put_failure; 2317 if (nla_put_u8(skb, IFLA_TUN_PI, !(tun->flags & IFF_NO_PI))) 2318 goto nla_put_failure; 2319 if (nla_put_u8(skb, IFLA_TUN_VNET_HDR, !!(tun->flags & IFF_VNET_HDR))) 2320 goto nla_put_failure; 2321 if (nla_put_u8(skb, IFLA_TUN_PERSIST, !!(tun->flags & IFF_PERSIST))) 2322 goto nla_put_failure; 2323 if (nla_put_u8(skb, IFLA_TUN_MULTI_QUEUE, 2324 !!(tun->flags & IFF_MULTI_QUEUE))) 2325 goto nla_put_failure; 2326 if (tun->flags & IFF_MULTI_QUEUE) { 2327 if (nla_put_u32(skb, IFLA_TUN_NUM_QUEUES, tun->numqueues)) 2328 goto nla_put_failure; 2329 if (nla_put_u32(skb, IFLA_TUN_NUM_DISABLED_QUEUES, 2330 tun->numdisabled)) 2331 goto nla_put_failure; 2332 } 2333 2334 return 0; 2335 2336 nla_put_failure: 2337 return -EMSGSIZE; 2338 } 2339 2340 static struct rtnl_link_ops tun_link_ops __read_mostly = { 2341 .kind = DRV_NAME, 2342 .priv_size = sizeof(struct tun_struct), 2343 .setup = tun_setup, 2344 .validate = tun_validate, 2345 .get_size = tun_get_size, 2346 .fill_info = tun_fill_info, 2347 }; 2348 2349 static void tun_sock_write_space(struct sock *sk) 2350 { 2351 struct tun_file *tfile; 2352 wait_queue_head_t *wqueue; 2353 2354 if (!sock_writeable(sk)) 2355 return; 2356 2357 if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags)) 2358 return; 2359 2360 wqueue = sk_sleep(sk); 2361 if (wqueue && waitqueue_active(wqueue)) 2362 wake_up_interruptible_sync_poll(wqueue, EPOLLOUT | 2363 EPOLLWRNORM | EPOLLWRBAND); 2364 2365 tfile = container_of(sk, struct tun_file, sk); 2366 kill_fasync(&tfile->fasync, SIGIO, POLL_OUT); 2367 } 2368 2369 static void tun_put_page(struct tun_page *tpage) 2370 { 2371 if (tpage->page) 2372 __page_frag_cache_drain(tpage->page, tpage->count); 2373 } 2374 2375 static int tun_xdp_one(struct tun_struct *tun, 2376 struct tun_file *tfile, 2377 struct xdp_buff *xdp, int *flush, 2378 struct tun_page *tpage) 2379 { 2380 unsigned int datasize = xdp->data_end - xdp->data; 2381 struct virtio_net_hdr *gso = xdp->data_hard_start; 2382 struct virtio_net_hdr_v1_hash_tunnel *tnl_hdr; 2383 struct bpf_prog *xdp_prog; 2384 struct sk_buff *skb = NULL; 2385 struct 
sk_buff_head *queue; 2386 netdev_features_t features; 2387 u32 rxhash = 0, act; 2388 int buflen = xdp->frame_sz; 2389 int metasize = 0; 2390 int ret = 0; 2391 bool skb_xdp = false; 2392 struct page *page; 2393 2394 if (unlikely(datasize < ETH_HLEN)) 2395 return -EINVAL; 2396 2397 xdp_prog = rcu_dereference(tun->xdp_prog); 2398 if (xdp_prog) { 2399 if (gso->gso_type) { 2400 skb_xdp = true; 2401 goto build; 2402 } 2403 2404 xdp_init_buff(xdp, buflen, &tfile->xdp_rxq); 2405 2406 act = bpf_prog_run_xdp(xdp_prog, xdp); 2407 ret = tun_xdp_act(tun, xdp_prog, xdp, act); 2408 if (ret < 0) { 2409 put_page(virt_to_head_page(xdp->data)); 2410 return ret; 2411 } 2412 2413 switch (ret) { 2414 case XDP_REDIRECT: 2415 *flush = true; 2416 fallthrough; 2417 case XDP_TX: 2418 return 0; 2419 case XDP_PASS: 2420 break; 2421 default: 2422 page = virt_to_head_page(xdp->data); 2423 if (tpage->page == page) { 2424 ++tpage->count; 2425 } else { 2426 tun_put_page(tpage); 2427 tpage->page = page; 2428 tpage->count = 1; 2429 } 2430 return 0; 2431 } 2432 } 2433 2434 build: 2435 skb = build_skb(xdp->data_hard_start, buflen); 2436 if (!skb) { 2437 ret = -ENOMEM; 2438 goto out; 2439 } 2440 2441 skb_reserve(skb, xdp->data - xdp->data_hard_start); 2442 skb_put(skb, xdp->data_end - xdp->data); 2443 2444 /* The externally provided xdp_buff may have no metadata support, which 2445 * is marked by xdp->data_meta being xdp->data + 1. This will lead to a 2446 * metasize of -1 and is the reason why the condition checks for > 0. 2447 */ 2448 metasize = xdp->data - xdp->data_meta; 2449 if (metasize > 0) 2450 skb_metadata_set(skb, metasize); 2451 2452 features = tun_vnet_hdr_guest_features(READ_ONCE(tun->vnet_hdr_sz)); 2453 tnl_hdr = (struct virtio_net_hdr_v1_hash_tunnel *)gso; 2454 if (tun_vnet_hdr_tnl_to_skb(tun->flags, features, skb, tnl_hdr)) { 2455 atomic_long_inc(&tun->rx_frame_errors); 2456 kfree_skb(skb); 2457 ret = -EINVAL; 2458 goto out; 2459 } 2460 2461 skb->protocol = eth_type_trans(skb, tun->dev); 2462 skb_reset_network_header(skb); 2463 skb_probe_transport_header(skb); 2464 skb_record_rx_queue(skb, tfile->queue_index); 2465 2466 if (skb_xdp) { 2467 ret = do_xdp_generic(xdp_prog, &skb); 2468 if (ret != XDP_PASS) { 2469 ret = 0; 2470 goto out; 2471 } 2472 } 2473 2474 if (!rcu_dereference(tun->steering_prog) && tun->numqueues > 1 && 2475 !tfile->detached) 2476 rxhash = __skb_get_hash_symmetric(skb); 2477 2478 if (tfile->napi_enabled) { 2479 queue = &tfile->sk.sk_write_queue; 2480 spin_lock(&queue->lock); 2481 2482 if (unlikely(tfile->detached)) { 2483 spin_unlock(&queue->lock); 2484 kfree_skb(skb); 2485 return -EBUSY; 2486 } 2487 2488 __skb_queue_tail(queue, skb); 2489 spin_unlock(&queue->lock); 2490 ret = 1; 2491 } else { 2492 netif_receive_skb(skb); 2493 ret = 0; 2494 } 2495 2496 /* No need to disable preemption here since this function is 2497 * always called with bh disabled 2498 */ 2499 dev_sw_netstats_rx_add(tun->dev, datasize); 2500 2501 if (rxhash) 2502 tun_flow_update(tun, rxhash, tfile); 2503 2504 out: 2505 return ret; 2506 } 2507 2508 static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len) 2509 { 2510 int ret, i; 2511 struct tun_file *tfile = container_of(sock, struct tun_file, socket); 2512 struct tun_struct *tun = tun_get(tfile); 2513 struct tun_msg_ctl *ctl = m->msg_control; 2514 struct xdp_buff *xdp; 2515 2516 if (!tun) 2517 return -EBADFD; 2518 2519 if (m->msg_controllen == sizeof(struct tun_msg_ctl) && 2520 ctl && ctl->type == TUN_MSG_PTR) { 2521 struct bpf_net_context __bpf_net_ctx, 
*bpf_net_ctx; 2522 struct tun_page tpage; 2523 int n = ctl->num; 2524 int flush = 0, queued = 0; 2525 2526 memset(&tpage, 0, sizeof(tpage)); 2527 2528 local_bh_disable(); 2529 rcu_read_lock(); 2530 bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx); 2531 2532 for (i = 0; i < n; i++) { 2533 xdp = &((struct xdp_buff *)ctl->ptr)[i]; 2534 ret = tun_xdp_one(tun, tfile, xdp, &flush, &tpage); 2535 if (ret > 0) 2536 queued += ret; 2537 } 2538 2539 if (flush) 2540 xdp_do_flush(); 2541 2542 if (tfile->napi_enabled && queued > 0) 2543 napi_schedule(&tfile->napi); 2544 2545 bpf_net_ctx_clear(bpf_net_ctx); 2546 rcu_read_unlock(); 2547 local_bh_enable(); 2548 2549 tun_put_page(&tpage); 2550 2551 ret = total_len; 2552 goto out; 2553 } 2554 2555 ret = tun_get_user(tun, tfile, ctl ? ctl->ptr : NULL, &m->msg_iter, 2556 m->msg_flags & MSG_DONTWAIT, 2557 m->msg_flags & MSG_MORE); 2558 out: 2559 tun_put(tun); 2560 return ret; 2561 } 2562 2563 static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len, 2564 int flags) 2565 { 2566 struct tun_file *tfile = container_of(sock, struct tun_file, socket); 2567 struct tun_struct *tun = tun_get(tfile); 2568 void *ptr = m->msg_control; 2569 int ret; 2570 2571 if (!tun) { 2572 ret = -EBADFD; 2573 goto out_free; 2574 } 2575 2576 if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) { 2577 ret = -EINVAL; 2578 goto out_put_tun; 2579 } 2580 if (flags & MSG_ERRQUEUE) { 2581 ret = sock_recv_errqueue(sock->sk, m, total_len, 2582 SOL_PACKET, TUN_TX_TIMESTAMP); 2583 goto out; 2584 } 2585 ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, ptr); 2586 if (ret > (ssize_t)total_len) { 2587 m->msg_flags |= MSG_TRUNC; 2588 ret = flags & MSG_TRUNC ? ret : total_len; 2589 } 2590 out: 2591 tun_put(tun); 2592 return ret; 2593 2594 out_put_tun: 2595 tun_put(tun); 2596 out_free: 2597 tun_ptr_free(ptr); 2598 return ret; 2599 } 2600 2601 static int tun_ptr_peek_len(void *ptr) 2602 { 2603 if (likely(ptr)) { 2604 if (tun_is_xdp_frame(ptr)) { 2605 struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr); 2606 2607 return xdpf->len; 2608 } 2609 return __skb_array_len_with_tag(ptr); 2610 } else { 2611 return 0; 2612 } 2613 } 2614 2615 static int tun_peek_len(struct socket *sock) 2616 { 2617 struct tun_file *tfile = container_of(sock, struct tun_file, socket); 2618 struct tun_struct *tun; 2619 int ret = 0; 2620 2621 tun = tun_get(tfile); 2622 if (!tun) 2623 return 0; 2624 2625 ret = PTR_RING_PEEK_CALL(&tfile->tx_ring, tun_ptr_peek_len); 2626 tun_put(tun); 2627 2628 return ret; 2629 } 2630 2631 /* Ops structure to mimic raw sockets with tun */ 2632 static const struct proto_ops tun_socket_ops = { 2633 .peek_len = tun_peek_len, 2634 .sendmsg = tun_sendmsg, 2635 .recvmsg = tun_recvmsg, 2636 }; 2637 2638 static struct proto tun_proto = { 2639 .name = "tun", 2640 .owner = THIS_MODULE, 2641 .obj_size = sizeof(struct tun_file), 2642 }; 2643 2644 static int tun_flags(struct tun_struct *tun) 2645 { 2646 return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP); 2647 } 2648 2649 static ssize_t tun_flags_show(struct device *dev, struct device_attribute *attr, 2650 char *buf) 2651 { 2652 struct tun_struct *tun = netdev_priv(to_net_dev(dev)); 2653 return sysfs_emit(buf, "0x%x\n", tun_flags(tun)); 2654 } 2655 2656 static ssize_t owner_show(struct device *dev, struct device_attribute *attr, 2657 char *buf) 2658 { 2659 struct tun_struct *tun = netdev_priv(to_net_dev(dev)); 2660 return uid_valid(tun->owner)? 
2661 sysfs_emit(buf, "%u\n", 2662 from_kuid_munged(current_user_ns(), tun->owner)) : 2663 sysfs_emit(buf, "-1\n"); 2664 } 2665 2666 static ssize_t group_show(struct device *dev, struct device_attribute *attr, 2667 char *buf) 2668 { 2669 struct tun_struct *tun = netdev_priv(to_net_dev(dev)); 2670 return gid_valid(tun->group) ? 2671 sysfs_emit(buf, "%u\n", 2672 from_kgid_munged(current_user_ns(), tun->group)) : 2673 sysfs_emit(buf, "-1\n"); 2674 } 2675 2676 static DEVICE_ATTR_RO(tun_flags); 2677 static DEVICE_ATTR_RO(owner); 2678 static DEVICE_ATTR_RO(group); 2679 2680 static struct attribute *tun_dev_attrs[] = { 2681 &dev_attr_tun_flags.attr, 2682 &dev_attr_owner.attr, 2683 &dev_attr_group.attr, 2684 NULL 2685 }; 2686 2687 static const struct attribute_group tun_attr_group = { 2688 .attrs = tun_dev_attrs 2689 }; 2690 2691 static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) 2692 { 2693 struct tun_struct *tun; 2694 struct tun_file *tfile = file->private_data; 2695 struct net_device *dev; 2696 int err; 2697 2698 if (tfile->detached) 2699 return -EINVAL; 2700 2701 if ((ifr->ifr_flags & IFF_NAPI_FRAGS)) { 2702 if (!capable(CAP_NET_ADMIN)) 2703 return -EPERM; 2704 2705 if (!(ifr->ifr_flags & IFF_NAPI) || 2706 (ifr->ifr_flags & TUN_TYPE_MASK) != IFF_TAP) 2707 return -EINVAL; 2708 } 2709 2710 dev = __dev_get_by_name(net, ifr->ifr_name); 2711 if (dev) { 2712 if (ifr->ifr_flags & IFF_TUN_EXCL) 2713 return -EBUSY; 2714 if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops) 2715 tun = netdev_priv(dev); 2716 else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops) 2717 tun = netdev_priv(dev); 2718 else 2719 return -EINVAL; 2720 2721 if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) != 2722 !!(tun->flags & IFF_MULTI_QUEUE)) 2723 return -EINVAL; 2724 2725 if (tun_not_capable(tun)) 2726 return -EPERM; 2727 err = security_tun_dev_open(tun->security); 2728 if (err < 0) 2729 return err; 2730 2731 err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER, 2732 ifr->ifr_flags & IFF_NAPI, 2733 ifr->ifr_flags & IFF_NAPI_FRAGS, true); 2734 if (err < 0) 2735 return err; 2736 2737 if (tun->flags & IFF_MULTI_QUEUE && 2738 (tun->numqueues + tun->numdisabled > 1)) { 2739 /* One or more queue has already been attached, no need 2740 * to initialize the device again. 2741 */ 2742 netdev_state_change(dev); 2743 return 0; 2744 } 2745 2746 tun->flags = (tun->flags & ~TUN_FEATURES) | 2747 (ifr->ifr_flags & TUN_FEATURES); 2748 2749 netdev_state_change(dev); 2750 } else { 2751 char *name; 2752 unsigned long flags = 0; 2753 int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ? 
2754 MAX_TAP_QUEUES : 1; 2755 2756 if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 2757 return -EPERM; 2758 err = security_tun_dev_create(); 2759 if (err < 0) 2760 return err; 2761 2762 /* Set dev type */ 2763 if (ifr->ifr_flags & IFF_TUN) { 2764 /* TUN device */ 2765 flags |= IFF_TUN; 2766 name = "tun%d"; 2767 } else if (ifr->ifr_flags & IFF_TAP) { 2768 /* TAP device */ 2769 flags |= IFF_TAP; 2770 name = "tap%d"; 2771 } else 2772 return -EINVAL; 2773 2774 if (*ifr->ifr_name) 2775 name = ifr->ifr_name; 2776 2777 dev = alloc_netdev_mqs(sizeof(struct tun_struct), name, 2778 NET_NAME_UNKNOWN, tun_setup, queues, 2779 queues); 2780 2781 if (!dev) 2782 return -ENOMEM; 2783 2784 dev_net_set(dev, net); 2785 dev->rtnl_link_ops = &tun_link_ops; 2786 dev->ifindex = tfile->ifindex; 2787 dev->sysfs_groups[0] = &tun_attr_group; 2788 2789 tun = netdev_priv(dev); 2790 tun->dev = dev; 2791 tun->flags = flags; 2792 tun->txflt.count = 0; 2793 tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr); 2794 2795 tun->align = NET_SKB_PAD; 2796 tun->filter_attached = false; 2797 tun->sndbuf = tfile->socket.sk->sk_sndbuf; 2798 tun->rx_batched = 0; 2799 RCU_INIT_POINTER(tun->steering_prog, NULL); 2800 2801 tun->ifr = ifr; 2802 tun->file = file; 2803 2804 tun_net_initialize(dev); 2805 2806 err = register_netdevice(tun->dev); 2807 if (err < 0) { 2808 free_netdev(dev); 2809 return err; 2810 } 2811 /* free_netdev() won't check refcnt, to avoid race 2812 * with dev_put() we need publish tun after registration. 2813 */ 2814 rcu_assign_pointer(tfile->tun, tun); 2815 } 2816 2817 if (ifr->ifr_flags & IFF_NO_CARRIER) 2818 netif_carrier_off(tun->dev); 2819 else 2820 netif_carrier_on(tun->dev); 2821 2822 /* Make sure persistent devices do not get stuck in 2823 * xoff state. 2824 */ 2825 if (netif_running(tun->dev)) 2826 netif_tx_wake_all_queues(tun->dev); 2827 2828 strcpy(ifr->ifr_name, tun->dev->name); 2829 return 0; 2830 } 2831 2832 static void tun_get_iff(struct tun_struct *tun, struct ifreq *ifr) 2833 { 2834 strcpy(ifr->ifr_name, tun->dev->name); 2835 2836 ifr->ifr_flags = tun_flags(tun); 2837 2838 } 2839 2840 #define PLAIN_GSO (NETIF_F_GSO_UDP_L4 | NETIF_F_TSO | NETIF_F_TSO6) 2841 2842 /* This is like a cut-down ethtool ops, except done via tun fd so no 2843 * privs required. */ 2844 static int set_offload(struct tun_struct *tun, unsigned long arg) 2845 { 2846 netdev_features_t features = 0; 2847 2848 if (arg & TUN_F_CSUM) { 2849 features |= NETIF_F_HW_CSUM; 2850 arg &= ~TUN_F_CSUM; 2851 2852 if (arg & (TUN_F_TSO4|TUN_F_TSO6)) { 2853 if (arg & TUN_F_TSO_ECN) { 2854 features |= NETIF_F_TSO_ECN; 2855 arg &= ~TUN_F_TSO_ECN; 2856 } 2857 if (arg & TUN_F_TSO4) 2858 features |= NETIF_F_TSO; 2859 if (arg & TUN_F_TSO6) 2860 features |= NETIF_F_TSO6; 2861 arg &= ~(TUN_F_TSO4|TUN_F_TSO6); 2862 } 2863 2864 arg &= ~TUN_F_UFO; 2865 2866 /* TODO: for now USO4 and USO6 should work simultaneously */ 2867 if (arg & TUN_F_USO4 && arg & TUN_F_USO6) { 2868 features |= NETIF_F_GSO_UDP_L4; 2869 arg &= ~(TUN_F_USO4 | TUN_F_USO6); 2870 } 2871 2872 /* 2873 * Tunnel offload is allowed only if some plain offload is 2874 * available, too. 2875 */ 2876 if (features & PLAIN_GSO && arg & TUN_F_UDP_TUNNEL_GSO) { 2877 features |= NETIF_F_GSO_UDP_TUNNEL; 2878 if (arg & TUN_F_UDP_TUNNEL_GSO_CSUM) 2879 features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; 2880 arg &= ~(TUN_F_UDP_TUNNEL_GSO | 2881 TUN_F_UDP_TUNNEL_GSO_CSUM); 2882 } 2883 } 2884 2885 /* This gives the user a way to test for new features in future by 2886 * trying to set them. 
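 *
 * For example, user space could probe whether TSO is usable on a tap fd
 * roughly as follows (illustrative sketch only; "tap_fd" is a placeholder
 * and error handling is omitted):
 *
 *	unsigned long offloads = TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6;
 *
 *	if (ioctl(tap_fd, TUNSETOFFLOAD, offloads) < 0)
 *		ioctl(tap_fd, TUNSETOFFLOAD, TUN_F_CSUM);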
*/ 2887 if (arg) 2888 return -EINVAL; 2889 2890 tun->set_features = features; 2891 tun->dev->wanted_features &= ~TUN_USER_FEATURES; 2892 tun->dev->wanted_features |= features; 2893 netdev_update_features(tun->dev); 2894 2895 return 0; 2896 } 2897 2898 static void tun_detach_filter(struct tun_struct *tun, int n) 2899 { 2900 int i; 2901 struct tun_file *tfile; 2902 2903 for (i = 0; i < n; i++) { 2904 tfile = rtnl_dereference(tun->tfiles[i]); 2905 lock_sock(tfile->socket.sk); 2906 sk_detach_filter(tfile->socket.sk); 2907 release_sock(tfile->socket.sk); 2908 } 2909 2910 tun->filter_attached = false; 2911 } 2912 2913 static int tun_attach_filter(struct tun_struct *tun) 2914 { 2915 int i, ret = 0; 2916 struct tun_file *tfile; 2917 2918 for (i = 0; i < tun->numqueues; i++) { 2919 tfile = rtnl_dereference(tun->tfiles[i]); 2920 lock_sock(tfile->socket.sk); 2921 ret = sk_attach_filter(&tun->fprog, tfile->socket.sk); 2922 release_sock(tfile->socket.sk); 2923 if (ret) { 2924 tun_detach_filter(tun, i); 2925 return ret; 2926 } 2927 } 2928 2929 tun->filter_attached = true; 2930 return ret; 2931 } 2932 2933 static void tun_set_sndbuf(struct tun_struct *tun) 2934 { 2935 struct tun_file *tfile; 2936 int i; 2937 2938 for (i = 0; i < tun->numqueues; i++) { 2939 tfile = rtnl_dereference(tun->tfiles[i]); 2940 tfile->socket.sk->sk_sndbuf = tun->sndbuf; 2941 } 2942 } 2943 2944 static int tun_set_queue(struct file *file, struct ifreq *ifr) 2945 { 2946 struct tun_file *tfile = file->private_data; 2947 struct tun_struct *tun; 2948 int ret = 0; 2949 2950 rtnl_lock(); 2951 2952 if (ifr->ifr_flags & IFF_ATTACH_QUEUE) { 2953 tun = tfile->detached; 2954 if (!tun) { 2955 ret = -EINVAL; 2956 goto unlock; 2957 } 2958 ret = security_tun_dev_attach_queue(tun->security); 2959 if (ret < 0) 2960 goto unlock; 2961 ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI, 2962 tun->flags & IFF_NAPI_FRAGS, true); 2963 } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) { 2964 tun = rtnl_dereference(tfile->tun); 2965 if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached) 2966 ret = -EINVAL; 2967 else 2968 __tun_detach(tfile, false); 2969 } else 2970 ret = -EINVAL; 2971 2972 if (ret >= 0) 2973 netdev_state_change(tun->dev); 2974 2975 unlock: 2976 rtnl_unlock(); 2977 return ret; 2978 } 2979 2980 static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog __rcu **prog_p, 2981 void __user *data) 2982 { 2983 struct bpf_prog *prog; 2984 int fd; 2985 2986 if (copy_from_user(&fd, data, sizeof(fd))) 2987 return -EFAULT; 2988 2989 if (fd == -1) { 2990 prog = NULL; 2991 } else { 2992 prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER); 2993 if (IS_ERR(prog)) 2994 return PTR_ERR(prog); 2995 } 2996 2997 return __tun_set_ebpf(tun, prog_p, prog); 2998 } 2999 3000 /* Return correct value for tun->dev->addr_len based on tun->dev->type. 
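 * Used by TUNSETLINK below, which only allows changing the type while the
 * interface is down.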
*/ 3001 static unsigned char tun_get_addr_len(unsigned short type) 3002 { 3003 switch (type) { 3004 case ARPHRD_IP6GRE: 3005 case ARPHRD_TUNNEL6: 3006 return sizeof(struct in6_addr); 3007 case ARPHRD_IPGRE: 3008 case ARPHRD_TUNNEL: 3009 case ARPHRD_SIT: 3010 return 4; 3011 case ARPHRD_ETHER: 3012 return ETH_ALEN; 3013 case ARPHRD_IEEE802154: 3014 case ARPHRD_IEEE802154_MONITOR: 3015 return IEEE802154_EXTENDED_ADDR_LEN; 3016 case ARPHRD_PHONET_PIPE: 3017 case ARPHRD_PPP: 3018 case ARPHRD_NONE: 3019 return 0; 3020 case ARPHRD_6LOWPAN: 3021 return EUI64_ADDR_LEN; 3022 case ARPHRD_FDDI: 3023 return FDDI_K_ALEN; 3024 case ARPHRD_HIPPI: 3025 return HIPPI_ALEN; 3026 case ARPHRD_IEEE802: 3027 return FC_ALEN; 3028 case ARPHRD_ROSE: 3029 return ROSE_ADDR_LEN; 3030 case ARPHRD_NETROM: 3031 return AX25_ADDR_LEN; 3032 case ARPHRD_LOCALTLK: 3033 return LTALK_ALEN; 3034 default: 3035 return 0; 3036 } 3037 } 3038 3039 static long __tun_chr_ioctl(struct file *file, unsigned int cmd, 3040 unsigned long arg, int ifreq_len) 3041 { 3042 struct tun_file *tfile = file->private_data; 3043 struct net *net = sock_net(&tfile->sk); 3044 struct tun_struct *tun; 3045 void __user* argp = (void __user*)arg; 3046 unsigned int carrier; 3047 struct ifreq ifr; 3048 kuid_t owner; 3049 kgid_t group; 3050 int ifindex; 3051 int sndbuf; 3052 int ret; 3053 bool do_notify = false; 3054 3055 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || 3056 (_IOC_TYPE(cmd) == SOCK_IOC_TYPE && cmd != SIOCGSKNS)) { 3057 if (copy_from_user(&ifr, argp, ifreq_len)) 3058 return -EFAULT; 3059 } else { 3060 memset(&ifr, 0, sizeof(ifr)); 3061 } 3062 if (cmd == TUNGETFEATURES) { 3063 /* Currently this just means: "what IFF flags are valid?". 3064 * This is needed because we never checked for invalid flags on 3065 * TUNSETIFF. 3066 */ 3067 return put_user(IFF_TUN | IFF_TAP | IFF_NO_CARRIER | 3068 TUN_FEATURES, (unsigned int __user*)argp); 3069 } else if (cmd == TUNSETQUEUE) { 3070 return tun_set_queue(file, &ifr); 3071 } else if (cmd == SIOCGSKNS) { 3072 if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 3073 return -EPERM; 3074 return open_related_ns(&net->ns, get_net_ns); 3075 } 3076 3077 rtnl_lock(); 3078 3079 tun = tun_get(tfile); 3080 if (cmd == TUNSETIFF) { 3081 ret = -EEXIST; 3082 if (tun) 3083 goto unlock; 3084 3085 ifr.ifr_name[IFNAMSIZ-1] = '\0'; 3086 3087 ret = tun_set_iff(net, file, &ifr); 3088 3089 if (ret) 3090 goto unlock; 3091 3092 if (copy_to_user(argp, &ifr, ifreq_len)) 3093 ret = -EFAULT; 3094 goto unlock; 3095 } 3096 if (cmd == TUNSETIFINDEX) { 3097 ret = -EPERM; 3098 if (tun) 3099 goto unlock; 3100 3101 ret = -EFAULT; 3102 if (copy_from_user(&ifindex, argp, sizeof(ifindex))) 3103 goto unlock; 3104 ret = -EINVAL; 3105 if (ifindex < 0) 3106 goto unlock; 3107 ret = 0; 3108 tfile->ifindex = ifindex; 3109 goto unlock; 3110 } 3111 3112 ret = -EBADFD; 3113 if (!tun) 3114 goto unlock; 3115 3116 netif_info(tun, drv, tun->dev, "tun_chr_ioctl cmd %u\n", cmd); 3117 3118 net = dev_net(tun->dev); 3119 ret = 0; 3120 switch (cmd) { 3121 case TUNGETIFF: 3122 tun_get_iff(tun, &ifr); 3123 3124 if (tfile->detached) 3125 ifr.ifr_flags |= IFF_DETACH_QUEUE; 3126 if (!tfile->socket.sk->sk_filter) 3127 ifr.ifr_flags |= IFF_NOFILTER; 3128 3129 if (copy_to_user(argp, &ifr, ifreq_len)) 3130 ret = -EFAULT; 3131 break; 3132 3133 case TUNSETNOCSUM: 3134 /* Disable/Enable checksum */ 3135 3136 /* [unimplemented] */ 3137 netif_info(tun, drv, tun->dev, "ignored: set checksum %s\n", 3138 arg ? 
"disabled" : "enabled"); 3139 break; 3140 3141 case TUNSETPERSIST: 3142 /* Disable/Enable persist mode. Keep an extra reference to the 3143 * module to prevent the module being unprobed. 3144 */ 3145 if (arg && !(tun->flags & IFF_PERSIST)) { 3146 tun->flags |= IFF_PERSIST; 3147 __module_get(THIS_MODULE); 3148 do_notify = true; 3149 } 3150 if (!arg && (tun->flags & IFF_PERSIST)) { 3151 tun->flags &= ~IFF_PERSIST; 3152 module_put(THIS_MODULE); 3153 do_notify = true; 3154 } 3155 3156 netif_info(tun, drv, tun->dev, "persist %s\n", 3157 arg ? "enabled" : "disabled"); 3158 break; 3159 3160 case TUNSETOWNER: 3161 /* Set owner of the device */ 3162 owner = make_kuid(current_user_ns(), arg); 3163 if (!uid_valid(owner)) { 3164 ret = -EINVAL; 3165 break; 3166 } 3167 tun->owner = owner; 3168 do_notify = true; 3169 netif_info(tun, drv, tun->dev, "owner set to %u\n", 3170 from_kuid(&init_user_ns, tun->owner)); 3171 break; 3172 3173 case TUNSETGROUP: 3174 /* Set group of the device */ 3175 group = make_kgid(current_user_ns(), arg); 3176 if (!gid_valid(group)) { 3177 ret = -EINVAL; 3178 break; 3179 } 3180 tun->group = group; 3181 do_notify = true; 3182 netif_info(tun, drv, tun->dev, "group set to %u\n", 3183 from_kgid(&init_user_ns, tun->group)); 3184 break; 3185 3186 case TUNSETLINK: 3187 /* Only allow setting the type when the interface is down */ 3188 if (tun->dev->flags & IFF_UP) { 3189 netif_info(tun, drv, tun->dev, 3190 "Linktype set failed because interface is up\n"); 3191 ret = -EBUSY; 3192 } else { 3193 ret = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE, 3194 tun->dev); 3195 ret = notifier_to_errno(ret); 3196 if (ret) { 3197 netif_info(tun, drv, tun->dev, 3198 "Refused to change device type\n"); 3199 break; 3200 } 3201 tun->dev->type = (int) arg; 3202 tun->dev->addr_len = tun_get_addr_len(tun->dev->type); 3203 netif_info(tun, drv, tun->dev, "linktype set to %d\n", 3204 tun->dev->type); 3205 call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, 3206 tun->dev); 3207 } 3208 break; 3209 3210 case TUNSETDEBUG: 3211 tun->msg_enable = (u32)arg; 3212 break; 3213 3214 case TUNSETOFFLOAD: 3215 ret = set_offload(tun, arg); 3216 break; 3217 3218 case TUNSETTXFILTER: 3219 /* Can be set only for TAPs */ 3220 ret = -EINVAL; 3221 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 3222 break; 3223 ret = update_filter(&tun->txflt, (void __user *)arg); 3224 break; 3225 3226 case SIOCGIFHWADDR: 3227 /* Get hw address */ 3228 dev_get_mac_address(&ifr.ifr_hwaddr, net, tun->dev->name); 3229 if (copy_to_user(argp, &ifr, ifreq_len)) 3230 ret = -EFAULT; 3231 break; 3232 3233 case SIOCSIFHWADDR: 3234 /* Set hw address */ 3235 if (tun->dev->addr_len > sizeof(ifr.ifr_hwaddr)) { 3236 ret = -EINVAL; 3237 break; 3238 } 3239 ret = dev_set_mac_address_user(tun->dev, 3240 (struct sockaddr_storage *)&ifr.ifr_hwaddr, 3241 NULL); 3242 break; 3243 3244 case TUNGETSNDBUF: 3245 sndbuf = tfile->socket.sk->sk_sndbuf; 3246 if (copy_to_user(argp, &sndbuf, sizeof(sndbuf))) 3247 ret = -EFAULT; 3248 break; 3249 3250 case TUNSETSNDBUF: 3251 if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) { 3252 ret = -EFAULT; 3253 break; 3254 } 3255 if (sndbuf <= 0) { 3256 ret = -EINVAL; 3257 break; 3258 } 3259 3260 tun->sndbuf = sndbuf; 3261 tun_set_sndbuf(tun); 3262 break; 3263 3264 case TUNATTACHFILTER: 3265 /* Can be set only for TAPs */ 3266 ret = -EINVAL; 3267 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 3268 break; 3269 ret = -EFAULT; 3270 if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog))) 3271 break; 3272 3273 ret = tun_attach_filter(tun); 3274 
break; 3275 3276 case TUNDETACHFILTER: 3277 /* Can be set only for TAPs */ 3278 ret = -EINVAL; 3279 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 3280 break; 3281 ret = 0; 3282 tun_detach_filter(tun, tun->numqueues); 3283 break; 3284 3285 case TUNGETFILTER: 3286 ret = -EINVAL; 3287 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 3288 break; 3289 ret = -EFAULT; 3290 if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog))) 3291 break; 3292 ret = 0; 3293 break; 3294 3295 case TUNSETSTEERINGEBPF: 3296 ret = tun_set_ebpf(tun, &tun->steering_prog, argp); 3297 break; 3298 3299 case TUNSETFILTEREBPF: 3300 ret = tun_set_ebpf(tun, &tun->filter_prog, argp); 3301 break; 3302 3303 case TUNSETCARRIER: 3304 ret = -EFAULT; 3305 if (copy_from_user(&carrier, argp, sizeof(carrier))) 3306 goto unlock; 3307 3308 ret = tun_net_change_carrier(tun->dev, (bool)carrier); 3309 break; 3310 3311 case TUNGETDEVNETNS: 3312 ret = -EPERM; 3313 if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 3314 goto unlock; 3315 ret = open_related_ns(&net->ns, get_net_ns); 3316 break; 3317 3318 default: 3319 ret = tun_vnet_ioctl(&tun->vnet_hdr_sz, &tun->flags, cmd, argp); 3320 break; 3321 } 3322 3323 if (do_notify) 3324 netdev_state_change(tun->dev); 3325 3326 unlock: 3327 rtnl_unlock(); 3328 if (tun) 3329 tun_put(tun); 3330 return ret; 3331 } 3332 3333 static long tun_chr_ioctl(struct file *file, 3334 unsigned int cmd, unsigned long arg) 3335 { 3336 return __tun_chr_ioctl(file, cmd, arg, sizeof (struct ifreq)); 3337 } 3338 3339 #ifdef CONFIG_COMPAT 3340 static long tun_chr_compat_ioctl(struct file *file, 3341 unsigned int cmd, unsigned long arg) 3342 { 3343 switch (cmd) { 3344 case TUNSETIFF: 3345 case TUNGETIFF: 3346 case TUNSETTXFILTER: 3347 case TUNGETSNDBUF: 3348 case TUNSETSNDBUF: 3349 case SIOCGIFHWADDR: 3350 case SIOCSIFHWADDR: 3351 arg = (unsigned long)compat_ptr(arg); 3352 break; 3353 default: 3354 arg = (compat_ulong_t)arg; 3355 break; 3356 } 3357 3358 /* 3359 * compat_ifreq is shorter than ifreq, so we must not access beyond 3360 * the end of that structure. All fields that are used in this 3361 * driver are compatible though, we don't need to convert the 3362 * contents. 
3363 */ 3364 return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq)); 3365 } 3366 #endif /* CONFIG_COMPAT */ 3367 3368 static int tun_chr_fasync(int fd, struct file *file, int on) 3369 { 3370 struct tun_file *tfile = file->private_data; 3371 int ret; 3372 3373 if (on) { 3374 ret = file_f_owner_allocate(file); 3375 if (ret) 3376 goto out; 3377 } 3378 3379 if ((ret = fasync_helper(fd, file, on, &tfile->fasync)) < 0) 3380 goto out; 3381 3382 if (on) { 3383 __f_setown(file, task_pid(current), PIDTYPE_TGID, 0); 3384 tfile->flags |= TUN_FASYNC; 3385 } else 3386 tfile->flags &= ~TUN_FASYNC; 3387 ret = 0; 3388 out: 3389 return ret; 3390 } 3391 3392 static int tun_chr_open(struct inode *inode, struct file * file) 3393 { 3394 struct net *net = current->nsproxy->net_ns; 3395 struct tun_file *tfile; 3396 3397 tfile = (struct tun_file *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL, 3398 &tun_proto, 0); 3399 if (!tfile) 3400 return -ENOMEM; 3401 if (ptr_ring_init(&tfile->tx_ring, 0, GFP_KERNEL)) { 3402 sk_free(&tfile->sk); 3403 return -ENOMEM; 3404 } 3405 3406 mutex_init(&tfile->napi_mutex); 3407 RCU_INIT_POINTER(tfile->tun, NULL); 3408 tfile->flags = 0; 3409 tfile->ifindex = 0; 3410 3411 init_waitqueue_head(&tfile->socket.wq.wait); 3412 3413 tfile->socket.file = file; 3414 tfile->socket.ops = &tun_socket_ops; 3415 3416 sock_init_data_uid(&tfile->socket, &tfile->sk, current_fsuid()); 3417 3418 tfile->sk.sk_write_space = tun_sock_write_space; 3419 tfile->sk.sk_sndbuf = INT_MAX; 3420 3421 file->private_data = tfile; 3422 INIT_LIST_HEAD(&tfile->next); 3423 3424 sock_set_flag(&tfile->sk, SOCK_ZEROCOPY); 3425 3426 /* tun groks IOCB_NOWAIT just fine, mark it as such */ 3427 file->f_mode |= FMODE_NOWAIT; 3428 return 0; 3429 } 3430 3431 static int tun_chr_close(struct inode *inode, struct file *file) 3432 { 3433 struct tun_file *tfile = file->private_data; 3434 3435 tun_detach(tfile, true); 3436 3437 return 0; 3438 } 3439 3440 #ifdef CONFIG_PROC_FS 3441 static void tun_chr_show_fdinfo(struct seq_file *m, struct file *file) 3442 { 3443 struct tun_file *tfile = file->private_data; 3444 struct tun_struct *tun; 3445 struct ifreq ifr; 3446 3447 memset(&ifr, 0, sizeof(ifr)); 3448 3449 rtnl_lock(); 3450 tun = tun_get(tfile); 3451 if (tun) 3452 tun_get_iff(tun, &ifr); 3453 rtnl_unlock(); 3454 3455 if (tun) 3456 tun_put(tun); 3457 3458 seq_printf(m, "iff:\t%s\n", ifr.ifr_name); 3459 } 3460 #endif 3461 3462 static const struct file_operations tun_fops = { 3463 .owner = THIS_MODULE, 3464 .read_iter = tun_chr_read_iter, 3465 .write_iter = tun_chr_write_iter, 3466 .poll = tun_chr_poll, 3467 .unlocked_ioctl = tun_chr_ioctl, 3468 #ifdef CONFIG_COMPAT 3469 .compat_ioctl = tun_chr_compat_ioctl, 3470 #endif 3471 .open = tun_chr_open, 3472 .release = tun_chr_close, 3473 .fasync = tun_chr_fasync, 3474 #ifdef CONFIG_PROC_FS 3475 .show_fdinfo = tun_chr_show_fdinfo, 3476 #endif 3477 }; 3478 3479 static struct miscdevice tun_miscdev = { 3480 .minor = TUN_MINOR, 3481 .name = "tun", 3482 .nodename = "net/tun", 3483 .fops = &tun_fops, 3484 }; 3485 3486 /* ethtool interface */ 3487 3488 static void tun_default_link_ksettings(struct net_device *dev, 3489 struct ethtool_link_ksettings *cmd) 3490 { 3491 ethtool_link_ksettings_zero_link_mode(cmd, supported); 3492 ethtool_link_ksettings_zero_link_mode(cmd, advertising); 3493 cmd->base.speed = SPEED_10000; 3494 cmd->base.duplex = DUPLEX_FULL; 3495 cmd->base.port = PORT_TP; 3496 cmd->base.phy_address = 0; 3497 cmd->base.autoneg = AUTONEG_DISABLE; 3498 } 3499 3500 static int 
tun_get_link_ksettings(struct net_device *dev, 3501 struct ethtool_link_ksettings *cmd) 3502 { 3503 struct tun_struct *tun = netdev_priv(dev); 3504 3505 memcpy(cmd, &tun->link_ksettings, sizeof(*cmd)); 3506 return 0; 3507 } 3508 3509 static int tun_set_link_ksettings(struct net_device *dev, 3510 const struct ethtool_link_ksettings *cmd) 3511 { 3512 struct tun_struct *tun = netdev_priv(dev); 3513 3514 memcpy(&tun->link_ksettings, cmd, sizeof(*cmd)); 3515 return 0; 3516 } 3517 3518 static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 3519 { 3520 struct tun_struct *tun = netdev_priv(dev); 3521 3522 strscpy(info->driver, DRV_NAME, sizeof(info->driver)); 3523 strscpy(info->version, DRV_VERSION, sizeof(info->version)); 3524 3525 switch (tun->flags & TUN_TYPE_MASK) { 3526 case IFF_TUN: 3527 strscpy(info->bus_info, "tun", sizeof(info->bus_info)); 3528 break; 3529 case IFF_TAP: 3530 strscpy(info->bus_info, "tap", sizeof(info->bus_info)); 3531 break; 3532 } 3533 } 3534 3535 static u32 tun_get_msglevel(struct net_device *dev) 3536 { 3537 struct tun_struct *tun = netdev_priv(dev); 3538 3539 return tun->msg_enable; 3540 } 3541 3542 static void tun_set_msglevel(struct net_device *dev, u32 value) 3543 { 3544 struct tun_struct *tun = netdev_priv(dev); 3545 3546 tun->msg_enable = value; 3547 } 3548 3549 static int tun_get_coalesce(struct net_device *dev, 3550 struct ethtool_coalesce *ec, 3551 struct kernel_ethtool_coalesce *kernel_coal, 3552 struct netlink_ext_ack *extack) 3553 { 3554 struct tun_struct *tun = netdev_priv(dev); 3555 3556 ec->rx_max_coalesced_frames = tun->rx_batched; 3557 3558 return 0; 3559 } 3560 3561 static int tun_set_coalesce(struct net_device *dev, 3562 struct ethtool_coalesce *ec, 3563 struct kernel_ethtool_coalesce *kernel_coal, 3564 struct netlink_ext_ack *extack) 3565 { 3566 struct tun_struct *tun = netdev_priv(dev); 3567 3568 if (ec->rx_max_coalesced_frames > NAPI_POLL_WEIGHT) 3569 tun->rx_batched = NAPI_POLL_WEIGHT; 3570 else 3571 tun->rx_batched = ec->rx_max_coalesced_frames; 3572 3573 return 0; 3574 } 3575 3576 static void tun_get_channels(struct net_device *dev, 3577 struct ethtool_channels *channels) 3578 { 3579 struct tun_struct *tun = netdev_priv(dev); 3580 3581 channels->combined_count = tun->numqueues; 3582 channels->max_combined = tun->flags & IFF_MULTI_QUEUE ? 
MAX_TAP_QUEUES : 1; 3583 } 3584 3585 static const struct ethtool_ops tun_ethtool_ops = { 3586 .supported_coalesce_params = ETHTOOL_COALESCE_RX_MAX_FRAMES, 3587 .get_drvinfo = tun_get_drvinfo, 3588 .get_msglevel = tun_get_msglevel, 3589 .set_msglevel = tun_set_msglevel, 3590 .get_link = ethtool_op_get_link, 3591 .get_channels = tun_get_channels, 3592 .get_ts_info = ethtool_op_get_ts_info, 3593 .get_coalesce = tun_get_coalesce, 3594 .set_coalesce = tun_set_coalesce, 3595 .get_link_ksettings = tun_get_link_ksettings, 3596 .set_link_ksettings = tun_set_link_ksettings, 3597 }; 3598 3599 static int tun_queue_resize(struct tun_struct *tun) 3600 { 3601 struct net_device *dev = tun->dev; 3602 struct tun_file *tfile; 3603 struct ptr_ring **rings; 3604 int n = tun->numqueues + tun->numdisabled; 3605 int ret, i; 3606 3607 rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL); 3608 if (!rings) 3609 return -ENOMEM; 3610 3611 for (i = 0; i < tun->numqueues; i++) { 3612 tfile = rtnl_dereference(tun->tfiles[i]); 3613 rings[i] = &tfile->tx_ring; 3614 } 3615 list_for_each_entry(tfile, &tun->disabled, next) 3616 rings[i++] = &tfile->tx_ring; 3617 3618 ret = ptr_ring_resize_multiple_bh(rings, n, 3619 dev->tx_queue_len, GFP_KERNEL, 3620 tun_ptr_free); 3621 3622 kfree(rings); 3623 return ret; 3624 } 3625 3626 static int tun_device_event(struct notifier_block *unused, 3627 unsigned long event, void *ptr) 3628 { 3629 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 3630 struct tun_struct *tun = netdev_priv(dev); 3631 int i; 3632 3633 if (dev->rtnl_link_ops != &tun_link_ops) 3634 return NOTIFY_DONE; 3635 3636 switch (event) { 3637 case NETDEV_CHANGE_TX_QUEUE_LEN: 3638 if (tun_queue_resize(tun)) 3639 return NOTIFY_BAD; 3640 break; 3641 case NETDEV_UP: 3642 for (i = 0; i < tun->numqueues; i++) { 3643 struct tun_file *tfile; 3644 3645 tfile = rtnl_dereference(tun->tfiles[i]); 3646 tfile->socket.sk->sk_write_space(tfile->socket.sk); 3647 } 3648 break; 3649 default: 3650 break; 3651 } 3652 3653 return NOTIFY_DONE; 3654 } 3655 3656 static struct notifier_block tun_notifier_block __read_mostly = { 3657 .notifier_call = tun_device_event, 3658 }; 3659 3660 static int __init tun_init(void) 3661 { 3662 int ret = 0; 3663 3664 pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION); 3665 3666 ret = rtnl_link_register(&tun_link_ops); 3667 if (ret) { 3668 pr_err("Can't register link_ops\n"); 3669 goto err_linkops; 3670 } 3671 3672 ret = misc_register(&tun_miscdev); 3673 if (ret) { 3674 pr_err("Can't register misc device %d\n", TUN_MINOR); 3675 goto err_misc; 3676 } 3677 3678 ret = register_netdevice_notifier(&tun_notifier_block); 3679 if (ret) { 3680 pr_err("Can't register netdevice notifier\n"); 3681 goto err_notifier; 3682 } 3683 3684 return 0; 3685 3686 err_notifier: 3687 misc_deregister(&tun_miscdev); 3688 err_misc: 3689 rtnl_link_unregister(&tun_link_ops); 3690 err_linkops: 3691 return ret; 3692 } 3693 3694 static void __exit tun_cleanup(void) 3695 { 3696 misc_deregister(&tun_miscdev); 3697 rtnl_link_unregister(&tun_link_ops); 3698 unregister_netdevice_notifier(&tun_notifier_block); 3699 } 3700 3701 /* Get an underlying socket object from tun file. Returns error unless file is 3702 * attached to a device. The returned object works like a packet socket, it 3703 * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for 3704 * holding a reference to the file for as long as the socket is in use. 
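 *
 * A kernel-side consumer (vhost-net is the in-tree user) would do roughly
 * the following and then move packets through sock->ops->sendmsg() and
 * ->recvmsg(); this is only an illustrative sketch:
 *
 *	struct socket *sock = tun_get_socket(file);
 *
 *	if (IS_ERR(sock))
 *		return PTR_ERR(sock);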
*/ 3705 struct socket *tun_get_socket(struct file *file) 3706 { 3707 struct tun_file *tfile; 3708 if (file->f_op != &tun_fops) 3709 return ERR_PTR(-EINVAL); 3710 tfile = file->private_data; 3711 if (!tfile) 3712 return ERR_PTR(-EBADFD); 3713 return &tfile->socket; 3714 } 3715 EXPORT_SYMBOL_GPL(tun_get_socket); 3716 3717 struct ptr_ring *tun_get_tx_ring(struct file *file) 3718 { 3719 struct tun_file *tfile; 3720 3721 if (file->f_op != &tun_fops) 3722 return ERR_PTR(-EINVAL); 3723 tfile = file->private_data; 3724 if (!tfile) 3725 return ERR_PTR(-EBADFD); 3726 return &tfile->tx_ring; 3727 } 3728 EXPORT_SYMBOL_GPL(tun_get_tx_ring); 3729 3730 module_init(tun_init); 3731 module_exit(tun_cleanup); 3732 MODULE_DESCRIPTION(DRV_DESCRIPTION); 3733 MODULE_AUTHOR(DRV_COPYRIGHT); 3734 MODULE_LICENSE("GPL"); 3735 MODULE_ALIAS_MISCDEV(TUN_MINOR); 3736 MODULE_ALIAS("devname:net/tun"); 3737
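
/*
 * Illustrative user-space sketch of the character-device interface
 * implemented above (tun_chr_open(), TUNSETIFF and read()/write()).
 * This is not part of the driver; it assumes <fcntl.h>, <string.h>,
 * <unistd.h>, <sys/ioctl.h>, <linux/if.h> and <linux/if_tun.h>, and most
 * error handling is trimmed. "name" may be empty, in which case the
 * kernel picks a "tunN" name and returns it in ifr_name:
 *
 *	static int tun_alloc_example(char *name)
 *	{
 *		struct ifreq ifr = { 0 };
 *		int fd = open("/dev/net/tun", O_RDWR);
 *
 *		if (fd < 0)
 *			return fd;
 *		ifr.ifr_flags = IFF_TUN | IFF_NO_PI;
 *		strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);
 *		if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
 *			close(fd);
 *			return -1;
 *		}
 *		strcpy(name, ifr.ifr_name);	// kernel fills in the final name
 *		return fd;			// read()/write() now carry raw IP packets
 *	}
 */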