/*
 * GENEVE: Generic Network Virtualization Encapsulation
 *
 * Copyright (c) 2015 Red Hat, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/hash.h>
#include <net/dst_metadata.h>
#include <net/gro_cells.h>
#include <net/rtnetlink.h>
#include <net/geneve.h>
#include <net/protocol.h>

#define GENEVE_NETDEV_VER	"0.6"

#define GENEVE_UDP_PORT		6081

#define GENEVE_N_VID		(1u << 24)
#define GENEVE_VID_MASK		(GENEVE_N_VID - 1)

#define VNI_HASH_BITS		10
#define VNI_HASH_SIZE		(1<<VNI_HASH_BITS)

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

#define GENEVE_VER 0
#define GENEVE_BASE_HLEN (sizeof(struct udphdr) + sizeof(struct genevehdr))

/* per-network namespace private data for this module */
struct geneve_net {
	struct list_head	geneve_list;
	struct list_head	sock_list;
};

static int geneve_net_id;

/* Pseudo network device */
struct geneve_dev {
	struct hlist_node  hlist;	/* vni hash table */
	struct net	   *net;	/* netns for packet i/o */
	struct net_device  *dev;	/* netdev for geneve tunnel */
	struct geneve_sock *sock;	/* socket used for geneve tunnel */
	u8		   vni[3];	/* virtual network ID for tunnel */
	u8		   ttl;		/* TTL override */
	u8		   tos;		/* TOS override */
	struct sockaddr_in remote;	/* IPv4 address for link partner */
	struct list_head   next;	/* geneve's per namespace list */
	__be16		   dst_port;
	bool		   collect_md;
	struct gro_cells   gro_cells;
};

struct geneve_sock {
	bool			collect_md;
	struct list_head	list;
	struct socket		*sock;
	struct rcu_head		rcu;
	int			refcnt;
	struct udp_offload	udp_offloads;
	struct hlist_head	vni_list[VNI_HASH_SIZE];
};

static inline __u32 geneve_net_vni_hash(u8 vni[3])
{
	__u32 vnid;

	vnid = (vni[0] << 16) | (vni[1] << 8) | vni[2];
	return hash_32(vnid, VNI_HASH_BITS);
}

static __be64 vni_to_tunnel_id(const __u8 *vni)
{
#ifdef __BIG_ENDIAN
	return (vni[0] << 16) | (vni[1] << 8) | vni[2];
#else
	return (__force __be64)(((__force u64)vni[0] << 40) |
				((__force u64)vni[1] << 48) |
				((__force u64)vni[2] << 56));
#endif
}

static struct geneve_dev *geneve_lookup(struct geneve_sock *gs,
					__be32 addr, u8 vni[])
{
	struct hlist_head *vni_list_head;
	struct geneve_dev *geneve;
	__u32 hash;

	/* Find the device for this VNI */
	hash = geneve_net_vni_hash(vni);
	vni_list_head = &gs->vni_list[hash];
	hlist_for_each_entry_rcu(geneve, vni_list_head, hlist) {
		if (!memcmp(vni, geneve->vni, sizeof(geneve->vni)) &&
		    addr == geneve->remote.sin_addr.s_addr)
			return geneve;
	}
	return NULL;
}

static inline struct genevehdr *geneve_hdr(const struct sk_buff *skb)
{
	return (struct genevehdr *)(udp_hdr(skb) + 1);
}

/* geneve receive/decap routine */
static void geneve_rx(struct geneve_sock *gs, struct sk_buff *skb)
{
	struct genevehdr *gnvh = geneve_hdr(skb);
	struct metadata_dst *tun_dst = NULL;
	struct geneve_dev *geneve = NULL;
	struct pcpu_sw_netstats *stats;
	struct iphdr *iph;
	u8 *vni;
	__be32 addr;
	int err;

	iph = ip_hdr(skb); /* outer IP header... */

	if (gs->collect_md) {
		static u8 zero_vni[3];

		vni = zero_vni;
		addr = 0;
	} else {
		vni = gnvh->vni;
		addr = iph->saddr;
	}

	geneve = geneve_lookup(gs, addr, vni);
	if (!geneve)
		goto drop;

	if (ip_tunnel_collect_metadata() || gs->collect_md) {
		__be16 flags;

		flags = TUNNEL_KEY | TUNNEL_GENEVE_OPT |
			(gnvh->oam ? TUNNEL_OAM : 0) |
			(gnvh->critical ? TUNNEL_CRIT_OPT : 0);

		tun_dst = udp_tun_rx_dst(skb, AF_INET, flags,
					 vni_to_tunnel_id(gnvh->vni),
					 gnvh->opt_len * 4);
		if (!tun_dst)
			goto drop;
		/* Update tunnel dst according to Geneve options. */
		ip_tunnel_info_opts_set(&tun_dst->u.tun_info,
					gnvh->options, gnvh->opt_len * 4);
	} else {
		/* Drop packets w/ critical options,
		 * since we don't support any...
		 */
		if (gnvh->critical)
			goto drop;
	}

	skb_reset_mac_header(skb);
	skb_scrub_packet(skb, !net_eq(geneve->net, dev_net(geneve->dev)));
	skb->protocol = eth_type_trans(skb, geneve->dev);
	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);

	if (tun_dst)
		skb_dst_set(skb, &tun_dst->dst);

	/* Ignore packet loops (and multicast echo) */
	if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr))
		goto drop;

	skb_reset_network_header(skb);

	err = IP_ECN_decapsulate(iph, skb);

	if (unlikely(err)) {
		if (log_ecn_error)
			net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
					     &iph->saddr, iph->tos);
		if (err > 1) {
			++geneve->dev->stats.rx_frame_errors;
			++geneve->dev->stats.rx_errors;
			goto drop;
		}
	}

	stats = this_cpu_ptr(geneve->dev->tstats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += skb->len;
	u64_stats_update_end(&stats->syncp);

	gro_cells_receive(&geneve->gro_cells, skb);
	return;
drop:
	/* Consume bad packet */
	kfree_skb(skb);
}

/* Setup stats when device is created */
static int geneve_init(struct net_device *dev)
{
	struct geneve_dev *geneve = netdev_priv(dev);
	int err;

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	err = gro_cells_init(&geneve->gro_cells, dev);
	if (err) {
		free_percpu(dev->tstats);
		return err;
	}

	return 0;
}

static void geneve_uninit(struct net_device *dev)
{
	struct geneve_dev *geneve = netdev_priv(dev);

	gro_cells_destroy(&geneve->gro_cells);
	free_percpu(dev->tstats);
}

/* Callback from net/ipv4/udp.c to receive packets */
static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct genevehdr *geneveh;
	struct geneve_sock *gs;
	int opts_len;

	/* Need Geneve and inner Ethernet header to be present */
	if (unlikely(!pskb_may_pull(skb, GENEVE_BASE_HLEN)))
		goto error;

	/* Return packets with reserved bits set */
	geneveh = geneve_hdr(skb);
	if (unlikely(geneveh->ver != GENEVE_VER))
		goto error;

	if (unlikely(geneveh->proto_type != htons(ETH_P_TEB)))
		goto error;

	opts_len = geneveh->opt_len * 4;
	if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len,
				 htons(ETH_P_TEB)))
		goto drop;

	gs = rcu_dereference_sk_user_data(sk);
	if (!gs)
		goto drop;

	geneve_rx(gs, skb);
	return 0;

drop:
	/* Consume bad packet */
	kfree_skb(skb);
	return 0;

error:
	/* Let the UDP layer deal with the skb */
	return 1;
}

static struct socket *geneve_create_sock(struct net *net, bool ipv6,
					 __be16 port)
{
	struct socket *sock;
	struct udp_port_cfg udp_conf;
	int err;

	memset(&udp_conf, 0, sizeof(udp_conf));

	if (ipv6) {
		udp_conf.family = AF_INET6;
	} else {
		udp_conf.family = AF_INET;
		udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
	}

	udp_conf.local_udp_port = port;

	/* Open UDP socket */
	err = udp_sock_create(net, &udp_conf, &sock);
	if (err < 0)
		return ERR_PTR(err);

	return sock;
}

static void geneve_notify_add_rx_port(struct geneve_sock *gs)
{
	struct sock *sk = gs->sock->sk;
	sa_family_t sa_family = sk->sk_family;
	int err;

	if (sa_family == AF_INET) {
		err = udp_add_offload(&gs->udp_offloads);
		if (err)
			pr_warn("geneve: udp_add_offload failed with status %d\n",
				err);
	}
}

static int geneve_hlen(struct genevehdr *gh)
{
	return sizeof(*gh) + gh->opt_len * 4;
}

/* GRO receive: only aggregate packets with matching Geneve headers */
static struct sk_buff **geneve_gro_receive(struct sk_buff **head,
					   struct sk_buff *skb,
					   struct udp_offload *uoff)
{
	struct sk_buff *p, **pp = NULL;
	struct genevehdr *gh, *gh2;
	unsigned int hlen, gh_len, off_gnv;
	const struct packet_offload *ptype;
	__be16 type;
	int flush = 1;

	off_gnv = skb_gro_offset(skb);
	hlen = off_gnv + sizeof(*gh);
	gh = skb_gro_header_fast(skb, off_gnv);
	if (skb_gro_header_hard(skb, hlen)) {
		gh = skb_gro_header_slow(skb, hlen, off_gnv);
		if (unlikely(!gh))
			goto out;
	}

	if (gh->ver != GENEVE_VER || gh->oam)
		goto out;
	gh_len = geneve_hlen(gh);

	hlen = off_gnv + gh_len;
	if (skb_gro_header_hard(skb, hlen)) {
		gh = skb_gro_header_slow(skb, hlen, off_gnv);
		if (unlikely(!gh))
			goto out;
	}

	flush = 0;

	for (p = *head; p; p = p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		gh2 = (struct genevehdr *)(p->data + off_gnv);
		if (gh->opt_len != gh2->opt_len ||
		    memcmp(gh, gh2, gh_len)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}

	type = gh->proto_type;

	rcu_read_lock();
	ptype = gro_find_receive_by_type(type);
	if (!ptype) {
		flush = 1;
		goto out_unlock;
	}

	skb_gro_pull(skb, gh_len);
	skb_gro_postpull_rcsum(skb, gh, gh_len);
	pp = ptype->callbacks.gro_receive(head, skb);

out_unlock:
	rcu_read_unlock();
out:
	NAPI_GRO_CB(skb)->flush |= flush;

	return pp;
}

static int geneve_gro_complete(struct sk_buff *skb, int nhoff,
			       struct udp_offload *uoff)
{
	struct genevehdr *gh;
	struct packet_offload *ptype;
	__be16 type;
	int gh_len;
	int err = -ENOSYS;

	udp_tunnel_gro_complete(skb, nhoff);

	gh = (struct genevehdr *)(skb->data + nhoff);
	gh_len = geneve_hlen(gh);
	type = gh->proto_type;

	rcu_read_lock();
	ptype = gro_find_complete_by_type(type);
	if (ptype)
		err = ptype->callbacks.gro_complete(skb, nhoff + gh_len);

	rcu_read_unlock();
	return err;
}

/* Create new listen socket if needed */
static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port,
						bool ipv6)
{
	struct geneve_net *gn = net_generic(net, geneve_net_id);
	struct geneve_sock *gs;
	struct socket *sock;
	struct udp_tunnel_sock_cfg tunnel_cfg;
	int h;

	gs = kzalloc(sizeof(*gs), GFP_KERNEL);
	if (!gs)
		return ERR_PTR(-ENOMEM);

	sock = geneve_create_sock(net, ipv6, port);
	if (IS_ERR(sock)) {
		kfree(gs);
		return ERR_CAST(sock);
	}

	gs->sock = sock;
	gs->refcnt = 1;
	for (h = 0; h < VNI_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&gs->vni_list[h]);

	/* Initialize the geneve udp offloads structure */
	gs->udp_offloads.port = port;
	gs->udp_offloads.callbacks.gro_receive = geneve_gro_receive;
	gs->udp_offloads.callbacks.gro_complete = geneve_gro_complete;
	geneve_notify_add_rx_port(gs);

	/* Mark socket as an encapsulation socket */
	tunnel_cfg.sk_user_data = gs;
	tunnel_cfg.encap_type = 1;
	tunnel_cfg.encap_rcv = geneve_udp_encap_recv;
	tunnel_cfg.encap_destroy = NULL;
	setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
	list_add(&gs->list, &gn->sock_list);
	return gs;
}

static void geneve_notify_del_rx_port(struct geneve_sock *gs)
{
	struct sock *sk = gs->sock->sk;
	sa_family_t sa_family = sk->sk_family;

	if (sa_family == AF_INET)
		udp_del_offload(&gs->udp_offloads);
}

static void geneve_sock_release(struct geneve_sock *gs)
{
	if (--gs->refcnt)
		return;

	list_del(&gs->list);
	geneve_notify_del_rx_port(gs);
	udp_tunnel_sock_release(gs->sock);
	kfree_rcu(gs, rcu);
}

/* Find an open IPv4 geneve socket bound to the given destination port */
static struct geneve_sock *geneve_find_sock(struct geneve_net *gn,
					    __be16 dst_port)
{
	struct geneve_sock *gs;

	list_for_each_entry(gs, &gn->sock_list, list) {
		if (inet_sk(gs->sock->sk)->inet_sport == dst_port &&
		    inet_sk(gs->sock->sk)->sk.sk_family == AF_INET) {
			return gs;
		}
	}
	return NULL;
}

static int geneve_open(struct net_device *dev)
{
	struct geneve_dev *geneve = netdev_priv(dev);
	struct net *net = geneve->net;
	struct geneve_net *gn = net_generic(net, geneve_net_id);
	struct geneve_sock *gs;
	__u32 hash;

	gs = geneve_find_sock(gn, geneve->dst_port);
	if (gs) {
		gs->refcnt++;
		goto out;
	}

	gs = geneve_socket_create(net, geneve->dst_port, false);
	if (IS_ERR(gs))
		return PTR_ERR(gs);

out:
	gs->collect_md = geneve->collect_md;
	geneve->sock = gs;

	hash = geneve_net_vni_hash(geneve->vni);
	hlist_add_head_rcu(&geneve->hlist, &gs->vni_list[hash]);
	return 0;
}

static int geneve_stop(struct net_device *dev)
{
	struct geneve_dev *geneve = netdev_priv(dev);
	struct geneve_sock *gs = geneve->sock;

	if (!hlist_unhashed(&geneve->hlist))
		hlist_del_rcu(&geneve->hlist);
	geneve_sock_release(gs);
	return 0;
}

/* Reserve headroom and push the Geneve header and options onto the skb */
static int geneve_build_skb(struct rtable *rt, struct sk_buff *skb,
			    __be16 tun_flags, u8 vni[3], u8 opt_len, u8 *opt,
			    bool csum)
{
	struct genevehdr *gnvh;
	int min_headroom;
	int err;

	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
			+ GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr);
	err = skb_cow_head(skb, min_headroom);
	if (unlikely(err)) {
		kfree_skb(skb);
		goto free_rt;
	}

	skb = udp_tunnel_handle_offloads(skb, csum);
	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
		goto free_rt;
	}

	gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
	gnvh->ver = GENEVE_VER;
	gnvh->opt_len = opt_len / 4;
	gnvh->oam = !!(tun_flags & TUNNEL_OAM);
	gnvh->critical = !!(tun_flags & TUNNEL_CRIT_OPT);
	gnvh->rsvd1 = 0;
	memcpy(gnvh->vni, vni, 3);
	gnvh->proto_type = htons(ETH_P_TEB);
	gnvh->rsvd2 = 0;
	memcpy(gnvh->options, opt, opt_len);

	skb_set_inner_protocol(skb, htons(ETH_P_TEB));
	return 0;

free_rt:
	ip_rt_put(rt);
	return err;
}

/* Route lookup for the outer IPv4 header */
static struct rtable *geneve_get_rt(struct sk_buff *skb,
				    struct net_device *dev,
				    struct flowi4 *fl4,
				    struct ip_tunnel_info *info)
{
	struct geneve_dev *geneve = netdev_priv(dev);
	struct rtable *rt = NULL;
	__u8 tos;

	memset(fl4, 0, sizeof(*fl4));
	fl4->flowi4_mark = skb->mark;
	fl4->flowi4_proto = IPPROTO_UDP;

	if (info) {
		fl4->daddr = info->key.u.ipv4.dst;
		fl4->saddr = info->key.u.ipv4.src;
		fl4->flowi4_tos = RT_TOS(info->key.tos);
	} else {
		tos = geneve->tos;
		if (tos == 1) {
			const struct iphdr *iip = ip_hdr(skb);

			tos = ip_tunnel_get_dsfield(iip, skb);
		}

		fl4->flowi4_tos = RT_TOS(tos);
		fl4->daddr = geneve->remote.sin_addr.s_addr;
	}

	rt = ip_route_output_key(geneve->net, fl4);
	if (IS_ERR(rt)) {
		netdev_dbg(dev, "no route to %pI4\n", &fl4->daddr);
		return ERR_PTR(-ENETUNREACH);
	}
	if (rt->dst.dev == dev) { /* is this necessary? */
		netdev_dbg(dev, "circular route to %pI4\n", &fl4->daddr);
		ip_rt_put(rt);
		return ERR_PTR(-ELOOP);
	}
	return rt;
}

/* Convert 64 bit tunnel ID to 24 bit VNI. */
static void tunnel_id_to_vni(__be64 tun_id, __u8 *vni)
{
#ifdef __BIG_ENDIAN
	vni[0] = (__force __u8)(tun_id >> 16);
	vni[1] = (__force __u8)(tun_id >> 8);
	vni[2] = (__force __u8)tun_id;
#else
	vni[0] = (__force __u8)((__force u64)tun_id >> 40);
	vni[1] = (__force __u8)((__force u64)tun_id >> 48);
	vni[2] = (__force __u8)((__force u64)tun_id >> 56);
#endif
}

/* Encapsulate the skb in Geneve/UDP/IPv4 and transmit it */
static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct geneve_dev *geneve = netdev_priv(dev);
	struct geneve_sock *gs = geneve->sock;
	struct ip_tunnel_info *info = NULL;
	struct rtable *rt = NULL;
	const struct iphdr *iip; /* interior IP header */
	int err = -EINVAL;
	struct flowi4 fl4;
	__u8 tos, ttl;
	__be16 sport;
	bool udp_csum;
	__be16 df;

	if (geneve->collect_md) {
		info = skb_tunnel_info(skb);
		if (unlikely(info && !(info->mode & IP_TUNNEL_INFO_TX))) {
			netdev_dbg(dev, "no tunnel metadata\n");
			goto tx_error;
		}
		if (info && ip_tunnel_info_af(info) != AF_INET)
			goto tx_error;
	}

	rt = geneve_get_rt(skb, dev, &fl4, info);
	if (IS_ERR(rt)) {
		netdev_dbg(dev, "no route to %pI4\n", &fl4.daddr);
		err = PTR_ERR(rt);
		goto tx_error;
	}

	sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
	skb_reset_mac_header(skb);

	iip = ip_hdr(skb);

	if (info) {
		const struct ip_tunnel_key *key = &info->key;
		u8 *opts = NULL;
		u8 vni[3];

		tunnel_id_to_vni(key->tun_id, vni);
		if (key->tun_flags & TUNNEL_GENEVE_OPT)
			opts = ip_tunnel_info_opts(info);

		udp_csum = !!(key->tun_flags & TUNNEL_CSUM);
		err = geneve_build_skb(rt, skb, key->tun_flags, vni,
				       info->options_len, opts, udp_csum);
		if (unlikely(err))
			goto err;

		tos = ip_tunnel_ecn_encap(key->tos, iip, skb);
		ttl = key->ttl;
		df = key->tun_flags & TUNNEL_DONT_FRAGMENT ?
			htons(IP_DF) : 0;
	} else {
		udp_csum = false;
		err = geneve_build_skb(rt, skb, 0, geneve->vni,
				       0, NULL, udp_csum);
		if (unlikely(err))
			goto err;

		tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, iip, skb);
		ttl = geneve->ttl;
		if (!ttl && IN_MULTICAST(ntohl(fl4.daddr)))
			ttl = 1;
		ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
		df = 0;
	}
	err = udp_tunnel_xmit_skb(rt, gs->sock->sk, skb, fl4.saddr, fl4.daddr,
				  tos, ttl, df, sport, geneve->dst_port,
				  !net_eq(geneve->net, dev_net(geneve->dev)),
				  !udp_csum);

	iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
	return NETDEV_TX_OK;

tx_error:
	dev_kfree_skb(skb);
err:
	if (err == -ELOOP)
		dev->stats.collisions++;
	else if (err == -ENETUNREACH)
		dev->stats.tx_carrier_errors++;
	else
		dev->stats.tx_errors++;
	return NETDEV_TX_OK;
}

static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info = skb_tunnel_info(skb);
	struct geneve_dev *geneve = netdev_priv(dev);
	struct rtable *rt;
	struct flowi4 fl4;

	if (ip_tunnel_info_af(info) != AF_INET)
		return -EINVAL;

	rt = geneve_get_rt(skb, dev, &fl4, info);
	if (IS_ERR(rt))
		return PTR_ERR(rt);

	ip_rt_put(rt);
	info->key.u.ipv4.src = fl4.saddr;
	info->key.tp_src = udp_flow_src_port(geneve->net, skb,
					     1, USHRT_MAX, true);
	info->key.tp_dst = geneve->dst_port;
	return 0;
}

static const struct net_device_ops geneve_netdev_ops = {
	.ndo_init		= geneve_init,
	.ndo_uninit		= geneve_uninit,
	.ndo_open		= geneve_open,
	.ndo_stop		= geneve_stop,
	.ndo_start_xmit		= geneve_xmit,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_fill_metadata_dst	= geneve_fill_metadata_dst,
};

static void geneve_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->version, GENEVE_NETDEV_VER, sizeof(drvinfo->version));
	strlcpy(drvinfo->driver, "geneve", sizeof(drvinfo->driver));
}

static const struct ethtool_ops geneve_ethtool_ops = {
	.get_drvinfo	= geneve_get_drvinfo,
	.get_link	= ethtool_op_get_link,
};

/* Info for udev, that this is a virtual tunnel endpoint */
static struct device_type geneve_type = {
	.name = "geneve",
};

/* Initialize the device structure. */
static void geneve_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->netdev_ops = &geneve_netdev_ops;
	dev->ethtool_ops = &geneve_ethtool_ops;
	dev->destructor = free_netdev;

	SET_NETDEV_DEVTYPE(dev, &geneve_type);

	dev->features |= NETIF_F_LLTX;
	dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
	dev->features |= NETIF_F_RXCSUM;
	dev->features |= NETIF_F_GSO_SOFTWARE;

	dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
	dev->hw_features |= NETIF_F_GSO_SOFTWARE;

	netif_keep_dst(dev);
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
	eth_hw_addr_random(dev);
}

static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = {
	[IFLA_GENEVE_ID]		= { .type = NLA_U32 },
	[IFLA_GENEVE_REMOTE]		= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_GENEVE_TTL]		= { .type = NLA_U8 },
	[IFLA_GENEVE_TOS]		= { .type = NLA_U8 },
	[IFLA_GENEVE_PORT]		= { .type = NLA_U16 },
	[IFLA_GENEVE_COLLECT_METADATA]	= { .type = NLA_FLAG },
};

static int geneve_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;

		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		return -EINVAL;

	if (data[IFLA_GENEVE_ID]) {
		__u32 vni = nla_get_u32(data[IFLA_GENEVE_ID]);

		if (vni >= GENEVE_VID_MASK)
			return -ERANGE;
	}

	return 0;
}

static struct geneve_dev *geneve_find_dev(struct geneve_net *gn,
					  __be16 dst_port,
					  __be32 rem_addr,
					  u8 vni[],
					  bool *tun_on_same_port,
					  bool *tun_collect_md)
{
	struct geneve_dev *geneve, *t;

	*tun_on_same_port = false;
	*tun_collect_md = false;
	t = NULL;
	list_for_each_entry(geneve, &gn->geneve_list, next) {
		if (geneve->dst_port == dst_port) {
			*tun_collect_md = geneve->collect_md;
			*tun_on_same_port = true;
		}
		if (!memcmp(vni, geneve->vni, sizeof(geneve->vni)) &&
		    rem_addr == geneve->remote.sin_addr.s_addr &&
		    dst_port == geneve->dst_port)
			t = geneve;
	}
	return t;
}

static int geneve_configure(struct net *net, struct net_device *dev,
			    __be32 rem_addr, __u32 vni, __u8 ttl, __u8 tos,
			    __be16 dst_port, bool metadata)
{
	struct geneve_net *gn = net_generic(net, geneve_net_id);
	struct geneve_dev *t, *geneve = netdev_priv(dev);
	bool tun_collect_md, tun_on_same_port;
	int err;

	if (metadata) {
		if (rem_addr || vni || tos || ttl)
			return -EINVAL;
	}

	geneve->net = net;
	geneve->dev = dev;

	geneve->vni[0] = (vni & 0x00ff0000) >> 16;
	geneve->vni[1] = (vni & 0x0000ff00) >> 8;
	geneve->vni[2] = vni & 0x000000ff;

	geneve->remote.sin_addr.s_addr = rem_addr;
	if (IN_MULTICAST(ntohl(geneve->remote.sin_addr.s_addr)))
		return -EINVAL;

	geneve->ttl = ttl;
	geneve->tos = tos;
	geneve->dst_port = dst_port;
	geneve->collect_md = metadata;

	t = geneve_find_dev(gn, dst_port, rem_addr, geneve->vni,
			    &tun_on_same_port, &tun_collect_md);
	if (t)
		return -EBUSY;

	if (metadata) {
		if (tun_on_same_port)
			return -EPERM;
	} else {
		if (tun_collect_md)
			return -EPERM;
	}

	err = register_netdevice(dev);
	if (err)
		return err;

	list_add(&geneve->next, &gn->geneve_list);
	return 0;
}

static int geneve_newlink(struct net *net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[])
{
	__be16 dst_port = htons(GENEVE_UDP_PORT);
	__u8 ttl = 0, tos = 0;
	bool metadata = false;
	__be32 rem_addr = 0;
	__u32 vni = 0;

	if (data[IFLA_GENEVE_ID])
		vni = nla_get_u32(data[IFLA_GENEVE_ID]);

	if (data[IFLA_GENEVE_REMOTE])
		rem_addr = nla_get_in_addr(data[IFLA_GENEVE_REMOTE]);

	if (data[IFLA_GENEVE_TTL])
		ttl = nla_get_u8(data[IFLA_GENEVE_TTL]);

	if (data[IFLA_GENEVE_TOS])
		tos = nla_get_u8(data[IFLA_GENEVE_TOS]);

	if (data[IFLA_GENEVE_PORT])
		dst_port = nla_get_be16(data[IFLA_GENEVE_PORT]);

	if (data[IFLA_GENEVE_COLLECT_METADATA])
		metadata = true;

	return geneve_configure(net, dev, rem_addr, vni,
				ttl, tos, dst_port, metadata);
}

static void geneve_dellink(struct net_device *dev, struct list_head *head)
{
	struct geneve_dev *geneve = netdev_priv(dev);

	list_del(&geneve->next);
	unregister_netdevice_queue(dev, head);
}

static size_t geneve_get_size(const struct net_device *dev)
{
	return nla_total_size(sizeof(__u32)) +	/* IFLA_GENEVE_ID */
		nla_total_size(sizeof(struct in_addr)) + /* IFLA_GENEVE_REMOTE */
		nla_total_size(sizeof(__u8)) +	/* IFLA_GENEVE_TTL */
		nla_total_size(sizeof(__u8)) +	/* IFLA_GENEVE_TOS */
		nla_total_size(sizeof(__be16)) + /* IFLA_GENEVE_PORT */
		nla_total_size(0) +	 /* IFLA_GENEVE_COLLECT_METADATA */
		0;
}

static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct geneve_dev *geneve = netdev_priv(dev);
	__u32 vni;

	vni = (geneve->vni[0] << 16) | (geneve->vni[1] << 8) | geneve->vni[2];
	if (nla_put_u32(skb, IFLA_GENEVE_ID, vni))
		goto nla_put_failure;

	if (nla_put_in_addr(skb, IFLA_GENEVE_REMOTE,
			    geneve->remote.sin_addr.s_addr))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_GENEVE_TTL, geneve->ttl) ||
	    nla_put_u8(skb, IFLA_GENEVE_TOS, geneve->tos))
		goto nla_put_failure;

	if (nla_put_be16(skb, IFLA_GENEVE_PORT, geneve->dst_port))
		goto nla_put_failure;

	if (geneve->collect_md) {
		if (nla_put_flag(skb, IFLA_GENEVE_COLLECT_METADATA))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct rtnl_link_ops geneve_link_ops __read_mostly = {
	.kind		= "geneve",
	.maxtype	= IFLA_GENEVE_MAX,
	.policy		= geneve_policy,
	.priv_size	= sizeof(struct geneve_dev),
	.setup		= geneve_setup,
	.validate	= geneve_validate,
	.newlink	= geneve_newlink,
	.dellink	= geneve_dellink,
	.get_size	= geneve_get_size,
	.fill_info	= geneve_fill_info,
};

struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
					u8 name_assign_type, u16 dst_port)
{
	struct nlattr *tb[IFLA_MAX + 1];
	struct net_device *dev;
	int err;

	memset(tb, 0, sizeof(tb));
	dev = rtnl_create_link(net, name, name_assign_type,
			       &geneve_link_ops, tb);
	if (IS_ERR(dev))
		return dev;

	err = geneve_configure(net, dev, 0, 0, 0, 0, htons(dst_port), true);
	if (err) {
		free_netdev(dev);
		return ERR_PTR(err);
	}
	return dev;
}
EXPORT_SYMBOL_GPL(geneve_dev_create_fb);

static __net_init int geneve_init_net(struct net *net)
{
	struct geneve_net *gn = net_generic(net, geneve_net_id);

	INIT_LIST_HEAD(&gn->geneve_list);
	INIT_LIST_HEAD(&gn->sock_list);
	return 0;
}

static void __net_exit geneve_exit_net(struct net *net)
{
	struct geneve_net *gn = net_generic(net, geneve_net_id);
	struct geneve_dev *geneve, *next;
	struct net_device *dev, *aux;
	LIST_HEAD(list);

	rtnl_lock();

	/* gather any geneve devices that were moved into this ns */
	for_each_netdev_safe(net, dev, aux)
		if (dev->rtnl_link_ops == &geneve_link_ops)
			unregister_netdevice_queue(dev, &list);

	/* now gather any other geneve devices that were created in this ns */
	list_for_each_entry_safe(geneve, next, &gn->geneve_list, next) {
		/* If geneve->dev is in the same netns, it was already added
		 * to the list by the previous loop.
		 */
		if (!net_eq(dev_net(geneve->dev), net))
			unregister_netdevice_queue(geneve->dev, &list);
	}

	/* unregister the devices gathered above */
	unregister_netdevice_many(&list);
	rtnl_unlock();
}

static struct pernet_operations geneve_net_ops = {
	.init = geneve_init_net,
	.exit = geneve_exit_net,
	.id   = &geneve_net_id,
	.size = sizeof(struct geneve_net),
};

static int __init geneve_init_module(void)
{
	int rc;

	rc = register_pernet_subsys(&geneve_net_ops);
	if (rc)
		goto out1;

	rc = rtnl_link_register(&geneve_link_ops);
	if (rc)
		goto out2;

	return 0;
out2:
	unregister_pernet_subsys(&geneve_net_ops);
out1:
	return rc;
}
late_initcall(geneve_init_module);

static void __exit geneve_cleanup_module(void)
{
	rtnl_link_unregister(&geneve_link_ops);
	unregister_pernet_subsys(&geneve_net_ops);
}
module_exit(geneve_cleanup_module);

MODULE_LICENSE("GPL");
MODULE_VERSION(GENEVE_NETDEV_VER);
MODULE_AUTHOR("John W. Linville <linville@tuxdriver.com>");
MODULE_DESCRIPTION("Interface driver for GENEVE encapsulated traffic");
MODULE_ALIAS_RTNL_LINK("geneve");