// SPDX-License-Identifier: GPL-2.0-only
/*
 * GENEVE: Generic Network Virtualization Encapsulation
 *
 * Copyright (c) 2015 Red Hat, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/hash.h>
#include <net/dst_metadata.h>
#include <net/gro_cells.h>
#include <net/rtnetlink.h>
#include <net/geneve.h>
#include <net/gro.h>
#include <net/netdev_lock.h>
#include <net/protocol.h>

#define GENEVE_NETDEV_VER	"0.6"

#define GENEVE_N_VID		(1u << 24)
#define GENEVE_VID_MASK		(GENEVE_N_VID - 1)

#define VNI_HASH_BITS		10
#define VNI_HASH_SIZE		(1<<VNI_HASH_BITS)

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

#define GENEVE_VER 0
#define GENEVE_BASE_HLEN (sizeof(struct udphdr) + sizeof(struct genevehdr))
#define GENEVE_IPV4_HLEN (ETH_HLEN + sizeof(struct iphdr) + GENEVE_BASE_HLEN)
#define GENEVE_IPV6_HLEN (ETH_HLEN + sizeof(struct ipv6hdr) + GENEVE_BASE_HLEN)

/* Option class/type/size used for the on-wire GRO hint TLV; the size is
 * in bytes, the TLV length field is in 4-byte words.
 */
#define GENEVE_OPT_NETDEV_CLASS 0x100
#define GENEVE_OPT_GRO_HINT_SIZE 8
#define GENEVE_OPT_GRO_HINT_TYPE 1
#define GENEVE_OPT_GRO_HINT_LEN 1

/* Payload of the GRO hint option: describes the headers of a nested
 * (double) encapsulation that follows the geneve header, so the receive
 * path can aggregate without parsing the inner tunnel itself.
 * All offsets are relative to the first byte after the geneve options.
 */
struct geneve_opt_gro_hint {
	u8 inner_proto_id:2,		/* index into proto_id_map */
	   nested_is_v6:1;		/* nested network header is IPv6 */
	u8 nested_nh_offset;		/* offset of the nested network hdr */
	u8 nested_tp_offset;		/* offset of the nested UDP hdr */
	u8 nested_hdr_len;		/* total nested headers length */
};

/* Per-skb tx state carried in skb->cb between the header-build steps. */
struct geneve_skb_cb {
	unsigned int gro_hint_len;	/* 0 when no hint option is added */
	struct geneve_opt_gro_hint gro_hint;
};

#define GENEVE_SKB_CB(__skb) ((struct geneve_skb_cb *)&((__skb)->cb[0]))

/* per-network namespace private data for this module */
struct geneve_net {
	struct list_head	geneve_list;
	/* sock_list is protected by rtnl lock */
	struct list_head	sock_list;
};

static unsigned int geneve_net_id;

/* Hash-table link from a (socket, VNI) bucket back to its device. */
struct geneve_dev_node {
	struct hlist_node hlist;
	struct geneve_dev *geneve;
};

struct geneve_config {
	bool			collect_md;
	bool			use_udp6_rx_checksums;
	bool			ttl_inherit;
	bool			gro_hint;
	enum ifla_geneve_df	df;
	bool			inner_proto_inherit;
	u16			port_min;
	u16			port_max;

	/* Must be last -- ends in a flexible-array member. */
	struct ip_tunnel_info	info;
};

/* Pseudo network device */
struct geneve_dev {
	struct geneve_dev_node hlist4;	/* vni hash table for IPv4 socket */
#if IS_ENABLED(CONFIG_IPV6)
	struct geneve_dev_node hlist6;	/* vni hash table for IPv6 socket */
#endif
	struct net	   *net;	/* netns for packet i/o */
	struct net_device  *dev;	/* netdev for geneve tunnel */
	struct geneve_sock __rcu *sock4;	/* IPv4 socket used for geneve tunnel */
#if IS_ENABLED(CONFIG_IPV6)
	struct geneve_sock __rcu *sock6;	/* IPv6 socket used for geneve tunnel */
#endif
	struct list_head   next;	/* geneve's per namespace list */
	struct gro_cells   gro_cells;
	struct geneve_config cfg;
};

struct geneve_sock {
	bool			collect_md;
	bool			gro_hint;
	struct list_head	list;
	struct socket		*sock;
	struct rcu_head		rcu;
	int			refcnt;
	struct hlist_head	vni_list[VNI_HASH_SIZE];
};

/* Inner protocols a GRO hint can describe; the array index is what goes
 * on the wire in geneve_opt_gro_hint.inner_proto_id (2 bits).
 */
static const __be16 proto_id_map[] = { htons(ETH_P_TEB),
				       htons(ETH_P_IPV6),
				       htons(ETH_P_IP) };

/* Map an ethertype to its proto_id_map index, or -1 when unsupported. */
static int proto_to_id(__be16 proto)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(proto_id_map); i++)
		if (proto_id_map[i] == proto)
			return i;

	return -1;
}

/* Hash the 24-bit VNI into a vni_list bucket index. */
static inline __u32 geneve_net_vni_hash(u8 vni[3])
{
	__u32 vnid;

	vnid = (vni[0] << 16) | (vni[1] << 8) | vni[2];
	return hash_32(vnid, VNI_HASH_BITS);
}

/* Convert 24 bit VNI to a 64 bit tunnel ID. */
static __be64 vni_to_tunnel_id(const __u8 *vni)
{
#ifdef __BIG_ENDIAN
	return (vni[0] << 16) | (vni[1] << 8) | vni[2];
#else
	return (__force __be64)(((__force u64)vni[0] << 40) |
				((__force u64)vni[1] << 48) |
				((__force u64)vni[2] << 56));
#endif
}
/* Convert 64 bit tunnel ID to 24 bit VNI. */
static void tunnel_id_to_vni(__be64 tun_id, __u8 *vni)
{
#ifdef __BIG_ENDIAN
	vni[0] = (__force __u8)(tun_id >> 16);
	vni[1] = (__force __u8)(tun_id >> 8);
	vni[2] = (__force __u8)tun_id;
#else
	vni[0] = (__force __u8)((__force u64)tun_id >> 40);
	vni[1] = (__force __u8)((__force u64)tun_id >> 48);
	vni[2] = (__force __u8)((__force u64)tun_id >> 56);
#endif
}

/* Compare a VNI with the VNI bytes of an in-memory __be64 tunnel ID;
 * with the layout produced by vni_to_tunnel_id() those are bytes 5..7
 * on both endiannesses.
 */
static bool eq_tun_id_and_vni(u8 *tun_id, u8 *vni)
{
	return !memcmp(vni, &tun_id[5], 3);
}

static sa_family_t geneve_get_sk_family(struct geneve_sock *gs)
{
	return gs->sock->sk->sk_family;
}

/* Find the geneve device bound to (remote IPv4 address, VNI) on this
 * socket; called under RCU from the receive path.
 */
static struct geneve_dev *geneve_lookup(struct geneve_sock *gs,
					__be32 addr, u8 vni[])
{
	struct hlist_head *vni_list_head;
	struct geneve_dev_node *node;
	__u32 hash;

	/* Find the device for this VNI */
	hash = geneve_net_vni_hash(vni);
	vni_list_head = &gs->vni_list[hash];
	hlist_for_each_entry_rcu(node, vni_list_head, hlist) {
		if (eq_tun_id_and_vni((u8 *)&node->geneve->cfg.info.key.tun_id, vni) &&
		    addr == node->geneve->cfg.info.key.u.ipv4.dst)
			return node->geneve;
	}
	return NULL;
}

#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 flavour of geneve_lookup(). */
static struct geneve_dev *geneve6_lookup(struct geneve_sock *gs,
					 struct in6_addr addr6, u8 vni[])
{
	struct hlist_head *vni_list_head;
	struct geneve_dev_node *node;
	__u32 hash;

	/* Find the device for this VNI */
	hash = geneve_net_vni_hash(vni);
	vni_list_head = &gs->vni_list[hash];
	hlist_for_each_entry_rcu(node, vni_list_head, hlist) {
		if (eq_tun_id_and_vni((u8 *)&node->geneve->cfg.info.key.tun_id, vni) &&
		    ipv6_addr_equal(&addr6, &node->geneve->cfg.info.key.u.ipv6.dst))
			return node->geneve;
	}
	return NULL;
}
#endif

/* The geneve header immediately follows the (outer) UDP header. */
static inline struct genevehdr *geneve_hdr(const struct sk_buff *skb)
{
	return (struct genevehdr *)(udp_hdr(skb) + 1);
}

/* Map an incoming skb to the geneve device it targets, keyed on socket
 * family, outer source address and VNI. In collect_md mode a single
 * device (keyed with zero address/VNI) receives all traffic.
 */
static struct geneve_dev *geneve_lookup_skb(struct geneve_sock *gs,
					    struct sk_buff *skb)
{
	static u8 zero_vni[3];
	u8 *vni;

	if (geneve_get_sk_family(gs) == AF_INET) {
		struct iphdr *iph;
		__be32 addr;

		iph = ip_hdr(skb); /* outer IP header... */

		if (gs->collect_md) {
			vni = zero_vni;
			addr = 0;
		} else {
			vni = geneve_hdr(skb)->vni;
			addr = iph->saddr;
		}

		return geneve_lookup(gs, addr, vni);
#if IS_ENABLED(CONFIG_IPV6)
	} else if (geneve_get_sk_family(gs) == AF_INET6) {
		static struct in6_addr zero_addr6;
		struct ipv6hdr *ip6h;
		struct in6_addr addr6;

		ip6h = ipv6_hdr(skb); /* outer IPv6 header... */

		if (gs->collect_md) {
			vni = zero_vni;
			addr6 = zero_addr6;
		} else {
			vni = geneve_hdr(skb)->vni;
			addr6 = ip6h->saddr;
		}

		return geneve6_lookup(gs, addr6, vni);
#endif
	}
	return NULL;
}

/* geneve receive/decap routine */
static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
		      struct sk_buff *skb, const struct genevehdr *gnvh)
{
	struct metadata_dst *tun_dst = NULL;
	unsigned int len;
	int nh, err = 0;
	void *oiph;

	if (ip_tunnel_collect_metadata() || gs->collect_md) {
		IP_TUNNEL_DECLARE_FLAGS(flags) = { };

		__set_bit(IP_TUNNEL_KEY_BIT, flags);
		__assign_bit(IP_TUNNEL_OAM_BIT, flags, gnvh->oam);
		__assign_bit(IP_TUNNEL_CRIT_OPT_BIT, flags, gnvh->critical);

		tun_dst = udp_tun_rx_dst(skb, geneve_get_sk_family(gs), flags,
					 vni_to_tunnel_id(gnvh->vni),
					 gnvh->opt_len * 4);
		if (!tun_dst) {
			dev_dstats_rx_dropped(geneve->dev);
			goto drop;
		}
		/* Update tunnel dst according to Geneve options. */
		ip_tunnel_flags_zero(flags);
		__set_bit(IP_TUNNEL_GENEVE_OPT_BIT, flags);
		ip_tunnel_info_opts_set(&tun_dst->u.tun_info,
					gnvh->options, gnvh->opt_len * 4,
					flags);
	} else {
		/* Drop packets w/ critical options,
		 * since we don't support any...
		 */
		if (gnvh->critical) {
			DEV_STATS_INC(geneve->dev, rx_frame_errors);
			DEV_STATS_INC(geneve->dev, rx_errors);
			goto drop;
		}
	}

	if (tun_dst)
		skb_dst_set(skb, &tun_dst->dst);

	if (gnvh->proto_type == htons(ETH_P_TEB)) {
		skb_reset_mac_header(skb);
		skb->protocol = eth_type_trans(skb, geneve->dev);
		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);

		/* Ignore packet loops (and multicast echo) */
		if (ether_addr_equal(eth_hdr(skb)->h_source,
				     geneve->dev->dev_addr)) {
			DEV_STATS_INC(geneve->dev, rx_errors);
			goto drop;
		}
	} else {
		skb_reset_mac_header(skb);
		skb->dev = geneve->dev;
		skb->pkt_type = PACKET_HOST;
	}

	/* Save offset of outer header relative to skb->head,
	 * because we are going to reset the network header to the inner header
	 * and might change skb->head.
	 */
	nh = skb_network_header(skb) - skb->head;

	skb_reset_network_header(skb);

	if (!pskb_inet_may_pull(skb)) {
		DEV_STATS_INC(geneve->dev, rx_length_errors);
		DEV_STATS_INC(geneve->dev, rx_errors);
		goto drop;
	}

	/* Get the outer header. */
	oiph = skb->head + nh;

	if (geneve_get_sk_family(gs) == AF_INET)
		err = IP_ECN_decapsulate(oiph, skb);
#if IS_ENABLED(CONFIG_IPV6)
	else
		err = IP6_ECN_decapsulate(oiph, skb);
#endif

	if (unlikely(err)) {
		if (log_ecn_error) {
			if (geneve_get_sk_family(gs) == AF_INET)
				net_info_ratelimited("non-ECT from %pI4 "
						     "with TOS=%#x\n",
						     &((struct iphdr *)oiph)->saddr,
						     ((struct iphdr *)oiph)->tos);
#if IS_ENABLED(CONFIG_IPV6)
			else
				net_info_ratelimited("non-ECT from %pI6\n",
						     &((struct ipv6hdr *)oiph)->saddr);
#endif
		}
		if (err > 1) {
			DEV_STATS_INC(geneve->dev, rx_frame_errors);
			DEV_STATS_INC(geneve->dev, rx_errors);
			goto drop;
		}
	}

	/* Skip the additional GRO stage when hints are in use:
	 * skb->encapsulation is set by the hint post-decap processing.
	 */
	len = skb->len;
	if (skb->encapsulation)
		err = netif_rx(skb);
	else
		err = gro_cells_receive(&geneve->gro_cells, skb);
	if (likely(err == NET_RX_SUCCESS))
		dev_dstats_rx_add(geneve->dev, len);

	return;
drop:
	/* Consume bad packet */
	kfree_skb(skb);
}

/* Setup stats when device is created */
static int geneve_init(struct net_device *dev)
{
	struct geneve_dev *geneve = netdev_priv(dev);
	int err;

	err = gro_cells_init(&geneve->gro_cells, dev);
	if (err)
		return err;

	err = dst_cache_init(&geneve->cfg.info.dst_cache, GFP_KERNEL);
	if (err) {
		gro_cells_destroy(&geneve->gro_cells);
		return err;
	}
	netdev_lockdep_set_classes(dev);
	return 0;
}

static void geneve_uninit(struct net_device *dev)
{
	struct geneve_dev *geneve = netdev_priv(dev);

	dst_cache_destroy(&geneve->cfg.info.dst_cache);
	gro_cells_destroy(&geneve->gro_cells);
}

/* Total geneve header length: fixed header plus options. */
static int geneve_hlen(const struct genevehdr *gh)
{
	return sizeof(*gh) + gh->opt_len * 4;
}

/*
 * Look for GRO hint in the geneve options; if not found or does not pass basic
 * sanitization return 0, otherwise the offset WRT the geneve hdr start.
 */
static unsigned int
geneve_opt_gro_hint_off(const struct genevehdr *gh, __be16 *type,
			unsigned int *gh_len)
{
	struct geneve_opt *opt = (void *)(gh + 1);
	unsigned int id, opt_len = gh->opt_len;
	struct geneve_opt_gro_hint *gro_hint;

	/* opt_len and opt->length are both in 4-byte words; the hint
	 * option is GENEVE_OPT_GRO_HINT_SIZE bytes, i.e. 2 words.
	 */
	while (opt_len >= (GENEVE_OPT_GRO_HINT_SIZE >> 2)) {
		if (opt->opt_class == htons(GENEVE_OPT_NETDEV_CLASS) &&
		    opt->type == GENEVE_OPT_GRO_HINT_TYPE &&
		    opt->length == GENEVE_OPT_GRO_HINT_LEN)
			goto found;

		/* check for bad opt len */
		if (opt->length + 1 >= opt_len)
			return 0;

		/* next opt */
		opt_len -= opt->length + 1;
		opt = ((void *)opt) + ((opt->length + 1) << 2);
	}
	return 0;

found:
	gro_hint = (struct geneve_opt_gro_hint *)opt->opt_data;

	/*
	 * Sanitize the hinted hdrs: the nested transport is UDP and must fit
	 * the overall hinted hdr size.
	 */
	if (gro_hint->nested_tp_offset + sizeof(struct udphdr) >
	    gro_hint->nested_hdr_len)
		return 0;

	/* ... and a full network header must fit before the transport. */
	if (gro_hint->nested_nh_offset +
	    (gro_hint->nested_is_v6 ? sizeof(struct ipv6hdr) :
	     sizeof(struct iphdr)) >
	    gro_hint->nested_tp_offset)
		return 0;

	/* Allow only supported L2. */
	id = gro_hint->inner_proto_id;
	if (id >= ARRAY_SIZE(proto_id_map))
		return 0;

	*type = proto_id_map[id];
	*gh_len += gro_hint->nested_hdr_len;

	return (void *)gro_hint - (void *)gh;
}

/* Return the hint option payload at the given offset from the geneve hdr. */
static const struct geneve_opt_gro_hint *
geneve_opt_gro_hint(const struct genevehdr *gh, unsigned int hint_off)
{
	return (const struct geneve_opt_gro_hint *)((void *)gh + hint_off);
}

/* As geneve_opt_gro_hint_off(), but only for sockets that opted in. */
static unsigned int
geneve_sk_gro_hint_off(const struct sock *sk, const struct genevehdr *gh,
		       __be16 *type, unsigned int *gh_len)
{
	const struct geneve_sock *gs = rcu_dereference_sk_user_data(sk);

	if (!gs || !gs->gro_hint)
		return 0;
	return geneve_opt_gro_hint_off(gh, type, gh_len);
}

/* Validate the packet headers pointed by data WRT the provided hint */
static bool
geneve_opt_gro_hint_validate(void *data,
			     const struct geneve_opt_gro_hint *gro_hint)
{
	void *nested_nh = data + gro_hint->nested_nh_offset;
	struct iphdr *iph;

	if (gro_hint->nested_is_v6) {
		struct ipv6hdr *ipv6h = nested_nh;
		struct ipv6_opt_hdr *opth;
		int offset, len;

		if (ipv6h->nexthdr == IPPROTO_UDP)
			return true;

		/* Walk the extension header chain looking for UDP, never
		 * stepping past the hinted transport offset.
		 */
		offset = sizeof(*ipv6h) + gro_hint->nested_nh_offset;
		while (offset + sizeof(*opth) <= gro_hint->nested_tp_offset) {
			opth = data + offset;

			len = ipv6_optlen(opth);
			if (len + offset > gro_hint->nested_tp_offset)
				return false;
			if (opth->nexthdr == IPPROTO_UDP)
				return true;

			offset += len;
		}
		return false;
	}

	/* IPv4: require no options (0x45), no fragmentation, UDP payload
	 * and a valid header checksum.
	 */
	iph = nested_nh;
	if (*(u8 *)iph != 0x45 || ip_is_fragment(iph) ||
	    iph->protocol != IPPROTO_UDP || ip_fast_csum((u8 *)iph, 5))
		return false;

	return true;
}

/*
 * Validate the skb headers following the specified geneve hdr vs the
 * provided hint, including nested L4 checksum.
525 * The caller already ensured that the relevant amount of data is available 526 * in the linear part. 527 */ 528 static bool 529 geneve_opt_gro_hint_validate_csum(const struct sk_buff *skb, 530 const struct genevehdr *gh, 531 const struct geneve_opt_gro_hint *gro_hint) 532 { 533 unsigned int plen, gh_len = geneve_hlen(gh); 534 void *nested = (void *)gh + gh_len; 535 struct udphdr *nested_uh; 536 unsigned int nested_len; 537 struct ipv6hdr *ipv6h; 538 struct iphdr *iph; 539 __wsum csum, psum; 540 541 if (!geneve_opt_gro_hint_validate(nested, gro_hint)) 542 return false; 543 544 /* Use GRO hints with nested csum only if the outer header has csum. */ 545 nested_uh = nested + gro_hint->nested_tp_offset; 546 if (!nested_uh->check || skb->ip_summed == CHECKSUM_PARTIAL) 547 return true; 548 549 if (!NAPI_GRO_CB(skb)->csum_valid) 550 return false; 551 552 /* Compute the complete checksum up to the nested transport. */ 553 plen = gh_len + gro_hint->nested_tp_offset; 554 csum = csum_sub(NAPI_GRO_CB(skb)->csum, csum_partial(gh, plen, 0)); 555 nested_len = skb_gro_len(skb) - plen; 556 557 /* Compute the nested pseudo header csum. */ 558 ipv6h = nested + gro_hint->nested_nh_offset; 559 iph = (struct iphdr *)ipv6h; 560 psum = gro_hint->nested_is_v6 ? 
561 ~csum_unfold(csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 562 nested_len, IPPROTO_UDP, 0)) : 563 csum_tcpudp_nofold(iph->saddr, iph->daddr, 564 nested_len, IPPROTO_UDP, 0); 565 566 return !csum_fold(csum_add(psum, csum)); 567 } 568 569 static int geneve_post_decap_hint(const struct sock *sk, struct sk_buff *skb, 570 unsigned int gh_len, 571 struct genevehdr **geneveh) 572 { 573 const struct geneve_opt_gro_hint *gro_hint; 574 unsigned int len, total_len, hint_off; 575 struct ipv6hdr *ipv6h; 576 struct iphdr *iph; 577 struct udphdr *uh; 578 __be16 p; 579 580 hint_off = geneve_sk_gro_hint_off(sk, *geneveh, &p, &len); 581 if (!hint_off) 582 return 0; 583 584 if (!skb_is_gso(skb)) 585 return 0; 586 587 gro_hint = geneve_opt_gro_hint(*geneveh, hint_off); 588 if (unlikely(!pskb_may_pull(skb, gro_hint->nested_hdr_len))) 589 return -ENOMEM; 590 591 *geneveh = geneve_hdr(skb); 592 gro_hint = geneve_opt_gro_hint(*geneveh, hint_off); 593 594 /* 595 * Validate hints from untrusted source before accessing 596 * the headers; csum will be checked later by the nested 597 * protocol rx path. 598 */ 599 if (unlikely(skb_shinfo(skb)->gso_type & SKB_GSO_DODGY && 600 !geneve_opt_gro_hint_validate(skb->data, gro_hint))) 601 return -EINVAL; 602 603 ipv6h = (void *)skb->data + gro_hint->nested_nh_offset; 604 iph = (struct iphdr *)ipv6h; 605 total_len = skb->len - gro_hint->nested_nh_offset; 606 if (total_len > GRO_LEGACY_MAX_SIZE) 607 return -E2BIG; 608 609 /* 610 * After stripping the outer encap, the packet still carries a 611 * tunnel encapsulation: the nested one. 612 */ 613 skb->encapsulation = 1; 614 615 /* GSO expect a valid transpor header, move it to the current one. */ 616 skb_set_transport_header(skb, gro_hint->nested_tp_offset); 617 618 /* Adjust the nested IP{6} hdr to actual GSO len. 
*/ 619 if (gro_hint->nested_is_v6) { 620 ipv6h->payload_len = htons(total_len - sizeof(*ipv6h)); 621 } else { 622 __be16 old_len = iph->tot_len; 623 624 iph->tot_len = htons(total_len); 625 626 /* For IPv4 additionally adjust the nested csum. */ 627 csum_replace2(&iph->check, old_len, iph->tot_len); 628 ip_send_check(iph); 629 } 630 631 /* Adjust the nested UDP header len and checksum. */ 632 uh = udp_hdr(skb); 633 uh->len = htons(skb->len - gro_hint->nested_tp_offset); 634 if (uh->check) { 635 len = skb->len - gro_hint->nested_nh_offset; 636 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM; 637 if (gro_hint->nested_is_v6) 638 uh->check = ~udp_v6_check(len, &ipv6h->saddr, 639 &ipv6h->daddr, 0); 640 else 641 uh->check = ~udp_v4_check(len, iph->saddr, 642 iph->daddr, 0); 643 } else { 644 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL; 645 } 646 return 0; 647 } 648 649 /* Callback from net/ipv4/udp.c to receive packets */ 650 static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb) 651 { 652 struct genevehdr *geneveh; 653 struct geneve_dev *geneve; 654 struct geneve_sock *gs; 655 __be16 inner_proto; 656 int opts_len; 657 658 /* Need UDP and Geneve header to be present */ 659 if (unlikely(!pskb_may_pull(skb, GENEVE_BASE_HLEN))) 660 goto drop; 661 662 /* Return packets with reserved bits set */ 663 geneveh = geneve_hdr(skb); 664 if (unlikely(geneveh->ver != GENEVE_VER)) 665 goto drop; 666 667 gs = rcu_dereference_sk_user_data(sk); 668 if (!gs) 669 goto drop; 670 671 geneve = geneve_lookup_skb(gs, skb); 672 if (!geneve) 673 goto drop; 674 675 inner_proto = geneveh->proto_type; 676 677 if (unlikely((!geneve->cfg.inner_proto_inherit && 678 inner_proto != htons(ETH_P_TEB)))) { 679 dev_dstats_rx_dropped(geneve->dev); 680 goto drop; 681 } 682 683 opts_len = geneveh->opt_len * 4; 684 if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len, inner_proto, 685 !net_eq(geneve->net, dev_net(geneve->dev)))) { 686 dev_dstats_rx_dropped(geneve->dev); 687 goto drop; 
688 } 689 690 /* 691 * After hint processing, the transport header points to the inner one 692 * and we can't use anymore on geneve_hdr(). 693 */ 694 geneveh = geneve_hdr(skb); 695 if (geneve_post_decap_hint(sk, skb, sizeof(struct genevehdr) + 696 opts_len, &geneveh)) { 697 DEV_STATS_INC(geneve->dev, rx_errors); 698 goto drop; 699 } 700 701 geneve_rx(geneve, gs, skb, geneveh); 702 return 0; 703 704 drop: 705 /* Consume bad packet */ 706 kfree_skb(skb); 707 return 0; 708 } 709 710 /* Callback from net/ipv{4,6}/udp.c to check that we have a tunnel for errors */ 711 static int geneve_udp_encap_err_lookup(struct sock *sk, struct sk_buff *skb) 712 { 713 struct genevehdr *geneveh; 714 struct geneve_sock *gs; 715 u8 zero_vni[3] = { 0 }; 716 u8 *vni = zero_vni; 717 718 if (!pskb_may_pull(skb, skb_transport_offset(skb) + GENEVE_BASE_HLEN)) 719 return -EINVAL; 720 721 geneveh = geneve_hdr(skb); 722 if (geneveh->ver != GENEVE_VER) 723 return -EINVAL; 724 725 if (geneveh->proto_type != htons(ETH_P_TEB)) 726 return -EINVAL; 727 728 gs = rcu_dereference_sk_user_data(sk); 729 if (!gs) 730 return -ENOENT; 731 732 if (geneve_get_sk_family(gs) == AF_INET) { 733 struct iphdr *iph = ip_hdr(skb); 734 __be32 addr4 = 0; 735 736 if (!gs->collect_md) { 737 vni = geneve_hdr(skb)->vni; 738 addr4 = iph->daddr; 739 } 740 741 return geneve_lookup(gs, addr4, vni) ? 0 : -ENOENT; 742 } 743 744 #if IS_ENABLED(CONFIG_IPV6) 745 if (geneve_get_sk_family(gs) == AF_INET6) { 746 struct ipv6hdr *ip6h = ipv6_hdr(skb); 747 struct in6_addr addr6; 748 749 memset(&addr6, 0, sizeof(struct in6_addr)); 750 751 if (!gs->collect_md) { 752 vni = geneve_hdr(skb)->vni; 753 addr6 = ip6h->daddr; 754 } 755 756 return geneve6_lookup(gs, addr6, vni) ? 
0 : -ENOENT; 757 } 758 #endif 759 760 return -EPFNOSUPPORT; 761 } 762 763 static struct socket *geneve_create_sock(struct net *net, bool ipv6, 764 __be16 port, bool ipv6_rx_csum) 765 { 766 struct socket *sock; 767 struct udp_port_cfg udp_conf; 768 int err; 769 770 memset(&udp_conf, 0, sizeof(udp_conf)); 771 772 if (ipv6) { 773 udp_conf.family = AF_INET6; 774 udp_conf.ipv6_v6only = 1; 775 udp_conf.use_udp6_rx_checksums = ipv6_rx_csum; 776 } else { 777 udp_conf.family = AF_INET; 778 udp_conf.local_ip.s_addr = htonl(INADDR_ANY); 779 } 780 781 udp_conf.local_udp_port = port; 782 783 /* Open UDP socket */ 784 err = udp_sock_create(net, &udp_conf, &sock); 785 if (err < 0) 786 return ERR_PTR(err); 787 788 udp_allow_gso(sock->sk); 789 return sock; 790 } 791 792 static bool geneve_hdr_match(struct sk_buff *skb, 793 const struct genevehdr *gh, 794 const struct genevehdr *gh2, 795 unsigned int hint_off) 796 { 797 const struct geneve_opt_gro_hint *gro_hint; 798 void *nested, *nested2, *nh, *nh2; 799 struct udphdr *udp, *udp2; 800 unsigned int gh_len; 801 802 /* Match the geneve hdr and options */ 803 if (gh->opt_len != gh2->opt_len) 804 return false; 805 806 gh_len = geneve_hlen(gh); 807 if (memcmp(gh, gh2, gh_len)) 808 return false; 809 810 if (!hint_off) 811 return true; 812 813 /* 814 * When gro is present consider the nested headers as part 815 * of the geneve options 816 */ 817 nested = (void *)gh + gh_len; 818 nested2 = (void *)gh2 + gh_len; 819 gro_hint = geneve_opt_gro_hint(gh, hint_off); 820 if (!memcmp(nested, nested2, gro_hint->nested_hdr_len)) 821 return true; 822 823 /* 824 * The nested headers differ; the packets can still belong to 825 * the same flow when IPs/proto/ports match; if so flushing is 826 * required. 
827 */ 828 nh = nested + gro_hint->nested_nh_offset; 829 nh2 = nested2 + gro_hint->nested_nh_offset; 830 if (gro_hint->nested_is_v6) { 831 struct ipv6hdr *iph = nh, *iph2 = nh2; 832 unsigned int nested_nlen; 833 __be32 first_word; 834 835 first_word = *(__be32 *)iph ^ *(__be32 *)iph2; 836 if ((first_word & htonl(0xF00FFFFF)) || 837 !ipv6_addr_equal(&iph->saddr, &iph2->saddr) || 838 !ipv6_addr_equal(&iph->daddr, &iph2->daddr) || 839 iph->nexthdr != iph2->nexthdr) 840 return false; 841 842 nested_nlen = gro_hint->nested_tp_offset - 843 gro_hint->nested_nh_offset; 844 if (nested_nlen > sizeof(struct ipv6hdr) && 845 (memcmp(iph + 1, iph2 + 1, 846 nested_nlen - sizeof(struct ipv6hdr)))) 847 return false; 848 } else { 849 struct iphdr *iph = nh, *iph2 = nh2; 850 851 if ((iph->protocol ^ iph2->protocol) | 852 ((__force u32)iph->saddr ^ (__force u32)iph2->saddr) | 853 ((__force u32)iph->daddr ^ (__force u32)iph2->daddr)) 854 return false; 855 } 856 857 udp = nested + gro_hint->nested_tp_offset; 858 udp2 = nested2 + gro_hint->nested_tp_offset; 859 if (udp->source != udp2->source || udp->dest != udp2->dest || 860 udp->check != udp2->check) 861 return false; 862 863 NAPI_GRO_CB(skb)->flush = 1; 864 return true; 865 } 866 867 static struct sk_buff *geneve_gro_receive(struct sock *sk, 868 struct list_head *head, 869 struct sk_buff *skb) 870 { 871 unsigned int hlen, gh_len, off_gnv, hint_off; 872 const struct geneve_opt_gro_hint *gro_hint; 873 const struct packet_offload *ptype; 874 struct genevehdr *gh, *gh2; 875 struct sk_buff *pp = NULL; 876 struct sk_buff *p; 877 int flush = 1; 878 __be16 type; 879 880 off_gnv = skb_gro_offset(skb); 881 hlen = off_gnv + sizeof(*gh); 882 gh = skb_gro_header(skb, hlen, off_gnv); 883 if (unlikely(!gh)) 884 goto out; 885 886 if (gh->ver != GENEVE_VER || gh->oam) 887 goto out; 888 gh_len = geneve_hlen(gh); 889 type = gh->proto_type; 890 891 hlen = off_gnv + gh_len; 892 if (!skb_gro_may_pull(skb, hlen)) { 893 gh = skb_gro_header_slow(skb, hlen, 
off_gnv); 894 if (unlikely(!gh)) 895 goto out; 896 } 897 898 /* The GRO hint/nested hdr could use a different ethernet type. */ 899 hint_off = geneve_sk_gro_hint_off(sk, gh, &type, &gh_len); 900 if (hint_off) { 901 902 /* 903 * If the hint is present, and nested hdr validation fails, do 904 * not attempt plain GRO: it will ignore inner hdrs and cause 905 * OoO. 906 */ 907 gh = skb_gro_header(skb, off_gnv + gh_len, off_gnv); 908 if (unlikely(!gh)) 909 goto out; 910 911 gro_hint = geneve_opt_gro_hint(gh, hint_off); 912 if (!geneve_opt_gro_hint_validate_csum(skb, gh, gro_hint)) 913 goto out; 914 } 915 916 list_for_each_entry(p, head, list) { 917 if (!NAPI_GRO_CB(p)->same_flow) 918 continue; 919 920 gh2 = (struct genevehdr *)(p->data + off_gnv); 921 if (!geneve_hdr_match(skb, gh, gh2, hint_off)) { 922 NAPI_GRO_CB(p)->same_flow = 0; 923 continue; 924 } 925 } 926 927 skb_gro_pull(skb, gh_len); 928 skb_gro_postpull_rcsum(skb, gh, gh_len); 929 if (likely(type == htons(ETH_P_TEB))) 930 return call_gro_receive(eth_gro_receive, head, skb); 931 932 ptype = gro_find_receive_by_type(type); 933 if (!ptype) 934 goto out; 935 936 pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb); 937 flush = 0; 938 939 out: 940 skb_gro_flush_final(skb, pp, flush); 941 942 return pp; 943 } 944 945 static int geneve_gro_complete(struct sock *sk, struct sk_buff *skb, 946 int nhoff) 947 { 948 struct genevehdr *gh; 949 struct packet_offload *ptype; 950 __be16 type; 951 int gh_len; 952 int err = -ENOSYS; 953 954 gh = (struct genevehdr *)(skb->data + nhoff); 955 gh_len = geneve_hlen(gh); 956 type = gh->proto_type; 957 geneve_opt_gro_hint_off(gh, &type, &gh_len); 958 959 /* since skb->encapsulation is set, eth_gro_complete() sets the inner mac header */ 960 if (likely(type == htons(ETH_P_TEB))) 961 return eth_gro_complete(skb, nhoff + gh_len); 962 963 ptype = gro_find_complete_by_type(type); 964 if (ptype) 965 err = ptype->callbacks.gro_complete(skb, nhoff + gh_len); 966 967 
skb_set_inner_mac_header(skb, nhoff + gh_len); 968 969 return err; 970 } 971 972 /* Create new listen socket if needed */ 973 static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port, 974 bool ipv6, bool ipv6_rx_csum) 975 { 976 struct geneve_net *gn = net_generic(net, geneve_net_id); 977 struct geneve_sock *gs; 978 struct socket *sock; 979 struct udp_tunnel_sock_cfg tunnel_cfg; 980 int h; 981 982 gs = kzalloc_obj(*gs); 983 if (!gs) 984 return ERR_PTR(-ENOMEM); 985 986 sock = geneve_create_sock(net, ipv6, port, ipv6_rx_csum); 987 if (IS_ERR(sock)) { 988 kfree(gs); 989 return ERR_CAST(sock); 990 } 991 992 gs->sock = sock; 993 gs->refcnt = 1; 994 for (h = 0; h < VNI_HASH_SIZE; ++h) 995 INIT_HLIST_HEAD(&gs->vni_list[h]); 996 997 /* Initialize the geneve udp offloads structure */ 998 udp_tunnel_notify_add_rx_port(gs->sock, UDP_TUNNEL_TYPE_GENEVE); 999 1000 /* Mark socket as an encapsulation socket */ 1001 memset(&tunnel_cfg, 0, sizeof(tunnel_cfg)); 1002 tunnel_cfg.sk_user_data = gs; 1003 tunnel_cfg.encap_type = 1; 1004 tunnel_cfg.gro_receive = geneve_gro_receive; 1005 tunnel_cfg.gro_complete = geneve_gro_complete; 1006 tunnel_cfg.encap_rcv = geneve_udp_encap_recv; 1007 tunnel_cfg.encap_err_lookup = geneve_udp_encap_err_lookup; 1008 tunnel_cfg.encap_destroy = NULL; 1009 setup_udp_tunnel_sock(net, sock, &tunnel_cfg); 1010 list_add(&gs->list, &gn->sock_list); 1011 return gs; 1012 } 1013 1014 static void __geneve_sock_release(struct geneve_sock *gs) 1015 { 1016 if (!gs || --gs->refcnt) 1017 return; 1018 1019 list_del(&gs->list); 1020 udp_tunnel_notify_del_rx_port(gs->sock, UDP_TUNNEL_TYPE_GENEVE); 1021 udp_tunnel_sock_release(gs->sock); 1022 kfree_rcu(gs, rcu); 1023 } 1024 1025 static void geneve_sock_release(struct geneve_dev *geneve) 1026 { 1027 struct geneve_sock *gs4 = rtnl_dereference(geneve->sock4); 1028 #if IS_ENABLED(CONFIG_IPV6) 1029 struct geneve_sock *gs6 = rtnl_dereference(geneve->sock6); 1030 1031 rcu_assign_pointer(geneve->sock6, NULL); 
1032 #endif 1033 1034 rcu_assign_pointer(geneve->sock4, NULL); 1035 synchronize_net(); 1036 1037 __geneve_sock_release(gs4); 1038 #if IS_ENABLED(CONFIG_IPV6) 1039 __geneve_sock_release(gs6); 1040 #endif 1041 } 1042 1043 static struct geneve_sock *geneve_find_sock(struct geneve_net *gn, 1044 sa_family_t family, 1045 __be16 dst_port, 1046 bool gro_hint) 1047 { 1048 struct geneve_sock *gs; 1049 1050 list_for_each_entry(gs, &gn->sock_list, list) { 1051 if (inet_sk(gs->sock->sk)->inet_sport == dst_port && 1052 geneve_get_sk_family(gs) == family && 1053 gs->gro_hint == gro_hint) { 1054 return gs; 1055 } 1056 } 1057 return NULL; 1058 } 1059 1060 static int geneve_sock_add(struct geneve_dev *geneve, bool ipv6) 1061 { 1062 struct net *net = geneve->net; 1063 struct geneve_net *gn = net_generic(net, geneve_net_id); 1064 bool gro_hint = geneve->cfg.gro_hint; 1065 struct geneve_dev_node *node; 1066 struct geneve_sock *gs; 1067 __u8 vni[3]; 1068 __u32 hash; 1069 1070 gs = geneve_find_sock(gn, ipv6 ? 
AF_INET6 : AF_INET, 1071 geneve->cfg.info.key.tp_dst, gro_hint); 1072 if (gs) { 1073 gs->refcnt++; 1074 goto out; 1075 } 1076 1077 gs = geneve_socket_create(net, geneve->cfg.info.key.tp_dst, ipv6, 1078 geneve->cfg.use_udp6_rx_checksums); 1079 if (IS_ERR(gs)) 1080 return PTR_ERR(gs); 1081 1082 out: 1083 gs->collect_md = geneve->cfg.collect_md; 1084 gs->gro_hint = gro_hint; 1085 #if IS_ENABLED(CONFIG_IPV6) 1086 if (ipv6) { 1087 rcu_assign_pointer(geneve->sock6, gs); 1088 node = &geneve->hlist6; 1089 } else 1090 #endif 1091 { 1092 rcu_assign_pointer(geneve->sock4, gs); 1093 node = &geneve->hlist4; 1094 } 1095 node->geneve = geneve; 1096 1097 tunnel_id_to_vni(geneve->cfg.info.key.tun_id, vni); 1098 hash = geneve_net_vni_hash(vni); 1099 hlist_add_head_rcu(&node->hlist, &gs->vni_list[hash]); 1100 return 0; 1101 } 1102 1103 static int geneve_open(struct net_device *dev) 1104 { 1105 struct geneve_dev *geneve = netdev_priv(dev); 1106 bool metadata = geneve->cfg.collect_md; 1107 bool ipv4, ipv6; 1108 int ret = 0; 1109 1110 ipv6 = geneve->cfg.info.mode & IP_TUNNEL_INFO_IPV6 || metadata; 1111 ipv4 = !ipv6 || metadata; 1112 #if IS_ENABLED(CONFIG_IPV6) 1113 if (ipv6) { 1114 ret = geneve_sock_add(geneve, true); 1115 if (ret < 0 && ret != -EAFNOSUPPORT) 1116 ipv4 = false; 1117 } 1118 #endif 1119 if (ipv4) 1120 ret = geneve_sock_add(geneve, false); 1121 if (ret < 0) 1122 geneve_sock_release(geneve); 1123 1124 return ret; 1125 } 1126 1127 static int geneve_stop(struct net_device *dev) 1128 { 1129 struct geneve_dev *geneve = netdev_priv(dev); 1130 1131 hlist_del_init_rcu(&geneve->hlist4.hlist); 1132 #if IS_ENABLED(CONFIG_IPV6) 1133 hlist_del_init_rcu(&geneve->hlist6.hlist); 1134 #endif 1135 geneve_sock_release(geneve); 1136 return 0; 1137 } 1138 1139 static void geneve_build_header(struct genevehdr *geneveh, 1140 const struct ip_tunnel_info *info, 1141 __be16 inner_proto) 1142 { 1143 geneveh->ver = GENEVE_VER; 1144 geneveh->opt_len = info->options_len / 4; 1145 geneveh->oam = 
test_bit(IP_TUNNEL_OAM_BIT, info->key.tun_flags);
	geneveh->critical = test_bit(IP_TUNNEL_CRIT_OPT_BIT,
				     info->key.tun_flags);
	geneveh->rsvd1 = 0;
	tunnel_id_to_vni(info->key.tun_id, geneveh->vni);
	geneveh->proto_type = inner_proto;
	geneveh->rsvd2 = 0;

	/* Copy any geneve options carried in the tunnel metadata right
	 * after the fixed header; opt_len was set from options_len above.
	 */
	if (test_bit(IP_TUNNEL_GENEVE_OPT_BIT, info->key.tun_flags))
		ip_tunnel_info_opts_get(geneveh->options, info);
}

/* Compute the GRO hint option for @skb and stash it in the geneve skb
 * control block.  Returns the option size in bytes to be reserved in the
 * geneve header, or 0 when no hint is emitted.
 *
 * The hint is only built for double-encapsulated packets whose nested
 * payload is UDP, and only when the device was configured with gro_hint.
 * NOTE(review): the receive-side consumer of this option is outside this
 * chunk — semantics assumed from the option layout only.
 */
static int geneve_build_gro_hint_opt(const struct geneve_dev *geneve,
				     struct sk_buff *skb)
{
	struct geneve_skb_cb *cb = GENEVE_SKB_CB(skb);
	struct geneve_opt_gro_hint *hint;
	unsigned int nhlen;
	bool nested_is_v6;
	int id;

	BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct geneve_skb_cb));
	cb->gro_hint_len = 0;

	/* Try to add the GRO hint only in case of double encap. */
	if (!geneve->cfg.gro_hint || !skb->encapsulation)
		return 0;

	/*
	 * The nested headers must fit the geneve opt len fields and the
	 * nested encap must carry a nested transport (UDP) header.
	 */
	nhlen = skb_inner_mac_header(skb) - skb->data;
	if (nhlen > 255 || !skb_transport_header_was_set(skb) ||
	    skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    (skb_transport_offset(skb) + sizeof(struct udphdr) > nhlen))
		return 0;

	/* Only protocols in proto_id_map (TEB/IPv6/IPv4) can be encoded. */
	id = proto_to_id(skb->inner_protocol);
	if (id < 0)
		return 0;

	nested_is_v6 = skb->protocol == htons(ETH_P_IPV6);
	if (nested_is_v6) {
		int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
		u8 proto = ipv6_hdr(skb)->nexthdr;
		__be16 foff;

		/* Walk past extension headers; bail unless plain UDP. */
		if (ipv6_skip_exthdr(skb, start, &proto, &foff) < 0 ||
		    proto != IPPROTO_UDP)
			return 0;
	} else {
		if (ip_hdr(skb)->protocol != IPPROTO_UDP)
			return 0;
	}

	hint = &cb->gro_hint;
	memset(hint, 0, sizeof(*hint));
	hint->inner_proto_id = id;
	hint->nested_is_v6 = skb->protocol == htons(ETH_P_IPV6);
	hint->nested_nh_offset = skb_network_offset(skb);
	hint->nested_tp_offset = skb_transport_offset(skb);
	hint->nested_hdr_len = nhlen;
	cb->gro_hint_len = GENEVE_OPT_GRO_HINT_SIZE;
	return GENEVE_OPT_GRO_HINT_SIZE;
}

/* Append the GRO hint option after the @opt_size bytes of metadata
 * options already present in @gnvh, and grow opt_len accordingly.
 */
static void geneve_put_gro_hint_opt(struct genevehdr *gnvh, int opt_size,
				    const struct geneve_opt_gro_hint *hint)
{
	struct geneve_opt *gro_opt;

	/* geneve_build_header() did not take the GRO hint into account;
	 * opt_len is expressed in 4-byte words.
	 */
	gnvh->opt_len = (opt_size + GENEVE_OPT_GRO_HINT_SIZE) >> 2;

	gro_opt = (void *)(gnvh + 1) + opt_size;
	memset(gro_opt, 0, sizeof(*gro_opt));

	gro_opt->opt_class = htons(GENEVE_OPT_NETDEV_CLASS);
	gro_opt->type = GENEVE_OPT_GRO_HINT_TYPE;
	gro_opt->length = GENEVE_OPT_GRO_HINT_LEN;
	memcpy(gro_opt + 1, hint, sizeof(*hint));
}

/* Prepend the geneve header (plus options) to @skb and prepare it for
 * UDP tunnel transmission.  Releases @dst on error.
 */
static int geneve_build_skb(struct dst_entry *dst, struct sk_buff *skb,
			    const struct ip_tunnel_info *info,
			    const struct geneve_dev *geneve, int ip_hdr_len)
{
	bool udp_sum = test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags);
	bool inner_proto_inherit = geneve->cfg.inner_proto_inherit;
	bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
	struct geneve_skb_cb *cb = GENEVE_SKB_CB(skb);
	struct genevehdr *gnvh;
	__be16 inner_proto;
	bool double_encap;
	int min_headroom;
	int opt_size;
	int err;

	skb_reset_mac_header(skb);
	skb_scrub_packet(skb, xnet);

	/* cb->gro_hint_len was set by geneve_build_gro_hint_opt() when the
	 * xmit path did its PMTU check.
	 */
	opt_size = info->options_len + cb->gro_hint_len;
	min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len +
		       GENEVE_BASE_HLEN + opt_size + ip_hdr_len;
	err = skb_cow_head(skb, min_headroom);
	if (unlikely(err))
		goto free_dst;

	double_encap = udp_tunnel_handle_partial(skb);
	err = udp_tunnel_handle_offloads(skb, udp_sum);
	if (err)
		goto free_dst;

	gnvh = __skb_push(skb, sizeof(*gnvh) + opt_size);
	inner_proto = inner_proto_inherit ?
skb->protocol : htons(ETH_P_TEB);
	geneve_build_header(gnvh, info, inner_proto);

	/* The GRO hint, if any, sits right after the metadata options. */
	if (cb->gro_hint_len)
		geneve_put_gro_hint_opt(gnvh, info->options_len, &cb->gro_hint);

	udp_tunnel_set_inner_protocol(skb, double_encap, inner_proto);
	return 0;

free_dst:
	dst_release(dst);
	return err;
}

/* Return the DSCP/TOS value for the outer header.  A configured value of
 * 1 on a non-metadata device means "inherit from the inner IP header";
 * in that case the dst cache must be bypassed as the result is
 * per-packet.
 */
static u8 geneve_get_dsfield(struct sk_buff *skb, struct net_device *dev,
			     const struct ip_tunnel_info *info,
			     bool *use_cache)
{
	struct geneve_dev *geneve = netdev_priv(dev);
	u8 dsfield;

	dsfield = info->key.tos;
	if (dsfield == 1 && !geneve->cfg.collect_md) {
		dsfield = ip_tunnel_get_dsfield(ip_hdr(skb), skb);
		*use_cache = false;
	}

	return dsfield;
}

/* IPv4 transmit path: route the packet, check the path MTU (including the
 * optional GRO hint option), build the geneve header and hand the skb to
 * the UDP tunnel layer.  Called under rcu_read_lock() from geneve_xmit().
 */
static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
			   struct geneve_dev *geneve,
			   const struct ip_tunnel_info *info)
{
	struct geneve_sock *gs4 = rcu_dereference(geneve->sock4);
	const struct ip_tunnel_key *key = &info->key;
	struct rtable *rt;
	bool use_cache;
	__u8 tos, ttl;
	__be16 df = 0;
	__be32 saddr;
	__be16 sport;
	int err;

	if (skb_vlan_inet_prepare(skb, geneve->cfg.inner_proto_inherit))
		return -EINVAL;

	/* Socket may be gone if the device is being quiesced/closed. */
	if (!gs4)
		return -EIO;

	use_cache = ip_tunnel_dst_cache_usable(skb, info);
	tos = geneve_get_dsfield(skb, dev, info, &use_cache);
	sport = udp_flow_src_port(geneve->net, skb,
				  geneve->cfg.port_min,
				  geneve->cfg.port_max, true);

	rt = udp_tunnel_dst_lookup(skb, dev, geneve->net, 0, &saddr,
				   &info->key,
				   sport, geneve->cfg.info.key.tp_dst, tos,
				   use_cache ?
				   (struct dst_cache *)&info->dst_cache : NULL);
	if (IS_ERR(rt))
		return PTR_ERR(rt);

	/* geneve_build_gro_hint_opt() also records the hint in skb->cb for
	 * geneve_build_skb() below; its return value grows the encap size
	 * used for the PMTU check.
	 */
	err = skb_tunnel_check_pmtu(skb, &rt->dst,
				    GENEVE_IPV4_HLEN + info->options_len +
				    geneve_build_gro_hint_opt(geneve, skb),
				    netif_is_any_bridge_port(dev));
	if (err < 0) {
		dst_release(&rt->dst);
		return err;
	} else if (err) {
		/* PMTU exceeded: skb now carries the reply generated by
		 * skb_tunnel_check_pmtu(); bounce it back up the stack.
		 * Note: this local deliberately shadows the function
		 * parameter - it is the skb's own metadata dst.
		 */
		struct ip_tunnel_info *info;

		info = skb_tunnel_info(skb);
		if (info) {
			struct ip_tunnel_info *unclone;

			unclone = skb_tunnel_info_unclone(skb);
			if (unlikely(!unclone)) {
				dst_release(&rt->dst);
				return -ENOMEM;
			}

			/* Swap the tunnel endpoints for the reply. */
			unclone->key.u.ipv4.dst = saddr;
			unclone->key.u.ipv4.src = info->key.u.ipv4.dst;
		}

		if (!pskb_may_pull(skb, ETH_HLEN)) {
			dst_release(&rt->dst);
			return -EINVAL;
		}

		skb->protocol = eth_type_trans(skb, geneve->dev);
		__netif_rx(skb);
		dst_release(&rt->dst);
		return -EMSGSIZE;
	}

	tos = ip_tunnel_ecn_encap(tos, ip_hdr(skb), skb);
	if (geneve->cfg.collect_md) {
		ttl = key->ttl;

		df = test_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, key->tun_flags) ?
		     htons(IP_DF) : 0;
	} else {
		if (geneve->cfg.ttl_inherit)
			ttl = ip_tunnel_get_ttl(ip_hdr(skb), skb);
		else
			ttl = key->ttl;
		ttl = ttl ?
: ip4_dst_hoplimit(&rt->dst);

		if (geneve->cfg.df == GENEVE_DF_SET) {
			df = htons(IP_DF);
		} else if (geneve->cfg.df == GENEVE_DF_INHERIT) {
			struct ethhdr *eth = skb_eth_hdr(skb);

			/* Inherit DF from the inner packet: always set for
			 * IPv6, copy the flag for IPv4.
			 */
			if (ntohs(eth->h_proto) == ETH_P_IPV6) {
				df = htons(IP_DF);
			} else if (ntohs(eth->h_proto) == ETH_P_IP) {
				struct iphdr *iph = ip_hdr(skb);

				if (iph->frag_off & htons(IP_DF))
					df = htons(IP_DF);
			}
		}
	}

	err = geneve_build_skb(&rt->dst, skb, info, geneve,
			       sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	udp_tunnel_xmit_skb(rt, gs4->sock->sk, skb, saddr, info->key.u.ipv4.dst,
			    tos, ttl, df, sport, geneve->cfg.info.key.tp_dst,
			    !net_eq(geneve->net, dev_net(geneve->dev)),
			    !test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags),
			    0);
	return 0;
}

#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 transmit path; mirrors geneve_xmit_skb() above. */
static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
			    struct geneve_dev *geneve,
			    const struct ip_tunnel_info *info)
{
	struct geneve_sock *gs6 = rcu_dereference(geneve->sock6);
	const struct ip_tunnel_key *key = &info->key;
	struct dst_entry *dst = NULL;
	struct in6_addr saddr;
	bool use_cache;
	__u8 prio, ttl;
	__be16 sport;
	int err;

	if (skb_vlan_inet_prepare(skb, geneve->cfg.inner_proto_inherit))
		return -EINVAL;

	if (!gs6)
		return -EIO;

	use_cache = ip_tunnel_dst_cache_usable(skb, info);
	prio = geneve_get_dsfield(skb, dev, info, &use_cache);
	sport = udp_flow_src_port(geneve->net, skb,
				  geneve->cfg.port_min,
				  geneve->cfg.port_max, true);

	dst = udp_tunnel6_dst_lookup(skb, dev, geneve->net, gs6->sock, 0,
				     &saddr, key, sport,
				     geneve->cfg.info.key.tp_dst, prio,
				     use_cache ?
				     (struct dst_cache *)&info->dst_cache : NULL);
	if (IS_ERR(dst))
		return PTR_ERR(dst);

	err = skb_tunnel_check_pmtu(skb, dst,
				    GENEVE_IPV6_HLEN + info->options_len +
				    geneve_build_gro_hint_opt(geneve, skb),
				    netif_is_any_bridge_port(dev));
	if (err < 0) {
		dst_release(dst);
		return err;
	} else if (err) {
		/* PMTU exceeded: bounce the generated reply back up; the
		 * local info deliberately shadows the parameter.
		 */
		struct ip_tunnel_info *info = skb_tunnel_info(skb);

		if (info) {
			struct ip_tunnel_info *unclone;

			unclone = skb_tunnel_info_unclone(skb);
			if (unlikely(!unclone)) {
				dst_release(dst);
				return -ENOMEM;
			}

			unclone->key.u.ipv6.dst = saddr;
			unclone->key.u.ipv6.src = info->key.u.ipv6.dst;
		}

		if (!pskb_may_pull(skb, ETH_HLEN)) {
			dst_release(dst);
			return -EINVAL;
		}

		skb->protocol = eth_type_trans(skb, geneve->dev);
		__netif_rx(skb);
		dst_release(dst);
		return -EMSGSIZE;
	}

	prio = ip_tunnel_ecn_encap(prio, ip_hdr(skb), skb);
	if (geneve->cfg.collect_md) {
		ttl = key->ttl;
	} else {
		if (geneve->cfg.ttl_inherit)
			ttl = ip_tunnel_get_ttl(ip_hdr(skb), skb);
		else
			ttl = key->ttl;
		ttl = ttl ?
: ip6_dst_hoplimit(dst);
	}
	err = geneve_build_skb(dst, skb, info, geneve, sizeof(struct ipv6hdr));
	if (unlikely(err))
		return err;

	udp_tunnel6_xmit_skb(dst, gs6->sock->sk, skb, dev,
			     &saddr, &key->u.ipv6.dst, prio, ttl,
			     info->key.label, sport, geneve->cfg.info.key.tp_dst,
			     !test_bit(IP_TUNNEL_CSUM_BIT,
				       info->key.tun_flags),
			     0);
	return 0;
}
#endif

/* ndo_start_xmit: pick the per-device or per-skb tunnel metadata and
 * dispatch to the IPv4 or IPv6 encap path; account errors in dev stats.
 */
static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct geneve_dev *geneve = netdev_priv(dev);
	struct ip_tunnel_info *info = NULL;
	int err;

	if (geneve->cfg.collect_md) {
		/* Externally controlled device: metadata must ride on the skb. */
		info = skb_tunnel_info(skb);
		if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) {
			netdev_dbg(dev, "no tunnel metadata\n");
			dev_kfree_skb(skb);
			dev_dstats_tx_dropped(dev);
			return NETDEV_TX_OK;
		}
	} else {
		info = &geneve->cfg.info;
	}

	rcu_read_lock();
#if IS_ENABLED(CONFIG_IPV6)
	if (info->mode & IP_TUNNEL_INFO_IPV6)
		err = geneve6_xmit_skb(skb, dev, geneve, info);
	else
#endif
		err = geneve_xmit_skb(skb, dev, geneve, info);
	rcu_read_unlock();

	if (likely(!err))
		return NETDEV_TX_OK;

	/* -EMSGSIZE means the skb was re-injected via __netif_rx(). */
	if (err != -EMSGSIZE)
		dev_kfree_skb(skb);

	if (err == -ELOOP)
		DEV_STATS_INC(dev, collisions);
	else if (err == -ENETUNREACH)
		DEV_STATS_INC(dev, tx_carrier_errors);

	DEV_STATS_INC(dev, tx_errors);
	return NETDEV_TX_OK;
}

/* ndo_change_mtu: clamp the requested MTU into [min_mtu, max_mtu]
 * instead of rejecting out-of-range values.
 */
static int geneve_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu > dev->max_mtu)
		new_mtu = dev->max_mtu;
	else if (new_mtu < dev->min_mtu)
		new_mtu = dev->min_mtu;

	WRITE_ONCE(dev->mtu, new_mtu);
	return 0;
}

/* ndo_fill_metadata_dst: resolve the route for the skb's tunnel metadata
 * and fill in the source address/ports so callers (e.g. OVS) can build
 * the full flow key.
 */
static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info = skb_tunnel_info(skb);
	struct geneve_dev *geneve = netdev_priv(dev);
	__be16 sport;

	if (ip_tunnel_info_af(info) == AF_INET) {
		struct rtable *rt;
		struct geneve_sock *gs4 = rcu_dereference(geneve->sock4);
		bool use_cache;
		__be32 saddr;
		u8 tos;

		if (!gs4)
			return -EIO;

		use_cache = ip_tunnel_dst_cache_usable(skb, info);
		tos = geneve_get_dsfield(skb, dev, info, &use_cache);
		sport = udp_flow_src_port(geneve->net, skb,
					  geneve->cfg.port_min,
					  geneve->cfg.port_max, true);

		rt = udp_tunnel_dst_lookup(skb, dev, geneve->net, 0, &saddr,
					   &info->key,
					   sport, geneve->cfg.info.key.tp_dst,
					   tos,
					   use_cache ? &info->dst_cache : NULL);
		if (IS_ERR(rt))
			return PTR_ERR(rt);

		/* Only the source address is needed; drop the route. */
		ip_rt_put(rt);
		info->key.u.ipv4.src = saddr;
#if IS_ENABLED(CONFIG_IPV6)
	} else if (ip_tunnel_info_af(info) == AF_INET6) {
		struct dst_entry *dst;
		struct geneve_sock *gs6 = rcu_dereference(geneve->sock6);
		struct in6_addr saddr;
		bool use_cache;
		u8 prio;

		if (!gs6)
			return -EIO;

		use_cache = ip_tunnel_dst_cache_usable(skb, info);
		prio = geneve_get_dsfield(skb, dev, info, &use_cache);
		sport = udp_flow_src_port(geneve->net, skb,
					  geneve->cfg.port_min,
					  geneve->cfg.port_max, true);

		dst = udp_tunnel6_dst_lookup(skb, dev, geneve->net, gs6->sock, 0,
					     &saddr, &info->key, sport,
					     geneve->cfg.info.key.tp_dst, prio,
					     use_cache ?
&info->dst_cache : NULL);
		if (IS_ERR(dst))
			return PTR_ERR(dst);

		/* Only the source address is needed; drop the route. */
		dst_release(dst);
		info->key.u.ipv6.src = saddr;
#endif
	} else {
		return -EINVAL;
	}

	info->key.tp_src = sport;
	info->key.tp_dst = geneve->cfg.info.key.tp_dst;
	return 0;
}

static const struct net_device_ops geneve_netdev_ops = {
	.ndo_init		= geneve_init,
	.ndo_uninit		= geneve_uninit,
	.ndo_open		= geneve_open,
	.ndo_stop		= geneve_stop,
	.ndo_start_xmit		= geneve_xmit,
	.ndo_change_mtu		= geneve_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_fill_metadata_dst	= geneve_fill_metadata_dst,
};

static void geneve_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *drvinfo)
{
	strscpy(drvinfo->version, GENEVE_NETDEV_VER, sizeof(drvinfo->version));
	strscpy(drvinfo->driver, "geneve", sizeof(drvinfo->driver));
}

static const struct ethtool_ops geneve_ethtool_ops = {
	.get_drvinfo	= geneve_get_drvinfo,
	.get_link	= ethtool_op_get_link,
};

/* Info for udev, that this is a virtual tunnel endpoint */
static const struct device_type geneve_type = {
	.name = "geneve",
};

/* Calls the ndo_udp_tunnel_add of the caller in order to
 * supply the listening GENEVE udp ports. Callers are expected
 * to implement the ndo_udp_tunnel_add.
 */
static void geneve_offload_rx_ports(struct net_device *dev, bool push)
{
	struct net *net = dev_net(dev);
	struct geneve_net *gn = net_generic(net, geneve_net_id);
	struct geneve_sock *gs;

	/* gn->sock_list is protected by the rtnl lock. */
	ASSERT_RTNL();

	list_for_each_entry(gs, &gn->sock_list, list) {
		if (push) {
			udp_tunnel_push_rx_port(dev, gs->sock,
						UDP_TUNNEL_TYPE_GENEVE);
		} else {
			udp_tunnel_drop_rx_port(dev, gs->sock,
						UDP_TUNNEL_TYPE_GENEVE);
		}
	}
}

/* Initialize the device structure. */
static void geneve_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->netdev_ops = &geneve_netdev_ops;
	dev->ethtool_ops = &geneve_ethtool_ops;
	dev->needs_free_netdev = true;

	SET_NETDEV_DEVTYPE(dev, &geneve_type);

	dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST;
	dev->features |= NETIF_F_RXCSUM;
	dev->features |= NETIF_F_GSO_SOFTWARE;

	/* Partial features are disabled by default. */
	dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST;
	dev->hw_features |= NETIF_F_RXCSUM;
	dev->hw_features |= NETIF_F_GSO_SOFTWARE;
	dev->hw_features |= UDP_TUNNEL_PARTIAL_FEATURES;
	dev->hw_features |= NETIF_F_GSO_PARTIAL;

	dev->hw_enc_features = dev->hw_features;
	dev->gso_partial_features = UDP_TUNNEL_PARTIAL_FEATURES;
	dev->mangleid_features = NETIF_F_GSO_PARTIAL;

	dev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;
	/* MTU range: 68 - (something less than 65535) */
	dev->min_mtu = ETH_MIN_MTU;
	/* The max_mtu calculation does not take account of GENEVE
	 * options, to avoid excluding potentially valid
	 * configurations. This will be further reduced by IPvX hdr size.
	 */
	dev->max_mtu = IP_MAX_MTU - GENEVE_BASE_HLEN - dev->hard_header_len;

	netif_keep_dst(dev);
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
	dev->lltx = true;
	eth_hw_addr_random(dev);
}

static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = {
	[IFLA_GENEVE_UNSPEC]		= { .strict_start_type = IFLA_GENEVE_INNER_PROTO_INHERIT },
	[IFLA_GENEVE_ID]		= { .type = NLA_U32 },
	[IFLA_GENEVE_REMOTE]		= { .len = sizeof_field(struct iphdr, daddr) },
	[IFLA_GENEVE_REMOTE6]		= { .len = sizeof(struct in6_addr) },
	[IFLA_GENEVE_TTL]		= { .type = NLA_U8 },
	[IFLA_GENEVE_TOS]		= { .type = NLA_U8 },
	[IFLA_GENEVE_LABEL]		= { .type = NLA_U32 },
	[IFLA_GENEVE_PORT]		= { .type = NLA_U16 },
	[IFLA_GENEVE_COLLECT_METADATA]	= { .type = NLA_FLAG },
	[IFLA_GENEVE_UDP_CSUM]		= { .type = NLA_U8 },
	[IFLA_GENEVE_UDP_ZERO_CSUM6_TX]	= { .type = NLA_U8 },
	[IFLA_GENEVE_UDP_ZERO_CSUM6_RX]	= { .type = NLA_U8 },
	[IFLA_GENEVE_TTL_INHERIT]	= { .type = NLA_U8 },
	[IFLA_GENEVE_DF]		= { .type = NLA_U8 },
	[IFLA_GENEVE_INNER_PROTO_INHERIT]	= { .type = NLA_FLAG },
	[IFLA_GENEVE_PORT_RANGE]	= NLA_POLICY_EXACT_LEN(sizeof(struct ifla_geneve_port_range)),
	[IFLA_GENEVE_GRO_HINT]		= { .type = NLA_FLAG },
};

/* rtnl_link_ops->validate: sanity-check netlink attributes before any
 * device state is touched.
 */
static int geneve_validate(struct nlattr *tb[], struct nlattr *data[],
			   struct netlink_ext_ack *extack)
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
			NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_ADDRESS],
					    "Provided link layer address is not Ethernet");
			return -EINVAL;
		}

		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
			NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_ADDRESS],
					    "Provided Ethernet address is not unicast");
			return -EADDRNOTAVAIL;
		}
	}

	if (!data) {
		NL_SET_ERR_MSG(extack,
			       "Not enough attributes provided to perform the operation");
		return -EINVAL;
	}

	if (data[IFLA_GENEVE_ID]) {
		__u32 vni = nla_get_u32(data[IFLA_GENEVE_ID]);

		/* VNI is a 24-bit field. */
		if (vni >= GENEVE_N_VID) {
			NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_ID],
					    "Geneve ID must be lower than 16777216");
			return -ERANGE;
		}
	}

	if (data[IFLA_GENEVE_DF]) {
		enum ifla_geneve_df df = nla_get_u8(data[IFLA_GENEVE_DF]);

		if (df < 0 || df > GENEVE_DF_MAX) {
			NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_DF],
					    "Invalid DF attribute");
			return -EINVAL;
		}
	}

	if (data[IFLA_GENEVE_PORT_RANGE]) {
		const struct ifla_geneve_port_range *p;

		p = nla_data(data[IFLA_GENEVE_PORT_RANGE]);
		if (ntohs(p->high) < ntohs(p->low)) {
			NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_PORT_RANGE],
					    "Invalid source port range");
			return -EINVAL;
		}
	}

	return 0;
}

/* Scan the per-netns device list.  Returns the device exactly matching
 * (vni, dst port, remote), if any, and reports via the out-params whether
 * any device uses the same port and whether that one is collect_md.
 */
static struct geneve_dev *geneve_find_dev(struct geneve_net *gn,
					  const struct ip_tunnel_info *info,
					  bool *tun_on_same_port,
					  bool *tun_collect_md)
{
	struct geneve_dev *geneve, *t = NULL;

	*tun_on_same_port = false;
	*tun_collect_md = false;
	list_for_each_entry(geneve, &gn->geneve_list, next) {
		if (info->key.tp_dst == geneve->cfg.info.key.tp_dst) {
			*tun_collect_md = geneve->cfg.collect_md;
			*tun_on_same_port = true;
		}
		if (info->key.tun_id == geneve->cfg.info.key.tun_id &&
		    info->key.tp_dst == geneve->cfg.info.key.tp_dst &&
		    !memcmp(&info->key.u, &geneve->cfg.info.key.u, sizeof(info->key.u)))
			t = geneve;
	}
	return t;
}

/* True when no tunnel key attribute has been set (used to reject explicit
 * attributes on externally controlled devices).
 */
static bool is_tnl_info_zero(const struct ip_tunnel_info *info)
{
	return !(info->key.tun_id || info->key.tos ||
		 !ip_tunnel_flags_empty(info->key.tun_flags) ||
		 info->key.ttl || info->key.label || info->key.tp_src ||
		 memchr_inv(&info->key.u, 0, sizeof(info->key.u)));
}

static bool
geneve_dst_addr_equal(struct ip_tunnel_info *a,
		      struct ip_tunnel_info *b)
{
	if (ip_tunnel_info_af(a) == AF_INET)
		return a->key.u.ipv4.dst == b->key.u.ipv4.dst;
	else
		return ipv6_addr_equal(&a->key.u.ipv6.dst, &b->key.u.ipv6.dst);
}

/* Apply a validated geneve_config to a new device and register it.
 * Rejects duplicates of an existing (vni, port, remote) tuple and
 * enforces the "one collect_md device per port" rule.
 */
static int geneve_configure(struct net *net, struct net_device *dev,
			    struct netlink_ext_ack *extack,
			    const struct geneve_config *cfg)
{
	struct geneve_net *gn = net_generic(net, geneve_net_id);
	struct geneve_dev *t, *geneve = netdev_priv(dev);
	const struct ip_tunnel_info *info = &cfg->info;
	bool tun_collect_md, tun_on_same_port;
	int err, encap_len;

	if (cfg->collect_md && !is_tnl_info_zero(info)) {
		NL_SET_ERR_MSG(extack,
			       "Device is externally controlled, so attributes (VNI, Port, and so on) must not be specified");
		return -EINVAL;
	}

	geneve->net = net;
	geneve->dev = dev;

	t = geneve_find_dev(gn, info, &tun_on_same_port, &tun_collect_md);
	if (t)
		return -EBUSY;

	/* make enough headroom for basic scenario */
	encap_len = GENEVE_BASE_HLEN + ETH_HLEN;
	if (!cfg->collect_md && ip_tunnel_info_af(info) == AF_INET) {
		encap_len += sizeof(struct iphdr);
		dev->max_mtu -= sizeof(struct iphdr);
	} else {
		/* collect_md may end up using IPv6 - assume the bigger hdr. */
		encap_len += sizeof(struct ipv6hdr);
		dev->max_mtu -= sizeof(struct ipv6hdr);
	}
	dev->needed_headroom = encap_len + ETH_HLEN;

	if (cfg->collect_md) {
		if (tun_on_same_port) {
			NL_SET_ERR_MSG(extack,
				       "There can be only one externally controlled device on a destination port");
			return -EPERM;
		}
	} else {
		if (tun_collect_md) {
			NL_SET_ERR_MSG(extack,
				       "There already exists an externally controlled device on this destination port");
			return -EPERM;
		}
	}

	dst_cache_reset(&geneve->cfg.info.dst_cache);
	memcpy(&geneve->cfg, cfg, sizeof(*cfg));

	if (geneve->cfg.inner_proto_inherit) {
		/* Raw IP mode: no Ethernet header on the inner packet. */
		dev->header_ops = NULL;
		dev->type = ARPHRD_NONE;
		dev->hard_header_len = 0;
		dev->addr_len = 0;
		dev->flags = IFF_POINTOPOINT | IFF_NOARP;
	}

	err = register_netdevice(dev);
	if (err)
		return err;

	list_add(&geneve->next, &gn->geneve_list);
	return 0;
}

/* Reset @info and set only the destination UDP port (host order input). */
static void init_tnl_info(struct ip_tunnel_info *info, __u16 dst_port)
{
	memset(info, 0, sizeof(*info));
	info->key.tp_dst = htons(dst_port);
}

/* Translate netlink attributes into a geneve_config.  With @changelink
 * set, attributes that cannot be modified on a live device are rejected
 * via the change_notsup label.
 */
static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[],
			  struct netlink_ext_ack *extack,
			  struct geneve_config *cfg, bool changelink)
{
	struct ip_tunnel_info *info = &cfg->info;
	int attrtype;

	if (data[IFLA_GENEVE_REMOTE] && data[IFLA_GENEVE_REMOTE6]) {
		NL_SET_ERR_MSG(extack,
			       "Cannot specify both IPv4 and IPv6 Remote addresses");
		return -EINVAL;
	}

	if (data[IFLA_GENEVE_REMOTE]) {
		/* The address family of an existing device cannot change. */
		if (changelink && (ip_tunnel_info_af(info) == AF_INET6)) {
			attrtype = IFLA_GENEVE_REMOTE;
			goto change_notsup;
		}

		info->key.u.ipv4.dst =
			nla_get_in_addr(data[IFLA_GENEVE_REMOTE]);

		if (ipv4_is_multicast(info->key.u.ipv4.dst)) {
			NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_REMOTE],
					    "Remote IPv4 address cannot be Multicast");
			return -EINVAL;
		}
	}

	if (data[IFLA_GENEVE_REMOTE6]) {
#if IS_ENABLED(CONFIG_IPV6)
		if (changelink && (ip_tunnel_info_af(info) == AF_INET)) {
			attrtype = IFLA_GENEVE_REMOTE6;
			goto change_notsup;
		}

		info->mode = IP_TUNNEL_INFO_IPV6;
		info->key.u.ipv6.dst =
			nla_get_in6_addr(data[IFLA_GENEVE_REMOTE6]);

		if (ipv6_addr_type(&info->key.u.ipv6.dst) &
		    IPV6_ADDR_LINKLOCAL) {
			NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_REMOTE6],
					    "Remote IPv6 address cannot be link-local");
			return -EINVAL;
		}
		if (ipv6_addr_is_multicast(&info->key.u.ipv6.dst)) {
			NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_REMOTE6],
					    "Remote IPv6 address cannot be Multicast");
			return -EINVAL;
		}
		/* IPv6 defaults: TX checksums on, RX checksums required. */
		__set_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags);
		cfg->use_udp6_rx_checksums = true;
#else
		NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_REMOTE6],
				    "IPv6 support not enabled in the kernel");
		return -EPFNOSUPPORT;
#endif
	}

	if (data[IFLA_GENEVE_ID]) {
		__u32 vni;
		__u8 tvni[3];
		__be64 tunid;

		/* Pack the 24-bit VNI into the wire-order byte triple. */
		vni = nla_get_u32(data[IFLA_GENEVE_ID]);
		tvni[0] = (vni & 0x00ff0000) >> 16;
		tvni[1] = (vni & 0x0000ff00) >> 8;
		tvni[2] = vni & 0x000000ff;

		tunid = vni_to_tunnel_id(tvni);
		if (changelink && (tunid != info->key.tun_id)) {
			attrtype = IFLA_GENEVE_ID;
			goto change_notsup;
		}
		info->key.tun_id = tunid;
	}

	/* TTL_INHERIT takes precedence over an explicit TTL value. */
	if (data[IFLA_GENEVE_TTL_INHERIT]) {
		if (nla_get_u8(data[IFLA_GENEVE_TTL_INHERIT]))
			cfg->ttl_inherit = true;
		else
			cfg->ttl_inherit = false;
	} else if (data[IFLA_GENEVE_TTL]) {
		info->key.ttl = nla_get_u8(data[IFLA_GENEVE_TTL]);
		cfg->ttl_inherit = false;
	}

	if (data[IFLA_GENEVE_TOS])
		info->key.tos = nla_get_u8(data[IFLA_GENEVE_TOS]);

	if (data[IFLA_GENEVE_DF])
		cfg->df = nla_get_u8(data[IFLA_GENEVE_DF]);

	if (data[IFLA_GENEVE_LABEL]) {
		info->key.label = nla_get_be32(data[IFLA_GENEVE_LABEL]) &
				  IPV6_FLOWLABEL_MASK;
		if (info->key.label && (!(info->mode & IP_TUNNEL_INFO_IPV6))) {
			NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_LABEL],
					    "Label attribute only applies for IPv6 Geneve devices");
			return -EINVAL;
		}
	}

	if (data[IFLA_GENEVE_PORT]) {
		if (changelink) {
			attrtype = IFLA_GENEVE_PORT;
			goto change_notsup;
		}
		info->key.tp_dst = nla_get_be16(data[IFLA_GENEVE_PORT]);
	}

	if (data[IFLA_GENEVE_PORT_RANGE]) {
		const struct ifla_geneve_port_range *p;

		if (changelink) {
			attrtype = IFLA_GENEVE_PORT_RANGE;
			goto change_notsup;
		}
		p = nla_data(data[IFLA_GENEVE_PORT_RANGE]);
		cfg->port_min = ntohs(p->low);
		cfg->port_max = ntohs(p->high);
	}

	if (data[IFLA_GENEVE_COLLECT_METADATA]) {
		if (changelink) {
			attrtype = IFLA_GENEVE_COLLECT_METADATA;
			goto change_notsup;
		}
		cfg->collect_md = true;
	}

	if (data[IFLA_GENEVE_UDP_CSUM]) {
		if (changelink) {
			attrtype = IFLA_GENEVE_UDP_CSUM;
			goto change_notsup;
		}
		if (nla_get_u8(data[IFLA_GENEVE_UDP_CSUM]))
			__set_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags);
	}

	if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX]) {
#if IS_ENABLED(CONFIG_IPV6)
		if (changelink) {
			attrtype = IFLA_GENEVE_UDP_ZERO_CSUM6_TX;
			goto change_notsup;
		}
		if (nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX]))
			__clear_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags);
#else
		NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX],
				    "IPv6 support not enabled in the kernel");
		return -EPFNOSUPPORT;
#endif
	}

	if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX]) {
#if IS_ENABLED(CONFIG_IPV6)
		if (changelink) {
			attrtype = IFLA_GENEVE_UDP_ZERO_CSUM6_RX;
			goto change_notsup;
		}
		if (nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX]))
			cfg->use_udp6_rx_checksums = false;
#else
		NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX],
				    "IPv6 support not enabled in the kernel");
		return -EPFNOSUPPORT;
#endif
	}

	if (data[IFLA_GENEVE_INNER_PROTO_INHERIT]) {
		if (changelink) {
			attrtype = IFLA_GENEVE_INNER_PROTO_INHERIT;
			goto change_notsup;
		}
		cfg->inner_proto_inherit = true;
	}

	if (data[IFLA_GENEVE_GRO_HINT]) {
		if (changelink) {
			attrtype = IFLA_GENEVE_GRO_HINT;
			goto change_notsup;
		}
		cfg->gro_hint = true;
	}

	return 0;
change_notsup:
	NL_SET_ERR_MSG_ATTR(extack, data[attrtype],
			    "Changing VNI, Port, endpoint IP address family, external, inner_proto_inherit, gro_hint and UDP checksum attributes are not supported");
	return -EOPNOTSUPP;
}

/* Pick an MTU for the device: an explicit IFLA_MTU wins; otherwise derive
 * it from the MTU of the lower device the tunnel route resolves to, minus
 * the encapsulation overhead and configured option length.
 */
static void geneve_link_config(struct net_device *dev,
			       struct ip_tunnel_info *info, struct nlattr *tb[])
{
	struct geneve_dev *geneve = netdev_priv(dev);
	int ldev_mtu = 0;

	if (tb[IFLA_MTU]) {
		geneve_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
		return;
	}

	switch (ip_tunnel_info_af(info)) {
	case AF_INET: {
		struct flowi4 fl4 = { .daddr = info->key.u.ipv4.dst };
		struct rtable *rt = ip_route_output_key(geneve->net, &fl4);

		if (!IS_ERR(rt) && rt->dst.dev) {
			ldev_mtu = rt->dst.dev->mtu - GENEVE_IPV4_HLEN;
			ip_rt_put(rt);
		}
		break;
	}
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6: {
		struct rt6_info *rt;

		if (!__in6_dev_get(dev))
			break;

		rt = rt6_lookup(geneve->net, &info->key.u.ipv6.dst, NULL, 0,
				NULL, 0);

		if (rt && rt->dst.dev)
			ldev_mtu = rt->dst.dev->mtu - GENEVE_IPV6_HLEN;
		ip6_rt_put(rt);
		break;
	}
#endif
	}

	/* No usable route: keep the current MTU. */
	if (ldev_mtu <= 0)
		return;

	geneve_change_mtu(dev, ldev_mtu - info->options_len);
}

/* rtnl_link_ops->newlink: parse attributes, configure and register the
 * device, then set an initial MTU.
 */
static int geneve_newlink(struct net_device *dev,
			  struct rtnl_newlink_params *params,
			  struct netlink_ext_ack *extack)
{
	struct net *link_net = rtnl_newlink_link_net(params);
	struct nlattr **data = params->data;
	struct nlattr **tb = params->tb;
	struct geneve_config cfg = {
		.df = GENEVE_DF_UNSET,
		.use_udp6_rx_checksums = false,
		.ttl_inherit = false,
		.collect_md = false,
		.port_min = 1,
		.port_max = USHRT_MAX,
	};
	int err;

	init_tnl_info(&cfg.info, GENEVE_UDP_PORT);
	err = geneve_nl2info(tb, data, extack, &cfg, false);
	if (err)
		return err;

	err = geneve_configure(link_net, dev, extack, &cfg);
	if (err)
		return err;

	geneve_link_config(dev, &cfg.info, tb);

	return 0;
}

/* Quiesces the geneve device data path for both TX and RX.
 *
 * On transmit geneve checks for non-NULL geneve_sock before it proceeds.
 * So, if we set that socket to NULL under RCU and wait for synchronize_net()
 * to complete for the existing set of in-flight packets to be transmitted,
 * then we would have quiesced the transmit data path. All the future packets
 * will get dropped until we unquiesce the data path.
 *
 * On receive geneve dereference the geneve_sock stashed in the socket. So,
 * if we set that to NULL under RCU and wait for synchronize_net() to
 * complete, then we would have quiesced the receive data path.
 */
static void geneve_quiesce(struct geneve_dev *geneve, struct geneve_sock **gs4,
			   struct geneve_sock **gs6)
{
	*gs4 = rtnl_dereference(geneve->sock4);
	rcu_assign_pointer(geneve->sock4, NULL);
	if (*gs4)
		rcu_assign_sk_user_data((*gs4)->sock->sk, NULL);
#if IS_ENABLED(CONFIG_IPV6)
	*gs6 = rtnl_dereference(geneve->sock6);
	rcu_assign_pointer(geneve->sock6, NULL);
	if (*gs6)
		rcu_assign_sk_user_data((*gs6)->sock->sk, NULL);
#else
	*gs6 = NULL;
#endif
	synchronize_net();
}

/* Resumes the geneve device data path for both TX and RX. */
static void geneve_unquiesce(struct geneve_dev *geneve, struct geneve_sock *gs4,
			     struct geneve_sock __maybe_unused *gs6)
{
	rcu_assign_pointer(geneve->sock4, gs4);
	if (gs4)
		rcu_assign_sk_user_data(gs4->sock->sk, gs4);
#if IS_ENABLED(CONFIG_IPV6)
	rcu_assign_pointer(geneve->sock6, gs6);
	if (gs6)
		rcu_assign_sk_user_data(gs6->sock->sk, gs6);
#endif
	synchronize_net();
}

/* rtnl_link_ops->changelink: validate the new configuration, then swap it
 * in while the data path is quiesced so readers never see a half-updated
 * geneve->cfg.
 */
static int geneve_changelink(struct net_device *dev, struct nlattr *tb[],
			     struct nlattr *data[],
			     struct netlink_ext_ack *extack)
{
	struct geneve_dev *geneve = netdev_priv(dev);
	struct geneve_sock *gs4, *gs6;
	struct geneve_config cfg;
	int err;

	/* If the geneve device is configured for metadata (or externally
	 * controlled, for example, OVS), then nothing can be changed.
	 */
	if (geneve->cfg.collect_md)
		return -EOPNOTSUPP;

	/* Start with the existing info. */
	memcpy(&cfg, &geneve->cfg, sizeof(cfg));
	err = geneve_nl2info(tb, data, extack, &cfg, true);
	if (err)
		return err;

	if (!geneve_dst_addr_equal(&geneve->cfg.info, &cfg.info)) {
		dst_cache_reset(&cfg.info.dst_cache);
		geneve_link_config(dev, &cfg.info, tb);
	}

	geneve_quiesce(geneve, &gs4, &gs6);
	memcpy(&geneve->cfg, &cfg, sizeof(cfg));
	geneve_unquiesce(geneve, gs4, gs6);

	return 0;
}

static void geneve_dellink(struct net_device *dev, struct list_head *head)
{
	struct geneve_dev *geneve = netdev_priv(dev);

	list_del(&geneve->next);
	unregister_netdevice_queue(dev, head);
}

/* Worst-case netlink attribute payload for geneve_fill_info(). */
static size_t geneve_get_size(const struct net_device *dev)
{
	return nla_total_size(sizeof(__u32)) +	/* IFLA_GENEVE_ID */
	       nla_total_size(sizeof(struct in6_addr)) + /* IFLA_GENEVE_REMOTE{6} */
	       nla_total_size(sizeof(__u8)) +	/* IFLA_GENEVE_TTL */
	       nla_total_size(sizeof(__u8)) +	/* IFLA_GENEVE_TOS */
	       nla_total_size(sizeof(__u8)) +	/* IFLA_GENEVE_DF */
	       nla_total_size(sizeof(__be32)) +	/* IFLA_GENEVE_LABEL */
	       nla_total_size(sizeof(__be16)) +	/* IFLA_GENEVE_PORT */
	       nla_total_size(0) +	/* IFLA_GENEVE_COLLECT_METADATA */
	       nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_UDP_CSUM */
	       nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_UDP_ZERO_CSUM6_TX */
	       nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_UDP_ZERO_CSUM6_RX */
	       nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TTL_INHERIT */
	       nla_total_size(0) +	/* IFLA_GENEVE_INNER_PROTO_INHERIT */
	       nla_total_size(sizeof(struct ifla_geneve_port_range)) + /* IFLA_GENEVE_PORT_RANGE */
	       nla_total_size(0) +	/* IFLA_GENEVE_GRO_HINT */
	       0;
}

/* rtnl_link_ops->fill_info: dump the current device configuration. */
static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct geneve_dev *geneve = netdev_priv(dev);
	struct ip_tunnel_info *info = &geneve->cfg.info;
	bool ttl_inherit = geneve->cfg.ttl_inherit;
	bool metadata = geneve->cfg.collect_md;
	struct ifla_geneve_port_range ports = {
		.low = htons(geneve->cfg.port_min),
		.high = htons(geneve->cfg.port_max),
	};
	__u8 tmp_vni[3];
	__u32 vni;

	tunnel_id_to_vni(info->key.tun_id, tmp_vni);
	vni = (tmp_vni[0] << 16) | (tmp_vni[1] << 8) | tmp_vni[2];
	if (nla_put_u32(skb, IFLA_GENEVE_ID, vni))
		goto nla_put_failure;

	if (!metadata && ip_tunnel_info_af(info) == AF_INET) {
		if (nla_put_in_addr(skb, IFLA_GENEVE_REMOTE,
				    info->key.u.ipv4.dst))
			goto nla_put_failure;
		if (nla_put_u8(skb, IFLA_GENEVE_UDP_CSUM,
			       test_bit(IP_TUNNEL_CSUM_BIT,
					info->key.tun_flags)))
			goto nla_put_failure;

#if IS_ENABLED(CONFIG_IPV6)
	} else if (!metadata) {
		if (nla_put_in6_addr(skb, IFLA_GENEVE_REMOTE6,
				     &info->key.u.ipv6.dst))
			goto nla_put_failure;
		if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_TX,
			       !test_bit(IP_TUNNEL_CSUM_BIT,
					 info->key.tun_flags)))
2309 goto nla_put_failure; 2310 #endif 2311 } 2312 2313 if (nla_put_u8(skb, IFLA_GENEVE_TTL, info->key.ttl) || 2314 nla_put_u8(skb, IFLA_GENEVE_TOS, info->key.tos) || 2315 nla_put_be32(skb, IFLA_GENEVE_LABEL, info->key.label)) 2316 goto nla_put_failure; 2317 2318 if (nla_put_u8(skb, IFLA_GENEVE_DF, geneve->cfg.df)) 2319 goto nla_put_failure; 2320 2321 if (nla_put_be16(skb, IFLA_GENEVE_PORT, info->key.tp_dst)) 2322 goto nla_put_failure; 2323 2324 if (metadata && nla_put_flag(skb, IFLA_GENEVE_COLLECT_METADATA)) 2325 goto nla_put_failure; 2326 2327 #if IS_ENABLED(CONFIG_IPV6) 2328 if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_RX, 2329 !geneve->cfg.use_udp6_rx_checksums)) 2330 goto nla_put_failure; 2331 #endif 2332 2333 if (nla_put_u8(skb, IFLA_GENEVE_TTL_INHERIT, ttl_inherit)) 2334 goto nla_put_failure; 2335 2336 if (geneve->cfg.inner_proto_inherit && 2337 nla_put_flag(skb, IFLA_GENEVE_INNER_PROTO_INHERIT)) 2338 goto nla_put_failure; 2339 2340 if (nla_put(skb, IFLA_GENEVE_PORT_RANGE, sizeof(ports), &ports)) 2341 goto nla_put_failure; 2342 2343 if (geneve->cfg.gro_hint && 2344 nla_put_flag(skb, IFLA_GENEVE_GRO_HINT)) 2345 goto nla_put_failure; 2346 2347 return 0; 2348 2349 nla_put_failure: 2350 return -EMSGSIZE; 2351 } 2352 2353 static struct rtnl_link_ops geneve_link_ops __read_mostly = { 2354 .kind = "geneve", 2355 .maxtype = IFLA_GENEVE_MAX, 2356 .policy = geneve_policy, 2357 .priv_size = sizeof(struct geneve_dev), 2358 .setup = geneve_setup, 2359 .validate = geneve_validate, 2360 .newlink = geneve_newlink, 2361 .changelink = geneve_changelink, 2362 .dellink = geneve_dellink, 2363 .get_size = geneve_get_size, 2364 .fill_info = geneve_fill_info, 2365 }; 2366 2367 struct net_device *geneve_dev_create_fb(struct net *net, const char *name, 2368 u8 name_assign_type, u16 dst_port) 2369 { 2370 struct nlattr *tb[IFLA_MAX + 1]; 2371 struct net_device *dev; 2372 LIST_HEAD(list_kill); 2373 int err; 2374 struct geneve_config cfg = { 2375 .df = GENEVE_DF_UNSET, 2376 
.use_udp6_rx_checksums = true, 2377 .ttl_inherit = false, 2378 .collect_md = true, 2379 .port_min = 1, 2380 .port_max = USHRT_MAX, 2381 }; 2382 2383 memset(tb, 0, sizeof(tb)); 2384 dev = rtnl_create_link(net, name, name_assign_type, 2385 &geneve_link_ops, tb, NULL); 2386 if (IS_ERR(dev)) 2387 return dev; 2388 2389 init_tnl_info(&cfg.info, dst_port); 2390 err = geneve_configure(net, dev, NULL, &cfg); 2391 if (err) { 2392 free_netdev(dev); 2393 return ERR_PTR(err); 2394 } 2395 2396 /* openvswitch users expect packet sizes to be unrestricted, 2397 * so set the largest MTU we can. 2398 */ 2399 err = geneve_change_mtu(dev, IP_MAX_MTU); 2400 if (err) 2401 goto err; 2402 2403 err = rtnl_configure_link(dev, NULL, 0, NULL); 2404 if (err < 0) 2405 goto err; 2406 2407 return dev; 2408 err: 2409 geneve_dellink(dev, &list_kill); 2410 unregister_netdevice_many(&list_kill); 2411 return ERR_PTR(err); 2412 } 2413 EXPORT_SYMBOL_GPL(geneve_dev_create_fb); 2414 2415 static int geneve_netdevice_event(struct notifier_block *unused, 2416 unsigned long event, void *ptr) 2417 { 2418 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 2419 2420 if (event == NETDEV_UDP_TUNNEL_PUSH_INFO) 2421 geneve_offload_rx_ports(dev, true); 2422 else if (event == NETDEV_UDP_TUNNEL_DROP_INFO) 2423 geneve_offload_rx_ports(dev, false); 2424 2425 return NOTIFY_DONE; 2426 } 2427 2428 static struct notifier_block geneve_notifier_block __read_mostly = { 2429 .notifier_call = geneve_netdevice_event, 2430 }; 2431 2432 static __net_init int geneve_init_net(struct net *net) 2433 { 2434 struct geneve_net *gn = net_generic(net, geneve_net_id); 2435 2436 INIT_LIST_HEAD(&gn->geneve_list); 2437 INIT_LIST_HEAD(&gn->sock_list); 2438 return 0; 2439 } 2440 2441 static void __net_exit geneve_exit_rtnl_net(struct net *net, 2442 struct list_head *dev_to_kill) 2443 { 2444 struct geneve_net *gn = net_generic(net, geneve_net_id); 2445 struct geneve_dev *geneve, *next; 2446 2447 list_for_each_entry_safe(geneve, next, 
&gn->geneve_list, next) 2448 geneve_dellink(geneve->dev, dev_to_kill); 2449 } 2450 2451 static void __net_exit geneve_exit_net(struct net *net) 2452 { 2453 const struct geneve_net *gn = net_generic(net, geneve_net_id); 2454 2455 WARN_ON_ONCE(!list_empty(&gn->sock_list)); 2456 } 2457 2458 static struct pernet_operations geneve_net_ops = { 2459 .init = geneve_init_net, 2460 .exit_rtnl = geneve_exit_rtnl_net, 2461 .exit = geneve_exit_net, 2462 .id = &geneve_net_id, 2463 .size = sizeof(struct geneve_net), 2464 }; 2465 2466 static int __init geneve_init_module(void) 2467 { 2468 int rc; 2469 2470 rc = register_pernet_subsys(&geneve_net_ops); 2471 if (rc) 2472 goto out1; 2473 2474 rc = register_netdevice_notifier(&geneve_notifier_block); 2475 if (rc) 2476 goto out2; 2477 2478 rc = rtnl_link_register(&geneve_link_ops); 2479 if (rc) 2480 goto out3; 2481 2482 return 0; 2483 out3: 2484 unregister_netdevice_notifier(&geneve_notifier_block); 2485 out2: 2486 unregister_pernet_subsys(&geneve_net_ops); 2487 out1: 2488 return rc; 2489 } 2490 late_initcall(geneve_init_module); 2491 2492 static void __exit geneve_cleanup_module(void) 2493 { 2494 rtnl_link_unregister(&geneve_link_ops); 2495 unregister_netdevice_notifier(&geneve_notifier_block); 2496 unregister_pernet_subsys(&geneve_net_ops); 2497 } 2498 module_exit(geneve_cleanup_module); 2499 2500 MODULE_LICENSE("GPL"); 2501 MODULE_VERSION(GENEVE_NETDEV_VER); 2502 MODULE_AUTHOR("John W. Linville <linville@tuxdriver.com>"); 2503 MODULE_DESCRIPTION("Interface driver for GENEVE encapsulated traffic"); 2504 MODULE_ALIAS_RTNL_LINK("geneve"); 2505