1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * VXLAN: Virtual eXtensible Local Area Network 4 * 5 * Copyright (c) 2012-2013 Vyatta Inc. 6 */ 7 8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 9 10 #include <linux/kernel.h> 11 #include <linux/module.h> 12 #include <linux/errno.h> 13 #include <linux/slab.h> 14 #include <linux/udp.h> 15 #include <linux/igmp.h> 16 #include <linux/if_ether.h> 17 #include <linux/ethtool.h> 18 #include <net/arp.h> 19 #include <net/ndisc.h> 20 #include <net/gro.h> 21 #include <net/ipv6_stubs.h> 22 #include <net/ip.h> 23 #include <net/icmp.h> 24 #include <net/rtnetlink.h> 25 #include <net/inet_ecn.h> 26 #include <net/net_namespace.h> 27 #include <net/netns/generic.h> 28 #include <net/tun_proto.h> 29 #include <net/vxlan.h> 30 #include <net/nexthop.h> 31 32 #if IS_ENABLED(CONFIG_IPV6) 33 #include <net/ip6_tunnel.h> 34 #include <net/ip6_checksum.h> 35 #endif 36 37 #include "vxlan_private.h" 38 39 #define VXLAN_VERSION "0.1" 40 41 #define FDB_AGE_DEFAULT 300 /* 5 min */ 42 #define FDB_AGE_INTERVAL (10 * HZ) /* rescan interval */ 43 44 /* UDP port for VXLAN traffic. 45 * The IANA assigned port is 4789, but the Linux default is 8472 46 * for compatibility with early adopters. 47 */ 48 static unsigned short vxlan_port __read_mostly = 8472; 49 module_param_named(udp_port, vxlan_port, ushort, 0444); 50 MODULE_PARM_DESC(udp_port, "Destination UDP port"); 51 52 static bool log_ecn_error = true; 53 module_param(log_ecn_error, bool, 0644); 54 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN"); 55 56 unsigned int vxlan_net_id; 57 58 const u8 all_zeros_mac[ETH_ALEN + 2]; 59 static struct rtnl_link_ops vxlan_link_ops; 60 61 static int vxlan_sock_add(struct vxlan_dev *vxlan); 62 63 static void vxlan_vs_del_dev(struct vxlan_dev *vxlan); 64 65 /* salt for hash table */ 66 static u32 vxlan_salt __read_mostly; 67 68 static inline bool vxlan_collect_metadata(struct vxlan_sock *vs) 69 { 70 return vs->flags & VXLAN_F_COLLECT_METADATA || 71 ip_tunnel_collect_metadata(); 72 } 73 74 /* Find VXLAN socket based on network namespace, address family, UDP port, 75 * enabled unshareable flags and socket device binding (see l3mdev with 76 * non-default VRF). 
77 */ 78 static struct vxlan_sock *vxlan_find_sock(struct net *net, sa_family_t family, 79 __be16 port, u32 flags, int ifindex) 80 { 81 struct vxlan_sock *vs; 82 83 flags &= VXLAN_F_RCV_FLAGS; 84 85 hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) { 86 if (inet_sk(vs->sock->sk)->inet_sport == port && 87 vxlan_get_sk_family(vs) == family && 88 vs->flags == flags && 89 vs->sock->sk->sk_bound_dev_if == ifindex) 90 return vs; 91 } 92 return NULL; 93 } 94 95 static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, 96 int ifindex, __be32 vni, 97 struct vxlan_vni_node **vninode) 98 { 99 struct vxlan_vni_node *vnode; 100 struct vxlan_dev_node *node; 101 102 /* For flow based devices, map all packets to VNI 0 */ 103 if (vs->flags & VXLAN_F_COLLECT_METADATA && 104 !(vs->flags & VXLAN_F_VNIFILTER)) 105 vni = 0; 106 107 hlist_for_each_entry_rcu(node, vni_head(vs, vni), hlist) { 108 if (!node->vxlan) 109 continue; 110 vnode = NULL; 111 if (node->vxlan->cfg.flags & VXLAN_F_VNIFILTER) { 112 vnode = vxlan_vnifilter_lookup(node->vxlan, vni); 113 if (!vnode) 114 continue; 115 } else if (node->vxlan->default_dst.remote_vni != vni) { 116 continue; 117 } 118 119 if (IS_ENABLED(CONFIG_IPV6)) { 120 const struct vxlan_config *cfg = &node->vxlan->cfg; 121 122 if ((cfg->flags & VXLAN_F_IPV6_LINKLOCAL) && 123 cfg->remote_ifindex != ifindex) 124 continue; 125 } 126 127 if (vninode) 128 *vninode = vnode; 129 return node->vxlan; 130 } 131 132 return NULL; 133 } 134 135 /* Look up VNI in a per net namespace table */ 136 static struct vxlan_dev *vxlan_find_vni(struct net *net, int ifindex, 137 __be32 vni, sa_family_t family, 138 __be16 port, u32 flags) 139 { 140 struct vxlan_sock *vs; 141 142 vs = vxlan_find_sock(net, family, port, flags, ifindex); 143 if (!vs) 144 return NULL; 145 146 return vxlan_vs_find_vni(vs, ifindex, vni, NULL); 147 } 148 149 /* Fill in neighbour message in skbuff. */ 150 static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan, 151 const struct vxlan_fdb *fdb, 152 u32 portid, u32 seq, int type, unsigned int flags, 153 const struct vxlan_rdst *rdst) 154 { 155 unsigned long now = jiffies; 156 struct nda_cacheinfo ci; 157 bool send_ip, send_eth; 158 struct nlmsghdr *nlh; 159 struct nexthop *nh; 160 struct ndmsg *ndm; 161 int nh_family; 162 u32 nh_id; 163 164 nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags); 165 if (nlh == NULL) 166 return -EMSGSIZE; 167 168 ndm = nlmsg_data(nlh); 169 memset(ndm, 0, sizeof(*ndm)); 170 171 send_eth = send_ip = true; 172 173 rcu_read_lock(); 174 nh = rcu_dereference(fdb->nh); 175 if (nh) { 176 nh_family = nexthop_get_family(nh); 177 nh_id = nh->id; 178 } 179 rcu_read_unlock(); 180 181 if (type == RTM_GETNEIGH) { 182 if (rdst) { 183 send_ip = !vxlan_addr_any(&rdst->remote_ip); 184 ndm->ndm_family = send_ip ? 
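/* Editor's note, added for readability (not upstream text): RTM_GETNEIGH
 * is only used here for the l2miss/l3miss notifications generated by
 * vxlan_fdb_miss() and vxlan_ip_miss() further down, so ndm_family
 * mirrors the missed destination's address family (falling back to
 * AF_INET when no remote IP is known).  Ordinary add/delete
 * notifications and dumps use AF_BRIDGE, which is what bridge(8) FDB
 * listeners expect.
 */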
rdst->remote_ip.sa.sa_family : AF_INET; 185 } else if (nh) { 186 ndm->ndm_family = nh_family; 187 } 188 send_eth = !is_zero_ether_addr(fdb->eth_addr); 189 } else 190 ndm->ndm_family = AF_BRIDGE; 191 ndm->ndm_state = fdb->state; 192 ndm->ndm_ifindex = vxlan->dev->ifindex; 193 ndm->ndm_flags = fdb->flags; 194 if (rdst && rdst->offloaded) 195 ndm->ndm_flags |= NTF_OFFLOADED; 196 ndm->ndm_type = RTN_UNICAST; 197 198 if (!net_eq(dev_net(vxlan->dev), vxlan->net) && 199 nla_put_s32(skb, NDA_LINK_NETNSID, 200 peernet2id(dev_net(vxlan->dev), vxlan->net))) 201 goto nla_put_failure; 202 203 if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr)) 204 goto nla_put_failure; 205 if (nh) { 206 if (nla_put_u32(skb, NDA_NH_ID, nh_id)) 207 goto nla_put_failure; 208 } else if (rdst) { 209 if (send_ip && vxlan_nla_put_addr(skb, NDA_DST, 210 &rdst->remote_ip)) 211 goto nla_put_failure; 212 213 if (rdst->remote_port && 214 rdst->remote_port != vxlan->cfg.dst_port && 215 nla_put_be16(skb, NDA_PORT, rdst->remote_port)) 216 goto nla_put_failure; 217 if (rdst->remote_vni != vxlan->default_dst.remote_vni && 218 nla_put_u32(skb, NDA_VNI, be32_to_cpu(rdst->remote_vni))) 219 goto nla_put_failure; 220 if (rdst->remote_ifindex && 221 nla_put_u32(skb, NDA_IFINDEX, rdst->remote_ifindex)) 222 goto nla_put_failure; 223 } 224 225 if ((vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) && fdb->vni && 226 nla_put_u32(skb, NDA_SRC_VNI, 227 be32_to_cpu(fdb->vni))) 228 goto nla_put_failure; 229 230 ci.ndm_used = jiffies_to_clock_t(now - fdb->used); 231 ci.ndm_confirmed = 0; 232 ci.ndm_updated = jiffies_to_clock_t(now - fdb->updated); 233 ci.ndm_refcnt = 0; 234 235 if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci)) 236 goto nla_put_failure; 237 238 nlmsg_end(skb, nlh); 239 return 0; 240 241 nla_put_failure: 242 nlmsg_cancel(skb, nlh); 243 return -EMSGSIZE; 244 } 245 246 static inline size_t vxlan_nlmsg_size(void) 247 { 248 return NLMSG_ALIGN(sizeof(struct ndmsg)) 249 + nla_total_size(ETH_ALEN) /* NDA_LLADDR */ 250 + nla_total_size(sizeof(struct in6_addr)) /* NDA_DST */ 251 + nla_total_size(sizeof(__be16)) /* NDA_PORT */ 252 + nla_total_size(sizeof(__be32)) /* NDA_VNI */ 253 + nla_total_size(sizeof(__u32)) /* NDA_IFINDEX */ 254 + nla_total_size(sizeof(__s32)) /* NDA_LINK_NETNSID */ 255 + nla_total_size(sizeof(struct nda_cacheinfo)); 256 } 257 258 static void __vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb, 259 struct vxlan_rdst *rd, int type) 260 { 261 struct net *net = dev_net(vxlan->dev); 262 struct sk_buff *skb; 263 int err = -ENOBUFS; 264 265 skb = nlmsg_new(vxlan_nlmsg_size(), GFP_ATOMIC); 266 if (skb == NULL) 267 goto errout; 268 269 err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, rd); 270 if (err < 0) { 271 /* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */ 272 WARN_ON(err == -EMSGSIZE); 273 kfree_skb(skb); 274 goto errout; 275 } 276 277 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC); 278 return; 279 errout: 280 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err); 281 } 282 283 static void vxlan_fdb_switchdev_notifier_info(const struct vxlan_dev *vxlan, 284 const struct vxlan_fdb *fdb, 285 const struct vxlan_rdst *rd, 286 struct netlink_ext_ack *extack, 287 struct switchdev_notifier_vxlan_fdb_info *fdb_info) 288 { 289 fdb_info->info.dev = vxlan->dev; 290 fdb_info->info.extack = extack; 291 fdb_info->remote_ip = rd->remote_ip; 292 fdb_info->remote_port = rd->remote_port; 293 fdb_info->remote_vni = rd->remote_vni; 294 fdb_info->remote_ifindex = rd->remote_ifindex; 295 memcpy(fdb_info->eth_addr, 
fdb->eth_addr, ETH_ALEN); 296 fdb_info->vni = fdb->vni; 297 fdb_info->offloaded = rd->offloaded; 298 fdb_info->added_by_user = fdb->flags & NTF_VXLAN_ADDED_BY_USER; 299 } 300 301 static int vxlan_fdb_switchdev_call_notifiers(struct vxlan_dev *vxlan, 302 struct vxlan_fdb *fdb, 303 struct vxlan_rdst *rd, 304 bool adding, 305 struct netlink_ext_ack *extack) 306 { 307 struct switchdev_notifier_vxlan_fdb_info info; 308 enum switchdev_notifier_type notifier_type; 309 int ret; 310 311 if (WARN_ON(!rd)) 312 return 0; 313 314 notifier_type = adding ? SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE 315 : SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE; 316 vxlan_fdb_switchdev_notifier_info(vxlan, fdb, rd, NULL, &info); 317 ret = call_switchdev_notifiers(notifier_type, vxlan->dev, 318 &info.info, extack); 319 return notifier_to_errno(ret); 320 } 321 322 static int vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb, 323 struct vxlan_rdst *rd, int type, bool swdev_notify, 324 struct netlink_ext_ack *extack) 325 { 326 int err; 327 328 if (swdev_notify && rd) { 329 switch (type) { 330 case RTM_NEWNEIGH: 331 err = vxlan_fdb_switchdev_call_notifiers(vxlan, fdb, rd, 332 true, extack); 333 if (err) 334 return err; 335 break; 336 case RTM_DELNEIGH: 337 vxlan_fdb_switchdev_call_notifiers(vxlan, fdb, rd, 338 false, extack); 339 break; 340 } 341 } 342 343 __vxlan_fdb_notify(vxlan, fdb, rd, type); 344 return 0; 345 } 346 347 static void vxlan_ip_miss(struct net_device *dev, union vxlan_addr *ipa) 348 { 349 struct vxlan_dev *vxlan = netdev_priv(dev); 350 struct vxlan_fdb f = { 351 .state = NUD_STALE, 352 }; 353 struct vxlan_rdst remote = { 354 .remote_ip = *ipa, /* goes to NDA_DST */ 355 .remote_vni = cpu_to_be32(VXLAN_N_VID), 356 }; 357 358 vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH, true, NULL); 359 } 360 361 static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN]) 362 { 363 struct vxlan_fdb f = { 364 .state = NUD_STALE, 365 }; 366 struct vxlan_rdst remote = { }; 367 368 memcpy(f.eth_addr, eth_addr, ETH_ALEN); 369 370 vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH, true, NULL); 371 } 372 373 /* Hash Ethernet address */ 374 static u32 eth_hash(const unsigned char *addr) 375 { 376 u64 value = get_unaligned((u64 *)addr); 377 378 /* only want 6 bytes */ 379 #ifdef __BIG_ENDIAN 380 value >>= 16; 381 #else 382 value <<= 16; 383 #endif 384 return hash_64(value, FDB_HASH_BITS); 385 } 386 387 u32 eth_vni_hash(const unsigned char *addr, __be32 vni) 388 { 389 /* use 1 byte of OUI and 3 bytes of NIC */ 390 u32 key = get_unaligned((u32 *)(addr + 2)); 391 392 return jhash_2words(key, vni, vxlan_salt) & (FDB_HASH_SIZE - 1); 393 } 394 395 u32 fdb_head_index(struct vxlan_dev *vxlan, const u8 *mac, __be32 vni) 396 { 397 if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) 398 return eth_vni_hash(mac, vni); 399 else 400 return eth_hash(mac); 401 } 402 403 /* Hash chain to use given mac address */ 404 static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan, 405 const u8 *mac, __be32 vni) 406 { 407 return &vxlan->fdb_head[fdb_head_index(vxlan, mac, vni)]; 408 } 409 410 /* Look up Ethernet address in forwarding table */ 411 static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan, 412 const u8 *mac, __be32 vni) 413 { 414 struct hlist_head *head = vxlan_fdb_head(vxlan, mac, vni); 415 struct vxlan_fdb *f; 416 417 hlist_for_each_entry_rcu(f, head, hlist) { 418 if (ether_addr_equal(mac, f->eth_addr)) { 419 if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) { 420 if (vni == f->vni) 421 return f; 422 } 
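/* Editor's note, added for readability (not upstream text): the FDB is a
 * fixed array of FDB_HASH_SIZE hash chains.  Without
 * VXLAN_F_COLLECT_METADATA the key is the MAC alone (eth_hash() folds
 * the six address bytes through hash_64()), so the VNI comparison is
 * skipped and a MAC matches at most one entry.  Metadata (external)
 * devices key on (MAC, source VNI) via eth_vni_hash(), so for example
 * 52:54:00:12:34:56 learned on VNI 100 and on VNI 200 gives two
 * distinct entries.
 */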
else { 423 return f; 424 } 425 } 426 } 427 428 return NULL; 429 } 430 431 static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan, 432 const u8 *mac, __be32 vni) 433 { 434 struct vxlan_fdb *f; 435 436 f = __vxlan_find_mac(vxlan, mac, vni); 437 if (f && f->used != jiffies) 438 f->used = jiffies; 439 440 return f; 441 } 442 443 /* caller should hold vxlan->hash_lock */ 444 static struct vxlan_rdst *vxlan_fdb_find_rdst(struct vxlan_fdb *f, 445 union vxlan_addr *ip, __be16 port, 446 __be32 vni, __u32 ifindex) 447 { 448 struct vxlan_rdst *rd; 449 450 list_for_each_entry(rd, &f->remotes, list) { 451 if (vxlan_addr_equal(&rd->remote_ip, ip) && 452 rd->remote_port == port && 453 rd->remote_vni == vni && 454 rd->remote_ifindex == ifindex) 455 return rd; 456 } 457 458 return NULL; 459 } 460 461 int vxlan_fdb_find_uc(struct net_device *dev, const u8 *mac, __be32 vni, 462 struct switchdev_notifier_vxlan_fdb_info *fdb_info) 463 { 464 struct vxlan_dev *vxlan = netdev_priv(dev); 465 u8 eth_addr[ETH_ALEN + 2] = { 0 }; 466 struct vxlan_rdst *rdst; 467 struct vxlan_fdb *f; 468 int rc = 0; 469 470 if (is_multicast_ether_addr(mac) || 471 is_zero_ether_addr(mac)) 472 return -EINVAL; 473 474 ether_addr_copy(eth_addr, mac); 475 476 rcu_read_lock(); 477 478 f = __vxlan_find_mac(vxlan, eth_addr, vni); 479 if (!f) { 480 rc = -ENOENT; 481 goto out; 482 } 483 484 rdst = first_remote_rcu(f); 485 vxlan_fdb_switchdev_notifier_info(vxlan, f, rdst, NULL, fdb_info); 486 487 out: 488 rcu_read_unlock(); 489 return rc; 490 } 491 EXPORT_SYMBOL_GPL(vxlan_fdb_find_uc); 492 493 static int vxlan_fdb_notify_one(struct notifier_block *nb, 494 const struct vxlan_dev *vxlan, 495 const struct vxlan_fdb *f, 496 const struct vxlan_rdst *rdst, 497 struct netlink_ext_ack *extack) 498 { 499 struct switchdev_notifier_vxlan_fdb_info fdb_info; 500 int rc; 501 502 vxlan_fdb_switchdev_notifier_info(vxlan, f, rdst, extack, &fdb_info); 503 rc = nb->notifier_call(nb, SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE, 504 &fdb_info); 505 return notifier_to_errno(rc); 506 } 507 508 int vxlan_fdb_replay(const struct net_device *dev, __be32 vni, 509 struct notifier_block *nb, 510 struct netlink_ext_ack *extack) 511 { 512 struct vxlan_dev *vxlan; 513 struct vxlan_rdst *rdst; 514 struct vxlan_fdb *f; 515 unsigned int h; 516 int rc = 0; 517 518 if (!netif_is_vxlan(dev)) 519 return -EINVAL; 520 vxlan = netdev_priv(dev); 521 522 for (h = 0; h < FDB_HASH_SIZE; ++h) { 523 spin_lock_bh(&vxlan->hash_lock[h]); 524 hlist_for_each_entry(f, &vxlan->fdb_head[h], hlist) { 525 if (f->vni == vni) { 526 list_for_each_entry(rdst, &f->remotes, list) { 527 rc = vxlan_fdb_notify_one(nb, vxlan, 528 f, rdst, 529 extack); 530 if (rc) 531 goto unlock; 532 } 533 } 534 } 535 spin_unlock_bh(&vxlan->hash_lock[h]); 536 } 537 return 0; 538 539 unlock: 540 spin_unlock_bh(&vxlan->hash_lock[h]); 541 return rc; 542 } 543 EXPORT_SYMBOL_GPL(vxlan_fdb_replay); 544 545 void vxlan_fdb_clear_offload(const struct net_device *dev, __be32 vni) 546 { 547 struct vxlan_dev *vxlan; 548 struct vxlan_rdst *rdst; 549 struct vxlan_fdb *f; 550 unsigned int h; 551 552 if (!netif_is_vxlan(dev)) 553 return; 554 vxlan = netdev_priv(dev); 555 556 for (h = 0; h < FDB_HASH_SIZE; ++h) { 557 spin_lock_bh(&vxlan->hash_lock[h]); 558 hlist_for_each_entry(f, &vxlan->fdb_head[h], hlist) 559 if (f->vni == vni) 560 list_for_each_entry(rdst, &f->remotes, list) 561 rdst->offloaded = false; 562 spin_unlock_bh(&vxlan->hash_lock[h]); 563 } 564 565 } 566 EXPORT_SYMBOL_GPL(vxlan_fdb_clear_offload); 567 568 /* Replace destination of 
unicast mac */ 569 static int vxlan_fdb_replace(struct vxlan_fdb *f, 570 union vxlan_addr *ip, __be16 port, __be32 vni, 571 __u32 ifindex, struct vxlan_rdst *oldrd) 572 { 573 struct vxlan_rdst *rd; 574 575 rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex); 576 if (rd) 577 return 0; 578 579 rd = list_first_entry_or_null(&f->remotes, struct vxlan_rdst, list); 580 if (!rd) 581 return 0; 582 583 *oldrd = *rd; 584 dst_cache_reset(&rd->dst_cache); 585 rd->remote_ip = *ip; 586 rd->remote_port = port; 587 rd->remote_vni = vni; 588 rd->remote_ifindex = ifindex; 589 rd->offloaded = false; 590 return 1; 591 } 592 593 /* Add/update destinations for multicast */ 594 static int vxlan_fdb_append(struct vxlan_fdb *f, 595 union vxlan_addr *ip, __be16 port, __be32 vni, 596 __u32 ifindex, struct vxlan_rdst **rdp) 597 { 598 struct vxlan_rdst *rd; 599 600 rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex); 601 if (rd) 602 return 0; 603 604 rd = kmalloc(sizeof(*rd), GFP_ATOMIC); 605 if (rd == NULL) 606 return -ENOMEM; 607 608 if (dst_cache_init(&rd->dst_cache, GFP_ATOMIC)) { 609 kfree(rd); 610 return -ENOMEM; 611 } 612 613 rd->remote_ip = *ip; 614 rd->remote_port = port; 615 rd->offloaded = false; 616 rd->remote_vni = vni; 617 rd->remote_ifindex = ifindex; 618 619 list_add_tail_rcu(&rd->list, &f->remotes); 620 621 *rdp = rd; 622 return 1; 623 } 624 625 static bool vxlan_parse_gpe_proto(const struct vxlanhdr *hdr, __be16 *protocol) 626 { 627 const struct vxlanhdr_gpe *gpe = (const struct vxlanhdr_gpe *)hdr; 628 629 /* Need to have Next Protocol set for interfaces in GPE mode. */ 630 if (!gpe->np_applied) 631 return false; 632 /* "The initial version is 0. If a receiver does not support the 633 * version indicated it MUST drop the packet. 634 */ 635 if (gpe->version != 0) 636 return false; 637 /* "When the O bit is set to 1, the packet is an OAM packet and OAM 638 * processing MUST occur." However, we don't implement OAM 639 * processing, thus drop the packet. 
640 */ 641 if (gpe->oam_flag) 642 return false; 643 644 *protocol = tun_p_to_eth_p(gpe->next_protocol); 645 if (!*protocol) 646 return false; 647 648 return true; 649 } 650 651 static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb, 652 unsigned int off, 653 struct vxlanhdr *vh, size_t hdrlen, 654 __be32 vni_field, 655 struct gro_remcsum *grc, 656 bool nopartial) 657 { 658 size_t start, offset; 659 660 if (skb->remcsum_offload) 661 return vh; 662 663 if (!NAPI_GRO_CB(skb)->csum_valid) 664 return NULL; 665 666 start = vxlan_rco_start(vni_field); 667 offset = start + vxlan_rco_offset(vni_field); 668 669 vh = skb_gro_remcsum_process(skb, (void *)vh, off, hdrlen, 670 start, offset, grc, nopartial); 671 672 skb->remcsum_offload = 1; 673 674 return vh; 675 } 676 677 static struct vxlanhdr *vxlan_gro_prepare_receive(struct sock *sk, 678 struct list_head *head, 679 struct sk_buff *skb, 680 struct gro_remcsum *grc) 681 { 682 struct sk_buff *p; 683 struct vxlanhdr *vh, *vh2; 684 unsigned int hlen, off_vx; 685 struct vxlan_sock *vs = rcu_dereference_sk_user_data(sk); 686 __be32 flags; 687 688 skb_gro_remcsum_init(grc); 689 690 off_vx = skb_gro_offset(skb); 691 hlen = off_vx + sizeof(*vh); 692 vh = skb_gro_header(skb, hlen, off_vx); 693 if (unlikely(!vh)) 694 return NULL; 695 696 skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr)); 697 698 flags = vh->vx_flags; 699 700 if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) { 701 vh = vxlan_gro_remcsum(skb, off_vx, vh, sizeof(struct vxlanhdr), 702 vh->vx_vni, grc, 703 !!(vs->flags & 704 VXLAN_F_REMCSUM_NOPARTIAL)); 705 706 if (!vh) 707 return NULL; 708 } 709 710 skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */ 711 712 list_for_each_entry(p, head, list) { 713 if (!NAPI_GRO_CB(p)->same_flow) 714 continue; 715 716 vh2 = (struct vxlanhdr *)(p->data + off_vx); 717 if (vh->vx_flags != vh2->vx_flags || 718 vh->vx_vni != vh2->vx_vni) { 719 NAPI_GRO_CB(p)->same_flow = 0; 720 continue; 721 } 722 } 723 724 return vh; 725 } 726 727 static struct sk_buff *vxlan_gro_receive(struct sock *sk, 728 struct list_head *head, 729 struct sk_buff *skb) 730 { 731 struct sk_buff *pp = NULL; 732 struct gro_remcsum grc; 733 int flush = 1; 734 735 if (vxlan_gro_prepare_receive(sk, head, skb, &grc)) { 736 pp = call_gro_receive(eth_gro_receive, head, skb); 737 flush = 0; 738 } 739 skb_gro_flush_final_remcsum(skb, pp, flush, &grc); 740 return pp; 741 } 742 743 static struct sk_buff *vxlan_gpe_gro_receive(struct sock *sk, 744 struct list_head *head, 745 struct sk_buff *skb) 746 { 747 const struct packet_offload *ptype; 748 struct sk_buff *pp = NULL; 749 struct gro_remcsum grc; 750 struct vxlanhdr *vh; 751 __be16 protocol; 752 int flush = 1; 753 754 vh = vxlan_gro_prepare_receive(sk, head, skb, &grc); 755 if (vh) { 756 if (!vxlan_parse_gpe_proto(vh, &protocol)) 757 goto out; 758 ptype = gro_find_receive_by_type(protocol); 759 if (!ptype) 760 goto out; 761 pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb); 762 flush = 0; 763 } 764 out: 765 skb_gro_flush_final_remcsum(skb, pp, flush, &grc); 766 return pp; 767 } 768 769 static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff) 770 { 771 /* Sets 'skb->inner_mac_header' since we are always called with 772 * 'skb->encapsulation' set. 
773 */ 774 return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr)); 775 } 776 777 static int vxlan_gpe_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff) 778 { 779 struct vxlanhdr *vh = (struct vxlanhdr *)(skb->data + nhoff); 780 const struct packet_offload *ptype; 781 int err = -ENOSYS; 782 __be16 protocol; 783 784 if (!vxlan_parse_gpe_proto(vh, &protocol)) 785 return err; 786 ptype = gro_find_complete_by_type(protocol); 787 if (ptype) 788 err = ptype->callbacks.gro_complete(skb, nhoff + sizeof(struct vxlanhdr)); 789 return err; 790 } 791 792 static struct vxlan_fdb *vxlan_fdb_alloc(struct vxlan_dev *vxlan, const u8 *mac, 793 __u16 state, __be32 src_vni, 794 __u16 ndm_flags) 795 { 796 struct vxlan_fdb *f; 797 798 f = kmalloc(sizeof(*f), GFP_ATOMIC); 799 if (!f) 800 return NULL; 801 f->state = state; 802 f->flags = ndm_flags; 803 f->updated = f->used = jiffies; 804 f->vni = src_vni; 805 f->nh = NULL; 806 RCU_INIT_POINTER(f->vdev, vxlan); 807 INIT_LIST_HEAD(&f->nh_list); 808 INIT_LIST_HEAD(&f->remotes); 809 memcpy(f->eth_addr, mac, ETH_ALEN); 810 811 return f; 812 } 813 814 static void vxlan_fdb_insert(struct vxlan_dev *vxlan, const u8 *mac, 815 __be32 src_vni, struct vxlan_fdb *f) 816 { 817 ++vxlan->addrcnt; 818 hlist_add_head_rcu(&f->hlist, 819 vxlan_fdb_head(vxlan, mac, src_vni)); 820 } 821 822 static int vxlan_fdb_nh_update(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb, 823 u32 nhid, struct netlink_ext_ack *extack) 824 { 825 struct nexthop *old_nh = rtnl_dereference(fdb->nh); 826 struct nexthop *nh; 827 int err = -EINVAL; 828 829 if (old_nh && old_nh->id == nhid) 830 return 0; 831 832 nh = nexthop_find_by_id(vxlan->net, nhid); 833 if (!nh) { 834 NL_SET_ERR_MSG(extack, "Nexthop id does not exist"); 835 goto err_inval; 836 } 837 838 if (!nexthop_get(nh)) { 839 NL_SET_ERR_MSG(extack, "Nexthop has been deleted"); 840 nh = NULL; 841 goto err_inval; 842 } 843 if (!nexthop_is_fdb(nh)) { 844 NL_SET_ERR_MSG(extack, "Nexthop is not a fdb nexthop"); 845 goto err_inval; 846 } 847 848 if (!nexthop_is_multipath(nh)) { 849 NL_SET_ERR_MSG(extack, "Nexthop is not a multipath group"); 850 goto err_inval; 851 } 852 853 /* check nexthop group family */ 854 switch (vxlan->default_dst.remote_ip.sa.sa_family) { 855 case AF_INET: 856 if (!nexthop_has_v4(nh)) { 857 err = -EAFNOSUPPORT; 858 NL_SET_ERR_MSG(extack, "Nexthop group family not supported"); 859 goto err_inval; 860 } 861 break; 862 case AF_INET6: 863 if (nexthop_has_v4(nh)) { 864 err = -EAFNOSUPPORT; 865 NL_SET_ERR_MSG(extack, "Nexthop group family not supported"); 866 goto err_inval; 867 } 868 } 869 870 if (old_nh) { 871 list_del_rcu(&fdb->nh_list); 872 nexthop_put(old_nh); 873 } 874 rcu_assign_pointer(fdb->nh, nh); 875 list_add_tail_rcu(&fdb->nh_list, &nh->fdb_list); 876 return 1; 877 878 err_inval: 879 if (nh) 880 nexthop_put(nh); 881 return err; 882 } 883 884 int vxlan_fdb_create(struct vxlan_dev *vxlan, 885 const u8 *mac, union vxlan_addr *ip, 886 __u16 state, __be16 port, __be32 src_vni, 887 __be32 vni, __u32 ifindex, __u16 ndm_flags, 888 u32 nhid, struct vxlan_fdb **fdb, 889 struct netlink_ext_ack *extack) 890 { 891 struct vxlan_rdst *rd = NULL; 892 struct vxlan_fdb *f; 893 int rc; 894 895 if (vxlan->cfg.addrmax && 896 vxlan->addrcnt >= vxlan->cfg.addrmax) 897 return -ENOSPC; 898 899 netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip); 900 f = vxlan_fdb_alloc(vxlan, mac, state, src_vni, ndm_flags); 901 if (!f) 902 return -ENOMEM; 903 904 if (nhid) 905 rc = vxlan_fdb_nh_update(vxlan, f, nhid, extack); 906 else 907 rc = 
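/* Editor's note (not upstream text): a newly allocated entry is bound
 * either to an FDB nexthop group (nhid != 0, handled above) or to a
 * single remote appended to f->remotes by the vxlan_fdb_append() call
 * that follows; the append returns 1 on success.  The entry is not yet
 * reachable at this point, it is only linked into the hash table later
 * by vxlan_fdb_insert(), e.g. from vxlan_fdb_update_create() below.
 */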
vxlan_fdb_append(f, ip, port, vni, ifindex, &rd); 908 if (rc < 0) 909 goto errout; 910 911 *fdb = f; 912 913 return 0; 914 915 errout: 916 kfree(f); 917 return rc; 918 } 919 920 static void __vxlan_fdb_free(struct vxlan_fdb *f) 921 { 922 struct vxlan_rdst *rd, *nd; 923 struct nexthop *nh; 924 925 nh = rcu_dereference_raw(f->nh); 926 if (nh) { 927 rcu_assign_pointer(f->nh, NULL); 928 rcu_assign_pointer(f->vdev, NULL); 929 nexthop_put(nh); 930 } 931 932 list_for_each_entry_safe(rd, nd, &f->remotes, list) { 933 dst_cache_destroy(&rd->dst_cache); 934 kfree(rd); 935 } 936 kfree(f); 937 } 938 939 static void vxlan_fdb_free(struct rcu_head *head) 940 { 941 struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu); 942 943 __vxlan_fdb_free(f); 944 } 945 946 static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f, 947 bool do_notify, bool swdev_notify) 948 { 949 struct vxlan_rdst *rd; 950 951 netdev_dbg(vxlan->dev, "delete %pM\n", f->eth_addr); 952 953 --vxlan->addrcnt; 954 if (do_notify) { 955 if (rcu_access_pointer(f->nh)) 956 vxlan_fdb_notify(vxlan, f, NULL, RTM_DELNEIGH, 957 swdev_notify, NULL); 958 else 959 list_for_each_entry(rd, &f->remotes, list) 960 vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH, 961 swdev_notify, NULL); 962 } 963 964 hlist_del_rcu(&f->hlist); 965 list_del_rcu(&f->nh_list); 966 call_rcu(&f->rcu, vxlan_fdb_free); 967 } 968 969 static void vxlan_dst_free(struct rcu_head *head) 970 { 971 struct vxlan_rdst *rd = container_of(head, struct vxlan_rdst, rcu); 972 973 dst_cache_destroy(&rd->dst_cache); 974 kfree(rd); 975 } 976 977 static int vxlan_fdb_update_existing(struct vxlan_dev *vxlan, 978 union vxlan_addr *ip, 979 __u16 state, __u16 flags, 980 __be16 port, __be32 vni, 981 __u32 ifindex, __u16 ndm_flags, 982 struct vxlan_fdb *f, u32 nhid, 983 bool swdev_notify, 984 struct netlink_ext_ack *extack) 985 { 986 __u16 fdb_flags = (ndm_flags & ~NTF_USE); 987 struct vxlan_rdst *rd = NULL; 988 struct vxlan_rdst oldrd; 989 int notify = 0; 990 int rc = 0; 991 int err; 992 993 if (nhid && !rcu_access_pointer(f->nh)) { 994 NL_SET_ERR_MSG(extack, 995 "Cannot replace an existing non nexthop fdb with a nexthop"); 996 return -EOPNOTSUPP; 997 } 998 999 if (nhid && (flags & NLM_F_APPEND)) { 1000 NL_SET_ERR_MSG(extack, 1001 "Cannot append to a nexthop fdb"); 1002 return -EOPNOTSUPP; 1003 } 1004 1005 /* Do not allow an externally learned entry to take over an entry added 1006 * by the user. 
1007 */ 1008 if (!(fdb_flags & NTF_EXT_LEARNED) || 1009 !(f->flags & NTF_VXLAN_ADDED_BY_USER)) { 1010 if (f->state != state) { 1011 f->state = state; 1012 f->updated = jiffies; 1013 notify = 1; 1014 } 1015 if (f->flags != fdb_flags) { 1016 f->flags = fdb_flags; 1017 f->updated = jiffies; 1018 notify = 1; 1019 } 1020 } 1021 1022 if ((flags & NLM_F_REPLACE)) { 1023 /* Only change unicasts */ 1024 if (!(is_multicast_ether_addr(f->eth_addr) || 1025 is_zero_ether_addr(f->eth_addr))) { 1026 if (nhid) { 1027 rc = vxlan_fdb_nh_update(vxlan, f, nhid, extack); 1028 if (rc < 0) 1029 return rc; 1030 } else { 1031 rc = vxlan_fdb_replace(f, ip, port, vni, 1032 ifindex, &oldrd); 1033 } 1034 notify |= rc; 1035 } else { 1036 NL_SET_ERR_MSG(extack, "Cannot replace non-unicast fdb entries"); 1037 return -EOPNOTSUPP; 1038 } 1039 } 1040 if ((flags & NLM_F_APPEND) && 1041 (is_multicast_ether_addr(f->eth_addr) || 1042 is_zero_ether_addr(f->eth_addr))) { 1043 rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd); 1044 1045 if (rc < 0) 1046 return rc; 1047 notify |= rc; 1048 } 1049 1050 if (ndm_flags & NTF_USE) 1051 f->used = jiffies; 1052 1053 if (notify) { 1054 if (rd == NULL) 1055 rd = first_remote_rtnl(f); 1056 1057 err = vxlan_fdb_notify(vxlan, f, rd, RTM_NEWNEIGH, 1058 swdev_notify, extack); 1059 if (err) 1060 goto err_notify; 1061 } 1062 1063 return 0; 1064 1065 err_notify: 1066 if (nhid) 1067 return err; 1068 if ((flags & NLM_F_REPLACE) && rc) 1069 *rd = oldrd; 1070 else if ((flags & NLM_F_APPEND) && rc) { 1071 list_del_rcu(&rd->list); 1072 call_rcu(&rd->rcu, vxlan_dst_free); 1073 } 1074 return err; 1075 } 1076 1077 static int vxlan_fdb_update_create(struct vxlan_dev *vxlan, 1078 const u8 *mac, union vxlan_addr *ip, 1079 __u16 state, __u16 flags, 1080 __be16 port, __be32 src_vni, __be32 vni, 1081 __u32 ifindex, __u16 ndm_flags, u32 nhid, 1082 bool swdev_notify, 1083 struct netlink_ext_ack *extack) 1084 { 1085 __u16 fdb_flags = (ndm_flags & ~NTF_USE); 1086 struct vxlan_fdb *f; 1087 int rc; 1088 1089 /* Disallow replace to add a multicast entry */ 1090 if ((flags & NLM_F_REPLACE) && 1091 (is_multicast_ether_addr(mac) || is_zero_ether_addr(mac))) 1092 return -EOPNOTSUPP; 1093 1094 netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip); 1095 rc = vxlan_fdb_create(vxlan, mac, ip, state, port, src_vni, 1096 vni, ifindex, fdb_flags, nhid, &f, extack); 1097 if (rc < 0) 1098 return rc; 1099 1100 vxlan_fdb_insert(vxlan, mac, src_vni, f); 1101 rc = vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH, 1102 swdev_notify, extack); 1103 if (rc) 1104 goto err_notify; 1105 1106 return 0; 1107 1108 err_notify: 1109 vxlan_fdb_destroy(vxlan, f, false, false); 1110 return rc; 1111 } 1112 1113 /* Add new entry to forwarding table -- assumes lock held */ 1114 int vxlan_fdb_update(struct vxlan_dev *vxlan, 1115 const u8 *mac, union vxlan_addr *ip, 1116 __u16 state, __u16 flags, 1117 __be16 port, __be32 src_vni, __be32 vni, 1118 __u32 ifindex, __u16 ndm_flags, u32 nhid, 1119 bool swdev_notify, 1120 struct netlink_ext_ack *extack) 1121 { 1122 struct vxlan_fdb *f; 1123 1124 f = __vxlan_find_mac(vxlan, mac, src_vni); 1125 if (f) { 1126 if (flags & NLM_F_EXCL) { 1127 netdev_dbg(vxlan->dev, 1128 "lost race to create %pM\n", mac); 1129 return -EEXIST; 1130 } 1131 1132 return vxlan_fdb_update_existing(vxlan, ip, state, flags, port, 1133 vni, ifindex, ndm_flags, f, 1134 nhid, swdev_notify, extack); 1135 } else { 1136 if (!(flags & NLM_F_CREATE)) 1137 return -ENOENT; 1138 1139 return vxlan_fdb_update_create(vxlan, mac, ip, state, 
flags, 1140 port, src_vni, vni, ifindex, 1141 ndm_flags, nhid, swdev_notify, 1142 extack); 1143 } 1144 } 1145 1146 static void vxlan_fdb_dst_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f, 1147 struct vxlan_rdst *rd, bool swdev_notify) 1148 { 1149 list_del_rcu(&rd->list); 1150 vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH, swdev_notify, NULL); 1151 call_rcu(&rd->rcu, vxlan_dst_free); 1152 } 1153 1154 static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan, 1155 union vxlan_addr *ip, __be16 *port, __be32 *src_vni, 1156 __be32 *vni, u32 *ifindex, u32 *nhid, 1157 struct netlink_ext_ack *extack) 1158 { 1159 struct net *net = dev_net(vxlan->dev); 1160 int err; 1161 1162 if (tb[NDA_NH_ID] && 1163 (tb[NDA_DST] || tb[NDA_VNI] || tb[NDA_IFINDEX] || tb[NDA_PORT])) { 1164 NL_SET_ERR_MSG(extack, "DST, VNI, ifindex and port are mutually exclusive with NH_ID"); 1165 return -EINVAL; 1166 } 1167 1168 if (tb[NDA_DST]) { 1169 err = vxlan_nla_get_addr(ip, tb[NDA_DST]); 1170 if (err) { 1171 NL_SET_ERR_MSG(extack, "Unsupported address family"); 1172 return err; 1173 } 1174 } else { 1175 union vxlan_addr *remote = &vxlan->default_dst.remote_ip; 1176 1177 if (remote->sa.sa_family == AF_INET) { 1178 ip->sin.sin_addr.s_addr = htonl(INADDR_ANY); 1179 ip->sa.sa_family = AF_INET; 1180 #if IS_ENABLED(CONFIG_IPV6) 1181 } else { 1182 ip->sin6.sin6_addr = in6addr_any; 1183 ip->sa.sa_family = AF_INET6; 1184 #endif 1185 } 1186 } 1187 1188 if (tb[NDA_PORT]) { 1189 if (nla_len(tb[NDA_PORT]) != sizeof(__be16)) { 1190 NL_SET_ERR_MSG(extack, "Invalid vxlan port"); 1191 return -EINVAL; 1192 } 1193 *port = nla_get_be16(tb[NDA_PORT]); 1194 } else { 1195 *port = vxlan->cfg.dst_port; 1196 } 1197 1198 if (tb[NDA_VNI]) { 1199 if (nla_len(tb[NDA_VNI]) != sizeof(u32)) { 1200 NL_SET_ERR_MSG(extack, "Invalid vni"); 1201 return -EINVAL; 1202 } 1203 *vni = cpu_to_be32(nla_get_u32(tb[NDA_VNI])); 1204 } else { 1205 *vni = vxlan->default_dst.remote_vni; 1206 } 1207 1208 if (tb[NDA_SRC_VNI]) { 1209 if (nla_len(tb[NDA_SRC_VNI]) != sizeof(u32)) { 1210 NL_SET_ERR_MSG(extack, "Invalid src vni"); 1211 return -EINVAL; 1212 } 1213 *src_vni = cpu_to_be32(nla_get_u32(tb[NDA_SRC_VNI])); 1214 } else { 1215 *src_vni = vxlan->default_dst.remote_vni; 1216 } 1217 1218 if (tb[NDA_IFINDEX]) { 1219 struct net_device *tdev; 1220 1221 if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32)) { 1222 NL_SET_ERR_MSG(extack, "Invalid ifindex"); 1223 return -EINVAL; 1224 } 1225 *ifindex = nla_get_u32(tb[NDA_IFINDEX]); 1226 tdev = __dev_get_by_index(net, *ifindex); 1227 if (!tdev) { 1228 NL_SET_ERR_MSG(extack, "Device not found"); 1229 return -EADDRNOTAVAIL; 1230 } 1231 } else { 1232 *ifindex = 0; 1233 } 1234 1235 *nhid = nla_get_u32_default(tb[NDA_NH_ID], 0); 1236 1237 return 0; 1238 } 1239 1240 /* Add static entry (via netlink) */ 1241 static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], 1242 struct net_device *dev, 1243 const unsigned char *addr, u16 vid, u16 flags, 1244 bool *notified, struct netlink_ext_ack *extack) 1245 { 1246 struct vxlan_dev *vxlan = netdev_priv(dev); 1247 /* struct net *net = dev_net(vxlan->dev); */ 1248 union vxlan_addr ip; 1249 __be16 port; 1250 __be32 src_vni, vni; 1251 u32 ifindex, nhid; 1252 u32 hash_index; 1253 int err; 1254 1255 if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) { 1256 pr_info("RTM_NEWNEIGH with invalid state %#x\n", 1257 ndm->ndm_state); 1258 return -EINVAL; 1259 } 1260 1261 if (!tb || (!tb[NDA_DST] && !tb[NDA_NH_ID])) 1262 return -EINVAL; 1263 1264 err = vxlan_fdb_parse(tb, vxlan, &ip, &port, 
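/* Editor's note, illustrative only (the command below is an assumption
 * based on typical iproute2 usage, not taken from this file): this is
 * the handler behind static FDB additions such as
 *
 *   bridge fdb append 00:11:22:33:44:55 dev vxlan0 dst 203.0.113.7 vni 42
 *
 * where NDA_DST, NDA_VNI, NDA_PORT and friends are decoded by the
 * vxlan_fdb_parse() call this note sits in, and any attribute that is
 * omitted falls back to the device's configured defaults.
 */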
&src_vni, &vni, &ifindex, 1265 &nhid, extack); 1266 if (err) 1267 return err; 1268 1269 if (vxlan->default_dst.remote_ip.sa.sa_family != ip.sa.sa_family) 1270 return -EAFNOSUPPORT; 1271 1272 hash_index = fdb_head_index(vxlan, addr, src_vni); 1273 spin_lock_bh(&vxlan->hash_lock[hash_index]); 1274 err = vxlan_fdb_update(vxlan, addr, &ip, ndm->ndm_state, flags, 1275 port, src_vni, vni, ifindex, 1276 ndm->ndm_flags | NTF_VXLAN_ADDED_BY_USER, 1277 nhid, true, extack); 1278 spin_unlock_bh(&vxlan->hash_lock[hash_index]); 1279 1280 if (!err) 1281 *notified = true; 1282 1283 return err; 1284 } 1285 1286 int __vxlan_fdb_delete(struct vxlan_dev *vxlan, 1287 const unsigned char *addr, union vxlan_addr ip, 1288 __be16 port, __be32 src_vni, __be32 vni, 1289 u32 ifindex, bool swdev_notify) 1290 { 1291 struct vxlan_rdst *rd = NULL; 1292 struct vxlan_fdb *f; 1293 int err = -ENOENT; 1294 1295 f = vxlan_find_mac(vxlan, addr, src_vni); 1296 if (!f) 1297 return err; 1298 1299 if (!vxlan_addr_any(&ip)) { 1300 rd = vxlan_fdb_find_rdst(f, &ip, port, vni, ifindex); 1301 if (!rd) 1302 goto out; 1303 } 1304 1305 /* remove a destination if it's not the only one on the list, 1306 * otherwise destroy the fdb entry 1307 */ 1308 if (rd && !list_is_singular(&f->remotes)) { 1309 vxlan_fdb_dst_destroy(vxlan, f, rd, swdev_notify); 1310 goto out; 1311 } 1312 1313 vxlan_fdb_destroy(vxlan, f, true, swdev_notify); 1314 1315 out: 1316 return 0; 1317 } 1318 1319 /* Delete entry (via netlink) */ 1320 static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[], 1321 struct net_device *dev, 1322 const unsigned char *addr, u16 vid, bool *notified, 1323 struct netlink_ext_ack *extack) 1324 { 1325 struct vxlan_dev *vxlan = netdev_priv(dev); 1326 union vxlan_addr ip; 1327 __be32 src_vni, vni; 1328 u32 ifindex, nhid; 1329 u32 hash_index; 1330 __be16 port; 1331 int err; 1332 1333 err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &src_vni, &vni, &ifindex, 1334 &nhid, extack); 1335 if (err) 1336 return err; 1337 1338 hash_index = fdb_head_index(vxlan, addr, src_vni); 1339 spin_lock_bh(&vxlan->hash_lock[hash_index]); 1340 err = __vxlan_fdb_delete(vxlan, addr, ip, port, src_vni, vni, ifindex, 1341 true); 1342 spin_unlock_bh(&vxlan->hash_lock[hash_index]); 1343 1344 if (!err) 1345 *notified = true; 1346 1347 return err; 1348 } 1349 1350 /* Dump forwarding table */ 1351 static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, 1352 struct net_device *dev, 1353 struct net_device *filter_dev, int *idx) 1354 { 1355 struct ndo_fdb_dump_context *ctx = (void *)cb->ctx; 1356 struct vxlan_dev *vxlan = netdev_priv(dev); 1357 unsigned int h; 1358 int err = 0; 1359 1360 for (h = 0; h < FDB_HASH_SIZE; ++h) { 1361 struct vxlan_fdb *f; 1362 1363 rcu_read_lock(); 1364 hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) { 1365 struct vxlan_rdst *rd; 1366 1367 if (rcu_access_pointer(f->nh)) { 1368 if (*idx < ctx->fdb_idx) 1369 goto skip_nh; 1370 err = vxlan_fdb_info(skb, vxlan, f, 1371 NETLINK_CB(cb->skb).portid, 1372 cb->nlh->nlmsg_seq, 1373 RTM_NEWNEIGH, 1374 NLM_F_MULTI, NULL); 1375 if (err < 0) { 1376 rcu_read_unlock(); 1377 goto out; 1378 } 1379 skip_nh: 1380 *idx += 1; 1381 continue; 1382 } 1383 1384 list_for_each_entry_rcu(rd, &f->remotes, list) { 1385 if (*idx < ctx->fdb_idx) 1386 goto skip; 1387 1388 err = vxlan_fdb_info(skb, vxlan, f, 1389 NETLINK_CB(cb->skb).portid, 1390 cb->nlh->nlmsg_seq, 1391 RTM_NEWNEIGH, 1392 NLM_F_MULTI, rd); 1393 if (err < 0) { 1394 rcu_read_unlock(); 1395 goto out; 1396 } 1397 skip: 1398 *idx += 1; 1399 
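/* Editor's note (not upstream text): *idx counts every entry walked,
 * while ctx->fdb_idx records how far the previous dump pass got, so
 * entries below that mark are skipped and a multi-part netlink dump
 * (NLM_F_MULTI) resumes where it stopped instead of repeating entries.
 */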
} 1400 } 1401 rcu_read_unlock(); 1402 } 1403 out: 1404 return err; 1405 } 1406 1407 static int vxlan_fdb_get(struct sk_buff *skb, 1408 struct nlattr *tb[], 1409 struct net_device *dev, 1410 const unsigned char *addr, 1411 u16 vid, u32 portid, u32 seq, 1412 struct netlink_ext_ack *extack) 1413 { 1414 struct vxlan_dev *vxlan = netdev_priv(dev); 1415 struct vxlan_fdb *f; 1416 __be32 vni; 1417 int err; 1418 1419 if (tb[NDA_VNI]) 1420 vni = cpu_to_be32(nla_get_u32(tb[NDA_VNI])); 1421 else 1422 vni = vxlan->default_dst.remote_vni; 1423 1424 rcu_read_lock(); 1425 1426 f = __vxlan_find_mac(vxlan, addr, vni); 1427 if (!f) { 1428 NL_SET_ERR_MSG(extack, "Fdb entry not found"); 1429 err = -ENOENT; 1430 goto errout; 1431 } 1432 1433 err = vxlan_fdb_info(skb, vxlan, f, portid, seq, 1434 RTM_NEWNEIGH, 0, first_remote_rcu(f)); 1435 errout: 1436 rcu_read_unlock(); 1437 return err; 1438 } 1439 1440 /* Watch incoming packets to learn mapping between Ethernet address 1441 * and Tunnel endpoint. 1442 */ 1443 static enum skb_drop_reason vxlan_snoop(struct net_device *dev, 1444 union vxlan_addr *src_ip, 1445 const u8 *src_mac, u32 src_ifindex, 1446 __be32 vni) 1447 { 1448 struct vxlan_dev *vxlan = netdev_priv(dev); 1449 struct vxlan_fdb *f; 1450 u32 ifindex = 0; 1451 1452 /* Ignore packets from invalid src-address */ 1453 if (!is_valid_ether_addr(src_mac)) 1454 return SKB_DROP_REASON_MAC_INVALID_SOURCE; 1455 1456 #if IS_ENABLED(CONFIG_IPV6) 1457 if (src_ip->sa.sa_family == AF_INET6 && 1458 (ipv6_addr_type(&src_ip->sin6.sin6_addr) & IPV6_ADDR_LINKLOCAL)) 1459 ifindex = src_ifindex; 1460 #endif 1461 1462 f = vxlan_find_mac(vxlan, src_mac, vni); 1463 if (likely(f)) { 1464 struct vxlan_rdst *rdst = first_remote_rcu(f); 1465 1466 if (likely(vxlan_addr_equal(&rdst->remote_ip, src_ip) && 1467 rdst->remote_ifindex == ifindex)) 1468 return SKB_NOT_DROPPED_YET; 1469 1470 /* Don't migrate static entries, drop packets */ 1471 if (f->state & (NUD_PERMANENT | NUD_NOARP)) 1472 return SKB_DROP_REASON_VXLAN_ENTRY_EXISTS; 1473 1474 /* Don't override an fdb with nexthop with a learnt entry */ 1475 if (rcu_access_pointer(f->nh)) 1476 return SKB_DROP_REASON_VXLAN_ENTRY_EXISTS; 1477 1478 if (net_ratelimit()) 1479 netdev_info(dev, 1480 "%pM migrated from %pIS to %pIS\n", 1481 src_mac, &rdst->remote_ip.sa, &src_ip->sa); 1482 1483 rdst->remote_ip = *src_ip; 1484 f->updated = jiffies; 1485 vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH, true, NULL); 1486 } else { 1487 u32 hash_index = fdb_head_index(vxlan, src_mac, vni); 1488 1489 /* learned new entry */ 1490 spin_lock(&vxlan->hash_lock[hash_index]); 1491 1492 /* close off race between vxlan_flush and incoming packets */ 1493 if (netif_running(dev)) 1494 vxlan_fdb_update(vxlan, src_mac, src_ip, 1495 NUD_REACHABLE, 1496 NLM_F_EXCL|NLM_F_CREATE, 1497 vxlan->cfg.dst_port, 1498 vni, 1499 vxlan->default_dst.remote_vni, 1500 ifindex, NTF_SELF, 0, true, NULL); 1501 spin_unlock(&vxlan->hash_lock[hash_index]); 1502 } 1503 1504 return SKB_NOT_DROPPED_YET; 1505 } 1506 1507 static bool __vxlan_sock_release_prep(struct vxlan_sock *vs) 1508 { 1509 struct vxlan_net *vn; 1510 1511 if (!vs) 1512 return false; 1513 if (!refcount_dec_and_test(&vs->refcnt)) 1514 return false; 1515 1516 vn = net_generic(sock_net(vs->sock->sk), vxlan_net_id); 1517 spin_lock(&vn->sock_lock); 1518 hlist_del_rcu(&vs->hlist); 1519 udp_tunnel_notify_del_rx_port(vs->sock, 1520 (vs->flags & VXLAN_F_GPE) ? 
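/* Editor's note (not upstream text): the udp_tunnel_notify_del_rx_port()
 * call this note sits in tells UDP-tunnel-offload-capable NICs that the
 * port no longer carries VXLAN (or VXLAN-GPE) traffic, so any hardware
 * parsing or steering set up for it can be torn down.
 */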
1521 UDP_TUNNEL_TYPE_VXLAN_GPE : 1522 UDP_TUNNEL_TYPE_VXLAN); 1523 spin_unlock(&vn->sock_lock); 1524 1525 return true; 1526 } 1527 1528 static void vxlan_sock_release(struct vxlan_dev *vxlan) 1529 { 1530 struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock); 1531 #if IS_ENABLED(CONFIG_IPV6) 1532 struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock); 1533 1534 RCU_INIT_POINTER(vxlan->vn6_sock, NULL); 1535 #endif 1536 1537 RCU_INIT_POINTER(vxlan->vn4_sock, NULL); 1538 synchronize_net(); 1539 1540 if (vxlan->cfg.flags & VXLAN_F_VNIFILTER) 1541 vxlan_vs_del_vnigrp(vxlan); 1542 else 1543 vxlan_vs_del_dev(vxlan); 1544 1545 if (__vxlan_sock_release_prep(sock4)) { 1546 udp_tunnel_sock_release(sock4->sock); 1547 kfree(sock4); 1548 } 1549 1550 #if IS_ENABLED(CONFIG_IPV6) 1551 if (__vxlan_sock_release_prep(sock6)) { 1552 udp_tunnel_sock_release(sock6->sock); 1553 kfree(sock6); 1554 } 1555 #endif 1556 } 1557 1558 static enum skb_drop_reason vxlan_remcsum(struct sk_buff *skb, u32 vxflags) 1559 { 1560 const struct vxlanhdr *vh = vxlan_hdr(skb); 1561 enum skb_drop_reason reason; 1562 size_t start, offset; 1563 1564 if (!(vh->vx_flags & VXLAN_HF_RCO) || skb->remcsum_offload) 1565 return SKB_NOT_DROPPED_YET; 1566 1567 start = vxlan_rco_start(vh->vx_vni); 1568 offset = start + vxlan_rco_offset(vh->vx_vni); 1569 1570 reason = pskb_may_pull_reason(skb, offset + sizeof(u16)); 1571 if (reason) 1572 return reason; 1573 1574 skb_remcsum_process(skb, (void *)(vxlan_hdr(skb) + 1), start, offset, 1575 !!(vxflags & VXLAN_F_REMCSUM_NOPARTIAL)); 1576 return SKB_NOT_DROPPED_YET; 1577 } 1578 1579 static void vxlan_parse_gbp_hdr(struct sk_buff *skb, u32 vxflags, 1580 struct vxlan_metadata *md) 1581 { 1582 const struct vxlanhdr *vh = vxlan_hdr(skb); 1583 const struct vxlanhdr_gbp *gbp; 1584 struct metadata_dst *tun_dst; 1585 1586 gbp = (const struct vxlanhdr_gbp *)vh; 1587 1588 if (!(vh->vx_flags & VXLAN_HF_GBP)) 1589 return; 1590 1591 md->gbp = ntohs(gbp->policy_id); 1592 1593 tun_dst = (struct metadata_dst *)skb_dst(skb); 1594 if (tun_dst) { 1595 __set_bit(IP_TUNNEL_VXLAN_OPT_BIT, 1596 tun_dst->u.tun_info.key.tun_flags); 1597 tun_dst->u.tun_info.options_len = sizeof(*md); 1598 } 1599 if (gbp->dont_learn) 1600 md->gbp |= VXLAN_GBP_DONT_LEARN; 1601 1602 if (gbp->policy_applied) 1603 md->gbp |= VXLAN_GBP_POLICY_APPLIED; 1604 1605 /* In flow-based mode, GBP is carried in dst_metadata */ 1606 if (!(vxflags & VXLAN_F_COLLECT_METADATA)) 1607 skb->mark = md->gbp; 1608 } 1609 1610 static enum skb_drop_reason vxlan_set_mac(struct vxlan_dev *vxlan, 1611 struct vxlan_sock *vs, 1612 struct sk_buff *skb, __be32 vni) 1613 { 1614 union vxlan_addr saddr; 1615 u32 ifindex = skb->dev->ifindex; 1616 1617 skb_reset_mac_header(skb); 1618 skb->protocol = eth_type_trans(skb, vxlan->dev); 1619 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); 1620 1621 /* Ignore packet loops (and multicast echo) */ 1622 if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr)) 1623 return SKB_DROP_REASON_LOCAL_MAC; 1624 1625 /* Get address from the outer IP header */ 1626 if (vxlan_get_sk_family(vs) == AF_INET) { 1627 saddr.sin.sin_addr.s_addr = ip_hdr(skb)->saddr; 1628 saddr.sa.sa_family = AF_INET; 1629 #if IS_ENABLED(CONFIG_IPV6) 1630 } else { 1631 saddr.sin6.sin6_addr = ipv6_hdr(skb)->saddr; 1632 saddr.sa.sa_family = AF_INET6; 1633 #endif 1634 } 1635 1636 if (!(vxlan->cfg.flags & VXLAN_F_LEARN)) 1637 return SKB_NOT_DROPPED_YET; 1638 1639 return vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source, 1640 ifindex, vni); 1641 } 1642 1643 
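/* Editor's note, illustrative only (the commands are an assumption based
 * on typical iproute2 usage, not taken from this file): the learning
 * path above (vxlan_set_mac() -> vxlan_snoop()) is what populates the
 * FDB for a device created along the lines of
 *
 *   ip link add vxlan0 type vxlan id 42 dstport 4789 \
 *          local 192.0.2.1 dev eth0
 *   bridge fdb show dev vxlan0
 *
 * with each decapsulated frame mapping its inner source MAC to the
 * outer source IP of the sending tunnel endpoint.
 */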
static bool vxlan_ecn_decapsulate(struct vxlan_sock *vs, void *oiph, 1644 struct sk_buff *skb) 1645 { 1646 int err = 0; 1647 1648 if (vxlan_get_sk_family(vs) == AF_INET) 1649 err = IP_ECN_decapsulate(oiph, skb); 1650 #if IS_ENABLED(CONFIG_IPV6) 1651 else 1652 err = IP6_ECN_decapsulate(oiph, skb); 1653 #endif 1654 1655 if (unlikely(err) && log_ecn_error) { 1656 if (vxlan_get_sk_family(vs) == AF_INET) 1657 net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n", 1658 &((struct iphdr *)oiph)->saddr, 1659 ((struct iphdr *)oiph)->tos); 1660 else 1661 net_info_ratelimited("non-ECT from %pI6\n", 1662 &((struct ipv6hdr *)oiph)->saddr); 1663 } 1664 return err <= 1; 1665 } 1666 1667 /* Callback from net/ipv4/udp.c to receive packets */ 1668 static int vxlan_rcv(struct sock *sk, struct sk_buff *skb) 1669 { 1670 struct vxlan_vni_node *vninode = NULL; 1671 const struct vxlanhdr *vh; 1672 struct vxlan_dev *vxlan; 1673 struct vxlan_sock *vs; 1674 struct vxlan_metadata _md; 1675 struct vxlan_metadata *md = &_md; 1676 __be16 protocol = htons(ETH_P_TEB); 1677 enum skb_drop_reason reason; 1678 bool raw_proto = false; 1679 void *oiph; 1680 __be32 vni = 0; 1681 int nh; 1682 1683 /* Need UDP and VXLAN header to be present */ 1684 reason = pskb_may_pull_reason(skb, VXLAN_HLEN); 1685 if (reason) 1686 goto drop; 1687 1688 vh = vxlan_hdr(skb); 1689 /* VNI flag always required to be set */ 1690 if (!(vh->vx_flags & VXLAN_HF_VNI)) { 1691 netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n", 1692 ntohl(vh->vx_flags), ntohl(vh->vx_vni)); 1693 reason = SKB_DROP_REASON_VXLAN_INVALID_HDR; 1694 /* Return non vxlan pkt */ 1695 goto drop; 1696 } 1697 1698 vs = rcu_dereference_sk_user_data(sk); 1699 if (!vs) 1700 goto drop; 1701 1702 vni = vxlan_vni(vh->vx_vni); 1703 1704 vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni, &vninode); 1705 if (!vxlan) { 1706 reason = SKB_DROP_REASON_VXLAN_VNI_NOT_FOUND; 1707 goto drop; 1708 } 1709 1710 if (vh->vx_flags & vxlan->cfg.reserved_bits.vx_flags || 1711 vh->vx_vni & vxlan->cfg.reserved_bits.vx_vni) { 1712 /* If the header uses bits besides those enabled by the 1713 * netdevice configuration, treat this as a malformed packet. 1714 * This behavior diverges from VXLAN RFC (RFC7348) which 1715 * stipulates that bits in reserved fields are to be 1716 * ignored. The approach here maintains compatibility with 1717 * previous stack code, and also is more robust and provides a 1718 * little more security in adding extensions to VXLAN.
1719 */ 1720 reason = SKB_DROP_REASON_VXLAN_INVALID_HDR; 1721 DEV_STATS_INC(vxlan->dev, rx_frame_errors); 1722 DEV_STATS_INC(vxlan->dev, rx_errors); 1723 vxlan_vnifilter_count(vxlan, vni, vninode, 1724 VXLAN_VNI_STATS_RX_ERRORS, 0); 1725 goto drop; 1726 } 1727 1728 if (vxlan->cfg.flags & VXLAN_F_GPE) { 1729 if (!vxlan_parse_gpe_proto(vh, &protocol)) 1730 goto drop; 1731 raw_proto = true; 1732 } 1733 1734 if (__iptunnel_pull_header(skb, VXLAN_HLEN, protocol, raw_proto, 1735 !net_eq(vxlan->net, dev_net(vxlan->dev)))) { 1736 reason = SKB_DROP_REASON_NOMEM; 1737 goto drop; 1738 } 1739 1740 if (vxlan->cfg.flags & VXLAN_F_REMCSUM_RX) { 1741 reason = vxlan_remcsum(skb, vxlan->cfg.flags); 1742 if (unlikely(reason)) 1743 goto drop; 1744 } 1745 1746 if (vxlan_collect_metadata(vs)) { 1747 IP_TUNNEL_DECLARE_FLAGS(flags) = { }; 1748 struct metadata_dst *tun_dst; 1749 1750 __set_bit(IP_TUNNEL_KEY_BIT, flags); 1751 tun_dst = udp_tun_rx_dst(skb, vxlan_get_sk_family(vs), flags, 1752 key32_to_tunnel_id(vni), sizeof(*md)); 1753 1754 if (!tun_dst) { 1755 reason = SKB_DROP_REASON_NOMEM; 1756 goto drop; 1757 } 1758 1759 md = ip_tunnel_info_opts(&tun_dst->u.tun_info); 1760 1761 skb_dst_set(skb, (struct dst_entry *)tun_dst); 1762 } else { 1763 memset(md, 0, sizeof(*md)); 1764 } 1765 1766 if (vxlan->cfg.flags & VXLAN_F_GBP) 1767 vxlan_parse_gbp_hdr(skb, vxlan->cfg.flags, md); 1768 /* Note that GBP and GPE can never be active together. This is 1769 * ensured in vxlan_dev_configure. 1770 */ 1771 1772 if (!raw_proto) { 1773 reason = vxlan_set_mac(vxlan, vs, skb, vni); 1774 if (reason) 1775 goto drop; 1776 } else { 1777 skb_reset_mac_header(skb); 1778 skb->dev = vxlan->dev; 1779 skb->pkt_type = PACKET_HOST; 1780 } 1781 1782 /* Save offset of outer header relative to skb->head, 1783 * because we are going to reset the network header to the inner header 1784 * and might change skb->head. 1785 */ 1786 nh = skb_network_header(skb) - skb->head; 1787 1788 skb_reset_network_header(skb); 1789 1790 reason = pskb_inet_may_pull_reason(skb); 1791 if (reason) { 1792 DEV_STATS_INC(vxlan->dev, rx_length_errors); 1793 DEV_STATS_INC(vxlan->dev, rx_errors); 1794 vxlan_vnifilter_count(vxlan, vni, vninode, 1795 VXLAN_VNI_STATS_RX_ERRORS, 0); 1796 goto drop; 1797 } 1798 1799 /* Get the outer header. 
*/ 1800 oiph = skb->head + nh; 1801 1802 if (!vxlan_ecn_decapsulate(vs, oiph, skb)) { 1803 reason = SKB_DROP_REASON_IP_TUNNEL_ECN; 1804 DEV_STATS_INC(vxlan->dev, rx_frame_errors); 1805 DEV_STATS_INC(vxlan->dev, rx_errors); 1806 vxlan_vnifilter_count(vxlan, vni, vninode, 1807 VXLAN_VNI_STATS_RX_ERRORS, 0); 1808 goto drop; 1809 } 1810 1811 rcu_read_lock(); 1812 1813 if (unlikely(!(vxlan->dev->flags & IFF_UP))) { 1814 rcu_read_unlock(); 1815 dev_dstats_rx_dropped(vxlan->dev); 1816 vxlan_vnifilter_count(vxlan, vni, vninode, 1817 VXLAN_VNI_STATS_RX_DROPS, 0); 1818 reason = SKB_DROP_REASON_DEV_READY; 1819 goto drop; 1820 } 1821 1822 dev_dstats_rx_add(vxlan->dev, skb->len); 1823 vxlan_vnifilter_count(vxlan, vni, vninode, VXLAN_VNI_STATS_RX, skb->len); 1824 gro_cells_receive(&vxlan->gro_cells, skb); 1825 1826 rcu_read_unlock(); 1827 1828 return 0; 1829 1830 drop: 1831 reason = reason ?: SKB_DROP_REASON_NOT_SPECIFIED; 1832 /* Consume bad packet */ 1833 kfree_skb_reason(skb, reason); 1834 return 0; 1835 } 1836 1837 /* Callback from net/ipv{4,6}/udp.c to check that we have a VNI for errors */ 1838 static int vxlan_err_lookup(struct sock *sk, struct sk_buff *skb) 1839 { 1840 struct vxlan_dev *vxlan; 1841 struct vxlan_sock *vs; 1842 struct vxlanhdr *hdr; 1843 __be32 vni; 1844 1845 if (!pskb_may_pull(skb, skb_transport_offset(skb) + VXLAN_HLEN)) 1846 return -EINVAL; 1847 1848 hdr = vxlan_hdr(skb); 1849 1850 if (!(hdr->vx_flags & VXLAN_HF_VNI)) 1851 return -EINVAL; 1852 1853 vs = rcu_dereference_sk_user_data(sk); 1854 if (!vs) 1855 return -ENOENT; 1856 1857 vni = vxlan_vni(hdr->vx_vni); 1858 vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni, NULL); 1859 if (!vxlan) 1860 return -ENOENT; 1861 1862 return 0; 1863 } 1864 1865 static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni) 1866 { 1867 struct vxlan_dev *vxlan = netdev_priv(dev); 1868 struct arphdr *parp; 1869 u8 *arpptr, *sha; 1870 __be32 sip, tip; 1871 struct neighbour *n; 1872 1873 if (dev->flags & IFF_NOARP) 1874 goto out; 1875 1876 if (!pskb_may_pull(skb, arp_hdr_len(dev))) { 1877 dev_dstats_tx_dropped(dev); 1878 vxlan_vnifilter_count(vxlan, vni, NULL, 1879 VXLAN_VNI_STATS_TX_DROPS, 0); 1880 goto out; 1881 } 1882 parp = arp_hdr(skb); 1883 1884 if ((parp->ar_hrd != htons(ARPHRD_ETHER) && 1885 parp->ar_hrd != htons(ARPHRD_IEEE802)) || 1886 parp->ar_pro != htons(ETH_P_IP) || 1887 parp->ar_op != htons(ARPOP_REQUEST) || 1888 parp->ar_hln != dev->addr_len || 1889 parp->ar_pln != 4) 1890 goto out; 1891 arpptr = (u8 *)parp + sizeof(struct arphdr); 1892 sha = arpptr; 1893 arpptr += dev->addr_len; /* sha */ 1894 memcpy(&sip, arpptr, sizeof(sip)); 1895 arpptr += sizeof(sip); 1896 arpptr += dev->addr_len; /* tha */ 1897 memcpy(&tip, arpptr, sizeof(tip)); 1898 1899 if (ipv4_is_loopback(tip) || 1900 ipv4_is_multicast(tip)) 1901 goto out; 1902 1903 n = neigh_lookup(&arp_tbl, &tip, dev); 1904 1905 if (n) { 1906 struct vxlan_fdb *f; 1907 struct sk_buff *reply; 1908 1909 if (!(READ_ONCE(n->nud_state) & NUD_CONNECTED)) { 1910 neigh_release(n); 1911 goto out; 1912 } 1913 1914 f = vxlan_find_mac(vxlan, n->ha, vni); 1915 if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) { 1916 /* bridge-local neighbor */ 1917 neigh_release(n); 1918 goto out; 1919 } 1920 1921 reply = arp_create(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha, 1922 n->ha, sha); 1923 1924 neigh_release(n); 1925 1926 if (reply == NULL) 1927 goto out; 1928 1929 skb_reset_mac_header(reply); 1930 __skb_pull(reply, skb_network_offset(reply)); 1931 reply->ip_summed = 
CHECKSUM_UNNECESSARY; 1932 reply->pkt_type = PACKET_HOST; 1933 1934 if (netif_rx(reply) == NET_RX_DROP) { 1935 dev_dstats_rx_dropped(dev); 1936 vxlan_vnifilter_count(vxlan, vni, NULL, 1937 VXLAN_VNI_STATS_RX_DROPS, 0); 1938 } 1939 1940 } else if (vxlan->cfg.flags & VXLAN_F_L3MISS) { 1941 union vxlan_addr ipa = { 1942 .sin.sin_addr.s_addr = tip, 1943 .sin.sin_family = AF_INET, 1944 }; 1945 1946 vxlan_ip_miss(dev, &ipa); 1947 } 1948 out: 1949 consume_skb(skb); 1950 return NETDEV_TX_OK; 1951 } 1952 1953 #if IS_ENABLED(CONFIG_IPV6) 1954 static struct sk_buff *vxlan_na_create(struct sk_buff *request, 1955 struct neighbour *n, bool isrouter) 1956 { 1957 struct net_device *dev = request->dev; 1958 struct sk_buff *reply; 1959 struct nd_msg *ns, *na; 1960 struct ipv6hdr *pip6; 1961 u8 *daddr; 1962 int na_olen = 8; /* opt hdr + ETH_ALEN for target */ 1963 int ns_olen; 1964 int i, len; 1965 1966 if (dev == NULL || !pskb_may_pull(request, request->len)) 1967 return NULL; 1968 1969 len = LL_RESERVED_SPACE(dev) + sizeof(struct ipv6hdr) + 1970 sizeof(*na) + na_olen + dev->needed_tailroom; 1971 reply = alloc_skb(len, GFP_ATOMIC); 1972 if (reply == NULL) 1973 return NULL; 1974 1975 reply->protocol = htons(ETH_P_IPV6); 1976 reply->dev = dev; 1977 skb_reserve(reply, LL_RESERVED_SPACE(request->dev)); 1978 skb_push(reply, sizeof(struct ethhdr)); 1979 skb_reset_mac_header(reply); 1980 1981 ns = (struct nd_msg *)(ipv6_hdr(request) + 1); 1982 1983 daddr = eth_hdr(request)->h_source; 1984 ns_olen = request->len - skb_network_offset(request) - 1985 sizeof(struct ipv6hdr) - sizeof(*ns); 1986 for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) { 1987 if (!ns->opt[i + 1]) { 1988 kfree_skb(reply); 1989 return NULL; 1990 } 1991 if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) { 1992 daddr = ns->opt + i + sizeof(struct nd_opt_hdr); 1993 break; 1994 } 1995 } 1996 1997 /* Ethernet header */ 1998 ether_addr_copy(eth_hdr(reply)->h_dest, daddr); 1999 ether_addr_copy(eth_hdr(reply)->h_source, n->ha); 2000 eth_hdr(reply)->h_proto = htons(ETH_P_IPV6); 2001 reply->protocol = htons(ETH_P_IPV6); 2002 2003 skb_pull(reply, sizeof(struct ethhdr)); 2004 skb_reset_network_header(reply); 2005 skb_put(reply, sizeof(struct ipv6hdr)); 2006 2007 /* IPv6 header */ 2008 2009 pip6 = ipv6_hdr(reply); 2010 memset(pip6, 0, sizeof(struct ipv6hdr)); 2011 pip6->version = 6; 2012 pip6->priority = ipv6_hdr(request)->priority; 2013 pip6->nexthdr = IPPROTO_ICMPV6; 2014 pip6->hop_limit = 255; 2015 pip6->daddr = ipv6_hdr(request)->saddr; 2016 pip6->saddr = *(struct in6_addr *)n->primary_key; 2017 2018 skb_pull(reply, sizeof(struct ipv6hdr)); 2019 skb_reset_transport_header(reply); 2020 2021 /* Neighbor Advertisement */ 2022 na = skb_put_zero(reply, sizeof(*na) + na_olen); 2023 na->icmph.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT; 2024 na->icmph.icmp6_router = isrouter; 2025 na->icmph.icmp6_override = 1; 2026 na->icmph.icmp6_solicited = 1; 2027 na->target = ns->target; 2028 ether_addr_copy(&na->opt[2], n->ha); 2029 na->opt[0] = ND_OPT_TARGET_LL_ADDR; 2030 na->opt[1] = na_olen >> 3; 2031 2032 na->icmph.icmp6_cksum = csum_ipv6_magic(&pip6->saddr, 2033 &pip6->daddr, sizeof(*na)+na_olen, IPPROTO_ICMPV6, 2034 csum_partial(na, sizeof(*na)+na_olen, 0)); 2035 2036 pip6->payload_len = htons(sizeof(*na)+na_olen); 2037 2038 skb_push(reply, sizeof(struct ipv6hdr)); 2039 2040 reply->ip_summed = CHECKSUM_UNNECESSARY; 2041 2042 return reply; 2043 } 2044 2045 static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni) 2046 { 2047 struct vxlan_dev *vxlan = 
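/* Editor's note (not upstream text): neigh_reduce() is the IPv6
 * counterpart of arp_reduce() above.  It answers Neighbour Solicitations
 * locally with a synthesised Neighbour Advertisement (vxlan_na_create())
 * when the target is already known and connected, and otherwise reports
 * an l3miss via vxlan_ip_miss() when VXLAN_F_L3MISS is enabled.
 */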
netdev_priv(dev); 2048 const struct in6_addr *daddr; 2049 const struct ipv6hdr *iphdr; 2050 struct inet6_dev *in6_dev; 2051 struct neighbour *n; 2052 struct nd_msg *msg; 2053 2054 rcu_read_lock(); 2055 in6_dev = __in6_dev_get(dev); 2056 if (!in6_dev) 2057 goto out; 2058 2059 iphdr = ipv6_hdr(skb); 2060 daddr = &iphdr->daddr; 2061 msg = (struct nd_msg *)(iphdr + 1); 2062 2063 if (ipv6_addr_loopback(daddr) || 2064 ipv6_addr_is_multicast(&msg->target)) 2065 goto out; 2066 2067 n = neigh_lookup(ipv6_stub->nd_tbl, &msg->target, dev); 2068 2069 if (n) { 2070 struct vxlan_fdb *f; 2071 struct sk_buff *reply; 2072 2073 if (!(READ_ONCE(n->nud_state) & NUD_CONNECTED)) { 2074 neigh_release(n); 2075 goto out; 2076 } 2077 2078 f = vxlan_find_mac(vxlan, n->ha, vni); 2079 if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) { 2080 /* bridge-local neighbor */ 2081 neigh_release(n); 2082 goto out; 2083 } 2084 2085 reply = vxlan_na_create(skb, n, 2086 !!(f ? f->flags & NTF_ROUTER : 0)); 2087 2088 neigh_release(n); 2089 2090 if (reply == NULL) 2091 goto out; 2092 2093 if (netif_rx(reply) == NET_RX_DROP) { 2094 dev_dstats_rx_dropped(dev); 2095 vxlan_vnifilter_count(vxlan, vni, NULL, 2096 VXLAN_VNI_STATS_RX_DROPS, 0); 2097 } 2098 } else if (vxlan->cfg.flags & VXLAN_F_L3MISS) { 2099 union vxlan_addr ipa = { 2100 .sin6.sin6_addr = msg->target, 2101 .sin6.sin6_family = AF_INET6, 2102 }; 2103 2104 vxlan_ip_miss(dev, &ipa); 2105 } 2106 2107 out: 2108 rcu_read_unlock(); 2109 consume_skb(skb); 2110 return NETDEV_TX_OK; 2111 } 2112 #endif 2113 2114 static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb) 2115 { 2116 struct vxlan_dev *vxlan = netdev_priv(dev); 2117 struct neighbour *n; 2118 2119 if (is_multicast_ether_addr(eth_hdr(skb)->h_dest)) 2120 return false; 2121 2122 n = NULL; 2123 switch (ntohs(eth_hdr(skb)->h_proto)) { 2124 case ETH_P_IP: 2125 { 2126 struct iphdr *pip; 2127 2128 if (!pskb_may_pull(skb, sizeof(struct iphdr))) 2129 return false; 2130 pip = ip_hdr(skb); 2131 n = neigh_lookup(&arp_tbl, &pip->daddr, dev); 2132 if (!n && (vxlan->cfg.flags & VXLAN_F_L3MISS)) { 2133 union vxlan_addr ipa = { 2134 .sin.sin_addr.s_addr = pip->daddr, 2135 .sin.sin_family = AF_INET, 2136 }; 2137 2138 vxlan_ip_miss(dev, &ipa); 2139 return false; 2140 } 2141 2142 break; 2143 } 2144 #if IS_ENABLED(CONFIG_IPV6) 2145 case ETH_P_IPV6: 2146 { 2147 struct ipv6hdr *pip6; 2148 2149 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) 2150 return false; 2151 pip6 = ipv6_hdr(skb); 2152 n = neigh_lookup(ipv6_stub->nd_tbl, &pip6->daddr, dev); 2153 if (!n && (vxlan->cfg.flags & VXLAN_F_L3MISS)) { 2154 union vxlan_addr ipa = { 2155 .sin6.sin6_addr = pip6->daddr, 2156 .sin6.sin6_family = AF_INET6, 2157 }; 2158 2159 vxlan_ip_miss(dev, &ipa); 2160 return false; 2161 } 2162 2163 break; 2164 } 2165 #endif 2166 default: 2167 return false; 2168 } 2169 2170 if (n) { 2171 bool diff; 2172 2173 diff = !ether_addr_equal(eth_hdr(skb)->h_dest, n->ha); 2174 if (diff) { 2175 memcpy(eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest, 2176 dev->addr_len); 2177 memcpy(eth_hdr(skb)->h_dest, n->ha, dev->addr_len); 2178 } 2179 neigh_release(n); 2180 return diff; 2181 } 2182 2183 return false; 2184 } 2185 2186 static int vxlan_build_gpe_hdr(struct vxlanhdr *vxh, __be16 protocol) 2187 { 2188 struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)vxh; 2189 2190 gpe->np_applied = 1; 2191 gpe->next_protocol = tun_p_from_eth_p(protocol); 2192 if (!gpe->next_protocol) 2193 return -EPFNOSUPPORT; 2194 return 0; 2195 } 2196 2197 static int 
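/* Editor's note, for reference (RFC 7348 layout, not upstream text):
 * vxlan_build_skb() below pushes the fixed 8-byte VXLAN header,
 *
 *   |R|R|R|R|I|R|R|R|          Reserved (24 bits)          |
 *   |            VNI (24 bits)             | Reserved (8)  |
 *
 * vx_flags carries the flag word (VXLAN_HF_VNI is the I bit) and
 * vx_vni holds the 24-bit VNI shifted left by 8 bits; the optional
 * RCO, GBP and GPE extensions handled below reuse reserved bits.
 */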
vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst, 2198 int iphdr_len, __be32 vni, 2199 struct vxlan_metadata *md, u32 vxflags, 2200 bool udp_sum) 2201 { 2202 struct vxlanhdr *vxh; 2203 int min_headroom; 2204 int err; 2205 int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL; 2206 __be16 inner_protocol = htons(ETH_P_TEB); 2207 2208 if ((vxflags & VXLAN_F_REMCSUM_TX) && 2209 skb->ip_summed == CHECKSUM_PARTIAL) { 2210 int csum_start = skb_checksum_start_offset(skb); 2211 2212 if (csum_start <= VXLAN_MAX_REMCSUM_START && 2213 !(csum_start & VXLAN_RCO_SHIFT_MASK) && 2214 (skb->csum_offset == offsetof(struct udphdr, check) || 2215 skb->csum_offset == offsetof(struct tcphdr, check))) 2216 type |= SKB_GSO_TUNNEL_REMCSUM; 2217 } 2218 2219 min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len 2220 + VXLAN_HLEN + iphdr_len; 2221 2222 /* Need space for new headers (invalidates iph ptr) */ 2223 err = skb_cow_head(skb, min_headroom); 2224 if (unlikely(err)) 2225 return err; 2226 2227 err = iptunnel_handle_offloads(skb, type); 2228 if (err) 2229 return err; 2230 2231 vxh = __skb_push(skb, sizeof(*vxh)); 2232 vxh->vx_flags = VXLAN_HF_VNI; 2233 vxh->vx_vni = vxlan_vni_field(vni); 2234 2235 if (type & SKB_GSO_TUNNEL_REMCSUM) { 2236 unsigned int start; 2237 2238 start = skb_checksum_start_offset(skb) - sizeof(struct vxlanhdr); 2239 vxh->vx_vni |= vxlan_compute_rco(start, skb->csum_offset); 2240 vxh->vx_flags |= VXLAN_HF_RCO; 2241 2242 if (!skb_is_gso(skb)) { 2243 skb->ip_summed = CHECKSUM_NONE; 2244 skb->encapsulation = 0; 2245 } 2246 } 2247 2248 if (vxflags & VXLAN_F_GBP) 2249 vxlan_build_gbp_hdr(vxh, md); 2250 if (vxflags & VXLAN_F_GPE) { 2251 err = vxlan_build_gpe_hdr(vxh, skb->protocol); 2252 if (err < 0) 2253 return err; 2254 inner_protocol = skb->protocol; 2255 } 2256 2257 skb_set_inner_protocol(skb, inner_protocol); 2258 return 0; 2259 } 2260 2261 /* Bypass encapsulation if the destination is local */ 2262 static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, 2263 struct vxlan_dev *dst_vxlan, __be32 vni, 2264 bool snoop) 2265 { 2266 union vxlan_addr loopback; 2267 union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip; 2268 unsigned int len = skb->len; 2269 struct net_device *dev; 2270 2271 skb->pkt_type = PACKET_HOST; 2272 skb->encapsulation = 0; 2273 skb->dev = dst_vxlan->dev; 2274 __skb_pull(skb, skb_network_offset(skb)); 2275 2276 if (remote_ip->sa.sa_family == AF_INET) { 2277 loopback.sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK); 2278 loopback.sa.sa_family = AF_INET; 2279 #if IS_ENABLED(CONFIG_IPV6) 2280 } else { 2281 loopback.sin6.sin6_addr = in6addr_loopback; 2282 loopback.sa.sa_family = AF_INET6; 2283 #endif 2284 } 2285 2286 rcu_read_lock(); 2287 dev = skb->dev; 2288 if (unlikely(!(dev->flags & IFF_UP))) { 2289 kfree_skb_reason(skb, SKB_DROP_REASON_DEV_READY); 2290 goto drop; 2291 } 2292 2293 if ((dst_vxlan->cfg.flags & VXLAN_F_LEARN) && snoop) 2294 vxlan_snoop(dev, &loopback, eth_hdr(skb)->h_source, 0, vni); 2295 2296 dev_dstats_tx_add(src_vxlan->dev, len); 2297 vxlan_vnifilter_count(src_vxlan, vni, NULL, VXLAN_VNI_STATS_TX, len); 2298 2299 if (__netif_rx(skb) == NET_RX_SUCCESS) { 2300 dev_dstats_rx_add(dst_vxlan->dev, len); 2301 vxlan_vnifilter_count(dst_vxlan, vni, NULL, VXLAN_VNI_STATS_RX, 2302 len); 2303 } else { 2304 drop: 2305 dev_dstats_rx_dropped(dev); 2306 vxlan_vnifilter_count(dst_vxlan, vni, NULL, 2307 VXLAN_VNI_STATS_RX_DROPS, 0); 2308 } 2309 rcu_read_unlock(); 2310 } 2311 2312 static int 
encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev, 2313 struct vxlan_dev *vxlan, 2314 int addr_family, 2315 __be16 dst_port, int dst_ifindex, __be32 vni, 2316 struct dst_entry *dst, 2317 u32 rt_flags) 2318 { 2319 #if IS_ENABLED(CONFIG_IPV6) 2320 /* IPv6 rt-flags are checked against RTF_LOCAL, but the value of 2321 * RTF_LOCAL is equal to RTCF_LOCAL. So to keep code simple 2322 * we can use RTCF_LOCAL which works for ipv4 and ipv6 route entry. 2323 */ 2324 BUILD_BUG_ON(RTCF_LOCAL != RTF_LOCAL); 2325 #endif 2326 /* Bypass encapsulation if the destination is local */ 2327 if (rt_flags & RTCF_LOCAL && 2328 !(rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) && 2329 vxlan->cfg.flags & VXLAN_F_LOCALBYPASS) { 2330 struct vxlan_dev *dst_vxlan; 2331 2332 dst_release(dst); 2333 dst_vxlan = vxlan_find_vni(vxlan->net, dst_ifindex, vni, 2334 addr_family, dst_port, 2335 vxlan->cfg.flags); 2336 if (!dst_vxlan) { 2337 DEV_STATS_INC(dev, tx_errors); 2338 vxlan_vnifilter_count(vxlan, vni, NULL, 2339 VXLAN_VNI_STATS_TX_ERRORS, 0); 2340 kfree_skb_reason(skb, SKB_DROP_REASON_VXLAN_VNI_NOT_FOUND); 2341 2342 return -ENOENT; 2343 } 2344 vxlan_encap_bypass(skb, vxlan, dst_vxlan, vni, true); 2345 return 1; 2346 } 2347 2348 return 0; 2349 } 2350 2351 void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, 2352 __be32 default_vni, struct vxlan_rdst *rdst, bool did_rsc) 2353 { 2354 struct dst_cache *dst_cache; 2355 struct ip_tunnel_info *info; 2356 struct ip_tunnel_key *pkey; 2357 struct ip_tunnel_key key; 2358 struct vxlan_dev *vxlan = netdev_priv(dev); 2359 const struct iphdr *old_iph; 2360 struct vxlan_metadata _md; 2361 struct vxlan_metadata *md = &_md; 2362 unsigned int pkt_len = skb->len; 2363 __be16 src_port = 0, dst_port; 2364 struct dst_entry *ndst = NULL; 2365 int addr_family; 2366 __u8 tos, ttl; 2367 int ifindex; 2368 int err; 2369 u32 flags = vxlan->cfg.flags; 2370 bool use_cache; 2371 bool udp_sum = false; 2372 bool xnet = !net_eq(vxlan->net, dev_net(vxlan->dev)); 2373 enum skb_drop_reason reason; 2374 bool no_eth_encap; 2375 __be32 vni = 0; 2376 2377 no_eth_encap = flags & VXLAN_F_GPE && skb->protocol != htons(ETH_P_TEB); 2378 reason = skb_vlan_inet_prepare(skb, no_eth_encap); 2379 if (reason) 2380 goto drop; 2381 2382 reason = SKB_DROP_REASON_NOT_SPECIFIED; 2383 old_iph = ip_hdr(skb); 2384 2385 info = skb_tunnel_info(skb); 2386 use_cache = ip_tunnel_dst_cache_usable(skb, info); 2387 2388 if (rdst) { 2389 memset(&key, 0, sizeof(key)); 2390 pkey = &key; 2391 2392 if (vxlan_addr_any(&rdst->remote_ip)) { 2393 if (did_rsc) { 2394 /* short-circuited back to local bridge */ 2395 vxlan_encap_bypass(skb, vxlan, vxlan, 2396 default_vni, true); 2397 return; 2398 } 2399 goto drop; 2400 } 2401 2402 addr_family = vxlan->cfg.saddr.sa.sa_family; 2403 dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port; 2404 vni = (rdst->remote_vni) ? 
: default_vni; 2405 ifindex = rdst->remote_ifindex; 2406 2407 if (addr_family == AF_INET) { 2408 key.u.ipv4.src = vxlan->cfg.saddr.sin.sin_addr.s_addr; 2409 key.u.ipv4.dst = rdst->remote_ip.sin.sin_addr.s_addr; 2410 } else { 2411 key.u.ipv6.src = vxlan->cfg.saddr.sin6.sin6_addr; 2412 key.u.ipv6.dst = rdst->remote_ip.sin6.sin6_addr; 2413 } 2414 2415 dst_cache = &rdst->dst_cache; 2416 md->gbp = skb->mark; 2417 if (flags & VXLAN_F_TTL_INHERIT) { 2418 ttl = ip_tunnel_get_ttl(old_iph, skb); 2419 } else { 2420 ttl = vxlan->cfg.ttl; 2421 if (!ttl && vxlan_addr_multicast(&rdst->remote_ip)) 2422 ttl = 1; 2423 } 2424 tos = vxlan->cfg.tos; 2425 if (tos == 1) 2426 tos = ip_tunnel_get_dsfield(old_iph, skb); 2427 if (tos && !info) 2428 use_cache = false; 2429 2430 if (addr_family == AF_INET) 2431 udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM_TX); 2432 else 2433 udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM6_TX); 2434 #if IS_ENABLED(CONFIG_IPV6) 2435 switch (vxlan->cfg.label_policy) { 2436 case VXLAN_LABEL_FIXED: 2437 key.label = vxlan->cfg.label; 2438 break; 2439 case VXLAN_LABEL_INHERIT: 2440 key.label = ip_tunnel_get_flowlabel(old_iph, skb); 2441 break; 2442 default: 2443 DEBUG_NET_WARN_ON_ONCE(1); 2444 goto drop; 2445 } 2446 #endif 2447 } else { 2448 if (!info) { 2449 WARN_ONCE(1, "%s: Missing encapsulation instructions\n", 2450 dev->name); 2451 goto drop; 2452 } 2453 pkey = &info->key; 2454 addr_family = ip_tunnel_info_af(info); 2455 dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port; 2456 vni = tunnel_id_to_key32(info->key.tun_id); 2457 ifindex = 0; 2458 dst_cache = &info->dst_cache; 2459 if (test_bit(IP_TUNNEL_VXLAN_OPT_BIT, info->key.tun_flags)) { 2460 if (info->options_len < sizeof(*md)) 2461 goto drop; 2462 md = ip_tunnel_info_opts(info); 2463 } 2464 ttl = info->key.ttl; 2465 tos = info->key.tos; 2466 udp_sum = test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags); 2467 } 2468 src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min, 2469 vxlan->cfg.port_max, true); 2470 2471 rcu_read_lock(); 2472 if (addr_family == AF_INET) { 2473 struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock); 2474 struct rtable *rt; 2475 __be16 df = 0; 2476 __be32 saddr; 2477 2478 if (!ifindex) 2479 ifindex = sock4->sock->sk->sk_bound_dev_if; 2480 2481 rt = udp_tunnel_dst_lookup(skb, dev, vxlan->net, ifindex, 2482 &saddr, pkey, src_port, dst_port, 2483 tos, use_cache ? 
dst_cache : NULL); 2484 if (IS_ERR(rt)) { 2485 err = PTR_ERR(rt); 2486 reason = SKB_DROP_REASON_IP_OUTNOROUTES; 2487 goto tx_error; 2488 } 2489 2490 if (!info) { 2491 /* Bypass encapsulation if the destination is local */ 2492 err = encap_bypass_if_local(skb, dev, vxlan, AF_INET, 2493 dst_port, ifindex, vni, 2494 &rt->dst, rt->rt_flags); 2495 if (err) 2496 goto out_unlock; 2497 2498 if (vxlan->cfg.df == VXLAN_DF_SET) { 2499 df = htons(IP_DF); 2500 } else if (vxlan->cfg.df == VXLAN_DF_INHERIT) { 2501 struct ethhdr *eth = eth_hdr(skb); 2502 2503 if (ntohs(eth->h_proto) == ETH_P_IPV6 || 2504 (ntohs(eth->h_proto) == ETH_P_IP && 2505 old_iph->frag_off & htons(IP_DF))) 2506 df = htons(IP_DF); 2507 } 2508 } else if (test_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, 2509 info->key.tun_flags)) { 2510 df = htons(IP_DF); 2511 } 2512 2513 ndst = &rt->dst; 2514 err = skb_tunnel_check_pmtu(skb, ndst, vxlan_headroom(flags & VXLAN_F_GPE), 2515 netif_is_any_bridge_port(dev)); 2516 if (err < 0) { 2517 goto tx_error; 2518 } else if (err) { 2519 if (info) { 2520 struct ip_tunnel_info *unclone; 2521 2522 unclone = skb_tunnel_info_unclone(skb); 2523 if (unlikely(!unclone)) 2524 goto tx_error; 2525 2526 unclone->key.u.ipv4.src = pkey->u.ipv4.dst; 2527 unclone->key.u.ipv4.dst = saddr; 2528 } 2529 vxlan_encap_bypass(skb, vxlan, vxlan, vni, false); 2530 dst_release(ndst); 2531 goto out_unlock; 2532 } 2533 2534 tos = ip_tunnel_ecn_encap(tos, old_iph, skb); 2535 ttl = ttl ? : ip4_dst_hoplimit(&rt->dst); 2536 err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr), 2537 vni, md, flags, udp_sum); 2538 if (err < 0) { 2539 reason = SKB_DROP_REASON_NOMEM; 2540 goto tx_error; 2541 } 2542 2543 udp_tunnel_xmit_skb(rt, sock4->sock->sk, skb, saddr, 2544 pkey->u.ipv4.dst, tos, ttl, df, 2545 src_port, dst_port, xnet, !udp_sum); 2546 #if IS_ENABLED(CONFIG_IPV6) 2547 } else { 2548 struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock); 2549 struct in6_addr saddr; 2550 2551 if (!ifindex) 2552 ifindex = sock6->sock->sk->sk_bound_dev_if; 2553 2554 ndst = udp_tunnel6_dst_lookup(skb, dev, vxlan->net, sock6->sock, 2555 ifindex, &saddr, pkey, 2556 src_port, dst_port, tos, 2557 use_cache ? dst_cache : NULL); 2558 if (IS_ERR(ndst)) { 2559 err = PTR_ERR(ndst); 2560 ndst = NULL; 2561 reason = SKB_DROP_REASON_IP_OUTNOROUTES; 2562 goto tx_error; 2563 } 2564 2565 if (!info) { 2566 u32 rt6i_flags = dst_rt6_info(ndst)->rt6i_flags; 2567 2568 err = encap_bypass_if_local(skb, dev, vxlan, AF_INET6, 2569 dst_port, ifindex, vni, 2570 ndst, rt6i_flags); 2571 if (err) 2572 goto out_unlock; 2573 } 2574 2575 err = skb_tunnel_check_pmtu(skb, ndst, 2576 vxlan_headroom((flags & VXLAN_F_GPE) | VXLAN_F_IPV6), 2577 netif_is_any_bridge_port(dev)); 2578 if (err < 0) { 2579 goto tx_error; 2580 } else if (err) { 2581 if (info) { 2582 struct ip_tunnel_info *unclone; 2583 2584 unclone = skb_tunnel_info_unclone(skb); 2585 if (unlikely(!unclone)) 2586 goto tx_error; 2587 2588 unclone->key.u.ipv6.src = pkey->u.ipv6.dst; 2589 unclone->key.u.ipv6.dst = saddr; 2590 } 2591 2592 vxlan_encap_bypass(skb, vxlan, vxlan, vni, false); 2593 dst_release(ndst); 2594 goto out_unlock; 2595 } 2596 2597 tos = ip_tunnel_ecn_encap(tos, old_iph, skb); 2598 ttl = ttl ? 
: ip6_dst_hoplimit(ndst); 2599 skb_scrub_packet(skb, xnet); 2600 err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr), 2601 vni, md, flags, udp_sum); 2602 if (err < 0) { 2603 reason = SKB_DROP_REASON_NOMEM; 2604 goto tx_error; 2605 } 2606 2607 udp_tunnel6_xmit_skb(ndst, sock6->sock->sk, skb, dev, 2608 &saddr, &pkey->u.ipv6.dst, tos, ttl, 2609 pkey->label, src_port, dst_port, !udp_sum); 2610 #endif 2611 } 2612 vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX, pkt_len); 2613 out_unlock: 2614 rcu_read_unlock(); 2615 return; 2616 2617 drop: 2618 dev_dstats_tx_dropped(dev); 2619 vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX_DROPS, 0); 2620 kfree_skb_reason(skb, reason); 2621 return; 2622 2623 tx_error: 2624 rcu_read_unlock(); 2625 if (err == -ELOOP) 2626 DEV_STATS_INC(dev, collisions); 2627 else if (err == -ENETUNREACH) 2628 DEV_STATS_INC(dev, tx_carrier_errors); 2629 dst_release(ndst); 2630 DEV_STATS_INC(dev, tx_errors); 2631 vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX_ERRORS, 0); 2632 kfree_skb_reason(skb, reason); 2633 } 2634 2635 static void vxlan_xmit_nh(struct sk_buff *skb, struct net_device *dev, 2636 struct vxlan_fdb *f, __be32 vni, bool did_rsc) 2637 { 2638 struct vxlan_rdst nh_rdst; 2639 struct nexthop *nh; 2640 bool do_xmit; 2641 u32 hash; 2642 2643 memset(&nh_rdst, 0, sizeof(struct vxlan_rdst)); 2644 hash = skb_get_hash(skb); 2645 2646 rcu_read_lock(); 2647 nh = rcu_dereference(f->nh); 2648 if (!nh) { 2649 rcu_read_unlock(); 2650 goto drop; 2651 } 2652 do_xmit = vxlan_fdb_nh_path_select(nh, hash, &nh_rdst); 2653 rcu_read_unlock(); 2654 2655 if (likely(do_xmit)) 2656 vxlan_xmit_one(skb, dev, vni, &nh_rdst, did_rsc); 2657 else 2658 goto drop; 2659 2660 return; 2661 2662 drop: 2663 dev_dstats_tx_dropped(dev); 2664 vxlan_vnifilter_count(netdev_priv(dev), vni, NULL, 2665 VXLAN_VNI_STATS_TX_DROPS, 0); 2666 dev_kfree_skb(skb); 2667 } 2668 2669 static netdev_tx_t vxlan_xmit_nhid(struct sk_buff *skb, struct net_device *dev, 2670 u32 nhid, __be32 vni) 2671 { 2672 struct vxlan_dev *vxlan = netdev_priv(dev); 2673 struct vxlan_rdst nh_rdst; 2674 struct nexthop *nh; 2675 bool do_xmit; 2676 u32 hash; 2677 2678 memset(&nh_rdst, 0, sizeof(struct vxlan_rdst)); 2679 hash = skb_get_hash(skb); 2680 2681 rcu_read_lock(); 2682 nh = nexthop_find_by_id(dev_net(dev), nhid); 2683 if (unlikely(!nh || !nexthop_is_fdb(nh) || !nexthop_is_multipath(nh))) { 2684 rcu_read_unlock(); 2685 goto drop; 2686 } 2687 do_xmit = vxlan_fdb_nh_path_select(nh, hash, &nh_rdst); 2688 rcu_read_unlock(); 2689 2690 if (vxlan->cfg.saddr.sa.sa_family != nh_rdst.remote_ip.sa.sa_family) 2691 goto drop; 2692 2693 if (likely(do_xmit)) 2694 vxlan_xmit_one(skb, dev, vni, &nh_rdst, false); 2695 else 2696 goto drop; 2697 2698 return NETDEV_TX_OK; 2699 2700 drop: 2701 dev_dstats_tx_dropped(dev); 2702 vxlan_vnifilter_count(netdev_priv(dev), vni, NULL, 2703 VXLAN_VNI_STATS_TX_DROPS, 0); 2704 dev_kfree_skb(skb); 2705 return NETDEV_TX_OK; 2706 } 2707 2708 /* Transmit local packets over Vxlan 2709 * 2710 * Outer IP header inherits ECN and DF from inner header. 2711 * Outer UDP destination is the VXLAN assigned port. 
2712 * source port is based on hash of flow 2713 */ 2714 static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) 2715 { 2716 struct vxlan_dev *vxlan = netdev_priv(dev); 2717 struct vxlan_rdst *rdst, *fdst = NULL; 2718 const struct ip_tunnel_info *info; 2719 struct vxlan_fdb *f; 2720 struct ethhdr *eth; 2721 __be32 vni = 0; 2722 u32 nhid = 0; 2723 bool did_rsc; 2724 2725 info = skb_tunnel_info(skb); 2726 2727 skb_reset_mac_header(skb); 2728 2729 if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) { 2730 if (info && info->mode & IP_TUNNEL_INFO_BRIDGE && 2731 info->mode & IP_TUNNEL_INFO_TX) { 2732 vni = tunnel_id_to_key32(info->key.tun_id); 2733 nhid = info->key.nhid; 2734 } else { 2735 if (info && info->mode & IP_TUNNEL_INFO_TX) 2736 vxlan_xmit_one(skb, dev, vni, NULL, false); 2737 else 2738 kfree_skb_reason(skb, SKB_DROP_REASON_TUNNEL_TXINFO); 2739 return NETDEV_TX_OK; 2740 } 2741 } 2742 2743 if (vxlan->cfg.flags & VXLAN_F_PROXY) { 2744 eth = eth_hdr(skb); 2745 if (ntohs(eth->h_proto) == ETH_P_ARP) 2746 return arp_reduce(dev, skb, vni); 2747 #if IS_ENABLED(CONFIG_IPV6) 2748 else if (ntohs(eth->h_proto) == ETH_P_IPV6 && 2749 pskb_may_pull(skb, sizeof(struct ipv6hdr) + 2750 sizeof(struct nd_msg)) && 2751 ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) { 2752 struct nd_msg *m = (struct nd_msg *)(ipv6_hdr(skb) + 1); 2753 2754 if (m->icmph.icmp6_code == 0 && 2755 m->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) 2756 return neigh_reduce(dev, skb, vni); 2757 } 2758 #endif 2759 } 2760 2761 if (nhid) 2762 return vxlan_xmit_nhid(skb, dev, nhid, vni); 2763 2764 if (vxlan->cfg.flags & VXLAN_F_MDB) { 2765 struct vxlan_mdb_entry *mdb_entry; 2766 2767 rcu_read_lock(); 2768 mdb_entry = vxlan_mdb_entry_skb_get(vxlan, skb, vni); 2769 if (mdb_entry) { 2770 netdev_tx_t ret; 2771 2772 ret = vxlan_mdb_xmit(vxlan, mdb_entry, skb); 2773 rcu_read_unlock(); 2774 return ret; 2775 } 2776 rcu_read_unlock(); 2777 } 2778 2779 eth = eth_hdr(skb); 2780 f = vxlan_find_mac(vxlan, eth->h_dest, vni); 2781 did_rsc = false; 2782 2783 if (f && (f->flags & NTF_ROUTER) && (vxlan->cfg.flags & VXLAN_F_RSC) && 2784 (ntohs(eth->h_proto) == ETH_P_IP || 2785 ntohs(eth->h_proto) == ETH_P_IPV6)) { 2786 did_rsc = route_shortcircuit(dev, skb); 2787 if (did_rsc) 2788 f = vxlan_find_mac(vxlan, eth->h_dest, vni); 2789 } 2790 2791 if (f == NULL) { 2792 f = vxlan_find_mac(vxlan, all_zeros_mac, vni); 2793 if (f == NULL) { 2794 if ((vxlan->cfg.flags & VXLAN_F_L2MISS) && 2795 !is_multicast_ether_addr(eth->h_dest)) 2796 vxlan_fdb_miss(vxlan, eth->h_dest); 2797 2798 dev_dstats_tx_dropped(dev); 2799 vxlan_vnifilter_count(vxlan, vni, NULL, 2800 VXLAN_VNI_STATS_TX_DROPS, 0); 2801 kfree_skb_reason(skb, SKB_DROP_REASON_VXLAN_NO_REMOTE); 2802 return NETDEV_TX_OK; 2803 } 2804 } 2805 2806 if (rcu_access_pointer(f->nh)) { 2807 vxlan_xmit_nh(skb, dev, f, 2808 (vni ? 
: vxlan->default_dst.remote_vni), did_rsc); 2809 } else { 2810 list_for_each_entry_rcu(rdst, &f->remotes, list) { 2811 struct sk_buff *skb1; 2812 2813 if (!fdst) { 2814 fdst = rdst; 2815 continue; 2816 } 2817 skb1 = skb_clone(skb, GFP_ATOMIC); 2818 if (skb1) 2819 vxlan_xmit_one(skb1, dev, vni, rdst, did_rsc); 2820 } 2821 if (fdst) 2822 vxlan_xmit_one(skb, dev, vni, fdst, did_rsc); 2823 else 2824 kfree_skb_reason(skb, SKB_DROP_REASON_VXLAN_NO_REMOTE); 2825 } 2826 2827 return NETDEV_TX_OK; 2828 } 2829 2830 /* Walk the forwarding table and purge stale entries */ 2831 static void vxlan_cleanup(struct timer_list *t) 2832 { 2833 struct vxlan_dev *vxlan = from_timer(vxlan, t, age_timer); 2834 unsigned long next_timer = jiffies + FDB_AGE_INTERVAL; 2835 unsigned int h; 2836 2837 if (!netif_running(vxlan->dev)) 2838 return; 2839 2840 for (h = 0; h < FDB_HASH_SIZE; ++h) { 2841 struct hlist_node *p, *n; 2842 2843 spin_lock(&vxlan->hash_lock[h]); 2844 hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) { 2845 struct vxlan_fdb *f 2846 = container_of(p, struct vxlan_fdb, hlist); 2847 unsigned long timeout; 2848 2849 if (f->state & (NUD_PERMANENT | NUD_NOARP)) 2850 continue; 2851 2852 if (f->flags & NTF_EXT_LEARNED) 2853 continue; 2854 2855 timeout = f->used + vxlan->cfg.age_interval * HZ; 2856 if (time_before_eq(timeout, jiffies)) { 2857 netdev_dbg(vxlan->dev, 2858 "garbage collect %pM\n", 2859 f->eth_addr); 2860 f->state = NUD_STALE; 2861 vxlan_fdb_destroy(vxlan, f, true, true); 2862 } else if (time_before(timeout, next_timer)) 2863 next_timer = timeout; 2864 } 2865 spin_unlock(&vxlan->hash_lock[h]); 2866 } 2867 2868 mod_timer(&vxlan->age_timer, next_timer); 2869 } 2870 2871 static void vxlan_vs_del_dev(struct vxlan_dev *vxlan) 2872 { 2873 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); 2874 2875 spin_lock(&vn->sock_lock); 2876 hlist_del_init_rcu(&vxlan->hlist4.hlist); 2877 #if IS_ENABLED(CONFIG_IPV6) 2878 hlist_del_init_rcu(&vxlan->hlist6.hlist); 2879 #endif 2880 spin_unlock(&vn->sock_lock); 2881 } 2882 2883 static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan, 2884 struct vxlan_dev_node *node) 2885 { 2886 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); 2887 __be32 vni = vxlan->default_dst.remote_vni; 2888 2889 node->vxlan = vxlan; 2890 spin_lock(&vn->sock_lock); 2891 hlist_add_head_rcu(&node->hlist, vni_head(vs, vni)); 2892 spin_unlock(&vn->sock_lock); 2893 } 2894 2895 /* Setup stats when device is created */ 2896 static int vxlan_init(struct net_device *dev) 2897 { 2898 struct vxlan_dev *vxlan = netdev_priv(dev); 2899 int err; 2900 2901 if (vxlan->cfg.flags & VXLAN_F_VNIFILTER) 2902 vxlan_vnigroup_init(vxlan); 2903 2904 err = gro_cells_init(&vxlan->gro_cells, dev); 2905 if (err) 2906 goto err_vnigroup_uninit; 2907 2908 err = vxlan_mdb_init(vxlan); 2909 if (err) 2910 goto err_gro_cells_destroy; 2911 2912 netdev_lockdep_set_classes(dev); 2913 return 0; 2914 2915 err_gro_cells_destroy: 2916 gro_cells_destroy(&vxlan->gro_cells); 2917 err_vnigroup_uninit: 2918 if (vxlan->cfg.flags & VXLAN_F_VNIFILTER) 2919 vxlan_vnigroup_uninit(vxlan); 2920 return err; 2921 } 2922 2923 static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan, __be32 vni) 2924 { 2925 struct vxlan_fdb *f; 2926 u32 hash_index = fdb_head_index(vxlan, all_zeros_mac, vni); 2927 2928 spin_lock_bh(&vxlan->hash_lock[hash_index]); 2929 f = __vxlan_find_mac(vxlan, all_zeros_mac, vni); 2930 if (f) 2931 vxlan_fdb_destroy(vxlan, f, true, true); 2932 spin_unlock_bh(&vxlan->hash_lock[hash_index]); 2933 } 
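/* Illustrative note (not part of the original driver): a rough sketch of the
 * ageing arithmetic used by vxlan_cleanup() above, assuming the default
 * configuration. With cfg.age_interval = FDB_AGE_DEFAULT (300 s), a learned
 * entry whose last-use timestamp is f->used = J becomes eligible for
 * garbage collection once
 *
 *	jiffies >= J + 300 * HZ
 *
 * Entries marked NUD_PERMANENT/NUD_NOARP or externally learned
 * (NTF_EXT_LEARNED) are skipped. When nothing has expired yet, the timer is
 * re-armed for the earliest pending timeout, capped at FDB_AGE_INTERVAL
 * (10 * HZ) from now, so a stale entry is normally reclaimed at, or shortly
 * after, its deadline on the next rescan (the timer is deferrable, so an
 * idle system may delay this slightly).
 */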
2934 2935 static void vxlan_uninit(struct net_device *dev) 2936 { 2937 struct vxlan_dev *vxlan = netdev_priv(dev); 2938 2939 vxlan_mdb_fini(vxlan); 2940 2941 if (vxlan->cfg.flags & VXLAN_F_VNIFILTER) 2942 vxlan_vnigroup_uninit(vxlan); 2943 2944 gro_cells_destroy(&vxlan->gro_cells); 2945 2946 vxlan_fdb_delete_default(vxlan, vxlan->cfg.vni); 2947 } 2948 2949 /* Start ageing timer and join group when device is brought up */ 2950 static int vxlan_open(struct net_device *dev) 2951 { 2952 struct vxlan_dev *vxlan = netdev_priv(dev); 2953 int ret; 2954 2955 ret = vxlan_sock_add(vxlan); 2956 if (ret < 0) 2957 return ret; 2958 2959 ret = vxlan_multicast_join(vxlan); 2960 if (ret) { 2961 vxlan_sock_release(vxlan); 2962 return ret; 2963 } 2964 2965 if (vxlan->cfg.age_interval) 2966 mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL); 2967 2968 return ret; 2969 } 2970 2971 struct vxlan_fdb_flush_desc { 2972 bool ignore_default_entry; 2973 unsigned long state; 2974 unsigned long state_mask; 2975 unsigned long flags; 2976 unsigned long flags_mask; 2977 __be32 src_vni; 2978 u32 nhid; 2979 __be32 vni; 2980 __be16 port; 2981 union vxlan_addr dst_ip; 2982 }; 2983 2984 static bool vxlan_fdb_is_default_entry(const struct vxlan_fdb *f, 2985 const struct vxlan_dev *vxlan) 2986 { 2987 return is_zero_ether_addr(f->eth_addr) && f->vni == vxlan->cfg.vni; 2988 } 2989 2990 static bool vxlan_fdb_nhid_matches(const struct vxlan_fdb *f, u32 nhid) 2991 { 2992 struct nexthop *nh = rtnl_dereference(f->nh); 2993 2994 return nh && nh->id == nhid; 2995 } 2996 2997 static bool vxlan_fdb_flush_matches(const struct vxlan_fdb *f, 2998 const struct vxlan_dev *vxlan, 2999 const struct vxlan_fdb_flush_desc *desc) 3000 { 3001 if (desc->state_mask && (f->state & desc->state_mask) != desc->state) 3002 return false; 3003 3004 if (desc->flags_mask && (f->flags & desc->flags_mask) != desc->flags) 3005 return false; 3006 3007 if (desc->ignore_default_entry && vxlan_fdb_is_default_entry(f, vxlan)) 3008 return false; 3009 3010 if (desc->src_vni && f->vni != desc->src_vni) 3011 return false; 3012 3013 if (desc->nhid && !vxlan_fdb_nhid_matches(f, desc->nhid)) 3014 return false; 3015 3016 return true; 3017 } 3018 3019 static bool 3020 vxlan_fdb_flush_should_match_remotes(const struct vxlan_fdb_flush_desc *desc) 3021 { 3022 return desc->vni || desc->port || desc->dst_ip.sa.sa_family; 3023 } 3024 3025 static bool 3026 vxlan_fdb_flush_remote_matches(const struct vxlan_fdb_flush_desc *desc, 3027 const struct vxlan_rdst *rd) 3028 { 3029 if (desc->vni && rd->remote_vni != desc->vni) 3030 return false; 3031 3032 if (desc->port && rd->remote_port != desc->port) 3033 return false; 3034 3035 if (desc->dst_ip.sa.sa_family && 3036 !vxlan_addr_equal(&rd->remote_ip, &desc->dst_ip)) 3037 return false; 3038 3039 return true; 3040 } 3041 3042 static void 3043 vxlan_fdb_flush_match_remotes(struct vxlan_fdb *f, struct vxlan_dev *vxlan, 3044 const struct vxlan_fdb_flush_desc *desc, 3045 bool *p_destroy_fdb) 3046 { 3047 bool remotes_flushed = false; 3048 struct vxlan_rdst *rd, *tmp; 3049 3050 list_for_each_entry_safe(rd, tmp, &f->remotes, list) { 3051 if (!vxlan_fdb_flush_remote_matches(desc, rd)) 3052 continue; 3053 3054 vxlan_fdb_dst_destroy(vxlan, f, rd, true); 3055 remotes_flushed = true; 3056 } 3057 3058 *p_destroy_fdb = remotes_flushed && list_empty(&f->remotes); 3059 } 3060 3061 /* Purge the forwarding table */ 3062 static void vxlan_flush(struct vxlan_dev *vxlan, 3063 const struct vxlan_fdb_flush_desc *desc) 3064 { 3065 bool match_remotes = 
vxlan_fdb_flush_should_match_remotes(desc); 3066 unsigned int h; 3067 3068 for (h = 0; h < FDB_HASH_SIZE; ++h) { 3069 struct hlist_node *p, *n; 3070 3071 spin_lock_bh(&vxlan->hash_lock[h]); 3072 hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) { 3073 struct vxlan_fdb *f 3074 = container_of(p, struct vxlan_fdb, hlist); 3075 3076 if (!vxlan_fdb_flush_matches(f, vxlan, desc)) 3077 continue; 3078 3079 if (match_remotes) { 3080 bool destroy_fdb = false; 3081 3082 vxlan_fdb_flush_match_remotes(f, vxlan, desc, 3083 &destroy_fdb); 3084 3085 if (!destroy_fdb) 3086 continue; 3087 } 3088 3089 vxlan_fdb_destroy(vxlan, f, true, true); 3090 } 3091 spin_unlock_bh(&vxlan->hash_lock[h]); 3092 } 3093 } 3094 3095 static const struct nla_policy vxlan_del_bulk_policy[NDA_MAX + 1] = { 3096 [NDA_SRC_VNI] = { .type = NLA_U32 }, 3097 [NDA_NH_ID] = { .type = NLA_U32 }, 3098 [NDA_VNI] = { .type = NLA_U32 }, 3099 [NDA_PORT] = { .type = NLA_U16 }, 3100 [NDA_DST] = NLA_POLICY_RANGE(NLA_BINARY, sizeof(struct in_addr), 3101 sizeof(struct in6_addr)), 3102 [NDA_NDM_STATE_MASK] = { .type = NLA_U16 }, 3103 [NDA_NDM_FLAGS_MASK] = { .type = NLA_U8 }, 3104 }; 3105 3106 #define VXLAN_FDB_FLUSH_IGNORED_NDM_FLAGS (NTF_MASTER | NTF_SELF) 3107 #define VXLAN_FDB_FLUSH_ALLOWED_NDM_STATES (NUD_PERMANENT | NUD_NOARP) 3108 #define VXLAN_FDB_FLUSH_ALLOWED_NDM_FLAGS (NTF_EXT_LEARNED | NTF_OFFLOADED | \ 3109 NTF_ROUTER) 3110 3111 static int vxlan_fdb_delete_bulk(struct nlmsghdr *nlh, struct net_device *dev, 3112 struct netlink_ext_ack *extack) 3113 { 3114 struct vxlan_dev *vxlan = netdev_priv(dev); 3115 struct vxlan_fdb_flush_desc desc = {}; 3116 struct ndmsg *ndm = nlmsg_data(nlh); 3117 struct nlattr *tb[NDA_MAX + 1]; 3118 u8 ndm_flags; 3119 int err; 3120 3121 ndm_flags = ndm->ndm_flags & ~VXLAN_FDB_FLUSH_IGNORED_NDM_FLAGS; 3122 3123 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, vxlan_del_bulk_policy, 3124 extack); 3125 if (err) 3126 return err; 3127 3128 if (ndm_flags & ~VXLAN_FDB_FLUSH_ALLOWED_NDM_FLAGS) { 3129 NL_SET_ERR_MSG(extack, "Unsupported fdb flush ndm flag bits set"); 3130 return -EINVAL; 3131 } 3132 if (ndm->ndm_state & ~VXLAN_FDB_FLUSH_ALLOWED_NDM_STATES) { 3133 NL_SET_ERR_MSG(extack, "Unsupported fdb flush ndm state bits set"); 3134 return -EINVAL; 3135 } 3136 3137 desc.state = ndm->ndm_state; 3138 desc.flags = ndm_flags; 3139 3140 if (tb[NDA_NDM_STATE_MASK]) 3141 desc.state_mask = nla_get_u16(tb[NDA_NDM_STATE_MASK]); 3142 3143 if (tb[NDA_NDM_FLAGS_MASK]) 3144 desc.flags_mask = nla_get_u8(tb[NDA_NDM_FLAGS_MASK]); 3145 3146 if (tb[NDA_SRC_VNI]) 3147 desc.src_vni = cpu_to_be32(nla_get_u32(tb[NDA_SRC_VNI])); 3148 3149 if (tb[NDA_NH_ID]) 3150 desc.nhid = nla_get_u32(tb[NDA_NH_ID]); 3151 3152 if (tb[NDA_VNI]) 3153 desc.vni = cpu_to_be32(nla_get_u32(tb[NDA_VNI])); 3154 3155 if (tb[NDA_PORT]) 3156 desc.port = nla_get_be16(tb[NDA_PORT]); 3157 3158 if (tb[NDA_DST]) { 3159 union vxlan_addr ip; 3160 3161 err = vxlan_nla_get_addr(&ip, tb[NDA_DST]); 3162 if (err) { 3163 NL_SET_ERR_MSG_ATTR(extack, tb[NDA_DST], 3164 "Unsupported address family"); 3165 return err; 3166 } 3167 desc.dst_ip = ip; 3168 } 3169 3170 vxlan_flush(vxlan, &desc); 3171 3172 return 0; 3173 } 3174 3175 /* Cleanup timer and forwarding table on shutdown */ 3176 static int vxlan_stop(struct net_device *dev) 3177 { 3178 struct vxlan_dev *vxlan = netdev_priv(dev); 3179 struct vxlan_fdb_flush_desc desc = { 3180 /* Default entry is deleted at vxlan_uninit. 
*/ 3181 .ignore_default_entry = true, 3182 .state = 0, 3183 .state_mask = NUD_PERMANENT | NUD_NOARP, 3184 }; 3185 3186 vxlan_multicast_leave(vxlan); 3187 3188 del_timer_sync(&vxlan->age_timer); 3189 3190 vxlan_flush(vxlan, &desc); 3191 vxlan_sock_release(vxlan); 3192 3193 return 0; 3194 } 3195 3196 /* Stub, nothing needs to be done. */ 3197 static void vxlan_set_multicast_list(struct net_device *dev) 3198 { 3199 } 3200 3201 static int vxlan_change_mtu(struct net_device *dev, int new_mtu) 3202 { 3203 struct vxlan_dev *vxlan = netdev_priv(dev); 3204 struct vxlan_rdst *dst = &vxlan->default_dst; 3205 struct net_device *lowerdev = __dev_get_by_index(vxlan->net, 3206 dst->remote_ifindex); 3207 3208 /* This check is different than dev->max_mtu, because it looks at 3209 * the lowerdev->mtu, rather than the static dev->max_mtu 3210 */ 3211 if (lowerdev) { 3212 int max_mtu = lowerdev->mtu - vxlan_headroom(vxlan->cfg.flags); 3213 if (new_mtu > max_mtu) 3214 return -EINVAL; 3215 } 3216 3217 WRITE_ONCE(dev->mtu, new_mtu); 3218 return 0; 3219 } 3220 3221 static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) 3222 { 3223 struct vxlan_dev *vxlan = netdev_priv(dev); 3224 struct ip_tunnel_info *info = skb_tunnel_info(skb); 3225 __be16 sport, dport; 3226 3227 sport = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min, 3228 vxlan->cfg.port_max, true); 3229 dport = info->key.tp_dst ? : vxlan->cfg.dst_port; 3230 3231 if (ip_tunnel_info_af(info) == AF_INET) { 3232 struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock); 3233 struct rtable *rt; 3234 3235 if (!sock4) 3236 return -EIO; 3237 3238 rt = udp_tunnel_dst_lookup(skb, dev, vxlan->net, 0, 3239 &info->key.u.ipv4.src, 3240 &info->key, 3241 sport, dport, info->key.tos, 3242 &info->dst_cache); 3243 if (IS_ERR(rt)) 3244 return PTR_ERR(rt); 3245 ip_rt_put(rt); 3246 } else { 3247 #if IS_ENABLED(CONFIG_IPV6) 3248 struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock); 3249 struct dst_entry *ndst; 3250 3251 if (!sock6) 3252 return -EIO; 3253 3254 ndst = udp_tunnel6_dst_lookup(skb, dev, vxlan->net, sock6->sock, 3255 0, &info->key.u.ipv6.src, 3256 &info->key, 3257 sport, dport, info->key.tos, 3258 &info->dst_cache); 3259 if (IS_ERR(ndst)) 3260 return PTR_ERR(ndst); 3261 dst_release(ndst); 3262 #else /* !CONFIG_IPV6 */ 3263 return -EPFNOSUPPORT; 3264 #endif 3265 } 3266 info->key.tp_src = sport; 3267 info->key.tp_dst = dport; 3268 return 0; 3269 } 3270 3271 static const struct net_device_ops vxlan_netdev_ether_ops = { 3272 .ndo_init = vxlan_init, 3273 .ndo_uninit = vxlan_uninit, 3274 .ndo_open = vxlan_open, 3275 .ndo_stop = vxlan_stop, 3276 .ndo_start_xmit = vxlan_xmit, 3277 .ndo_set_rx_mode = vxlan_set_multicast_list, 3278 .ndo_change_mtu = vxlan_change_mtu, 3279 .ndo_validate_addr = eth_validate_addr, 3280 .ndo_set_mac_address = eth_mac_addr, 3281 .ndo_fdb_add = vxlan_fdb_add, 3282 .ndo_fdb_del = vxlan_fdb_delete, 3283 .ndo_fdb_del_bulk = vxlan_fdb_delete_bulk, 3284 .ndo_fdb_dump = vxlan_fdb_dump, 3285 .ndo_fdb_get = vxlan_fdb_get, 3286 .ndo_mdb_add = vxlan_mdb_add, 3287 .ndo_mdb_del = vxlan_mdb_del, 3288 .ndo_mdb_del_bulk = vxlan_mdb_del_bulk, 3289 .ndo_mdb_dump = vxlan_mdb_dump, 3290 .ndo_mdb_get = vxlan_mdb_get, 3291 .ndo_fill_metadata_dst = vxlan_fill_metadata_dst, 3292 }; 3293 3294 static const struct net_device_ops vxlan_netdev_raw_ops = { 3295 .ndo_init = vxlan_init, 3296 .ndo_uninit = vxlan_uninit, 3297 .ndo_open = vxlan_open, 3298 .ndo_stop = vxlan_stop, 3299 .ndo_start_xmit = vxlan_xmit, 3300 .ndo_change_mtu = 
vxlan_change_mtu, 3301 .ndo_fill_metadata_dst = vxlan_fill_metadata_dst, 3302 }; 3303 3304 /* Info for udev, that this is a virtual tunnel endpoint */ 3305 static const struct device_type vxlan_type = { 3306 .name = "vxlan", 3307 }; 3308 3309 /* Calls the ndo_udp_tunnel_add of the caller in order to 3310 * supply the listening VXLAN udp ports. Callers are expected 3311 * to implement the ndo_udp_tunnel_add. 3312 */ 3313 static void vxlan_offload_rx_ports(struct net_device *dev, bool push) 3314 { 3315 struct vxlan_sock *vs; 3316 struct net *net = dev_net(dev); 3317 struct vxlan_net *vn = net_generic(net, vxlan_net_id); 3318 unsigned int i; 3319 3320 spin_lock(&vn->sock_lock); 3321 for (i = 0; i < PORT_HASH_SIZE; ++i) { 3322 hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) { 3323 unsigned short type; 3324 3325 if (vs->flags & VXLAN_F_GPE) 3326 type = UDP_TUNNEL_TYPE_VXLAN_GPE; 3327 else 3328 type = UDP_TUNNEL_TYPE_VXLAN; 3329 3330 if (push) 3331 udp_tunnel_push_rx_port(dev, vs->sock, type); 3332 else 3333 udp_tunnel_drop_rx_port(dev, vs->sock, type); 3334 } 3335 } 3336 spin_unlock(&vn->sock_lock); 3337 } 3338 3339 /* Initialize the device structure. */ 3340 static void vxlan_setup(struct net_device *dev) 3341 { 3342 struct vxlan_dev *vxlan = netdev_priv(dev); 3343 unsigned int h; 3344 3345 eth_hw_addr_random(dev); 3346 ether_setup(dev); 3347 3348 dev->needs_free_netdev = true; 3349 SET_NETDEV_DEVTYPE(dev, &vxlan_type); 3350 3351 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST; 3352 dev->features |= NETIF_F_RXCSUM; 3353 dev->features |= NETIF_F_GSO_SOFTWARE; 3354 3355 dev->vlan_features = dev->features; 3356 dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST; 3357 dev->hw_features |= NETIF_F_RXCSUM; 3358 dev->hw_features |= NETIF_F_GSO_SOFTWARE; 3359 netif_keep_dst(dev); 3360 dev->priv_flags |= IFF_NO_QUEUE; 3361 dev->change_proto_down = true; 3362 dev->lltx = true; 3363 3364 /* MTU range: 68 - 65535 */ 3365 dev->min_mtu = ETH_MIN_MTU; 3366 dev->max_mtu = ETH_MAX_MTU; 3367 3368 dev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS; 3369 INIT_LIST_HEAD(&vxlan->next); 3370 3371 timer_setup(&vxlan->age_timer, vxlan_cleanup, TIMER_DEFERRABLE); 3372 3373 vxlan->dev = dev; 3374 3375 for (h = 0; h < FDB_HASH_SIZE; ++h) { 3376 spin_lock_init(&vxlan->hash_lock[h]); 3377 INIT_HLIST_HEAD(&vxlan->fdb_head[h]); 3378 } 3379 } 3380 3381 static void vxlan_ether_setup(struct net_device *dev) 3382 { 3383 dev->priv_flags &= ~IFF_TX_SKB_SHARING; 3384 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 3385 dev->netdev_ops = &vxlan_netdev_ether_ops; 3386 } 3387 3388 static void vxlan_raw_setup(struct net_device *dev) 3389 { 3390 dev->header_ops = NULL; 3391 dev->type = ARPHRD_NONE; 3392 dev->hard_header_len = 0; 3393 dev->addr_len = 0; 3394 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; 3395 dev->netdev_ops = &vxlan_netdev_raw_ops; 3396 } 3397 3398 static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = { 3399 [IFLA_VXLAN_UNSPEC] = { .strict_start_type = IFLA_VXLAN_LOCALBYPASS }, 3400 [IFLA_VXLAN_ID] = { .type = NLA_U32 }, 3401 [IFLA_VXLAN_GROUP] = { .len = sizeof_field(struct iphdr, daddr) }, 3402 [IFLA_VXLAN_GROUP6] = { .len = sizeof(struct in6_addr) }, 3403 [IFLA_VXLAN_LINK] = { .type = NLA_U32 }, 3404 [IFLA_VXLAN_LOCAL] = { .len = sizeof_field(struct iphdr, saddr) }, 3405 [IFLA_VXLAN_LOCAL6] = { .len = sizeof(struct in6_addr) }, 3406 [IFLA_VXLAN_TOS] = { .type = NLA_U8 }, 3407 [IFLA_VXLAN_TTL] = { .type = NLA_U8 }, 3408 [IFLA_VXLAN_LABEL] = { .type = NLA_U32 }, 3409 
[IFLA_VXLAN_LEARNING] = { .type = NLA_U8 }, 3410 [IFLA_VXLAN_AGEING] = { .type = NLA_U32 }, 3411 [IFLA_VXLAN_LIMIT] = { .type = NLA_U32 }, 3412 [IFLA_VXLAN_PORT_RANGE] = { .len = sizeof(struct ifla_vxlan_port_range) }, 3413 [IFLA_VXLAN_PROXY] = { .type = NLA_U8 }, 3414 [IFLA_VXLAN_RSC] = { .type = NLA_U8 }, 3415 [IFLA_VXLAN_L2MISS] = { .type = NLA_U8 }, 3416 [IFLA_VXLAN_L3MISS] = { .type = NLA_U8 }, 3417 [IFLA_VXLAN_COLLECT_METADATA] = { .type = NLA_U8 }, 3418 [IFLA_VXLAN_PORT] = { .type = NLA_U16 }, 3419 [IFLA_VXLAN_UDP_CSUM] = { .type = NLA_U8 }, 3420 [IFLA_VXLAN_UDP_ZERO_CSUM6_TX] = { .type = NLA_U8 }, 3421 [IFLA_VXLAN_UDP_ZERO_CSUM6_RX] = { .type = NLA_U8 }, 3422 [IFLA_VXLAN_REMCSUM_TX] = { .type = NLA_U8 }, 3423 [IFLA_VXLAN_REMCSUM_RX] = { .type = NLA_U8 }, 3424 [IFLA_VXLAN_GBP] = { .type = NLA_FLAG, }, 3425 [IFLA_VXLAN_GPE] = { .type = NLA_FLAG, }, 3426 [IFLA_VXLAN_REMCSUM_NOPARTIAL] = { .type = NLA_FLAG }, 3427 [IFLA_VXLAN_TTL_INHERIT] = { .type = NLA_FLAG }, 3428 [IFLA_VXLAN_DF] = { .type = NLA_U8 }, 3429 [IFLA_VXLAN_VNIFILTER] = { .type = NLA_U8 }, 3430 [IFLA_VXLAN_LOCALBYPASS] = NLA_POLICY_MAX(NLA_U8, 1), 3431 [IFLA_VXLAN_LABEL_POLICY] = NLA_POLICY_MAX(NLA_U32, VXLAN_LABEL_MAX), 3432 [IFLA_VXLAN_RESERVED_BITS] = NLA_POLICY_EXACT_LEN(sizeof(struct vxlanhdr)), 3433 }; 3434 3435 static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[], 3436 struct netlink_ext_ack *extack) 3437 { 3438 if (tb[IFLA_ADDRESS]) { 3439 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) { 3440 NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_ADDRESS], 3441 "Provided link layer address is not Ethernet"); 3442 return -EINVAL; 3443 } 3444 3445 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) { 3446 NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_ADDRESS], 3447 "Provided Ethernet address is not unicast"); 3448 return -EADDRNOTAVAIL; 3449 } 3450 } 3451 3452 if (tb[IFLA_MTU]) { 3453 u32 mtu = nla_get_u32(tb[IFLA_MTU]); 3454 3455 if (mtu < ETH_MIN_MTU || mtu > ETH_MAX_MTU) { 3456 NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_MTU], 3457 "MTU must be between 68 and 65535"); 3458 return -EINVAL; 3459 } 3460 } 3461 3462 if (!data) { 3463 NL_SET_ERR_MSG(extack, 3464 "Required attributes not provided to perform the operation"); 3465 return -EINVAL; 3466 } 3467 3468 if (data[IFLA_VXLAN_ID]) { 3469 u32 id = nla_get_u32(data[IFLA_VXLAN_ID]); 3470 3471 if (id >= VXLAN_N_VID) { 3472 NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VXLAN_ID], 3473 "VXLAN ID must be lower than 16777216"); 3474 return -ERANGE; 3475 } 3476 } 3477 3478 if (data[IFLA_VXLAN_PORT_RANGE]) { 3479 const struct ifla_vxlan_port_range *p 3480 = nla_data(data[IFLA_VXLAN_PORT_RANGE]); 3481 3482 if (ntohs(p->high) < ntohs(p->low)) { 3483 NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VXLAN_PORT_RANGE], 3484 "Invalid source port range"); 3485 return -EINVAL; 3486 } 3487 } 3488 3489 if (data[IFLA_VXLAN_DF]) { 3490 enum ifla_vxlan_df df = nla_get_u8(data[IFLA_VXLAN_DF]); 3491 3492 if (df < 0 || df > VXLAN_DF_MAX) { 3493 NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VXLAN_DF], 3494 "Invalid DF attribute"); 3495 return -EINVAL; 3496 } 3497 } 3498 3499 return 0; 3500 } 3501 3502 static void vxlan_get_drvinfo(struct net_device *netdev, 3503 struct ethtool_drvinfo *drvinfo) 3504 { 3505 strscpy(drvinfo->version, VXLAN_VERSION, sizeof(drvinfo->version)); 3506 strscpy(drvinfo->driver, "vxlan", sizeof(drvinfo->driver)); 3507 } 3508 3509 static int vxlan_get_link_ksettings(struct net_device *dev, 3510 struct ethtool_link_ksettings *cmd) 3511 { 3512 struct vxlan_dev *vxlan = netdev_priv(dev); 3513 struct vxlan_rdst *dst = 
&vxlan->default_dst; 3514 struct net_device *lowerdev = __dev_get_by_index(vxlan->net, 3515 dst->remote_ifindex); 3516 3517 if (!lowerdev) { 3518 cmd->base.duplex = DUPLEX_UNKNOWN; 3519 cmd->base.port = PORT_OTHER; 3520 cmd->base.speed = SPEED_UNKNOWN; 3521 3522 return 0; 3523 } 3524 3525 return __ethtool_get_link_ksettings(lowerdev, cmd); 3526 } 3527 3528 static const struct ethtool_ops vxlan_ethtool_ops = { 3529 .get_drvinfo = vxlan_get_drvinfo, 3530 .get_link = ethtool_op_get_link, 3531 .get_link_ksettings = vxlan_get_link_ksettings, 3532 }; 3533 3534 static struct socket *vxlan_create_sock(struct net *net, bool ipv6, 3535 __be16 port, u32 flags, int ifindex) 3536 { 3537 struct socket *sock; 3538 struct udp_port_cfg udp_conf; 3539 int err; 3540 3541 memset(&udp_conf, 0, sizeof(udp_conf)); 3542 3543 if (ipv6) { 3544 udp_conf.family = AF_INET6; 3545 udp_conf.use_udp6_rx_checksums = 3546 !(flags & VXLAN_F_UDP_ZERO_CSUM6_RX); 3547 udp_conf.ipv6_v6only = 1; 3548 } else { 3549 udp_conf.family = AF_INET; 3550 } 3551 3552 udp_conf.local_udp_port = port; 3553 udp_conf.bind_ifindex = ifindex; 3554 3555 /* Open UDP socket */ 3556 err = udp_sock_create(net, &udp_conf, &sock); 3557 if (err < 0) 3558 return ERR_PTR(err); 3559 3560 udp_allow_gso(sock->sk); 3561 return sock; 3562 } 3563 3564 /* Create new listen socket if needed */ 3565 static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6, 3566 __be16 port, u32 flags, 3567 int ifindex) 3568 { 3569 struct vxlan_net *vn = net_generic(net, vxlan_net_id); 3570 struct vxlan_sock *vs; 3571 struct socket *sock; 3572 unsigned int h; 3573 struct udp_tunnel_sock_cfg tunnel_cfg; 3574 3575 vs = kzalloc(sizeof(*vs), GFP_KERNEL); 3576 if (!vs) 3577 return ERR_PTR(-ENOMEM); 3578 3579 for (h = 0; h < VNI_HASH_SIZE; ++h) 3580 INIT_HLIST_HEAD(&vs->vni_list[h]); 3581 3582 sock = vxlan_create_sock(net, ipv6, port, flags, ifindex); 3583 if (IS_ERR(sock)) { 3584 kfree(vs); 3585 return ERR_CAST(sock); 3586 } 3587 3588 vs->sock = sock; 3589 refcount_set(&vs->refcnt, 1); 3590 vs->flags = (flags & VXLAN_F_RCV_FLAGS); 3591 3592 spin_lock(&vn->sock_lock); 3593 hlist_add_head_rcu(&vs->hlist, vs_head(net, port)); 3594 udp_tunnel_notify_add_rx_port(sock, 3595 (vs->flags & VXLAN_F_GPE) ? 3596 UDP_TUNNEL_TYPE_VXLAN_GPE : 3597 UDP_TUNNEL_TYPE_VXLAN); 3598 spin_unlock(&vn->sock_lock); 3599 3600 /* Mark socket as an encapsulation socket. 
*/ 3601 memset(&tunnel_cfg, 0, sizeof(tunnel_cfg)); 3602 tunnel_cfg.sk_user_data = vs; 3603 tunnel_cfg.encap_type = 1; 3604 tunnel_cfg.encap_rcv = vxlan_rcv; 3605 tunnel_cfg.encap_err_lookup = vxlan_err_lookup; 3606 tunnel_cfg.encap_destroy = NULL; 3607 if (vs->flags & VXLAN_F_GPE) { 3608 tunnel_cfg.gro_receive = vxlan_gpe_gro_receive; 3609 tunnel_cfg.gro_complete = vxlan_gpe_gro_complete; 3610 } else { 3611 tunnel_cfg.gro_receive = vxlan_gro_receive; 3612 tunnel_cfg.gro_complete = vxlan_gro_complete; 3613 } 3614 3615 setup_udp_tunnel_sock(net, sock, &tunnel_cfg); 3616 3617 return vs; 3618 } 3619 3620 static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6) 3621 { 3622 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); 3623 bool metadata = vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA; 3624 struct vxlan_sock *vs = NULL; 3625 struct vxlan_dev_node *node; 3626 int l3mdev_index = 0; 3627 3628 if (vxlan->cfg.remote_ifindex) 3629 l3mdev_index = l3mdev_master_upper_ifindex_by_index( 3630 vxlan->net, vxlan->cfg.remote_ifindex); 3631 3632 if (!vxlan->cfg.no_share) { 3633 spin_lock(&vn->sock_lock); 3634 vs = vxlan_find_sock(vxlan->net, ipv6 ? AF_INET6 : AF_INET, 3635 vxlan->cfg.dst_port, vxlan->cfg.flags, 3636 l3mdev_index); 3637 if (vs && !refcount_inc_not_zero(&vs->refcnt)) { 3638 spin_unlock(&vn->sock_lock); 3639 return -EBUSY; 3640 } 3641 spin_unlock(&vn->sock_lock); 3642 } 3643 if (!vs) 3644 vs = vxlan_socket_create(vxlan->net, ipv6, 3645 vxlan->cfg.dst_port, vxlan->cfg.flags, 3646 l3mdev_index); 3647 if (IS_ERR(vs)) 3648 return PTR_ERR(vs); 3649 #if IS_ENABLED(CONFIG_IPV6) 3650 if (ipv6) { 3651 rcu_assign_pointer(vxlan->vn6_sock, vs); 3652 node = &vxlan->hlist6; 3653 } else 3654 #endif 3655 { 3656 rcu_assign_pointer(vxlan->vn4_sock, vs); 3657 node = &vxlan->hlist4; 3658 } 3659 3660 if (metadata && (vxlan->cfg.flags & VXLAN_F_VNIFILTER)) 3661 vxlan_vs_add_vnigrp(vxlan, vs, ipv6); 3662 else 3663 vxlan_vs_add_dev(vs, vxlan, node); 3664 3665 return 0; 3666 } 3667 3668 static int vxlan_sock_add(struct vxlan_dev *vxlan) 3669 { 3670 bool metadata = vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA; 3671 bool ipv6 = vxlan->cfg.flags & VXLAN_F_IPV6 || metadata; 3672 bool ipv4 = !ipv6 || metadata; 3673 int ret = 0; 3674 3675 RCU_INIT_POINTER(vxlan->vn4_sock, NULL); 3676 #if IS_ENABLED(CONFIG_IPV6) 3677 RCU_INIT_POINTER(vxlan->vn6_sock, NULL); 3678 if (ipv6) { 3679 ret = __vxlan_sock_add(vxlan, true); 3680 if (ret < 0 && ret != -EAFNOSUPPORT) 3681 ipv4 = false; 3682 } 3683 #endif 3684 if (ipv4) 3685 ret = __vxlan_sock_add(vxlan, false); 3686 if (ret < 0) 3687 vxlan_sock_release(vxlan); 3688 return ret; 3689 } 3690 3691 int vxlan_vni_in_use(struct net *src_net, struct vxlan_dev *vxlan, 3692 struct vxlan_config *conf, __be32 vni) 3693 { 3694 struct vxlan_net *vn = net_generic(src_net, vxlan_net_id); 3695 struct vxlan_dev *tmp; 3696 3697 list_for_each_entry(tmp, &vn->vxlan_list, next) { 3698 if (tmp == vxlan) 3699 continue; 3700 if (tmp->cfg.flags & VXLAN_F_VNIFILTER) { 3701 if (!vxlan_vnifilter_lookup(tmp, vni)) 3702 continue; 3703 } else if (tmp->cfg.vni != vni) { 3704 continue; 3705 } 3706 if (tmp->cfg.dst_port != conf->dst_port) 3707 continue; 3708 if ((tmp->cfg.flags & (VXLAN_F_RCV_FLAGS | VXLAN_F_IPV6)) != 3709 (conf->flags & (VXLAN_F_RCV_FLAGS | VXLAN_F_IPV6))) 3710 continue; 3711 3712 if ((conf->flags & VXLAN_F_IPV6_LINKLOCAL) && 3713 tmp->cfg.remote_ifindex != conf->remote_ifindex) 3714 continue; 3715 3716 return -EEXIST; 3717 } 3718 3719 return 0; 3720 } 3721 3722 static int 
vxlan_config_validate(struct net *src_net, struct vxlan_config *conf, 3723 struct net_device **lower, 3724 struct vxlan_dev *old, 3725 struct netlink_ext_ack *extack) 3726 { 3727 bool use_ipv6 = false; 3728 3729 if (conf->flags & VXLAN_F_GPE) { 3730 /* For now, allow GPE only together with 3731 * COLLECT_METADATA. This can be relaxed later; in such 3732 * case, the other side of the PtP link will have to be 3733 * provided. 3734 */ 3735 if ((conf->flags & ~VXLAN_F_ALLOWED_GPE) || 3736 !(conf->flags & VXLAN_F_COLLECT_METADATA)) { 3737 NL_SET_ERR_MSG(extack, 3738 "VXLAN GPE does not support this combination of attributes"); 3739 return -EINVAL; 3740 } 3741 } 3742 3743 if (!conf->remote_ip.sa.sa_family && !conf->saddr.sa.sa_family) { 3744 /* Unless IPv6 is explicitly requested, assume IPv4 */ 3745 conf->remote_ip.sa.sa_family = AF_INET; 3746 conf->saddr.sa.sa_family = AF_INET; 3747 } else if (!conf->remote_ip.sa.sa_family) { 3748 conf->remote_ip.sa.sa_family = conf->saddr.sa.sa_family; 3749 } else if (!conf->saddr.sa.sa_family) { 3750 conf->saddr.sa.sa_family = conf->remote_ip.sa.sa_family; 3751 } 3752 3753 if (conf->saddr.sa.sa_family != conf->remote_ip.sa.sa_family) { 3754 NL_SET_ERR_MSG(extack, 3755 "Local and remote address must be from the same family"); 3756 return -EINVAL; 3757 } 3758 3759 if (vxlan_addr_multicast(&conf->saddr)) { 3760 NL_SET_ERR_MSG(extack, "Local address cannot be multicast"); 3761 return -EINVAL; 3762 } 3763 3764 if (conf->saddr.sa.sa_family == AF_INET6) { 3765 if (!IS_ENABLED(CONFIG_IPV6)) { 3766 NL_SET_ERR_MSG(extack, 3767 "IPv6 support not enabled in the kernel"); 3768 return -EPFNOSUPPORT; 3769 } 3770 use_ipv6 = true; 3771 conf->flags |= VXLAN_F_IPV6; 3772 3773 if (!(conf->flags & VXLAN_F_COLLECT_METADATA)) { 3774 int local_type = 3775 ipv6_addr_type(&conf->saddr.sin6.sin6_addr); 3776 int remote_type = 3777 ipv6_addr_type(&conf->remote_ip.sin6.sin6_addr); 3778 3779 if (local_type & IPV6_ADDR_LINKLOCAL) { 3780 if (!(remote_type & IPV6_ADDR_LINKLOCAL) && 3781 (remote_type != IPV6_ADDR_ANY)) { 3782 NL_SET_ERR_MSG(extack, 3783 "Invalid combination of local and remote address scopes"); 3784 return -EINVAL; 3785 } 3786 3787 conf->flags |= VXLAN_F_IPV6_LINKLOCAL; 3788 } else { 3789 if (remote_type == 3790 (IPV6_ADDR_UNICAST | IPV6_ADDR_LINKLOCAL)) { 3791 NL_SET_ERR_MSG(extack, 3792 "Invalid combination of local and remote address scopes"); 3793 return -EINVAL; 3794 } 3795 3796 conf->flags &= ~VXLAN_F_IPV6_LINKLOCAL; 3797 } 3798 } 3799 } 3800 3801 if (conf->label && !use_ipv6) { 3802 NL_SET_ERR_MSG(extack, 3803 "Label attribute only applies to IPv6 VXLAN devices"); 3804 return -EINVAL; 3805 } 3806 3807 if (conf->label_policy && !use_ipv6) { 3808 NL_SET_ERR_MSG(extack, 3809 "Label policy only applies to IPv6 VXLAN devices"); 3810 return -EINVAL; 3811 } 3812 3813 if (conf->remote_ifindex) { 3814 struct net_device *lowerdev; 3815 3816 lowerdev = __dev_get_by_index(src_net, conf->remote_ifindex); 3817 if (!lowerdev) { 3818 NL_SET_ERR_MSG(extack, 3819 "Invalid local interface, device not found"); 3820 return -ENODEV; 3821 } 3822 3823 #if IS_ENABLED(CONFIG_IPV6) 3824 if (use_ipv6) { 3825 struct inet6_dev *idev = __in6_dev_get(lowerdev); 3826 3827 if (idev && idev->cnf.disable_ipv6) { 3828 NL_SET_ERR_MSG(extack, 3829 "IPv6 support disabled by administrator"); 3830 return -EPERM; 3831 } 3832 } 3833 #endif 3834 3835 *lower = lowerdev; 3836 } else { 3837 if (vxlan_addr_multicast(&conf->remote_ip)) { 3838 NL_SET_ERR_MSG(extack, 3839 "Local interface required for multicast remote 
destination"); 3840 3841 return -EINVAL; 3842 } 3843 3844 #if IS_ENABLED(CONFIG_IPV6) 3845 if (conf->flags & VXLAN_F_IPV6_LINKLOCAL) { 3846 NL_SET_ERR_MSG(extack, 3847 "Local interface required for link-local local/remote addresses"); 3848 return -EINVAL; 3849 } 3850 #endif 3851 3852 *lower = NULL; 3853 } 3854 3855 if (!conf->dst_port) { 3856 if (conf->flags & VXLAN_F_GPE) 3857 conf->dst_port = htons(IANA_VXLAN_GPE_UDP_PORT); 3858 else 3859 conf->dst_port = htons(vxlan_port); 3860 } 3861 3862 if (!conf->age_interval) 3863 conf->age_interval = FDB_AGE_DEFAULT; 3864 3865 if (vxlan_vni_in_use(src_net, old, conf, conf->vni)) { 3866 NL_SET_ERR_MSG(extack, 3867 "A VXLAN device with the specified VNI already exists"); 3868 return -EEXIST; 3869 } 3870 3871 return 0; 3872 } 3873 3874 static void vxlan_config_apply(struct net_device *dev, 3875 struct vxlan_config *conf, 3876 struct net_device *lowerdev, 3877 struct net *src_net, 3878 bool changelink) 3879 { 3880 struct vxlan_dev *vxlan = netdev_priv(dev); 3881 struct vxlan_rdst *dst = &vxlan->default_dst; 3882 unsigned short needed_headroom = ETH_HLEN; 3883 int max_mtu = ETH_MAX_MTU; 3884 u32 flags = conf->flags; 3885 3886 if (!changelink) { 3887 if (flags & VXLAN_F_GPE) 3888 vxlan_raw_setup(dev); 3889 else 3890 vxlan_ether_setup(dev); 3891 3892 if (conf->mtu) 3893 dev->mtu = conf->mtu; 3894 3895 vxlan->net = src_net; 3896 } 3897 3898 dst->remote_vni = conf->vni; 3899 3900 memcpy(&dst->remote_ip, &conf->remote_ip, sizeof(conf->remote_ip)); 3901 3902 if (lowerdev) { 3903 dst->remote_ifindex = conf->remote_ifindex; 3904 3905 netif_inherit_tso_max(dev, lowerdev); 3906 3907 needed_headroom = lowerdev->hard_header_len; 3908 needed_headroom += lowerdev->needed_headroom; 3909 3910 dev->needed_tailroom = lowerdev->needed_tailroom; 3911 3912 max_mtu = lowerdev->mtu - vxlan_headroom(flags); 3913 if (max_mtu < ETH_MIN_MTU) 3914 max_mtu = ETH_MIN_MTU; 3915 3916 if (!changelink && !conf->mtu) 3917 dev->mtu = max_mtu; 3918 } 3919 3920 if (dev->mtu > max_mtu) 3921 dev->mtu = max_mtu; 3922 3923 if (flags & VXLAN_F_COLLECT_METADATA) 3924 flags |= VXLAN_F_IPV6; 3925 needed_headroom += vxlan_headroom(flags); 3926 dev->needed_headroom = needed_headroom; 3927 3928 memcpy(&vxlan->cfg, conf, sizeof(*conf)); 3929 } 3930 3931 static int vxlan_dev_configure(struct net *src_net, struct net_device *dev, 3932 struct vxlan_config *conf, bool changelink, 3933 struct netlink_ext_ack *extack) 3934 { 3935 struct vxlan_dev *vxlan = netdev_priv(dev); 3936 struct net_device *lowerdev; 3937 int ret; 3938 3939 ret = vxlan_config_validate(src_net, conf, &lowerdev, vxlan, extack); 3940 if (ret) 3941 return ret; 3942 3943 vxlan_config_apply(dev, conf, lowerdev, src_net, changelink); 3944 3945 return 0; 3946 } 3947 3948 static int __vxlan_dev_create(struct net *net, struct net_device *dev, 3949 struct vxlan_config *conf, 3950 struct netlink_ext_ack *extack) 3951 { 3952 struct vxlan_net *vn = net_generic(net, vxlan_net_id); 3953 struct vxlan_dev *vxlan = netdev_priv(dev); 3954 struct net_device *remote_dev = NULL; 3955 struct vxlan_fdb *f = NULL; 3956 bool unregister = false; 3957 struct vxlan_rdst *dst; 3958 int err; 3959 3960 dst = &vxlan->default_dst; 3961 err = vxlan_dev_configure(net, dev, conf, false, extack); 3962 if (err) 3963 return err; 3964 3965 dev->ethtool_ops = &vxlan_ethtool_ops; 3966 3967 /* create an fdb entry for a valid default destination */ 3968 if (!vxlan_addr_any(&dst->remote_ip)) { 3969 err = vxlan_fdb_create(vxlan, all_zeros_mac, 3970 &dst->remote_ip, 3971 
NUD_REACHABLE | NUD_PERMANENT, 3972 vxlan->cfg.dst_port, 3973 dst->remote_vni, 3974 dst->remote_vni, 3975 dst->remote_ifindex, 3976 NTF_SELF, 0, &f, extack); 3977 if (err) 3978 return err; 3979 } 3980 3981 err = register_netdevice(dev); 3982 if (err) 3983 goto errout; 3984 unregister = true; 3985 3986 if (dst->remote_ifindex) { 3987 remote_dev = __dev_get_by_index(net, dst->remote_ifindex); 3988 if (!remote_dev) { 3989 err = -ENODEV; 3990 goto errout; 3991 } 3992 3993 err = netdev_upper_dev_link(remote_dev, dev, extack); 3994 if (err) 3995 goto errout; 3996 } 3997 3998 err = rtnl_configure_link(dev, NULL, 0, NULL); 3999 if (err < 0) 4000 goto unlink; 4001 4002 if (f) { 4003 vxlan_fdb_insert(vxlan, all_zeros_mac, dst->remote_vni, f); 4004 4005 /* notify default fdb entry */ 4006 err = vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), 4007 RTM_NEWNEIGH, true, extack); 4008 if (err) { 4009 vxlan_fdb_destroy(vxlan, f, false, false); 4010 if (remote_dev) 4011 netdev_upper_dev_unlink(remote_dev, dev); 4012 goto unregister; 4013 } 4014 } 4015 4016 list_add(&vxlan->next, &vn->vxlan_list); 4017 if (remote_dev) 4018 dst->remote_dev = remote_dev; 4019 return 0; 4020 unlink: 4021 if (remote_dev) 4022 netdev_upper_dev_unlink(remote_dev, dev); 4023 errout: 4024 /* unregister_netdevice() destroys the default FDB entry with deletion 4025 * notification. But the addition notification was not sent yet, so 4026 * destroy the entry by hand here. 4027 */ 4028 if (f) 4029 __vxlan_fdb_free(f); 4030 unregister: 4031 if (unregister) 4032 unregister_netdevice(dev); 4033 return err; 4034 } 4035 4036 /* Set/clear flags based on attribute */ 4037 static int vxlan_nl2flag(struct vxlan_config *conf, struct nlattr *tb[], 4038 int attrtype, unsigned long mask, bool changelink, 4039 bool changelink_supported, 4040 struct netlink_ext_ack *extack) 4041 { 4042 unsigned long flags; 4043 4044 if (!tb[attrtype]) 4045 return 0; 4046 4047 if (changelink && !changelink_supported) { 4048 vxlan_flag_attr_error(attrtype, extack); 4049 return -EOPNOTSUPP; 4050 } 4051 4052 if (vxlan_policy[attrtype].type == NLA_FLAG) 4053 flags = conf->flags | mask; 4054 else if (nla_get_u8(tb[attrtype])) 4055 flags = conf->flags | mask; 4056 else 4057 flags = conf->flags & ~mask; 4058 4059 conf->flags = flags; 4060 4061 return 0; 4062 } 4063 4064 static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[], 4065 struct net_device *dev, struct vxlan_config *conf, 4066 bool changelink, struct netlink_ext_ack *extack) 4067 { 4068 struct vxlanhdr used_bits = { 4069 .vx_flags = VXLAN_HF_VNI, 4070 .vx_vni = VXLAN_VNI_MASK, 4071 }; 4072 struct vxlan_dev *vxlan = netdev_priv(dev); 4073 int err = 0; 4074 4075 memset(conf, 0, sizeof(*conf)); 4076 4077 /* if changelink operation, start with old existing cfg */ 4078 if (changelink) 4079 memcpy(conf, &vxlan->cfg, sizeof(*conf)); 4080 4081 if (data[IFLA_VXLAN_ID]) { 4082 __be32 vni = cpu_to_be32(nla_get_u32(data[IFLA_VXLAN_ID])); 4083 4084 if (changelink && (vni != conf->vni)) { 4085 NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_ID], "Cannot change VNI"); 4086 return -EOPNOTSUPP; 4087 } 4088 conf->vni = cpu_to_be32(nla_get_u32(data[IFLA_VXLAN_ID])); 4089 } 4090 4091 if (data[IFLA_VXLAN_GROUP]) { 4092 if (changelink && (conf->remote_ip.sa.sa_family != AF_INET)) { 4093 NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_GROUP], "New group address family does not match old group"); 4094 return -EOPNOTSUPP; 4095 } 4096 4097 conf->remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]); 4098 
conf->remote_ip.sa.sa_family = AF_INET; 4099 } else if (data[IFLA_VXLAN_GROUP6]) { 4100 if (!IS_ENABLED(CONFIG_IPV6)) { 4101 NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_GROUP6], "IPv6 support not enabled in the kernel"); 4102 return -EPFNOSUPPORT; 4103 } 4104 4105 if (changelink && (conf->remote_ip.sa.sa_family != AF_INET6)) { 4106 NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_GROUP6], "New group address family does not match old group"); 4107 return -EOPNOTSUPP; 4108 } 4109 4110 conf->remote_ip.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_GROUP6]); 4111 conf->remote_ip.sa.sa_family = AF_INET6; 4112 } 4113 4114 if (data[IFLA_VXLAN_LOCAL]) { 4115 if (changelink && (conf->saddr.sa.sa_family != AF_INET)) { 4116 NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_LOCAL], "New local address family does not match old"); 4117 return -EOPNOTSUPP; 4118 } 4119 4120 conf->saddr.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_LOCAL]); 4121 conf->saddr.sa.sa_family = AF_INET; 4122 } else if (data[IFLA_VXLAN_LOCAL6]) { 4123 if (!IS_ENABLED(CONFIG_IPV6)) { 4124 NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_LOCAL6], "IPv6 support not enabled in the kernel"); 4125 return -EPFNOSUPPORT; 4126 } 4127 4128 if (changelink && (conf->saddr.sa.sa_family != AF_INET6)) { 4129 NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_LOCAL6], "New local address family does not match old"); 4130 return -EOPNOTSUPP; 4131 } 4132 4133 /* TODO: respect scope id */ 4134 conf->saddr.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_LOCAL6]); 4135 conf->saddr.sa.sa_family = AF_INET6; 4136 } 4137 4138 if (data[IFLA_VXLAN_LINK]) 4139 conf->remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]); 4140 4141 if (data[IFLA_VXLAN_TOS]) 4142 conf->tos = nla_get_u8(data[IFLA_VXLAN_TOS]); 4143 4144 if (data[IFLA_VXLAN_TTL]) 4145 conf->ttl = nla_get_u8(data[IFLA_VXLAN_TTL]); 4146 4147 if (data[IFLA_VXLAN_TTL_INHERIT]) { 4148 err = vxlan_nl2flag(conf, data, IFLA_VXLAN_TTL_INHERIT, 4149 VXLAN_F_TTL_INHERIT, changelink, false, 4150 extack); 4151 if (err) 4152 return err; 4153 4154 } 4155 4156 if (data[IFLA_VXLAN_LABEL]) 4157 conf->label = nla_get_be32(data[IFLA_VXLAN_LABEL]) & 4158 IPV6_FLOWLABEL_MASK; 4159 if (data[IFLA_VXLAN_LABEL_POLICY]) 4160 conf->label_policy = nla_get_u32(data[IFLA_VXLAN_LABEL_POLICY]); 4161 4162 if (data[IFLA_VXLAN_LEARNING]) { 4163 err = vxlan_nl2flag(conf, data, IFLA_VXLAN_LEARNING, 4164 VXLAN_F_LEARN, changelink, true, 4165 extack); 4166 if (err) 4167 return err; 4168 } else if (!changelink) { 4169 /* default to learn on a new device */ 4170 conf->flags |= VXLAN_F_LEARN; 4171 } 4172 4173 if (data[IFLA_VXLAN_AGEING]) 4174 conf->age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]); 4175 4176 if (data[IFLA_VXLAN_PROXY]) { 4177 err = vxlan_nl2flag(conf, data, IFLA_VXLAN_PROXY, 4178 VXLAN_F_PROXY, changelink, false, 4179 extack); 4180 if (err) 4181 return err; 4182 } 4183 4184 if (data[IFLA_VXLAN_RSC]) { 4185 err = vxlan_nl2flag(conf, data, IFLA_VXLAN_RSC, 4186 VXLAN_F_RSC, changelink, false, 4187 extack); 4188 if (err) 4189 return err; 4190 } 4191 4192 if (data[IFLA_VXLAN_L2MISS]) { 4193 err = vxlan_nl2flag(conf, data, IFLA_VXLAN_L2MISS, 4194 VXLAN_F_L2MISS, changelink, false, 4195 extack); 4196 if (err) 4197 return err; 4198 } 4199 4200 if (data[IFLA_VXLAN_L3MISS]) { 4201 err = vxlan_nl2flag(conf, data, IFLA_VXLAN_L3MISS, 4202 VXLAN_F_L3MISS, changelink, false, 4203 extack); 4204 if (err) 4205 return err; 4206 } 4207 4208 if (data[IFLA_VXLAN_LIMIT]) { 4209 if (changelink) { 4210 NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_LIMIT], 4211 "Cannot 
change limit"); 4212 return -EOPNOTSUPP; 4213 } 4214 conf->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]); 4215 } 4216 4217 if (data[IFLA_VXLAN_COLLECT_METADATA]) { 4218 err = vxlan_nl2flag(conf, data, IFLA_VXLAN_COLLECT_METADATA, 4219 VXLAN_F_COLLECT_METADATA, changelink, false, 4220 extack); 4221 if (err) 4222 return err; 4223 } 4224 4225 if (data[IFLA_VXLAN_PORT_RANGE]) { 4226 if (!changelink) { 4227 const struct ifla_vxlan_port_range *p 4228 = nla_data(data[IFLA_VXLAN_PORT_RANGE]); 4229 conf->port_min = ntohs(p->low); 4230 conf->port_max = ntohs(p->high); 4231 } else { 4232 NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_PORT_RANGE], 4233 "Cannot change port range"); 4234 return -EOPNOTSUPP; 4235 } 4236 } 4237 4238 if (data[IFLA_VXLAN_PORT]) { 4239 if (changelink) { 4240 NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_PORT], 4241 "Cannot change port"); 4242 return -EOPNOTSUPP; 4243 } 4244 conf->dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]); 4245 } 4246 4247 if (data[IFLA_VXLAN_UDP_CSUM]) { 4248 if (changelink) { 4249 NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_UDP_CSUM], 4250 "Cannot change UDP_CSUM flag"); 4251 return -EOPNOTSUPP; 4252 } 4253 if (!nla_get_u8(data[IFLA_VXLAN_UDP_CSUM])) 4254 conf->flags |= VXLAN_F_UDP_ZERO_CSUM_TX; 4255 } 4256 4257 if (data[IFLA_VXLAN_LOCALBYPASS]) { 4258 err = vxlan_nl2flag(conf, data, IFLA_VXLAN_LOCALBYPASS, 4259 VXLAN_F_LOCALBYPASS, changelink, 4260 true, extack); 4261 if (err) 4262 return err; 4263 } else if (!changelink) { 4264 /* default to local bypass on a new device */ 4265 conf->flags |= VXLAN_F_LOCALBYPASS; 4266 } 4267 4268 if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]) { 4269 err = vxlan_nl2flag(conf, data, IFLA_VXLAN_UDP_ZERO_CSUM6_TX, 4270 VXLAN_F_UDP_ZERO_CSUM6_TX, changelink, 4271 false, extack); 4272 if (err) 4273 return err; 4274 } 4275 4276 if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]) { 4277 err = vxlan_nl2flag(conf, data, IFLA_VXLAN_UDP_ZERO_CSUM6_RX, 4278 VXLAN_F_UDP_ZERO_CSUM6_RX, changelink, 4279 false, extack); 4280 if (err) 4281 return err; 4282 } 4283 4284 if (data[IFLA_VXLAN_REMCSUM_TX]) { 4285 err = vxlan_nl2flag(conf, data, IFLA_VXLAN_REMCSUM_TX, 4286 VXLAN_F_REMCSUM_TX, changelink, false, 4287 extack); 4288 if (err) 4289 return err; 4290 } 4291 4292 if (data[IFLA_VXLAN_REMCSUM_RX]) { 4293 err = vxlan_nl2flag(conf, data, IFLA_VXLAN_REMCSUM_RX, 4294 VXLAN_F_REMCSUM_RX, changelink, false, 4295 extack); 4296 if (err) 4297 return err; 4298 used_bits.vx_flags |= VXLAN_HF_RCO; 4299 used_bits.vx_vni |= ~VXLAN_VNI_MASK; 4300 } 4301 4302 if (data[IFLA_VXLAN_GBP]) { 4303 err = vxlan_nl2flag(conf, data, IFLA_VXLAN_GBP, 4304 VXLAN_F_GBP, changelink, false, extack); 4305 if (err) 4306 return err; 4307 used_bits.vx_flags |= VXLAN_GBP_USED_BITS; 4308 } 4309 4310 if (data[IFLA_VXLAN_GPE]) { 4311 err = vxlan_nl2flag(conf, data, IFLA_VXLAN_GPE, 4312 VXLAN_F_GPE, changelink, false, 4313 extack); 4314 if (err) 4315 return err; 4316 4317 used_bits.vx_flags |= VXLAN_GPE_USED_BITS; 4318 } 4319 4320 if (data[IFLA_VXLAN_RESERVED_BITS]) { 4321 struct vxlanhdr reserved_bits; 4322 4323 if (changelink) { 4324 NL_SET_ERR_MSG_ATTR(extack, 4325 data[IFLA_VXLAN_RESERVED_BITS], 4326 "Cannot change reserved_bits"); 4327 return -EOPNOTSUPP; 4328 } 4329 4330 nla_memcpy(&reserved_bits, data[IFLA_VXLAN_RESERVED_BITS], 4331 sizeof(reserved_bits)); 4332 if (used_bits.vx_flags & reserved_bits.vx_flags || 4333 used_bits.vx_vni & reserved_bits.vx_vni) { 4334 __be64 ub_be64, rb_be64; 4335 4336 memcpy(&ub_be64, &used_bits, sizeof(ub_be64)); 4337 memcpy(&rb_be64, &reserved_bits, 
			       sizeof(rb_be64));

			NL_SET_ERR_MSG_ATTR_FMT(extack,
						data[IFLA_VXLAN_RESERVED_BITS],
						"Used bits %#018llx cannot overlap reserved bits %#018llx",
						be64_to_cpu(ub_be64),
						be64_to_cpu(rb_be64));
			return -EINVAL;
		}

		conf->reserved_bits = reserved_bits;
	} else {
		/* For backwards compatibility, only allow reserved fields to be
		 * used by VXLAN extensions if explicitly requested.
		 */
		conf->reserved_bits = (struct vxlanhdr) {
			.vx_flags = ~used_bits.vx_flags,
			.vx_vni = ~used_bits.vx_vni,
		};
	}

	if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL]) {
		err = vxlan_nl2flag(conf, data, IFLA_VXLAN_REMCSUM_NOPARTIAL,
				    VXLAN_F_REMCSUM_NOPARTIAL, changelink,
				    false, extack);
		if (err)
			return err;
	}

	if (tb[IFLA_MTU]) {
		if (changelink) {
			NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_MTU],
					    "Cannot change mtu");
			return -EOPNOTSUPP;
		}
		conf->mtu = nla_get_u32(tb[IFLA_MTU]);
	}

	if (data[IFLA_VXLAN_DF])
		conf->df = nla_get_u8(data[IFLA_VXLAN_DF]);

	if (data[IFLA_VXLAN_VNIFILTER]) {
		err = vxlan_nl2flag(conf, data, IFLA_VXLAN_VNIFILTER,
				    VXLAN_F_VNIFILTER, changelink, false,
				    extack);
		if (err)
			return err;

		if ((conf->flags & VXLAN_F_VNIFILTER) &&
		    !(conf->flags & VXLAN_F_COLLECT_METADATA)) {
			NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VXLAN_VNIFILTER],
					    "vxlan vnifilter only valid in collect metadata mode");
			return -EINVAL;
		}
	}

	return 0;
}

static int vxlan_newlink(struct net *src_net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	struct vxlan_config conf;
	int err;

	err = vxlan_nl2conf(tb, data, dev, &conf, false, extack);
	if (err)
		return err;

	return __vxlan_dev_create(src_net, dev, &conf, extack);
}

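/* Changelink: most attributes are immutable after creation (VNI, UDP port,
 * port range, limit, MTU, ...); vxlan_nl2conf() above rejects attempts to
 * change them.  What may change is re-validated here and, if the default
 * remote address changed, the all-zeros-MAC default FDB entry is swapped
 * under the hash lock before the new configuration is applied.
 */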
static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
			    struct nlattr *data[],
			    struct netlink_ext_ack *extack)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct net_device *lowerdev;
	struct vxlan_config conf;
	struct vxlan_rdst *dst;
	int err;

	dst = &vxlan->default_dst;
	err = vxlan_nl2conf(tb, data, dev, &conf, true, extack);
	if (err)
		return err;

	err = vxlan_config_validate(vxlan->net, &conf, &lowerdev,
				    vxlan, extack);
	if (err)
		return err;

	if (dst->remote_dev == lowerdev)
		lowerdev = NULL;

	err = netdev_adjacent_change_prepare(dst->remote_dev, lowerdev, dev,
					     extack);
	if (err)
		return err;

	/* handle default dst entry */
	if (!vxlan_addr_equal(&conf.remote_ip, &dst->remote_ip)) {
		u32 hash_index = fdb_head_index(vxlan, all_zeros_mac, conf.vni);

		spin_lock_bh(&vxlan->hash_lock[hash_index]);
		if (!vxlan_addr_any(&conf.remote_ip)) {
			err = vxlan_fdb_update(vxlan, all_zeros_mac,
					       &conf.remote_ip,
					       NUD_REACHABLE | NUD_PERMANENT,
					       NLM_F_APPEND | NLM_F_CREATE,
					       vxlan->cfg.dst_port,
					       conf.vni, conf.vni,
					       conf.remote_ifindex,
					       NTF_SELF, 0, true, extack);
			if (err) {
				spin_unlock_bh(&vxlan->hash_lock[hash_index]);
				netdev_adjacent_change_abort(dst->remote_dev,
							     lowerdev, dev);
				return err;
			}
		}
		if (!vxlan_addr_any(&dst->remote_ip))
			__vxlan_fdb_delete(vxlan, all_zeros_mac,
					   dst->remote_ip,
					   vxlan->cfg.dst_port,
					   dst->remote_vni,
					   dst->remote_vni,
					   dst->remote_ifindex,
					   true);
		spin_unlock_bh(&vxlan->hash_lock[hash_index]);

		/* If vni filtering device, also update fdb entries of
		 * all vnis that were using default remote ip
		 */
		if (vxlan->cfg.flags & VXLAN_F_VNIFILTER) {
			err = vxlan_vnilist_update_group(vxlan, &dst->remote_ip,
							 &conf.remote_ip, extack);
			if (err) {
				netdev_adjacent_change_abort(dst->remote_dev,
							     lowerdev, dev);
				return err;
			}
		}
	}

	if (conf.age_interval != vxlan->cfg.age_interval)
		mod_timer(&vxlan->age_timer, jiffies);

	netdev_adjacent_change_commit(dst->remote_dev, lowerdev, dev);
	if (lowerdev && lowerdev != dst->remote_dev)
		dst->remote_dev = lowerdev;
	vxlan_config_apply(dev, &conf, lowerdev, vxlan->net, true);
	return 0;
}

static void vxlan_dellink(struct net_device *dev, struct list_head *head)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb_flush_desc desc = {
		/* Default entry is deleted at vxlan_uninit. */
		.ignore_default_entry = true,
	};

	vxlan_flush(vxlan, &desc);

	list_del(&vxlan->next);
	unregister_netdevice_queue(dev, head);
	if (vxlan->default_dst.remote_dev)
		netdev_upper_dev_unlink(vxlan->default_dst.remote_dev, dev);
}

static size_t vxlan_get_size(const struct net_device *dev)
{
	return nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_ID */
		nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_GROUP{6} */
		nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LINK */
		nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_LOCAL{6} */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL_INHERIT */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TOS */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_DF */
		nla_total_size(sizeof(__be32)) + /* IFLA_VXLAN_LABEL */
		nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LABEL_POLICY */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LEARNING */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_PROXY */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_RSC */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_L2MISS */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_L3MISS */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_COLLECT_METADATA */
		nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_AGEING */
		nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LIMIT */
		nla_total_size(sizeof(__be16)) + /* IFLA_VXLAN_PORT */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_CSUM */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_TX */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_RX */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_TX */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_RX */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LOCALBYPASS */
		/* IFLA_VXLAN_PORT_RANGE */
		nla_total_size(sizeof(struct ifla_vxlan_port_range)) +
		nla_total_size(0) + /* IFLA_VXLAN_GBP */
		nla_total_size(0) + /* IFLA_VXLAN_GPE */
		nla_total_size(0) + /* IFLA_VXLAN_REMCSUM_NOPARTIAL */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_VNIFILTER */
		/* IFLA_VXLAN_RESERVED_BITS */
		nla_total_size(sizeof(struct vxlanhdr)) +
		0;
}

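/* Dump the active configuration as IFLA_VXLAN_* attributes: the counterpart
 * of vxlan_nl2conf(), with per-attribute sizing accounted for in
 * vxlan_get_size() above.  This is what, for example,
 * "ip -d link show dev vxlan0" reports for a device.
 */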
static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	const struct vxlan_dev *vxlan = netdev_priv(dev);
	const struct vxlan_rdst *dst = &vxlan->default_dst;
	struct ifla_vxlan_port_range ports = {
		.low =  htons(vxlan->cfg.port_min),
		.high = htons(vxlan->cfg.port_max),
	};

	if (nla_put_u32(skb, IFLA_VXLAN_ID, be32_to_cpu(dst->remote_vni)))
		goto nla_put_failure;

	if (!vxlan_addr_any(&dst->remote_ip)) {
		if (dst->remote_ip.sa.sa_family == AF_INET) {
			if (nla_put_in_addr(skb, IFLA_VXLAN_GROUP,
					    dst->remote_ip.sin.sin_addr.s_addr))
				goto nla_put_failure;
#if IS_ENABLED(CONFIG_IPV6)
		} else {
			if (nla_put_in6_addr(skb, IFLA_VXLAN_GROUP6,
					     &dst->remote_ip.sin6.sin6_addr))
				goto nla_put_failure;
#endif
		}
	}

	if (dst->remote_ifindex && nla_put_u32(skb, IFLA_VXLAN_LINK, dst->remote_ifindex))
		goto nla_put_failure;

	if (!vxlan_addr_any(&vxlan->cfg.saddr)) {
		if (vxlan->cfg.saddr.sa.sa_family == AF_INET) {
			if (nla_put_in_addr(skb, IFLA_VXLAN_LOCAL,
					    vxlan->cfg.saddr.sin.sin_addr.s_addr))
				goto nla_put_failure;
#if IS_ENABLED(CONFIG_IPV6)
		} else {
			if (nla_put_in6_addr(skb, IFLA_VXLAN_LOCAL6,
					     &vxlan->cfg.saddr.sin6.sin6_addr))
				goto nla_put_failure;
#endif
		}
	}

	if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->cfg.ttl) ||
	    nla_put_u8(skb, IFLA_VXLAN_TTL_INHERIT,
		       !!(vxlan->cfg.flags & VXLAN_F_TTL_INHERIT)) ||
	    nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) ||
	    nla_put_u8(skb, IFLA_VXLAN_DF, vxlan->cfg.df) ||
	    nla_put_be32(skb, IFLA_VXLAN_LABEL, vxlan->cfg.label) ||
	    nla_put_u32(skb, IFLA_VXLAN_LABEL_POLICY, vxlan->cfg.label_policy) ||
	    nla_put_u8(skb, IFLA_VXLAN_LEARNING,
		       !!(vxlan->cfg.flags & VXLAN_F_LEARN)) ||
	    nla_put_u8(skb, IFLA_VXLAN_PROXY,
		       !!(vxlan->cfg.flags & VXLAN_F_PROXY)) ||
	    nla_put_u8(skb, IFLA_VXLAN_RSC,
		       !!(vxlan->cfg.flags & VXLAN_F_RSC)) ||
	    nla_put_u8(skb, IFLA_VXLAN_L2MISS,
		       !!(vxlan->cfg.flags & VXLAN_F_L2MISS)) ||
	    nla_put_u8(skb, IFLA_VXLAN_L3MISS,
		       !!(vxlan->cfg.flags & VXLAN_F_L3MISS)) ||
	    nla_put_u8(skb, IFLA_VXLAN_COLLECT_METADATA,
		       !!(vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA)) ||
	    nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->cfg.age_interval) ||
	    nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->cfg.addrmax) ||
	    nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->cfg.dst_port) ||
	    nla_put_u8(skb, IFLA_VXLAN_UDP_CSUM,
		       !(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM_TX)) ||
	    nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_TX,
		       !!(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) ||
	    nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_RX,
		       !!(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM6_RX)) ||
	    nla_put_u8(skb, IFLA_VXLAN_REMCSUM_TX,
		       !!(vxlan->cfg.flags & VXLAN_F_REMCSUM_TX)) ||
	    nla_put_u8(skb, IFLA_VXLAN_REMCSUM_RX,
		       !!(vxlan->cfg.flags & VXLAN_F_REMCSUM_RX)) ||
	    nla_put_u8(skb, IFLA_VXLAN_LOCALBYPASS,
		       !!(vxlan->cfg.flags & VXLAN_F_LOCALBYPASS)))
		goto nla_put_failure;

	if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports))
		goto nla_put_failure;

	if (vxlan->cfg.flags & VXLAN_F_GBP &&
	    nla_put_flag(skb, IFLA_VXLAN_GBP))
		goto nla_put_failure;

	if (vxlan->cfg.flags & VXLAN_F_GPE &&
	    nla_put_flag(skb, IFLA_VXLAN_GPE))
		goto nla_put_failure;

	if (vxlan->cfg.flags & VXLAN_F_REMCSUM_NOPARTIAL &&
	    nla_put_flag(skb, IFLA_VXLAN_REMCSUM_NOPARTIAL))
		goto nla_put_failure;

	if (vxlan->cfg.flags & VXLAN_F_VNIFILTER &&
	    nla_put_u8(skb, IFLA_VXLAN_VNIFILTER,
		       !!(vxlan->cfg.flags & VXLAN_F_VNIFILTER)))
		goto nla_put_failure;

	if (nla_put(skb, IFLA_VXLAN_RESERVED_BITS,
		    sizeof(vxlan->cfg.reserved_bits),
		    &vxlan->cfg.reserved_bits))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct net *vxlan_get_link_net(const struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);

	return READ_ONCE(vxlan->net);
}

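/* rtnl_link_ops glue: registers the "vxlan" link kind so userspace can create
 * and manage devices over netlink, for example (iproute2):
 *
 *	ip link add vxlan0 type vxlan id 42 dev eth0 \
 *		group 239.1.1.1 dstport 4789
 *
 * vxlan_newlink() translates the IFLA_VXLAN_* attributes into a
 * struct vxlan_config and calls __vxlan_dev_create().
 */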
static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
	.kind		= "vxlan",
	.maxtype	= IFLA_VXLAN_MAX,
	.policy		= vxlan_policy,
	.priv_size	= sizeof(struct vxlan_dev),
	.setup		= vxlan_setup,
	.validate	= vxlan_validate,
	.newlink	= vxlan_newlink,
	.changelink	= vxlan_changelink,
	.dellink	= vxlan_dellink,
	.get_size	= vxlan_get_size,
	.fill_info	= vxlan_fill_info,
	.get_link_net	= vxlan_get_link_net,
};

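/* In-kernel API for creating a VXLAN device without going through netlink.
 * A minimal usage sketch (caller must hold RTNL; the names "ndev" and
 * "vxlan42" are only illustrative, and unset vxlan_config fields keep their
 * zero defaults):
 *
 *	struct vxlan_config conf = {
 *		.vni	  = cpu_to_be32(42),
 *		.dst_port = htons(4789),
 *	};
 *	struct net_device *ndev;
 *
 *	ndev = vxlan_dev_create(net, "vxlan42", NET_NAME_USER, &conf);
 *	if (IS_ERR(ndev))
 *		return PTR_ERR(ndev);
 */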
struct net_device *vxlan_dev_create(struct net *net, const char *name,
				    u8 name_assign_type,
				    struct vxlan_config *conf)
{
	struct nlattr *tb[IFLA_MAX + 1];
	struct net_device *dev;
	int err;

	memset(&tb, 0, sizeof(tb));

	dev = rtnl_create_link(net, name, name_assign_type,
			       &vxlan_link_ops, tb, NULL);
	if (IS_ERR(dev))
		return dev;

	err = __vxlan_dev_create(net, dev, conf, NULL);
	if (err < 0) {
		free_netdev(dev);
		return ERR_PTR(err);
	}

	err = rtnl_configure_link(dev, NULL, 0, NULL);
	if (err < 0) {
		LIST_HEAD(list_kill);

		vxlan_dellink(dev, &list_kill);
		unregister_netdevice_many(&list_kill);
		return ERR_PTR(err);
	}

	return dev;
}
EXPORT_SYMBOL_GPL(vxlan_dev_create);

static void vxlan_handle_lowerdev_unregister(struct vxlan_net *vn,
					     struct net_device *dev)
{
	struct vxlan_dev *vxlan, *next;
	LIST_HEAD(list_kill);

	list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
		struct vxlan_rdst *dst = &vxlan->default_dst;

		/* In case we created vxlan device with carrier
		 * and we lose the carrier due to module unload
		 * we also need to remove vxlan device. In other
		 * cases, it's not necessary and remote_ifindex
		 * is 0 here, so no matches.
		 */
		if (dst->remote_ifindex == dev->ifindex)
			vxlan_dellink(vxlan->dev, &list_kill);
	}

	unregister_netdevice_many(&list_kill);
}

static int vxlan_netdevice_event(struct notifier_block *unused,
				 unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);

	if (event == NETDEV_UNREGISTER)
		vxlan_handle_lowerdev_unregister(vn, dev);
	else if (event == NETDEV_UDP_TUNNEL_PUSH_INFO)
		vxlan_offload_rx_ports(dev, true);
	else if (event == NETDEV_UDP_TUNNEL_DROP_INFO)
		vxlan_offload_rx_ports(dev, false);

	return NOTIFY_DONE;
}

static struct notifier_block vxlan_notifier_block __read_mostly = {
	.notifier_call = vxlan_netdevice_event,
};

static void
vxlan_fdb_offloaded_set(struct net_device *dev,
			struct switchdev_notifier_vxlan_fdb_info *fdb_info)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_rdst *rdst;
	struct vxlan_fdb *f;
	u32 hash_index;

	hash_index = fdb_head_index(vxlan, fdb_info->eth_addr, fdb_info->vni);

	spin_lock_bh(&vxlan->hash_lock[hash_index]);

	f = vxlan_find_mac(vxlan, fdb_info->eth_addr, fdb_info->vni);
	if (!f)
		goto out;

	rdst = vxlan_fdb_find_rdst(f, &fdb_info->remote_ip,
				   fdb_info->remote_port,
				   fdb_info->remote_vni,
				   fdb_info->remote_ifindex);
	if (!rdst)
		goto out;

	rdst->offloaded = fdb_info->offloaded;

out:
	spin_unlock_bh(&vxlan->hash_lock[hash_index]);
}

static int
vxlan_fdb_external_learn_add(struct net_device *dev,
			     struct switchdev_notifier_vxlan_fdb_info *fdb_info)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct netlink_ext_ack *extack;
	u32 hash_index;
	int err;

	hash_index = fdb_head_index(vxlan, fdb_info->eth_addr, fdb_info->vni);
	extack = switchdev_notifier_info_to_extack(&fdb_info->info);

	spin_lock_bh(&vxlan->hash_lock[hash_index]);
	err = vxlan_fdb_update(vxlan, fdb_info->eth_addr, &fdb_info->remote_ip,
			       NUD_REACHABLE,
			       NLM_F_CREATE | NLM_F_REPLACE,
			       fdb_info->remote_port,
			       fdb_info->vni,
			       fdb_info->remote_vni,
			       fdb_info->remote_ifindex,
			       NTF_USE | NTF_SELF | NTF_EXT_LEARNED,
			       0, false, extack);
	spin_unlock_bh(&vxlan->hash_lock[hash_index]);

	return err;
}

static int
vxlan_fdb_external_learn_del(struct net_device *dev,
			     struct switchdev_notifier_vxlan_fdb_info *fdb_info)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb *f;
	u32 hash_index;
	int err = 0;

	hash_index = fdb_head_index(vxlan, fdb_info->eth_addr, fdb_info->vni);
	spin_lock_bh(&vxlan->hash_lock[hash_index]);

	f = vxlan_find_mac(vxlan, fdb_info->eth_addr, fdb_info->vni);
	if (!f)
		err = -ENOENT;
	else if (f->flags & NTF_EXT_LEARNED)
		err = __vxlan_fdb_delete(vxlan, fdb_info->eth_addr,
					 fdb_info->remote_ip,
					 fdb_info->remote_port,
					 fdb_info->vni,
					 fdb_info->remote_vni,
					 fdb_info->remote_ifindex,
					 false);

	spin_unlock_bh(&vxlan->hash_lock[hash_index]);

	return err;
}

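/* Switchdev notifier: hardware-offload drivers report FDB activity back to
 * the VXLAN device.  ADD/DEL_TO_BRIDGE install or remove externally learned
 * entries, and the offloaded state is mirrored into the matching remote
 * destination via vxlan_fdb_offloaded_set().
 */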
static int vxlan_switchdev_event(struct notifier_block *unused,
				 unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct switchdev_notifier_vxlan_fdb_info *fdb_info;
	int err = 0;

	switch (event) {
	case SWITCHDEV_VXLAN_FDB_OFFLOADED:
		vxlan_fdb_offloaded_set(dev, ptr);
		break;
	case SWITCHDEV_VXLAN_FDB_ADD_TO_BRIDGE:
		fdb_info = ptr;
		err = vxlan_fdb_external_learn_add(dev, fdb_info);
		if (err) {
			err = notifier_from_errno(err);
			break;
		}
		fdb_info->offloaded = true;
		vxlan_fdb_offloaded_set(dev, fdb_info);
		break;
	case SWITCHDEV_VXLAN_FDB_DEL_TO_BRIDGE:
		fdb_info = ptr;
		err = vxlan_fdb_external_learn_del(dev, fdb_info);
		if (err) {
			err = notifier_from_errno(err);
			break;
		}
		fdb_info->offloaded = false;
		vxlan_fdb_offloaded_set(dev, fdb_info);
		break;
	}

	return err;
}

static struct notifier_block vxlan_switchdev_notifier_block __read_mostly = {
	.notifier_call = vxlan_switchdev_event,
};

static void vxlan_fdb_nh_flush(struct nexthop *nh)
{
	struct vxlan_fdb *fdb;
	struct vxlan_dev *vxlan;
	u32 hash_index;

	rcu_read_lock();
	list_for_each_entry_rcu(fdb, &nh->fdb_list, nh_list) {
		vxlan = rcu_dereference(fdb->vdev);
		WARN_ON(!vxlan);
		hash_index = fdb_head_index(vxlan, fdb->eth_addr,
					    vxlan->default_dst.remote_vni);
		spin_lock_bh(&vxlan->hash_lock[hash_index]);
		if (!hlist_unhashed(&fdb->hlist))
			vxlan_fdb_destroy(vxlan, fdb, false, false);
		spin_unlock_bh(&vxlan->hash_lock[hash_index]);
	}
	rcu_read_unlock();
}

static int vxlan_nexthop_event(struct notifier_block *nb,
			       unsigned long event, void *ptr)
{
	struct nh_notifier_info *info = ptr;
	struct nexthop *nh;

	if (event != NEXTHOP_EVENT_DEL)
		return NOTIFY_DONE;

	nh = nexthop_find_by_id(info->net, info->id);
	if (!nh)
		return NOTIFY_DONE;

	vxlan_fdb_nh_flush(nh);

	return NOTIFY_DONE;
}

static __net_init int vxlan_init_net(struct net *net)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	unsigned int h;

	INIT_LIST_HEAD(&vn->vxlan_list);
	spin_lock_init(&vn->sock_lock);
	vn->nexthop_notifier_block.notifier_call = vxlan_nexthop_event;

	for (h = 0; h < PORT_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vn->sock_list[h]);

	return register_nexthop_notifier(net, &vn->nexthop_notifier_block,
					 NULL);
}

static void __net_exit vxlan_destroy_tunnels(struct vxlan_net *vn,
					     struct list_head *dev_to_kill)
{
	struct vxlan_dev *vxlan, *next;

	list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next)
		vxlan_dellink(vxlan->dev, dev_to_kill);
}

static void __net_exit vxlan_exit_batch_rtnl(struct list_head *net_list,
					     struct list_head *dev_to_kill)
{
	struct net *net;

	ASSERT_RTNL();
	list_for_each_entry(net, net_list, exit_list) {
		struct vxlan_net *vn = net_generic(net, vxlan_net_id);

		__unregister_nexthop_notifier(net, &vn->nexthop_notifier_block);

		vxlan_destroy_tunnels(vn, dev_to_kill);
	}
}

static void __net_exit vxlan_exit_net(struct net *net)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	unsigned int h;

	for (h = 0; h < PORT_HASH_SIZE; ++h)
		WARN_ON_ONCE(!hlist_empty(&vn->sock_list[h]));
}

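/* Per-netns lifetime: .init registers the nexthop notifier, .exit_batch_rtnl
 * unregisters it and tears down all tunnels in one RTNL section, and .exit
 * only sanity-checks that every VXLAN socket has already gone away.
 */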
static struct pernet_operations vxlan_net_ops = {
	.init = vxlan_init_net,
	.exit_batch_rtnl = vxlan_exit_batch_rtnl,
	.exit = vxlan_exit_net,
	.id   = &vxlan_net_id,
	.size = sizeof(struct vxlan_net),
};

static int __init vxlan_init_module(void)
{
	int rc;

	get_random_bytes(&vxlan_salt, sizeof(vxlan_salt));

	rc = register_pernet_subsys(&vxlan_net_ops);
	if (rc)
		goto out1;

	rc = register_netdevice_notifier(&vxlan_notifier_block);
	if (rc)
		goto out2;

	rc = register_switchdev_notifier(&vxlan_switchdev_notifier_block);
	if (rc)
		goto out3;

	rc = rtnl_link_register(&vxlan_link_ops);
	if (rc)
		goto out4;

	rc = vxlan_vnifilter_init();
	if (rc)
		goto out5;

	return 0;
out5:
	rtnl_link_unregister(&vxlan_link_ops);
out4:
	unregister_switchdev_notifier(&vxlan_switchdev_notifier_block);
out3:
	unregister_netdevice_notifier(&vxlan_notifier_block);
out2:
	unregister_pernet_subsys(&vxlan_net_ops);
out1:
	return rc;
}
late_initcall(vxlan_init_module);

static void __exit vxlan_cleanup_module(void)
{
	vxlan_vnifilter_uninit();
	rtnl_link_unregister(&vxlan_link_ops);
	unregister_switchdev_notifier(&vxlan_switchdev_notifier_block);
	unregister_netdevice_notifier(&vxlan_notifier_block);
	unregister_pernet_subsys(&vxlan_net_ops);
	/* rcu_barrier() is called by netns */
}
module_exit(vxlan_cleanup_module);

MODULE_LICENSE("GPL");
MODULE_VERSION(VXLAN_VERSION);
MODULE_AUTHOR("Stephen Hemminger <stephen@networkplumber.org>");
MODULE_DESCRIPTION("Driver for VXLAN encapsulated traffic");
MODULE_ALIAS_RTNL_LINK("vxlan");