/*-
 * Copyright (c) 2014, Bryan Venteicher <bryanv@FreeBSD.org>
 * All rights reserved.
 * Copyright (c) 2020, Chelsio Communications.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/hash.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/refcount.h>
#include <sys/rmlock.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_clone.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vxlan.h>
#include <net/netisr.h>
#include <net/route.h>
#include <net/route/nhop.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/ip_var.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <netinet/in_fib.h>
#include <netinet6/in6_fib.h>

#include <netinet6/ip6_var.h>
#include <netinet6/scope6_var.h>

struct vxlan_softc;
LIST_HEAD(vxlan_softc_head, vxlan_softc);

struct sx vxlan_sx;
SX_SYSINIT(vxlan, &vxlan_sx, "VXLAN global start/stop lock");

struct vxlan_socket_mc_info {
	union vxlan_sockaddr	vxlsomc_saddr;
	union vxlan_sockaddr	vxlsomc_gaddr;
	int			vxlsomc_ifidx;
	int			vxlsomc_users;
};

/*
 * The maximum MTU of an encapsulated Ethernet frame within an IPv4/UDP
 * packet.
 */
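/*
 * For reference: assuming the usual 8-byte UDP header and 8-byte VXLAN
 * header, the definition below evaluates to
 * 65535 - 60 - 8 - 8 - 14 - 4 - 4 = 65437 bytes.
 */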
#define VXLAN_MAX_MTU	(IP_MAXPACKET - \
			    60 /* Maximum IPv4 header len */ - \
			    sizeof(struct udphdr) - \
			    sizeof(struct vxlan_header) - \
			    ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN)
#define VXLAN_BASIC_IFCAPS	(IFCAP_LINKSTATE | IFCAP_JUMBO_MTU)

#define VXLAN_SO_MC_MAX_GROUPS	32

#define VXLAN_SO_VNI_HASH_SHIFT	6
#define VXLAN_SO_VNI_HASH_SIZE	(1 << VXLAN_SO_VNI_HASH_SHIFT)
#define VXLAN_SO_VNI_HASH(_vni)	((_vni) % VXLAN_SO_VNI_HASH_SIZE)

struct vxlan_socket {
	struct socket			*vxlso_sock;
	struct rmlock			 vxlso_lock;
	u_int				 vxlso_refcnt;
	union vxlan_sockaddr		 vxlso_laddr;
	LIST_ENTRY(vxlan_socket)	 vxlso_entry;
	struct vxlan_softc_head		 vxlso_vni_hash[VXLAN_SO_VNI_HASH_SIZE];
	struct vxlan_socket_mc_info	 vxlso_mc[VXLAN_SO_MC_MAX_GROUPS];
};

#define VXLAN_SO_RLOCK(_vso, _p)	rm_rlock(&(_vso)->vxlso_lock, (_p))
#define VXLAN_SO_RUNLOCK(_vso, _p)	rm_runlock(&(_vso)->vxlso_lock, (_p))
#define VXLAN_SO_WLOCK(_vso)		rm_wlock(&(_vso)->vxlso_lock)
#define VXLAN_SO_WUNLOCK(_vso)		rm_wunlock(&(_vso)->vxlso_lock)
#define VXLAN_SO_LOCK_ASSERT(_vso) \
    rm_assert(&(_vso)->vxlso_lock, RA_LOCKED)
#define VXLAN_SO_LOCK_WASSERT(_vso) \
    rm_assert(&(_vso)->vxlso_lock, RA_WLOCKED)

#define VXLAN_SO_ACQUIRE(_vso)	refcount_acquire(&(_vso)->vxlso_refcnt)
#define VXLAN_SO_RELEASE(_vso)	refcount_release(&(_vso)->vxlso_refcnt)

struct vxlan_ftable_entry {
	LIST_ENTRY(vxlan_ftable_entry)	 vxlfe_hash;
	uint16_t			 vxlfe_flags;
	uint8_t				 vxlfe_mac[ETHER_ADDR_LEN];
	union vxlan_sockaddr		 vxlfe_raddr;
	time_t				 vxlfe_expire;
};

#define VXLAN_FE_FLAG_DYNAMIC	0x01
#define VXLAN_FE_FLAG_STATIC	0x02

#define VXLAN_FE_IS_DYNAMIC(_fe) \
    ((_fe)->vxlfe_flags & VXLAN_FE_FLAG_DYNAMIC)

#define VXLAN_SC_FTABLE_SHIFT	9
#define VXLAN_SC_FTABLE_SIZE	(1 << VXLAN_SC_FTABLE_SHIFT)
#define VXLAN_SC_FTABLE_MASK	(VXLAN_SC_FTABLE_SIZE - 1)
#define VXLAN_SC_FTABLE_HASH(_sc, _mac) \
    (vxlan_mac_hash(_sc, _mac) % VXLAN_SC_FTABLE_SIZE)

LIST_HEAD(vxlan_ftable_head, vxlan_ftable_entry);

struct vxlan_statistics {
	uint32_t	ftable_nospace;
	uint32_t	ftable_lock_upgrade_failed;
	counter_u64_t	txcsum;
	counter_u64_t	tso;
	counter_u64_t	rxcsum;
};

struct vxlan_softc {
	struct ifnet			*vxl_ifp;
	int				 vxl_reqcap;
	u_int				 vxl_fibnum;
	struct vxlan_socket		*vxl_sock;
	uint32_t			 vxl_vni;
	union vxlan_sockaddr		 vxl_src_addr;
	union vxlan_sockaddr		 vxl_dst_addr;
	uint32_t			 vxl_flags;
#define VXLAN_FLAG_INIT		0x0001
#define VXLAN_FLAG_TEARDOWN	0x0002
#define VXLAN_FLAG_LEARN	0x0004
#define VXLAN_FLAG_USER_MTU	0x0008

	uint32_t			 vxl_port_hash_key;
	uint16_t			 vxl_min_port;
	uint16_t			 vxl_max_port;
	uint8_t				 vxl_ttl;

	/* Lookup table from MAC address to forwarding entry. */
	uint32_t			 vxl_ftable_cnt;
	uint32_t			 vxl_ftable_max;
	uint32_t			 vxl_ftable_timeout;
	uint32_t			 vxl_ftable_hash_key;
	struct vxlan_ftable_head	*vxl_ftable;

	/* Derived from vxl_dst_addr. */
	struct vxlan_ftable_entry	 vxl_default_fe;

	struct ip_moptions		*vxl_im4o;
	struct ip6_moptions		*vxl_im6o;

	struct rmlock			 vxl_lock;
	volatile u_int			 vxl_refcnt;

	int				 vxl_unit;
	int				 vxl_vso_mc_index;
	struct vxlan_statistics		 vxl_stats;
	struct sysctl_oid		*vxl_sysctl_node;
	struct sysctl_ctx_list		 vxl_sysctl_ctx;
	struct callout			 vxl_callout;
	struct ether_addr		 vxl_hwaddr;
	int				 vxl_mc_ifindex;
	struct ifnet			*vxl_mc_ifp;
	struct ifmedia			 vxl_media;
	char				 vxl_mc_ifname[IFNAMSIZ];
	LIST_ENTRY(vxlan_softc)		 vxl_entry;
	LIST_ENTRY(vxlan_softc)		 vxl_ifdetach_list;

	/* For rate limiting errors on the tx fast path. */
	struct timeval			 err_time;
	int				 err_pps;
};

#define VXLAN_RLOCK(_sc, _p)	rm_rlock(&(_sc)->vxl_lock, (_p))
#define VXLAN_RUNLOCK(_sc, _p)	rm_runlock(&(_sc)->vxl_lock, (_p))
#define VXLAN_WLOCK(_sc)	rm_wlock(&(_sc)->vxl_lock)
#define VXLAN_WUNLOCK(_sc)	rm_wunlock(&(_sc)->vxl_lock)
#define VXLAN_LOCK_WOWNED(_sc)	rm_wowned(&(_sc)->vxl_lock)
#define VXLAN_LOCK_ASSERT(_sc)	rm_assert(&(_sc)->vxl_lock, RA_LOCKED)
#define VXLAN_LOCK_WASSERT(_sc)	rm_assert(&(_sc)->vxl_lock, RA_WLOCKED)
#define VXLAN_UNLOCK(_sc, _p)	do {		\
	if (VXLAN_LOCK_WOWNED(_sc))		\
		VXLAN_WUNLOCK(_sc);		\
	else					\
		VXLAN_RUNLOCK(_sc, _p);		\
} while (0)

#define VXLAN_ACQUIRE(_sc)	refcount_acquire(&(_sc)->vxl_refcnt)
#define VXLAN_RELEASE(_sc)	refcount_release(&(_sc)->vxl_refcnt)

#define satoconstsin(sa)	((const struct sockaddr_in *)(sa))
#define satoconstsin6(sa)	((const struct sockaddr_in6 *)(sa))

struct vxlanudphdr {
	struct udphdr		vxlh_udp;
	struct vxlan_header	vxlh_hdr;
} __packed;

static int	vxlan_ftable_addr_cmp(const uint8_t *, const uint8_t *);
static void	vxlan_ftable_init(struct vxlan_softc *);
static void	vxlan_ftable_fini(struct vxlan_softc *);
static void	vxlan_ftable_flush(struct vxlan_softc *, int);
static void	vxlan_ftable_expire(struct vxlan_softc *);
static int	vxlan_ftable_update_locked(struct vxlan_softc *,
		    const union vxlan_sockaddr *, const uint8_t *,
		    struct rm_priotracker *);
static int	vxlan_ftable_learn(struct vxlan_softc *,
		    const struct sockaddr *, const uint8_t *);
static int	vxlan_ftable_sysctl_dump(SYSCTL_HANDLER_ARGS);

static struct vxlan_ftable_entry *
		vxlan_ftable_entry_alloc(void);
static void	vxlan_ftable_entry_free(struct vxlan_ftable_entry *);
static void	vxlan_ftable_entry_init(struct vxlan_softc *,
		    struct vxlan_ftable_entry *, const uint8_t *,
		    const struct sockaddr *, uint32_t);
static void	vxlan_ftable_entry_destroy(struct vxlan_softc *,
		    struct vxlan_ftable_entry *);
static int	vxlan_ftable_entry_insert(struct vxlan_softc *,
		    struct vxlan_ftable_entry *);
static struct vxlan_ftable_entry *
		vxlan_ftable_entry_lookup(struct vxlan_softc *,
		    const uint8_t *);
static void	vxlan_ftable_entry_dump(struct vxlan_ftable_entry *,
		    struct sbuf *);

static struct vxlan_socket *
		vxlan_socket_alloc(const union vxlan_sockaddr *);
static void	vxlan_socket_destroy(struct vxlan_socket *);
static void	vxlan_socket_release(struct vxlan_socket *);
static struct vxlan_socket *
		vxlan_socket_lookup(union vxlan_sockaddr *vxlsa);
static void	vxlan_socket_insert(struct vxlan_socket *);
static int	vxlan_socket_init(struct vxlan_socket *, struct ifnet *);
static int	vxlan_socket_bind(struct vxlan_socket *, struct ifnet *);
static int	vxlan_socket_create(struct ifnet *, int,
		    const union vxlan_sockaddr *, struct vxlan_socket **);
static void	vxlan_socket_ifdetach(struct vxlan_socket *,
		    struct ifnet *, struct vxlan_softc_head *);

static struct vxlan_socket *
		vxlan_socket_mc_lookup(const union vxlan_sockaddr *);
static int	vxlan_sockaddr_mc_info_match(
		    const struct vxlan_socket_mc_info *,
		    const union vxlan_sockaddr *,
		    const union vxlan_sockaddr *, int);
static int	vxlan_socket_mc_join_group(struct vxlan_socket *,
		    const union vxlan_sockaddr *, const union vxlan_sockaddr *,
		    int *, union vxlan_sockaddr *);
static int	vxlan_socket_mc_leave_group(struct vxlan_socket *,
		    const union vxlan_sockaddr *,
		    const union vxlan_sockaddr *, int);
static int	vxlan_socket_mc_add_group(struct vxlan_socket *,
		    const union vxlan_sockaddr *, const union vxlan_sockaddr *,
		    int, int *);
static void	vxlan_socket_mc_release_group_by_idx(struct vxlan_socket *,
		    int);

static struct vxlan_softc *
		vxlan_socket_lookup_softc_locked(struct vxlan_socket *,
		    uint32_t);
static struct vxlan_softc *
		vxlan_socket_lookup_softc(struct vxlan_socket *, uint32_t);
static int	vxlan_socket_insert_softc(struct vxlan_socket *,
		    struct vxlan_softc *);
static void	vxlan_socket_remove_softc(struct vxlan_socket *,
		    struct vxlan_softc *);

static struct ifnet *
		vxlan_multicast_if_ref(struct vxlan_softc *, int);
static void	vxlan_free_multicast(struct vxlan_softc *);
static int	vxlan_setup_multicast_interface(struct vxlan_softc *);

static int	vxlan_setup_multicast(struct vxlan_softc *);
static int	vxlan_setup_socket(struct vxlan_softc *);
#ifdef INET6
static void	vxlan_setup_zero_checksum_port(struct vxlan_softc *);
#endif
static void	vxlan_setup_interface_hdrlen(struct vxlan_softc *);
static int	vxlan_valid_init_config(struct vxlan_softc *);
static void	vxlan_init_wait(struct vxlan_softc *);
static void	vxlan_init_complete(struct vxlan_softc *);
static void	vxlan_init(void *);
static void	vxlan_release(struct vxlan_softc *);
static void	vxlan_teardown_wait(struct vxlan_softc *);
static void	vxlan_teardown_complete(struct vxlan_softc *);
static void	vxlan_teardown_locked(struct vxlan_softc *);
static void	vxlan_teardown(struct vxlan_softc *);
static void	vxlan_ifdetach(struct vxlan_softc *, struct ifnet *,
		    struct vxlan_softc_head *);
static void	vxlan_timer(void *);

static int	vxlan_ctrl_get_config(struct vxlan_softc *, void *);
static int	vxlan_ctrl_set_vni(struct vxlan_softc *, void *);
static int	vxlan_ctrl_set_local_addr(struct vxlan_softc *, void *);
static int	vxlan_ctrl_set_remote_addr(struct vxlan_softc *, void *);
static int	vxlan_ctrl_set_local_port(struct vxlan_softc *, void *);
static int	vxlan_ctrl_set_remote_port(struct vxlan_softc *, void *);
static int	vxlan_ctrl_set_port_range(struct vxlan_softc *, void *);
static int	vxlan_ctrl_set_ftable_timeout(struct vxlan_softc *, void *);
static int	vxlan_ctrl_set_ftable_max(struct vxlan_softc *, void *);
static int	vxlan_ctrl_set_multicast_if(struct vxlan_softc *, void *);
static int	vxlan_ctrl_set_ttl(struct vxlan_softc *, void *);
static int	vxlan_ctrl_set_learn(struct vxlan_softc *, void *);
static int	vxlan_ctrl_ftable_entry_add(struct vxlan_softc *, void *);
static int	vxlan_ctrl_ftable_entry_rem(struct vxlan_softc *, void *);
static int	vxlan_ctrl_flush(struct vxlan_softc *, void *);
static int	vxlan_ioctl_drvspec(struct vxlan_softc *,
		    struct ifdrv *, int);
static int	vxlan_ioctl_ifflags(struct vxlan_softc *);
static int	vxlan_ioctl(struct ifnet *, u_long, caddr_t);

#if defined(INET) || defined(INET6)
static uint16_t vxlan_pick_source_port(struct vxlan_softc *, struct mbuf *);
static void	vxlan_encap_header(struct vxlan_softc *, struct mbuf *,
		    int, uint16_t, uint16_t);
#endif
static int	vxlan_encap4(struct vxlan_softc *,
		    const union vxlan_sockaddr *, struct mbuf *);
static int	vxlan_encap6(struct vxlan_softc *,
		    const union vxlan_sockaddr *, struct mbuf *);
static int	vxlan_transmit(struct ifnet *, struct mbuf *);
static void	vxlan_qflush(struct ifnet *);
static bool	vxlan_rcv_udp_packet(struct mbuf *, int, struct inpcb *,
		    const struct sockaddr *, void *);
static int	vxlan_input(struct vxlan_socket *, uint32_t, struct mbuf **,
		    const struct sockaddr *);

static int	vxlan_stats_alloc(struct vxlan_softc *);
static void	vxlan_stats_free(struct vxlan_softc *);
static void	vxlan_set_default_config(struct vxlan_softc *);
static int	vxlan_set_user_config(struct vxlan_softc *,
		    struct ifvxlanparam *);
static int	vxlan_set_reqcap(struct vxlan_softc *, struct ifnet *, int);
static void	vxlan_set_hwcaps(struct vxlan_softc *);
static int	vxlan_clone_create(struct if_clone *, char *, size_t,
		    struct ifc_data *, struct ifnet **);
static int	vxlan_clone_destroy(struct if_clone *, struct ifnet *, uint32_t);

static uint32_t	vxlan_mac_hash(struct vxlan_softc *, const uint8_t *);
static int	vxlan_media_change(struct ifnet *);
static void	vxlan_media_status(struct ifnet *, struct ifmediareq *);

static int	vxlan_sockaddr_cmp(const union vxlan_sockaddr *,
		    const struct sockaddr *);
static void	vxlan_sockaddr_copy(union vxlan_sockaddr *,
		    const struct sockaddr *);
static int	vxlan_sockaddr_in_equal(const union vxlan_sockaddr *,
		    const struct sockaddr *);
static void	vxlan_sockaddr_in_copy(union vxlan_sockaddr *,
		    const struct sockaddr *);
static int	vxlan_sockaddr_supported(const union vxlan_sockaddr *, int);
static int	vxlan_sockaddr_in_any(const union vxlan_sockaddr *);
static int	vxlan_sockaddr_in_multicast(const union vxlan_sockaddr *);
static int	vxlan_sockaddr_in6_embedscope(union vxlan_sockaddr *);

static int	vxlan_can_change_config(struct vxlan_softc *);
static int	vxlan_check_vni(uint32_t);
static int	vxlan_check_ttl(int);
static int	vxlan_check_ftable_timeout(uint32_t);
static int	vxlan_check_ftable_max(uint32_t);

static void	vxlan_sysctl_setup(struct vxlan_softc *);
static void	vxlan_sysctl_destroy(struct vxlan_softc *);
static int	vxlan_tunable_int(struct vxlan_softc *, const char *, int);

static void	vxlan_ifdetach_event(void *, struct ifnet *);
static void	vxlan_load(void);
static void	vxlan_unload(void);
static int	vxlan_modevent(module_t, int, void *);

static const char vxlan_name[] = "vxlan";
static MALLOC_DEFINE(M_VXLAN, vxlan_name,
    "Virtual eXtensible LAN Interface");
static struct if_clone *vxlan_cloner;

static struct mtx vxlan_list_mtx;
#define VXLAN_LIST_LOCK()	mtx_lock(&vxlan_list_mtx)
#define VXLAN_LIST_UNLOCK()	mtx_unlock(&vxlan_list_mtx)

static LIST_HEAD(, vxlan_socket) vxlan_socket_list;
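/*
 * The socket list above is protected by vxlan_list_mtx.  Each socket is
 * reference counted with VXLAN_SO_ACQUIRE()/VXLAN_SO_RELEASE() and may be
 * shared by several vxlan interfaces bound to the same local address.
 */
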
static eventhandler_tag vxlan_ifdetach_event_tag;

SYSCTL_DECL(_net_link);
SYSCTL_NODE(_net_link, OID_AUTO, vxlan, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Virtual eXtensible Local Area Network");

static int vxlan_legacy_port = 0;
TUNABLE_INT("net.link.vxlan.legacy_port", &vxlan_legacy_port);
static int vxlan_reuse_port = 0;
TUNABLE_INT("net.link.vxlan.reuse_port", &vxlan_reuse_port);

/* Default maximum number of addresses in the forwarding table. */
#ifndef VXLAN_FTABLE_MAX
#define VXLAN_FTABLE_MAX	2000
#endif

/* Timeout (in seconds) of addresses learned in the forwarding table. */
#ifndef VXLAN_FTABLE_TIMEOUT
#define VXLAN_FTABLE_TIMEOUT	(20 * 60)
#endif

/*
 * Maximum timeout (in seconds) of addresses learned in the forwarding
 * table.
 */
#ifndef VXLAN_FTABLE_MAX_TIMEOUT
#define VXLAN_FTABLE_MAX_TIMEOUT	(60 * 60 * 24)
#endif

/* Number of seconds between pruning attempts of the forwarding table. */
#ifndef VXLAN_FTABLE_PRUNE
#define VXLAN_FTABLE_PRUNE	(5 * 60)
#endif

static int vxlan_ftable_prune_period = VXLAN_FTABLE_PRUNE;

struct vxlan_control {
	int	(*vxlc_func)(struct vxlan_softc *, void *);
	int	 vxlc_argsize;
	int	 vxlc_flags;
#define VXLAN_CTRL_FLAG_COPYIN	0x01
#define VXLAN_CTRL_FLAG_COPYOUT	0x02
#define VXLAN_CTRL_FLAG_SUSER	0x04
};

static const struct vxlan_control vxlan_control_table[] = {
	[VXLAN_CMD_GET_CONFIG] =
	    { vxlan_ctrl_get_config, sizeof(struct ifvxlancfg),
	      VXLAN_CTRL_FLAG_COPYOUT
	    },

	[VXLAN_CMD_SET_VNI] =
	    { vxlan_ctrl_set_vni, sizeof(struct ifvxlancmd),
	      VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
	    },

	[VXLAN_CMD_SET_LOCAL_ADDR] =
	    { vxlan_ctrl_set_local_addr, sizeof(struct ifvxlancmd),
	      VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
	    },

	[VXLAN_CMD_SET_REMOTE_ADDR] =
	    { vxlan_ctrl_set_remote_addr, sizeof(struct ifvxlancmd),
	      VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
	    },

	[VXLAN_CMD_SET_LOCAL_PORT] =
	    { vxlan_ctrl_set_local_port, sizeof(struct ifvxlancmd),
	      VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
	    },

	[VXLAN_CMD_SET_REMOTE_PORT] =
	    { vxlan_ctrl_set_remote_port, sizeof(struct ifvxlancmd),
	      VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
	    },

	[VXLAN_CMD_SET_PORT_RANGE] =
	    { vxlan_ctrl_set_port_range, sizeof(struct ifvxlancmd),
	      VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
	    },

	[VXLAN_CMD_SET_FTABLE_TIMEOUT] =
	    { vxlan_ctrl_set_ftable_timeout, sizeof(struct ifvxlancmd),
	      VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
	    },

	[VXLAN_CMD_SET_FTABLE_MAX] =
	    { vxlan_ctrl_set_ftable_max, sizeof(struct ifvxlancmd),
	      VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
	    },

	[VXLAN_CMD_SET_MULTICAST_IF] =
	    { vxlan_ctrl_set_multicast_if, sizeof(struct ifvxlancmd),
	      VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
	    },

	[VXLAN_CMD_SET_TTL] =
	    { vxlan_ctrl_set_ttl, sizeof(struct ifvxlancmd),
	      VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
	    },

	[VXLAN_CMD_SET_LEARN] =
	    { vxlan_ctrl_set_learn, sizeof(struct ifvxlancmd),
	      VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
	    },

	[VXLAN_CMD_FTABLE_ENTRY_ADD] =
	    { vxlan_ctrl_ftable_entry_add, sizeof(struct ifvxlancmd),
	      VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
	    },

	[VXLAN_CMD_FTABLE_ENTRY_REM] =
	    { vxlan_ctrl_ftable_entry_rem, sizeof(struct ifvxlancmd),
	      VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
	    },

	[VXLAN_CMD_FLUSH] =
	    { vxlan_ctrl_flush, sizeof(struct ifvxlancmd),
	      VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
	    },
};

static const int vxlan_control_table_size = nitems(vxlan_control_table);

static int
vxlan_ftable_addr_cmp(const uint8_t *a, const uint8_t *b)
{
	int i, d;

	for (i = 0, d = 0; i < ETHER_ADDR_LEN && d == 0; i++)
		d = ((int)a[i]) - ((int)b[i]);

	return (d);
}

static void
vxlan_ftable_init(struct vxlan_softc *sc)
{
	int i;

	sc->vxl_ftable = malloc(sizeof(struct vxlan_ftable_head) *
	    VXLAN_SC_FTABLE_SIZE, M_VXLAN, M_ZERO | M_WAITOK);

	for (i = 0; i < VXLAN_SC_FTABLE_SIZE; i++)
		LIST_INIT(&sc->vxl_ftable[i]);
	sc->vxl_ftable_hash_key = arc4random();
}

static void
vxlan_ftable_fini(struct vxlan_softc *sc)
{
	int i;

	for (i = 0; i < VXLAN_SC_FTABLE_SIZE; i++) {
		KASSERT(LIST_EMPTY(&sc->vxl_ftable[i]),
		    ("%s: vxlan %p ftable[%d] not empty", __func__, sc, i));
	}
	MPASS(sc->vxl_ftable_cnt == 0);

	free(sc->vxl_ftable, M_VXLAN);
	sc->vxl_ftable = NULL;
}

static void
vxlan_ftable_flush(struct vxlan_softc *sc, int all)
{
	struct vxlan_ftable_entry *fe, *tfe;
	int i;

	for (i = 0; i < VXLAN_SC_FTABLE_SIZE; i++) {
		LIST_FOREACH_SAFE(fe, &sc->vxl_ftable[i], vxlfe_hash, tfe) {
			if (all || VXLAN_FE_IS_DYNAMIC(fe))
				vxlan_ftable_entry_destroy(sc, fe);
		}
	}
}

static void
vxlan_ftable_expire(struct vxlan_softc *sc)
{
	struct vxlan_ftable_entry *fe, *tfe;
	int i;

	VXLAN_LOCK_WASSERT(sc);

	for (i = 0; i < VXLAN_SC_FTABLE_SIZE; i++) {
		LIST_FOREACH_SAFE(fe, &sc->vxl_ftable[i], vxlfe_hash, tfe) {
			if (VXLAN_FE_IS_DYNAMIC(fe) &&
			    time_uptime >= fe->vxlfe_expire)
				vxlan_ftable_entry_destroy(sc, fe);
		}
	}
}

static int
vxlan_ftable_update_locked(struct vxlan_softc *sc,
    const union vxlan_sockaddr *vxlsa, const uint8_t *mac,
    struct rm_priotracker *tracker)
{
	struct vxlan_ftable_entry *fe;
	int error __unused;

	VXLAN_LOCK_ASSERT(sc);

again:
	/*
	 * A forwarding entry for this MAC address might already exist. If
	 * so, update it, otherwise create a new one. We may have to upgrade
	 * the lock if we have to change or create an entry.
	 */
	fe = vxlan_ftable_entry_lookup(sc, mac);
	if (fe != NULL) {
		fe->vxlfe_expire = time_uptime + sc->vxl_ftable_timeout;

		if (!VXLAN_FE_IS_DYNAMIC(fe) ||
		    vxlan_sockaddr_in_equal(&fe->vxlfe_raddr, &vxlsa->sa))
			return (0);
		if (!VXLAN_LOCK_WOWNED(sc)) {
			VXLAN_RUNLOCK(sc, tracker);
			VXLAN_WLOCK(sc);
			sc->vxl_stats.ftable_lock_upgrade_failed++;
			goto again;
		}
		vxlan_sockaddr_in_copy(&fe->vxlfe_raddr, &vxlsa->sa);
		return (0);
	}

	if (!VXLAN_LOCK_WOWNED(sc)) {
		VXLAN_RUNLOCK(sc, tracker);
		VXLAN_WLOCK(sc);
		sc->vxl_stats.ftable_lock_upgrade_failed++;
		goto again;
	}

	if (sc->vxl_ftable_cnt >= sc->vxl_ftable_max) {
		sc->vxl_stats.ftable_nospace++;
		return (ENOSPC);
	}

	fe = vxlan_ftable_entry_alloc();
	if (fe == NULL)
		return (ENOMEM);

	vxlan_ftable_entry_init(sc, fe, mac, &vxlsa->sa, VXLAN_FE_FLAG_DYNAMIC);

	/* The prior lookup failed, so the insert should not. */
	error = vxlan_ftable_entry_insert(sc, fe);
	MPASS(error == 0);

	return (0);
}

static int
vxlan_ftable_learn(struct vxlan_softc *sc, const struct sockaddr *sa,
    const uint8_t *mac)
{
	struct rm_priotracker tracker;
	union vxlan_sockaddr vxlsa;
	int error;

	/*
	 * The source port may be randomly selected by the remote host, so
	 * use the port of the default destination address.
	 */
	vxlan_sockaddr_copy(&vxlsa, sa);
	vxlsa.in4.sin_port = sc->vxl_dst_addr.in4.sin_port;

	if (VXLAN_SOCKADDR_IS_IPV6(&vxlsa)) {
		error = vxlan_sockaddr_in6_embedscope(&vxlsa);
		if (error)
			return (error);
	}

	VXLAN_RLOCK(sc, &tracker);
	error = vxlan_ftable_update_locked(sc, &vxlsa, mac, &tracker);
	VXLAN_UNLOCK(sc, &tracker);

	return (error);
}

static int
vxlan_ftable_sysctl_dump(SYSCTL_HANDLER_ARGS)
{
	struct rm_priotracker tracker;
	struct sbuf sb;
	struct vxlan_softc *sc;
	struct vxlan_ftable_entry *fe;
	size_t size;
	int i, error;

	/*
	 * This is mostly intended for debugging during development. It is
	 * not practical to dump an entire large table this way.
	 */

	sc = arg1;
	size = PAGE_SIZE;	/* Calculate later. */

	sbuf_new(&sb, NULL, size, SBUF_FIXEDLEN);
	sbuf_putc(&sb, '\n');

	VXLAN_RLOCK(sc, &tracker);
	for (i = 0; i < VXLAN_SC_FTABLE_SIZE; i++) {
		LIST_FOREACH(fe, &sc->vxl_ftable[i], vxlfe_hash) {
			if (sbuf_error(&sb) != 0)
				break;
			vxlan_ftable_entry_dump(fe, &sb);
		}
	}
	VXLAN_RUNLOCK(sc, &tracker);

	if (sbuf_len(&sb) == 1)
		sbuf_setpos(&sb, 0);

	sbuf_finish(&sb);
	error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
	sbuf_delete(&sb);

	return (error);
}

static struct vxlan_ftable_entry *
vxlan_ftable_entry_alloc(void)
{
	struct vxlan_ftable_entry *fe;

	fe = malloc(sizeof(*fe), M_VXLAN, M_ZERO | M_NOWAIT);

	return (fe);
}

static void
vxlan_ftable_entry_free(struct vxlan_ftable_entry *fe)
{

	free(fe, M_VXLAN);
}

static void
vxlan_ftable_entry_init(struct vxlan_softc *sc, struct vxlan_ftable_entry *fe,
    const uint8_t *mac, const struct sockaddr *sa, uint32_t flags)
{

	fe->vxlfe_flags = flags;
	fe->vxlfe_expire = time_uptime + sc->vxl_ftable_timeout;
	memcpy(fe->vxlfe_mac, mac, ETHER_ADDR_LEN);
	vxlan_sockaddr_copy(&fe->vxlfe_raddr, sa);
}

static void
vxlan_ftable_entry_destroy(struct vxlan_softc *sc,
    struct vxlan_ftable_entry *fe)
{

	sc->vxl_ftable_cnt--;
	LIST_REMOVE(fe, vxlfe_hash);
	vxlan_ftable_entry_free(fe);
}

static int
vxlan_ftable_entry_insert(struct vxlan_softc *sc,
    struct vxlan_ftable_entry *fe)
{
	struct vxlan_ftable_entry *lfe;
	uint32_t hash;
	int dir;

	VXLAN_LOCK_WASSERT(sc);
	hash = VXLAN_SC_FTABLE_HASH(sc, fe->vxlfe_mac);

	lfe = LIST_FIRST(&sc->vxl_ftable[hash]);
	if (lfe == NULL) {
		LIST_INSERT_HEAD(&sc->vxl_ftable[hash], fe, vxlfe_hash);
		goto out;
	}

	do {
		dir = vxlan_ftable_addr_cmp(fe->vxlfe_mac, lfe->vxlfe_mac);
		if (dir == 0)
			return (EEXIST);
		if (dir > 0) {
			LIST_INSERT_BEFORE(lfe, fe, vxlfe_hash);
			goto out;
		} else if (LIST_NEXT(lfe, vxlfe_hash) == NULL) {
			LIST_INSERT_AFTER(lfe, fe, vxlfe_hash);
			goto out;
		} else
			lfe = LIST_NEXT(lfe, vxlfe_hash);
	} while (lfe != NULL);

out:
	sc->vxl_ftable_cnt++;

	return (0);
}

static struct vxlan_ftable_entry *
vxlan_ftable_entry_lookup(struct vxlan_softc *sc, const uint8_t *mac)
{
	struct vxlan_ftable_entry *fe;
	uint32_t hash;
	int dir;

	VXLAN_LOCK_ASSERT(sc);
	hash = VXLAN_SC_FTABLE_HASH(sc, mac);

	LIST_FOREACH(fe, &sc->vxl_ftable[hash], vxlfe_hash) {
		dir = vxlan_ftable_addr_cmp(mac, fe->vxlfe_mac);
		if (dir == 0)
			return (fe);
		if (dir > 0)
			break;
	}

	return (NULL);
}

static void
vxlan_ftable_entry_dump(struct vxlan_ftable_entry *fe, struct sbuf *sb)
{
	char buf[64];
	const union vxlan_sockaddr *sa;
	const void *addr;
	int i, len, af, width;

	sa = &fe->vxlfe_raddr;
	af = sa->sa.sa_family;
	len = sbuf_len(sb);

	sbuf_printf(sb, "%c 0x%02X ", VXLAN_FE_IS_DYNAMIC(fe) ? 'D' : 'S',
	    fe->vxlfe_flags);

	for (i = 0; i < ETHER_ADDR_LEN - 1; i++)
		sbuf_printf(sb, "%02X:", fe->vxlfe_mac[i]);
	sbuf_printf(sb, "%02X ", fe->vxlfe_mac[i]);

	if (af == AF_INET) {
		addr = &sa->in4.sin_addr;
		width = INET_ADDRSTRLEN - 1;
	} else {
		addr = &sa->in6.sin6_addr;
		width = INET6_ADDRSTRLEN - 1;
	}
	inet_ntop(af, addr, buf, sizeof(buf));
	sbuf_printf(sb, "%*s ", width, buf);

	sbuf_printf(sb, "%08jd", (intmax_t)fe->vxlfe_expire);

	sbuf_putc(sb, '\n');

	/* Truncate a partial line. */
	if (sbuf_error(sb) != 0)
		sbuf_setpos(sb, len);
}

static struct vxlan_socket *
vxlan_socket_alloc(const union vxlan_sockaddr *sa)
{
	struct vxlan_socket *vso;
	int i;

	vso = malloc(sizeof(*vso), M_VXLAN, M_WAITOK | M_ZERO);
	rm_init(&vso->vxlso_lock, "vxlansorm");
	refcount_init(&vso->vxlso_refcnt, 0);
	for (i = 0; i < VXLAN_SO_VNI_HASH_SIZE; i++)
		LIST_INIT(&vso->vxlso_vni_hash[i]);
	vso->vxlso_laddr = *sa;

	return (vso);
}

static void
vxlan_socket_destroy(struct vxlan_socket *vso)
{
	struct socket *so;
#ifdef INVARIANTS
	int i;
	struct vxlan_socket_mc_info *mc;

	for (i = 0; i < VXLAN_SO_MC_MAX_GROUPS; i++) {
		mc = &vso->vxlso_mc[i];
		KASSERT(mc->vxlsomc_gaddr.sa.sa_family == AF_UNSPEC,
		    ("%s: socket %p mc[%d] still has address",
		    __func__, vso, i));
	}

	for (i = 0; i < VXLAN_SO_VNI_HASH_SIZE; i++) {
		KASSERT(LIST_EMPTY(&vso->vxlso_vni_hash[i]),
		    ("%s: socket %p vni_hash[%d] not empty",
		    __func__, vso, i));
	}
#endif
	so = vso->vxlso_sock;
	if (so != NULL) {
		vso->vxlso_sock = NULL;
		soclose(so);
	}

	rm_destroy(&vso->vxlso_lock);
	free(vso, M_VXLAN);
}

static void
vxlan_socket_release(struct vxlan_socket *vso)
{
	int destroy;

	VXLAN_LIST_LOCK();
	destroy = VXLAN_SO_RELEASE(vso);
	if (destroy != 0)
		LIST_REMOVE(vso, vxlso_entry);
	VXLAN_LIST_UNLOCK();

	if (destroy != 0)
		vxlan_socket_destroy(vso);
}

static struct vxlan_socket *
vxlan_socket_lookup(union vxlan_sockaddr *vxlsa)
{
	struct vxlan_socket *vso;

	VXLAN_LIST_LOCK();
	LIST_FOREACH(vso, &vxlan_socket_list, vxlso_entry) {
		if (vxlan_sockaddr_cmp(&vso->vxlso_laddr, &vxlsa->sa) == 0) {
			VXLAN_SO_ACQUIRE(vso);
			break;
		}
	}
	VXLAN_LIST_UNLOCK();

	return (vso);
}

static void
vxlan_socket_insert(struct vxlan_socket *vso)
{

	VXLAN_LIST_LOCK();
	VXLAN_SO_ACQUIRE(vso);
	LIST_INSERT_HEAD(&vxlan_socket_list, vso, vxlso_entry);
	VXLAN_LIST_UNLOCK();
}

static int
vxlan_socket_init(struct vxlan_socket *vso, struct ifnet *ifp)
{
	struct thread *td;
	int error;

	td = curthread;

	error = socreate(vso->vxlso_laddr.sa.sa_family, &vso->vxlso_sock,
	    SOCK_DGRAM, IPPROTO_UDP, td->td_ucred, td);
	if (error) {
		if_printf(ifp, "cannot create socket: %d\n", error);
		return (error);
	}

	error = udp_set_kernel_tunneling(vso->vxlso_sock,
	    vxlan_rcv_udp_packet, NULL, vso);
	if (error) {
		if_printf(ifp, "cannot set tunneling function: %d\n", error);
		return (error);
	}

	if (vxlan_reuse_port != 0) {
		struct sockopt sopt;
		int val = 1;

		bzero(&sopt, sizeof(sopt));
		sopt.sopt_dir = SOPT_SET;
		sopt.sopt_level = IPPROTO_IP;
		sopt.sopt_name = SO_REUSEPORT;
		sopt.sopt_val = &val;
		sopt.sopt_valsize = sizeof(val);
		error = sosetopt(vso->vxlso_sock, &sopt);
		if (error) {
			if_printf(ifp,
			    "cannot set REUSEPORT socket opt: %d\n", error);
			return (error);
		}
	}

	return (0);
}

static int
vxlan_socket_bind(struct vxlan_socket *vso, struct ifnet *ifp)
{
	union vxlan_sockaddr laddr;
	struct thread *td;
	int error;

	td = curthread;
	laddr = vso->vxlso_laddr;

	error = sobind(vso->vxlso_sock, &laddr.sa, td);
	if (error) {
		if (error != EADDRINUSE)
			if_printf(ifp, "cannot bind socket: %d\n", error);
		return (error);
	}

	return (0);
}

static int
vxlan_socket_create(struct ifnet *ifp, int multicast,
    const union vxlan_sockaddr *saddr, struct vxlan_socket **vsop)
{
	union vxlan_sockaddr laddr;
	struct vxlan_socket *vso;
	int error;

	laddr = *saddr;

	/*
	 * If this socket will be multicast, then only the local port
	 * must be specified when binding.
	 */
	if (multicast != 0) {
		if (VXLAN_SOCKADDR_IS_IPV4(&laddr))
			laddr.in4.sin_addr.s_addr = INADDR_ANY;
#ifdef INET6
		else
			laddr.in6.sin6_addr = in6addr_any;
#endif
	}

	vso = vxlan_socket_alloc(&laddr);
	if (vso == NULL)
		return (ENOMEM);

	error = vxlan_socket_init(vso, ifp);
	if (error)
		goto fail;

	error = vxlan_socket_bind(vso, ifp);
	if (error)
		goto fail;

	/*
	 * There is a small window between the bind completing and
	 * inserting the socket, so that a concurrent create may fail.
	 * Let's not worry about that for now.
	 */
	vxlan_socket_insert(vso);
	*vsop = vso;

	return (0);

fail:
	vxlan_socket_destroy(vso);

	return (error);
}

static void
vxlan_socket_ifdetach(struct vxlan_socket *vso, struct ifnet *ifp,
    struct vxlan_softc_head *list)
{
	struct rm_priotracker tracker;
	struct vxlan_softc *sc;
	int i;

	VXLAN_SO_RLOCK(vso, &tracker);
	for (i = 0; i < VXLAN_SO_VNI_HASH_SIZE; i++) {
		LIST_FOREACH(sc, &vso->vxlso_vni_hash[i], vxl_entry)
			vxlan_ifdetach(sc, ifp, list);
	}
	VXLAN_SO_RUNLOCK(vso, &tracker);
}

static struct vxlan_socket *
vxlan_socket_mc_lookup(const union vxlan_sockaddr *vxlsa)
{
	union vxlan_sockaddr laddr;
	struct vxlan_socket *vso;

	laddr = *vxlsa;

	if (VXLAN_SOCKADDR_IS_IPV4(&laddr))
		laddr.in4.sin_addr.s_addr = INADDR_ANY;
#ifdef INET6
	else
		laddr.in6.sin6_addr = in6addr_any;
#endif

	vso = vxlan_socket_lookup(&laddr);

	return (vso);
}

static int
vxlan_sockaddr_mc_info_match(const struct vxlan_socket_mc_info *mc,
    const union vxlan_sockaddr *group, const union vxlan_sockaddr *local,
    int ifidx)
{

	if (!vxlan_sockaddr_in_any(local) &&
	    !vxlan_sockaddr_in_equal(&mc->vxlsomc_saddr, &local->sa))
		return (0);
	if (!vxlan_sockaddr_in_equal(&mc->vxlsomc_gaddr, &group->sa))
		return (0);
	if (ifidx != 0 && ifidx != mc->vxlsomc_ifidx)
		return (0);

	return (1);
}

static int
vxlan_socket_mc_join_group(struct vxlan_socket *vso,
    const union vxlan_sockaddr *group, const union vxlan_sockaddr *local,
    int *ifidx, union vxlan_sockaddr *source)
{
	struct sockopt sopt;
	int error;

	*source = *local;

	if (VXLAN_SOCKADDR_IS_IPV4(group)) {
		struct ip_mreq mreq;

		mreq.imr_multiaddr = group->in4.sin_addr;
		mreq.imr_interface = local->in4.sin_addr;

		bzero(&sopt, sizeof(sopt));
		sopt.sopt_dir = SOPT_SET;
		sopt.sopt_level = IPPROTO_IP;
		sopt.sopt_name = IP_ADD_MEMBERSHIP;
		sopt.sopt_val = &mreq;
		sopt.sopt_valsize = sizeof(mreq);
		error = sosetopt(vso->vxlso_sock, &sopt);
		if (error)
			return (error);

		/*
		 * BMV: Ideally, there would be a formal way for us to get
		 * the local interface that was selected based on the
		 * imr_interface address. We could then update *ifidx so
		 * vxlan_sockaddr_mc_info_match() would return a match for
		 * later creates that explicitly set the multicast interface.
		 *
		 * If we really need to, we can of course look in the INP's
		 * membership list:
		 *     sotoinpcb(vso->vxlso_sock)->inp_moptions->
		 *         imo_head[]->imf_inm->inm_ifp
		 * similarly to imo_match_group().
		 */
		source->in4.sin_addr = local->in4.sin_addr;

	} else if (VXLAN_SOCKADDR_IS_IPV6(group)) {
		struct ipv6_mreq mreq;

		mreq.ipv6mr_multiaddr = group->in6.sin6_addr;
		mreq.ipv6mr_interface = *ifidx;

		bzero(&sopt, sizeof(sopt));
		sopt.sopt_dir = SOPT_SET;
		sopt.sopt_level = IPPROTO_IPV6;
		sopt.sopt_name = IPV6_JOIN_GROUP;
		sopt.sopt_val = &mreq;
		sopt.sopt_valsize = sizeof(mreq);
		error = sosetopt(vso->vxlso_sock, &sopt);
		if (error)
			return (error);

		/*
		 * BMV: As with IPv4, we would really like to know what
		 * interface in6p_lookup_mcast_ifp() selected.
		 */
	} else
		error = EAFNOSUPPORT;

	return (error);
}

static int
vxlan_socket_mc_leave_group(struct vxlan_socket *vso,
    const union vxlan_sockaddr *group, const union vxlan_sockaddr *source,
    int ifidx)
{
	struct sockopt sopt;
	int error;

	bzero(&sopt, sizeof(sopt));
	sopt.sopt_dir = SOPT_SET;

	if (VXLAN_SOCKADDR_IS_IPV4(group)) {
		struct ip_mreq mreq;

		mreq.imr_multiaddr = group->in4.sin_addr;
		mreq.imr_interface = source->in4.sin_addr;

		sopt.sopt_level = IPPROTO_IP;
		sopt.sopt_name = IP_DROP_MEMBERSHIP;
		sopt.sopt_val = &mreq;
		sopt.sopt_valsize = sizeof(mreq);
		error = sosetopt(vso->vxlso_sock, &sopt);

	} else if (VXLAN_SOCKADDR_IS_IPV6(group)) {
		struct ipv6_mreq mreq;

		mreq.ipv6mr_multiaddr = group->in6.sin6_addr;
		mreq.ipv6mr_interface = ifidx;

		sopt.sopt_level = IPPROTO_IPV6;
		sopt.sopt_name = IPV6_LEAVE_GROUP;
		sopt.sopt_val = &mreq;
		sopt.sopt_valsize = sizeof(mreq);
		error = sosetopt(vso->vxlso_sock, &sopt);

	} else
		error = EAFNOSUPPORT;

	return (error);
}

static int
vxlan_socket_mc_add_group(struct vxlan_socket *vso,
    const union vxlan_sockaddr *group, const union vxlan_sockaddr *local,
    int ifidx, int *idx)
{
	union vxlan_sockaddr source;
	struct vxlan_socket_mc_info *mc;
	int i, empty, error;

	/*
	 * Within a socket, the same multicast group may be used by multiple
	 * interfaces, each with a different network identifier. But a socket
	 * may only join a multicast group once, so keep track of the users
	 * here.
	 */

	VXLAN_SO_WLOCK(vso);
	for (empty = 0, i = 0; i < VXLAN_SO_MC_MAX_GROUPS; i++) {
		mc = &vso->vxlso_mc[i];

		if (mc->vxlsomc_gaddr.sa.sa_family == AF_UNSPEC) {
			empty++;
			continue;
		}

		if (vxlan_sockaddr_mc_info_match(mc, group, local, ifidx))
			goto out;
	}
	VXLAN_SO_WUNLOCK(vso);

	if (empty == 0)
		return (ENOSPC);

	error = vxlan_socket_mc_join_group(vso, group, local, &ifidx, &source);
	if (error)
		return (error);

	VXLAN_SO_WLOCK(vso);
	for (i = 0; i < VXLAN_SO_MC_MAX_GROUPS; i++) {
		mc = &vso->vxlso_mc[i];

		if (mc->vxlsomc_gaddr.sa.sa_family == AF_UNSPEC) {
			vxlan_sockaddr_copy(&mc->vxlsomc_gaddr, &group->sa);
			vxlan_sockaddr_copy(&mc->vxlsomc_saddr, &source.sa);
			mc->vxlsomc_ifidx = ifidx;
			goto out;
		}
	}
	VXLAN_SO_WUNLOCK(vso);

	error = vxlan_socket_mc_leave_group(vso, group, &source, ifidx);
	MPASS(error == 0);

	return (ENOSPC);

out:
	mc->vxlsomc_users++;
	VXLAN_SO_WUNLOCK(vso);

	*idx = i;

	return (0);
}

static void
vxlan_socket_mc_release_group_by_idx(struct vxlan_socket *vso, int idx)
{
	union vxlan_sockaddr group, source;
	struct vxlan_socket_mc_info *mc;
	int ifidx, leave;

	KASSERT(idx >= 0 && idx < VXLAN_SO_MC_MAX_GROUPS,
	    ("%s: vso %p idx %d out of bounds", __func__, vso, idx));

	leave = 0;
	mc = &vso->vxlso_mc[idx];

	VXLAN_SO_WLOCK(vso);
	mc->vxlsomc_users--;
	if (mc->vxlsomc_users == 0) {
		group = mc->vxlsomc_gaddr;
		source = mc->vxlsomc_saddr;
		ifidx = mc->vxlsomc_ifidx;
		bzero(mc, sizeof(*mc));
		leave = 1;
	}
	VXLAN_SO_WUNLOCK(vso);

	if (leave != 0) {
		/*
		 * Our socket's membership in this group may have already
		 * been removed if we joined through an interface that's
		 * been detached.
		 */
		vxlan_socket_mc_leave_group(vso, &group, &source, ifidx);
	}
}

static struct vxlan_softc *
vxlan_socket_lookup_softc_locked(struct vxlan_socket *vso, uint32_t vni)
{
	struct vxlan_softc *sc;
	uint32_t hash;

	VXLAN_SO_LOCK_ASSERT(vso);
	hash = VXLAN_SO_VNI_HASH(vni);

	LIST_FOREACH(sc, &vso->vxlso_vni_hash[hash], vxl_entry) {
		if (sc->vxl_vni == vni) {
			VXLAN_ACQUIRE(sc);
			break;
		}
	}

	return (sc);
}

static struct vxlan_softc *
vxlan_socket_lookup_softc(struct vxlan_socket *vso, uint32_t vni)
{
	struct rm_priotracker tracker;
	struct vxlan_softc *sc;

	VXLAN_SO_RLOCK(vso, &tracker);
	sc = vxlan_socket_lookup_softc_locked(vso, vni);
	VXLAN_SO_RUNLOCK(vso, &tracker);

	return (sc);
}

static int
vxlan_socket_insert_softc(struct vxlan_socket *vso, struct vxlan_softc *sc)
{
	struct vxlan_softc *tsc;
	uint32_t vni, hash;

	vni = sc->vxl_vni;
	hash = VXLAN_SO_VNI_HASH(vni);

	VXLAN_SO_WLOCK(vso);
	tsc = vxlan_socket_lookup_softc_locked(vso, vni);
	if (tsc != NULL) {
		VXLAN_SO_WUNLOCK(vso);
		vxlan_release(tsc);
		return (EEXIST);
	}

	VXLAN_ACQUIRE(sc);
	LIST_INSERT_HEAD(&vso->vxlso_vni_hash[hash], sc, vxl_entry);
	VXLAN_SO_WUNLOCK(vso);

	return (0);
}

static void
vxlan_socket_remove_softc(struct vxlan_socket *vso, struct vxlan_softc *sc)
{

	VXLAN_SO_WLOCK(vso);
	LIST_REMOVE(sc, vxl_entry);
	VXLAN_SO_WUNLOCK(vso);

	vxlan_release(sc);
}

static struct ifnet *
vxlan_multicast_if_ref(struct vxlan_softc *sc, int ipv4)
{
	struct ifnet *ifp;

	VXLAN_LOCK_ASSERT(sc);

	if (ipv4 && sc->vxl_im4o != NULL)
		ifp = sc->vxl_im4o->imo_multicast_ifp;
	else if (!ipv4 && sc->vxl_im6o != NULL)
		ifp = sc->vxl_im6o->im6o_multicast_ifp;
	else
		ifp = NULL;

	if (ifp != NULL)
		if_ref(ifp);

	return (ifp);
}

static void
vxlan_free_multicast(struct vxlan_softc *sc)
{

	if (sc->vxl_mc_ifp != NULL) {
		if_rele(sc->vxl_mc_ifp);
		sc->vxl_mc_ifp = NULL;
		sc->vxl_mc_ifindex = 0;
	}

	if (sc->vxl_im4o != NULL) {
		free(sc->vxl_im4o, M_VXLAN);
		sc->vxl_im4o = NULL;
	}

	if (sc->vxl_im6o != NULL) {
		free(sc->vxl_im6o, M_VXLAN);
		sc->vxl_im6o = NULL;
	}
}

static int
vxlan_setup_multicast_interface(struct vxlan_softc *sc)
{
	struct ifnet *ifp;

	ifp = ifunit_ref(sc->vxl_mc_ifname);
	if (ifp == NULL) {
		if_printf(sc->vxl_ifp, "multicast interface %s does "
		    "not exist\n", sc->vxl_mc_ifname);
		return (ENOENT);
	}

	if ((ifp->if_flags & IFF_MULTICAST) == 0) {
		if_printf(sc->vxl_ifp, "interface %s does not support "
		    "multicast\n", sc->vxl_mc_ifname);
		if_rele(ifp);
		return (ENOTSUP);
	}

	sc->vxl_mc_ifp = ifp;
	sc->vxl_mc_ifindex = ifp->if_index;

	return (0);
}

static int
vxlan_setup_multicast(struct vxlan_softc *sc)
{
	const union vxlan_sockaddr *group;
	int error;

	group = &sc->vxl_dst_addr;
	error = 0;

	if (sc->vxl_mc_ifname[0] != '\0') {
		error = vxlan_setup_multicast_interface(sc);
		if (error)
			return (error);
	}

	/*
	 * Initialize a multicast options structure that is sufficiently
	 * populated for use in the respective IP output routine. This
	 * structure is typically stored in the socket, but our sockets
	 * may be shared among multiple interfaces.
	 */
	if (VXLAN_SOCKADDR_IS_IPV4(group)) {
		sc->vxl_im4o = malloc(sizeof(struct ip_moptions), M_VXLAN,
		    M_ZERO | M_WAITOK);
		sc->vxl_im4o->imo_multicast_ifp = sc->vxl_mc_ifp;
		sc->vxl_im4o->imo_multicast_ttl = sc->vxl_ttl;
		sc->vxl_im4o->imo_multicast_vif = -1;
	} else if (VXLAN_SOCKADDR_IS_IPV6(group)) {
		sc->vxl_im6o = malloc(sizeof(struct ip6_moptions), M_VXLAN,
		    M_ZERO | M_WAITOK);
		sc->vxl_im6o->im6o_multicast_ifp = sc->vxl_mc_ifp;
		sc->vxl_im6o->im6o_multicast_hlim = sc->vxl_ttl;
	}

	return (error);
}

static int
vxlan_setup_socket(struct vxlan_softc *sc)
{
	struct vxlan_socket *vso;
	struct ifnet *ifp;
	union vxlan_sockaddr *saddr, *daddr;
	int multicast, error;

	vso = NULL;
	ifp = sc->vxl_ifp;
	saddr = &sc->vxl_src_addr;
	daddr = &sc->vxl_dst_addr;

	multicast = vxlan_sockaddr_in_multicast(daddr);
	MPASS(multicast != -1);
	sc->vxl_vso_mc_index = -1;

	/*
	 * Try to create the socket. If that fails, attempt to use an
	 * existing socket.
	 */
	error = vxlan_socket_create(ifp, multicast, saddr, &vso);
	if (error) {
		if (multicast != 0)
			vso = vxlan_socket_mc_lookup(saddr);
		else
			vso = vxlan_socket_lookup(saddr);

		if (vso == NULL) {
			if_printf(ifp, "cannot create socket (error: %d), "
			    "and no existing socket found\n", error);
			goto out;
		}
	}

	if (multicast != 0) {
		error = vxlan_setup_multicast(sc);
		if (error)
			goto out;

		error = vxlan_socket_mc_add_group(vso, daddr, saddr,
		    sc->vxl_mc_ifindex, &sc->vxl_vso_mc_index);
		if (error)
			goto out;
	}

	sc->vxl_sock = vso;
	error = vxlan_socket_insert_softc(vso, sc);
	if (error) {
		sc->vxl_sock = NULL;
		if_printf(ifp, "network identifier %d already exists in "
		    "this socket\n", sc->vxl_vni);
		goto out;
	}

	return (0);

out:
	if (vso != NULL) {
		if (sc->vxl_vso_mc_index != -1) {
			vxlan_socket_mc_release_group_by_idx(vso,
			    sc->vxl_vso_mc_index);
			sc->vxl_vso_mc_index = -1;
		}
		if (multicast != 0)
			vxlan_free_multicast(sc);
		vxlan_socket_release(vso);
	}

	return (error);
}

#ifdef INET6
static void
vxlan_setup_zero_checksum_port(struct vxlan_softc *sc)
{

	if (!VXLAN_SOCKADDR_IS_IPV6(&sc->vxl_src_addr))
		return;

	MPASS(sc->vxl_src_addr.in6.sin6_port != 0);
	MPASS(sc->vxl_dst_addr.in6.sin6_port != 0);

	if (sc->vxl_src_addr.in6.sin6_port != sc->vxl_dst_addr.in6.sin6_port) {
		if_printf(sc->vxl_ifp, "port %d in src address does not match "
		    "port %d in dst address, rfc6935_port (%d) not updated.\n",
		    ntohs(sc->vxl_src_addr.in6.sin6_port),
		    ntohs(sc->vxl_dst_addr.in6.sin6_port),
		    V_zero_checksum_port);
		return;
	}

	if (V_zero_checksum_port != 0) {
		if (V_zero_checksum_port !=
		    ntohs(sc->vxl_src_addr.in6.sin6_port)) {
			if_printf(sc->vxl_ifp, "rfc6935_port is already set to "
			    "%d, cannot set it to %d.\n", V_zero_checksum_port,
			    ntohs(sc->vxl_src_addr.in6.sin6_port));
		}
		return;
	}

	V_zero_checksum_port = ntohs(sc->vxl_src_addr.in6.sin6_port);
	if_printf(sc->vxl_ifp, "rfc6935_port set to %d\n",
	    V_zero_checksum_port);
}
#endif

static void
vxlan_setup_interface_hdrlen(struct vxlan_softc *sc)
{
	struct ifnet *ifp;

	VXLAN_LOCK_WASSERT(sc);

	ifp = sc->vxl_ifp;
	ifp->if_hdrlen = ETHER_HDR_LEN + sizeof(struct vxlanudphdr);

	if (VXLAN_SOCKADDR_IS_IPV4(&sc->vxl_dst_addr) != 0)
		ifp->if_hdrlen += sizeof(struct ip);
	else if (VXLAN_SOCKADDR_IS_IPV6(&sc->vxl_dst_addr) != 0)
		ifp->if_hdrlen += sizeof(struct ip6_hdr);

	if ((sc->vxl_flags & VXLAN_FLAG_USER_MTU) == 0)
		ifp->if_mtu = ETHERMTU - ifp->if_hdrlen;
}

static int
vxlan_valid_init_config(struct vxlan_softc *sc)
{
	const char *reason;

	if (vxlan_check_vni(sc->vxl_vni) != 0) {
		reason = "invalid virtual network identifier specified";
		goto fail;
	}

	if (vxlan_sockaddr_supported(&sc->vxl_src_addr, 1) == 0) {
		reason = "source address type is not supported";
		goto fail;
	}

	if (vxlan_sockaddr_supported(&sc->vxl_dst_addr, 0) == 0) {
		reason = "destination address type is not supported";
		goto fail;
	}

	if (vxlan_sockaddr_in_any(&sc->vxl_dst_addr) != 0) {
		reason = "no valid destination address specified";
		goto fail;
	}

	if (vxlan_sockaddr_in_multicast(&sc->vxl_dst_addr) == 0 &&
	    sc->vxl_mc_ifname[0] != '\0') {
		reason = "can only specify interface with a group address";
		goto fail;
	}

	if (vxlan_sockaddr_in_any(&sc->vxl_src_addr) == 0) {
		if (VXLAN_SOCKADDR_IS_IPV4(&sc->vxl_src_addr) ^
		    VXLAN_SOCKADDR_IS_IPV4(&sc->vxl_dst_addr)) {
			reason = "source and destination address must both "
			    "be either IPv4 or IPv6";
			goto fail;
		}
	}

	if (sc->vxl_src_addr.in4.sin_port == 0) {
		reason = "local port not specified";
		goto fail;
	}

	if (sc->vxl_dst_addr.in4.sin_port == 0) {
		reason = "remote port not specified";
		goto fail;
	}

	return (0);

fail:
	if_printf(sc->vxl_ifp, "cannot initialize interface: %s\n", reason);
	return (EINVAL);
}

static void
vxlan_init_wait(struct vxlan_softc *sc)
{

	VXLAN_LOCK_WASSERT(sc);
	while (sc->vxl_flags & VXLAN_FLAG_INIT)
		rm_sleep(sc, &sc->vxl_lock, 0, "vxlint", hz);
}

static void
vxlan_init_complete(struct vxlan_softc *sc)
{

	VXLAN_WLOCK(sc);
	sc->vxl_flags &= ~VXLAN_FLAG_INIT;
	wakeup(sc);
	VXLAN_WUNLOCK(sc);
}

static void
vxlan_init(void *xsc)
{
	static const uint8_t empty_mac[ETHER_ADDR_LEN];
	struct vxlan_softc *sc;
	struct ifnet *ifp;

	sc = xsc;
	ifp = sc->vxl_ifp;

	sx_xlock(&vxlan_sx);
	VXLAN_WLOCK(sc);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		VXLAN_WUNLOCK(sc);
		sx_xunlock(&vxlan_sx);
		return;
	}
	sc->vxl_flags |= VXLAN_FLAG_INIT;
	VXLAN_WUNLOCK(sc);

	if (vxlan_valid_init_config(sc) != 0)
		goto out;

	if (vxlan_setup_socket(sc) != 0)
		goto out;

#ifdef INET6
	vxlan_setup_zero_checksum_port(sc);
#endif

	/* Initialize the default forwarding entry. */
	vxlan_ftable_entry_init(sc, &sc->vxl_default_fe, empty_mac,
	    &sc->vxl_dst_addr.sa, VXLAN_FE_FLAG_STATIC);

	VXLAN_WLOCK(sc);
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	callout_reset(&sc->vxl_callout, vxlan_ftable_prune_period * hz,
	    vxlan_timer, sc);
	VXLAN_WUNLOCK(sc);

	if_link_state_change(ifp, LINK_STATE_UP);

	EVENTHANDLER_INVOKE(vxlan_start, ifp, sc->vxl_src_addr.in4.sin_family,
	    ntohs(sc->vxl_src_addr.in4.sin_port));
out:
	vxlan_init_complete(sc);
	sx_xunlock(&vxlan_sx);
}

static void
vxlan_release(struct vxlan_softc *sc)
{

	/*
	 * The softc may be destroyed as soon as we release our reference,
	 * so we cannot serialize the wakeup with the softc lock. We use a
	 * timeout in our sleeps so a missed wakeup is unfortunate but not
	 * fatal.
	 */
	if (VXLAN_RELEASE(sc) != 0)
		wakeup(sc);
}

static void
vxlan_teardown_wait(struct vxlan_softc *sc)
{

	VXLAN_LOCK_WASSERT(sc);
	while (sc->vxl_flags & VXLAN_FLAG_TEARDOWN)
		rm_sleep(sc, &sc->vxl_lock, 0, "vxltrn", hz);
}

static void
vxlan_teardown_complete(struct vxlan_softc *sc)
{

	VXLAN_WLOCK(sc);
	sc->vxl_flags &= ~VXLAN_FLAG_TEARDOWN;
	wakeup(sc);
	VXLAN_WUNLOCK(sc);
}

static void
vxlan_teardown_locked(struct vxlan_softc *sc)
{
	struct ifnet *ifp;
	struct vxlan_socket *vso;

	sx_assert(&vxlan_sx, SA_XLOCKED);
	VXLAN_LOCK_WASSERT(sc);
	MPASS(sc->vxl_flags & VXLAN_FLAG_TEARDOWN);

	ifp = sc->vxl_ifp;
	ifp->if_flags &= ~IFF_UP;
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	callout_stop(&sc->vxl_callout);
	vso = sc->vxl_sock;
	sc->vxl_sock = NULL;

	VXLAN_WUNLOCK(sc);
	if_link_state_change(ifp, LINK_STATE_DOWN);
	EVENTHANDLER_INVOKE(vxlan_stop, ifp, sc->vxl_src_addr.in4.sin_family,
	    ntohs(sc->vxl_src_addr.in4.sin_port));

	if (vso != NULL) {
		vxlan_socket_remove_softc(vso, sc);

		if (sc->vxl_vso_mc_index != -1) {
			vxlan_socket_mc_release_group_by_idx(vso,
			    sc->vxl_vso_mc_index);
			sc->vxl_vso_mc_index = -1;
		}
	}

	VXLAN_WLOCK(sc);
	while (sc->vxl_refcnt != 0)
		rm_sleep(sc, &sc->vxl_lock, 0, "vxldrn", hz);
	VXLAN_WUNLOCK(sc);

	callout_drain(&sc->vxl_callout);

	vxlan_free_multicast(sc);
	if (vso != NULL)
		vxlan_socket_release(vso);

	vxlan_teardown_complete(sc);
}

static void
vxlan_teardown(struct vxlan_softc *sc)
{

	sx_xlock(&vxlan_sx);
	VXLAN_WLOCK(sc);
	if (sc->vxl_flags & VXLAN_FLAG_TEARDOWN) {
		vxlan_teardown_wait(sc);
		VXLAN_WUNLOCK(sc);
		sx_xunlock(&vxlan_sx);
		return;
	}

	sc->vxl_flags |= VXLAN_FLAG_TEARDOWN;
	vxlan_teardown_locked(sc);
	sx_xunlock(&vxlan_sx);
}

static void
vxlan_ifdetach(struct vxlan_softc *sc, struct ifnet *ifp,
    struct vxlan_softc_head *list)
{

	VXLAN_WLOCK(sc);

	if (sc->vxl_mc_ifp != ifp)
		goto out;
	if (sc->vxl_flags & VXLAN_FLAG_TEARDOWN)
		goto out;

	sc->vxl_flags |= VXLAN_FLAG_TEARDOWN;
	LIST_INSERT_HEAD(list, sc, vxl_ifdetach_list);

out:
	VXLAN_WUNLOCK(sc);
}

static void
vxlan_timer(void *xsc)
{
	struct vxlan_softc *sc;

	sc = xsc;
	VXLAN_LOCK_WASSERT(sc);

	vxlan_ftable_expire(sc);
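	/*
	 * Note: the timer runs with the softc lock held (asserted above)
	 * and rearms itself below; it is stopped and drained in
	 * vxlan_teardown_locked().
	 */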
	callout_schedule(&sc->vxl_callout, vxlan_ftable_prune_period * hz);
}

static int
vxlan_ioctl_ifflags(struct vxlan_softc *sc)
{
	struct ifnet *ifp;

	ifp = sc->vxl_ifp;

	if (ifp->if_flags & IFF_UP) {
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			vxlan_init(sc);
	} else {
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			vxlan_teardown(sc);
	}

	return (0);
}

static int
vxlan_ctrl_get_config(struct vxlan_softc *sc, void *arg)
{
	struct rm_priotracker tracker;
	struct ifvxlancfg *cfg;

	cfg = arg;
	bzero(cfg, sizeof(*cfg));

	VXLAN_RLOCK(sc, &tracker);
	cfg->vxlc_vni = sc->vxl_vni;
	memcpy(&cfg->vxlc_local_sa, &sc->vxl_src_addr,
	    sizeof(union vxlan_sockaddr));
	memcpy(&cfg->vxlc_remote_sa, &sc->vxl_dst_addr,
	    sizeof(union vxlan_sockaddr));
	cfg->vxlc_mc_ifindex = sc->vxl_mc_ifindex;
	cfg->vxlc_ftable_cnt = sc->vxl_ftable_cnt;
	cfg->vxlc_ftable_max = sc->vxl_ftable_max;
	cfg->vxlc_ftable_timeout = sc->vxl_ftable_timeout;
	cfg->vxlc_port_min = sc->vxl_min_port;
	cfg->vxlc_port_max = sc->vxl_max_port;
	cfg->vxlc_learn = (sc->vxl_flags & VXLAN_FLAG_LEARN) != 0;
	cfg->vxlc_ttl = sc->vxl_ttl;
	VXLAN_RUNLOCK(sc, &tracker);

#ifdef INET6
	if (VXLAN_SOCKADDR_IS_IPV6(&cfg->vxlc_local_sa))
		sa6_recoverscope(&cfg->vxlc_local_sa.in6);
	if (VXLAN_SOCKADDR_IS_IPV6(&cfg->vxlc_remote_sa))
		sa6_recoverscope(&cfg->vxlc_remote_sa.in6);
#endif

	return (0);
}

static int
vxlan_ctrl_set_vni(struct vxlan_softc *sc, void *arg)
{
	struct ifvxlancmd *cmd;
	int error;

	cmd = arg;

	if (vxlan_check_vni(cmd->vxlcmd_vni) != 0)
		return (EINVAL);

	VXLAN_WLOCK(sc);
	if (vxlan_can_change_config(sc)) {
		sc->vxl_vni = cmd->vxlcmd_vni;
		error = 0;
	} else
		error = EBUSY;
	VXLAN_WUNLOCK(sc);

	return (error);
}

static int
vxlan_ctrl_set_local_addr(struct vxlan_softc *sc, void *arg)
{
	struct ifvxlancmd *cmd;
	union vxlan_sockaddr *vxlsa;
	int error;

	cmd = arg;
	vxlsa = &cmd->vxlcmd_sa;

	if (!VXLAN_SOCKADDR_IS_IPV46(vxlsa))
		return (EINVAL);
	if (vxlan_sockaddr_in_multicast(vxlsa) != 0)
		return (EINVAL);
	if (VXLAN_SOCKADDR_IS_IPV6(vxlsa)) {
		error = vxlan_sockaddr_in6_embedscope(vxlsa);
		if (error)
			return (error);
	}

	VXLAN_WLOCK(sc);
	if (vxlan_can_change_config(sc)) {
		vxlan_sockaddr_in_copy(&sc->vxl_src_addr, &vxlsa->sa);
		vxlan_set_hwcaps(sc);
		error = 0;
	} else
		error = EBUSY;
	VXLAN_WUNLOCK(sc);

	return (error);
}

static int
vxlan_ctrl_set_remote_addr(struct vxlan_softc *sc, void *arg)
{
	struct ifvxlancmd *cmd;
	union vxlan_sockaddr *vxlsa;
	int error;

	cmd = arg;
	vxlsa = &cmd->vxlcmd_sa;

	if (!VXLAN_SOCKADDR_IS_IPV46(vxlsa))
		return (EINVAL);
	if (VXLAN_SOCKADDR_IS_IPV6(vxlsa)) {
		error = vxlan_sockaddr_in6_embedscope(vxlsa);
		if (error)
			return (error);
	}

	VXLAN_WLOCK(sc);
	if (vxlan_can_change_config(sc)) {
		vxlan_sockaddr_in_copy(&sc->vxl_dst_addr, &vxlsa->sa);
		vxlan_setup_interface_hdrlen(sc);
		error = 0;
	} else
		error = EBUSY;
	VXLAN_WUNLOCK(sc);

	return (error);
}
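
/*
 * Note for the port handlers below: the value is stored in network byte
 * order through the in4 view of the sockaddr union; the port field sits
 * at the same offset in sockaddr_in and sockaddr_in6, so the assignment
 * works for both address families.
 */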
*sc, void *arg) 2032 { 2033 struct ifvxlancmd *cmd; 2034 int error; 2035 2036 cmd = arg; 2037 2038 if (cmd->vxlcmd_port == 0) 2039 return (EINVAL); 2040 2041 VXLAN_WLOCK(sc); 2042 if (vxlan_can_change_config(sc)) { 2043 sc->vxl_src_addr.in4.sin_port = htons(cmd->vxlcmd_port); 2044 error = 0; 2045 } else 2046 error = EBUSY; 2047 VXLAN_WUNLOCK(sc); 2048 2049 return (error); 2050 } 2051 2052 static int 2053 vxlan_ctrl_set_remote_port(struct vxlan_softc *sc, void *arg) 2054 { 2055 struct ifvxlancmd *cmd; 2056 int error; 2057 2058 cmd = arg; 2059 2060 if (cmd->vxlcmd_port == 0) 2061 return (EINVAL); 2062 2063 VXLAN_WLOCK(sc); 2064 if (vxlan_can_change_config(sc)) { 2065 sc->vxl_dst_addr.in4.sin_port = htons(cmd->vxlcmd_port); 2066 error = 0; 2067 } else 2068 error = EBUSY; 2069 VXLAN_WUNLOCK(sc); 2070 2071 return (error); 2072 } 2073 2074 static int 2075 vxlan_ctrl_set_port_range(struct vxlan_softc *sc, void *arg) 2076 { 2077 struct ifvxlancmd *cmd; 2078 uint16_t min, max; 2079 int error; 2080 2081 cmd = arg; 2082 min = cmd->vxlcmd_port_min; 2083 max = cmd->vxlcmd_port_max; 2084 2085 if (max < min) 2086 return (EINVAL); 2087 2088 VXLAN_WLOCK(sc); 2089 if (vxlan_can_change_config(sc)) { 2090 sc->vxl_min_port = min; 2091 sc->vxl_max_port = max; 2092 error = 0; 2093 } else 2094 error = EBUSY; 2095 VXLAN_WUNLOCK(sc); 2096 2097 return (error); 2098 } 2099 2100 static int 2101 vxlan_ctrl_set_ftable_timeout(struct vxlan_softc *sc, void *arg) 2102 { 2103 struct ifvxlancmd *cmd; 2104 int error; 2105 2106 cmd = arg; 2107 2108 VXLAN_WLOCK(sc); 2109 if (vxlan_check_ftable_timeout(cmd->vxlcmd_ftable_timeout) == 0) { 2110 sc->vxl_ftable_timeout = cmd->vxlcmd_ftable_timeout; 2111 error = 0; 2112 } else 2113 error = EINVAL; 2114 VXLAN_WUNLOCK(sc); 2115 2116 return (error); 2117 } 2118 2119 static int 2120 vxlan_ctrl_set_ftable_max(struct vxlan_softc *sc, void *arg) 2121 { 2122 struct ifvxlancmd *cmd; 2123 int error; 2124 2125 cmd = arg; 2126 2127 VXLAN_WLOCK(sc); 2128 if (vxlan_check_ftable_max(cmd->vxlcmd_ftable_max) == 0) { 2129 sc->vxl_ftable_max = cmd->vxlcmd_ftable_max; 2130 error = 0; 2131 } else 2132 error = EINVAL; 2133 VXLAN_WUNLOCK(sc); 2134 2135 return (error); 2136 } 2137 2138 static int 2139 vxlan_ctrl_set_multicast_if(struct vxlan_softc * sc, void *arg) 2140 { 2141 struct ifvxlancmd *cmd; 2142 int error; 2143 2144 cmd = arg; 2145 2146 VXLAN_WLOCK(sc); 2147 if (vxlan_can_change_config(sc)) { 2148 strlcpy(sc->vxl_mc_ifname, cmd->vxlcmd_ifname, IFNAMSIZ); 2149 vxlan_set_hwcaps(sc); 2150 error = 0; 2151 } else 2152 error = EBUSY; 2153 VXLAN_WUNLOCK(sc); 2154 2155 return (error); 2156 } 2157 2158 static int 2159 vxlan_ctrl_set_ttl(struct vxlan_softc *sc, void *arg) 2160 { 2161 struct ifvxlancmd *cmd; 2162 int error; 2163 2164 cmd = arg; 2165 2166 VXLAN_WLOCK(sc); 2167 if (vxlan_check_ttl(cmd->vxlcmd_ttl) == 0) { 2168 sc->vxl_ttl = cmd->vxlcmd_ttl; 2169 if (sc->vxl_im4o != NULL) 2170 sc->vxl_im4o->imo_multicast_ttl = sc->vxl_ttl; 2171 if (sc->vxl_im6o != NULL) 2172 sc->vxl_im6o->im6o_multicast_hlim = sc->vxl_ttl; 2173 error = 0; 2174 } else 2175 error = EINVAL; 2176 VXLAN_WUNLOCK(sc); 2177 2178 return (error); 2179 } 2180 2181 static int 2182 vxlan_ctrl_set_learn(struct vxlan_softc *sc, void *arg) 2183 { 2184 struct ifvxlancmd *cmd; 2185 2186 cmd = arg; 2187 2188 VXLAN_WLOCK(sc); 2189 if (cmd->vxlcmd_flags & VXLAN_CMD_FLAG_LEARN) 2190 sc->vxl_flags |= VXLAN_FLAG_LEARN; 2191 else 2192 sc->vxl_flags &= ~VXLAN_FLAG_LEARN; 2193 VXLAN_WUNLOCK(sc); 2194 2195 return (0); 2196 } 2197 2198 static int 2199 
vxlan_ctrl_ftable_entry_add(struct vxlan_softc *sc, void *arg) 2200 { 2201 union vxlan_sockaddr vxlsa; 2202 struct ifvxlancmd *cmd; 2203 struct vxlan_ftable_entry *fe; 2204 int error; 2205 2206 cmd = arg; 2207 vxlsa = cmd->vxlcmd_sa; 2208 2209 if (!VXLAN_SOCKADDR_IS_IPV46(&vxlsa)) 2210 return (EINVAL); 2211 if (vxlan_sockaddr_in_any(&vxlsa) != 0) 2212 return (EINVAL); 2213 if (vxlan_sockaddr_in_multicast(&vxlsa) != 0) 2214 return (EINVAL); 2215 /* BMV: We could support both IPv4 and IPv6 later. */ 2216 if (vxlsa.sa.sa_family != sc->vxl_dst_addr.sa.sa_family) 2217 return (EAFNOSUPPORT); 2218 2219 if (VXLAN_SOCKADDR_IS_IPV6(&vxlsa)) { 2220 error = vxlan_sockaddr_in6_embedscope(&vxlsa); 2221 if (error) 2222 return (error); 2223 } 2224 2225 fe = vxlan_ftable_entry_alloc(); 2226 if (fe == NULL) 2227 return (ENOMEM); 2228 2229 if (vxlsa.in4.sin_port == 0) 2230 vxlsa.in4.sin_port = sc->vxl_dst_addr.in4.sin_port; 2231 2232 vxlan_ftable_entry_init(sc, fe, cmd->vxlcmd_mac, &vxlsa.sa, 2233 VXLAN_FE_FLAG_STATIC); 2234 2235 VXLAN_WLOCK(sc); 2236 error = vxlan_ftable_entry_insert(sc, fe); 2237 VXLAN_WUNLOCK(sc); 2238 2239 if (error) 2240 vxlan_ftable_entry_free(fe); 2241 2242 return (error); 2243 } 2244 2245 static int 2246 vxlan_ctrl_ftable_entry_rem(struct vxlan_softc *sc, void *arg) 2247 { 2248 struct ifvxlancmd *cmd; 2249 struct vxlan_ftable_entry *fe; 2250 int error; 2251 2252 cmd = arg; 2253 2254 VXLAN_WLOCK(sc); 2255 fe = vxlan_ftable_entry_lookup(sc, cmd->vxlcmd_mac); 2256 if (fe != NULL) { 2257 vxlan_ftable_entry_destroy(sc, fe); 2258 error = 0; 2259 } else 2260 error = ENOENT; 2261 VXLAN_WUNLOCK(sc); 2262 2263 return (error); 2264 } 2265 2266 static int 2267 vxlan_ctrl_flush(struct vxlan_softc *sc, void *arg) 2268 { 2269 struct ifvxlancmd *cmd; 2270 int all; 2271 2272 cmd = arg; 2273 all = cmd->vxlcmd_flags & VXLAN_CMD_FLAG_FLUSH_ALL; 2274 2275 VXLAN_WLOCK(sc); 2276 vxlan_ftable_flush(sc, all); 2277 VXLAN_WUNLOCK(sc); 2278 2279 return (0); 2280 } 2281 2282 static int 2283 vxlan_ioctl_drvspec(struct vxlan_softc *sc, struct ifdrv *ifd, int get) 2284 { 2285 const struct vxlan_control *vc; 2286 union { 2287 struct ifvxlancfg cfg; 2288 struct ifvxlancmd cmd; 2289 } args; 2290 int out, error; 2291 2292 if (ifd->ifd_cmd >= vxlan_control_table_size) 2293 return (EINVAL); 2294 2295 bzero(&args, sizeof(args)); 2296 vc = &vxlan_control_table[ifd->ifd_cmd]; 2297 out = (vc->vxlc_flags & VXLAN_CTRL_FLAG_COPYOUT) != 0; 2298 2299 if ((get != 0 && out == 0) || (get == 0 && out != 0)) 2300 return (EINVAL); 2301 2302 if (vc->vxlc_flags & VXLAN_CTRL_FLAG_SUSER) { 2303 error = priv_check(curthread, PRIV_NET_VXLAN); 2304 if (error) 2305 return (error); 2306 } 2307 2308 if (ifd->ifd_len != vc->vxlc_argsize || 2309 ifd->ifd_len > sizeof(args)) 2310 return (EINVAL); 2311 2312 if (vc->vxlc_flags & VXLAN_CTRL_FLAG_COPYIN) { 2313 error = copyin(ifd->ifd_data, &args, ifd->ifd_len); 2314 if (error) 2315 return (error); 2316 } 2317 2318 error = vc->vxlc_func(sc, &args); 2319 if (error) 2320 return (error); 2321 2322 if (vc->vxlc_flags & VXLAN_CTRL_FLAG_COPYOUT) { 2323 error = copyout(&args, ifd->ifd_data, ifd->ifd_len); 2324 if (error) 2325 return (error); 2326 } 2327 2328 return (0); 2329 } 2330 2331 static int 2332 vxlan_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 2333 { 2334 struct rm_priotracker tracker; 2335 struct vxlan_softc *sc; 2336 struct ifreq *ifr; 2337 struct ifdrv *ifd; 2338 int error; 2339 2340 sc = ifp->if_softc; 2341 ifr = (struct ifreq *) data; 2342 ifd = (struct ifdrv *) data; 2343 2344 error = 0; 
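	/*
	 * The SIOC[GS]DRVSPEC cases in the switch below hand the request to
	 * vxlan_ioctl_drvspec() above, which bounds-checks ifd_cmd against
	 * vxlan_control_table, enforces PRIV_NET_VXLAN where required, and
	 * copies the argument in and/or out as the table entry demands.
	 * A minimal userland sketch of driving one of these controls
	 * (illustrative only; it assumes the VXLAN_CMD_SET_VNI command and
	 * the struct ifvxlancmd layout from net/if_vxlan.h, plus an already
	 * open socket s; see also ifconfig(8)):
	 *
	 *	struct ifdrv ifd;
	 *	struct ifvxlancmd vxlcmd;
	 *
	 *	memset(&ifd, 0, sizeof(ifd));
	 *	memset(&vxlcmd, 0, sizeof(vxlcmd));
	 *	strlcpy(ifd.ifd_name, "vxlan0", sizeof(ifd.ifd_name));
	 *	vxlcmd.vxlcmd_vni = 108;
	 *	ifd.ifd_cmd = VXLAN_CMD_SET_VNI;
	 *	ifd.ifd_len = sizeof(vxlcmd);
	 *	ifd.ifd_data = &vxlcmd;
	 *	if (ioctl(s, SIOCSDRVSPEC, &ifd) == -1)
	 *		err(1, "SIOCSDRVSPEC");
	 *
	 * Note that vxlan_ctrl_set_vni() returns EBUSY unless the interface
	 * is down (see vxlan_can_change_config()).
	 */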
2345 2346 switch (cmd) { 2347 case SIOCADDMULTI: 2348 case SIOCDELMULTI: 2349 break; 2350 2351 case SIOCGDRVSPEC: 2352 case SIOCSDRVSPEC: 2353 error = vxlan_ioctl_drvspec(sc, ifd, cmd == SIOCGDRVSPEC); 2354 break; 2355 2356 case SIOCSIFFLAGS: 2357 error = vxlan_ioctl_ifflags(sc); 2358 break; 2359 2360 case SIOCSIFMEDIA: 2361 case SIOCGIFMEDIA: 2362 error = ifmedia_ioctl(ifp, ifr, &sc->vxl_media, cmd); 2363 break; 2364 2365 case SIOCSIFMTU: 2366 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > VXLAN_MAX_MTU) { 2367 error = EINVAL; 2368 } else { 2369 VXLAN_WLOCK(sc); 2370 ifp->if_mtu = ifr->ifr_mtu; 2371 sc->vxl_flags |= VXLAN_FLAG_USER_MTU; 2372 VXLAN_WUNLOCK(sc); 2373 } 2374 break; 2375 2376 case SIOCSIFCAP: 2377 VXLAN_WLOCK(sc); 2378 error = vxlan_set_reqcap(sc, ifp, ifr->ifr_reqcap); 2379 if (error == 0) 2380 vxlan_set_hwcaps(sc); 2381 VXLAN_WUNLOCK(sc); 2382 break; 2383 2384 case SIOCGTUNFIB: 2385 VXLAN_RLOCK(sc, &tracker); 2386 ifr->ifr_fib = sc->vxl_fibnum; 2387 VXLAN_RUNLOCK(sc, &tracker); 2388 break; 2389 2390 case SIOCSTUNFIB: 2391 if ((error = priv_check(curthread, PRIV_NET_VXLAN)) != 0) 2392 break; 2393 2394 if (ifr->ifr_fib >= rt_numfibs) 2395 error = EINVAL; 2396 else { 2397 VXLAN_WLOCK(sc); 2398 sc->vxl_fibnum = ifr->ifr_fib; 2399 VXLAN_WUNLOCK(sc); 2400 } 2401 break; 2402 2403 default: 2404 error = ether_ioctl(ifp, cmd, data); 2405 break; 2406 } 2407 2408 return (error); 2409 } 2410 2411 #if defined(INET) || defined(INET6) 2412 static uint16_t 2413 vxlan_pick_source_port(struct vxlan_softc *sc, struct mbuf *m) 2414 { 2415 int range; 2416 uint32_t hash; 2417 2418 range = sc->vxl_max_port - sc->vxl_min_port + 1; 2419 2420 if (M_HASHTYPE_ISHASH(m)) 2421 hash = m->m_pkthdr.flowid; 2422 else 2423 hash = jenkins_hash(m->m_data, ETHER_HDR_LEN, 2424 sc->vxl_port_hash_key); 2425 2426 return (sc->vxl_min_port + (hash % range)); 2427 } 2428 2429 static void 2430 vxlan_encap_header(struct vxlan_softc *sc, struct mbuf *m, int ipoff, 2431 uint16_t srcport, uint16_t dstport) 2432 { 2433 struct vxlanudphdr *hdr; 2434 struct udphdr *udph; 2435 struct vxlan_header *vxh; 2436 int len; 2437 2438 len = m->m_pkthdr.len - ipoff; 2439 MPASS(len >= sizeof(struct vxlanudphdr)); 2440 hdr = mtodo(m, ipoff); 2441 2442 udph = &hdr->vxlh_udp; 2443 udph->uh_sport = srcport; 2444 udph->uh_dport = dstport; 2445 udph->uh_ulen = htons(len); 2446 udph->uh_sum = 0; 2447 2448 vxh = &hdr->vxlh_hdr; 2449 vxh->vxlh_flags = htonl(VXLAN_HDR_FLAGS_VALID_VNI); 2450 vxh->vxlh_vni = htonl(sc->vxl_vni << VXLAN_HDR_VNI_SHIFT); 2451 } 2452 #endif 2453 2454 #if defined(INET6) || defined(INET) 2455 /* 2456 * Return the CSUM_INNER_* equivalent of CSUM_* caps. 2457 */ 2458 static uint32_t 2459 csum_flags_to_inner_flags(uint32_t csum_flags_in, const uint32_t encap) 2460 { 2461 uint32_t csum_flags = encap; 2462 const uint32_t v4 = CSUM_IP | CSUM_IP_UDP | CSUM_IP_TCP; 2463 2464 /* 2465 * csum_flags can request either v4 or v6 offload but not both. 2466 * tcp_output always sets CSUM_TSO (both CSUM_IP_TSO and CSUM_IP6_TSO) 2467 * so those bits are no good to detect the IP version. Other bits are 2468 * always set with CSUM_TSO and we use those to figure out the IP 2469 * version. 
2470 */ 2471 if (csum_flags_in & v4) { 2472 if (csum_flags_in & CSUM_IP) 2473 csum_flags |= CSUM_INNER_IP; 2474 if (csum_flags_in & CSUM_IP_UDP) 2475 csum_flags |= CSUM_INNER_IP_UDP; 2476 if (csum_flags_in & CSUM_IP_TCP) 2477 csum_flags |= CSUM_INNER_IP_TCP; 2478 if (csum_flags_in & CSUM_IP_TSO) 2479 csum_flags |= CSUM_INNER_IP_TSO; 2480 } else { 2481 #ifdef INVARIANTS 2482 const uint32_t v6 = CSUM_IP6_UDP | CSUM_IP6_TCP; 2483 2484 MPASS((csum_flags_in & v6) != 0); 2485 #endif 2486 if (csum_flags_in & CSUM_IP6_UDP) 2487 csum_flags |= CSUM_INNER_IP6_UDP; 2488 if (csum_flags_in & CSUM_IP6_TCP) 2489 csum_flags |= CSUM_INNER_IP6_TCP; 2490 if (csum_flags_in & CSUM_IP6_TSO) 2491 csum_flags |= CSUM_INNER_IP6_TSO; 2492 } 2493 2494 return (csum_flags); 2495 } 2496 #endif 2497 2498 static int 2499 vxlan_encap4(struct vxlan_softc *sc, const union vxlan_sockaddr *fvxlsa, 2500 struct mbuf *m) 2501 { 2502 #ifdef INET 2503 struct ifnet *ifp; 2504 struct ip *ip; 2505 struct in_addr srcaddr, dstaddr; 2506 uint16_t srcport, dstport; 2507 int len, mcast, error; 2508 struct route route, *ro; 2509 struct sockaddr_in *sin; 2510 uint32_t csum_flags; 2511 2512 NET_EPOCH_ASSERT(); 2513 2514 ifp = sc->vxl_ifp; 2515 srcaddr = sc->vxl_src_addr.in4.sin_addr; 2516 srcport = vxlan_pick_source_port(sc, m); 2517 dstaddr = fvxlsa->in4.sin_addr; 2518 dstport = fvxlsa->in4.sin_port; 2519 2520 M_PREPEND(m, sizeof(struct ip) + sizeof(struct vxlanudphdr), 2521 M_NOWAIT); 2522 if (m == NULL) { 2523 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 2524 return (ENOBUFS); 2525 } 2526 2527 len = m->m_pkthdr.len; 2528 2529 ip = mtod(m, struct ip *); 2530 ip->ip_tos = 0; 2531 ip->ip_len = htons(len); 2532 ip->ip_off = 0; 2533 ip->ip_ttl = sc->vxl_ttl; 2534 ip->ip_p = IPPROTO_UDP; 2535 ip->ip_sum = 0; 2536 ip->ip_src = srcaddr; 2537 ip->ip_dst = dstaddr; 2538 2539 vxlan_encap_header(sc, m, sizeof(struct ip), srcport, dstport); 2540 2541 mcast = (m->m_flags & (M_MCAST | M_BCAST)) ? 1 : 0; 2542 m->m_flags &= ~(M_MCAST | M_BCAST); 2543 2544 m->m_pkthdr.csum_flags &= CSUM_FLAGS_TX; 2545 if (m->m_pkthdr.csum_flags != 0) { 2546 /* 2547 * HW checksum (L3 and/or L4) or TSO has been requested. Look 2548 * up the ifnet for the outbound route and verify that the 2549 * outbound ifnet can perform the requested operation on the 2550 * inner frame. 
2551 */ 2552 bzero(&route, sizeof(route)); 2553 ro = &route; 2554 sin = (struct sockaddr_in *)&ro->ro_dst; 2555 sin->sin_family = AF_INET; 2556 sin->sin_len = sizeof(*sin); 2557 sin->sin_addr = ip->ip_dst; 2558 ro->ro_nh = fib4_lookup(M_GETFIB(m), ip->ip_dst, 0, NHR_NONE, 2559 0); 2560 if (ro->ro_nh == NULL) { 2561 m_freem(m); 2562 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 2563 return (EHOSTUNREACH); 2564 } 2565 2566 csum_flags = csum_flags_to_inner_flags(m->m_pkthdr.csum_flags, 2567 CSUM_ENCAP_VXLAN); 2568 if ((csum_flags & ro->ro_nh->nh_ifp->if_hwassist) != 2569 csum_flags) { 2570 if (ppsratecheck(&sc->err_time, &sc->err_pps, 1)) { 2571 const struct ifnet *nh_ifp = ro->ro_nh->nh_ifp; 2572 2573 if_printf(ifp, "interface %s is missing hwcaps " 2574 "0x%08x, csum_flags 0x%08x -> 0x%08x, " 2575 "hwassist 0x%08x\n", nh_ifp->if_xname, 2576 csum_flags & ~(uint32_t)nh_ifp->if_hwassist, 2577 m->m_pkthdr.csum_flags, csum_flags, 2578 (uint32_t)nh_ifp->if_hwassist); 2579 } 2580 m_freem(m); 2581 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 2582 return (ENXIO); 2583 } 2584 m->m_pkthdr.csum_flags = csum_flags; 2585 if (csum_flags & 2586 (CSUM_INNER_IP | CSUM_INNER_IP_UDP | CSUM_INNER_IP6_UDP | 2587 CSUM_INNER_IP_TCP | CSUM_INNER_IP6_TCP)) { 2588 counter_u64_add(sc->vxl_stats.txcsum, 1); 2589 if (csum_flags & CSUM_INNER_TSO) 2590 counter_u64_add(sc->vxl_stats.tso, 1); 2591 } 2592 } else 2593 ro = NULL; 2594 error = ip_output(m, NULL, ro, 0, sc->vxl_im4o, NULL); 2595 if (error == 0) { 2596 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); 2597 if_inc_counter(ifp, IFCOUNTER_OBYTES, len); 2598 if (mcast != 0) 2599 if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1); 2600 } else 2601 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 2602 2603 return (error); 2604 #else 2605 m_freem(m); 2606 return (ENOTSUP); 2607 #endif 2608 } 2609 2610 static int 2611 vxlan_encap6(struct vxlan_softc *sc, const union vxlan_sockaddr *fvxlsa, 2612 struct mbuf *m) 2613 { 2614 #ifdef INET6 2615 struct ifnet *ifp; 2616 struct ip6_hdr *ip6; 2617 const struct in6_addr *srcaddr, *dstaddr; 2618 uint16_t srcport, dstport; 2619 int len, mcast, error; 2620 struct route_in6 route, *ro; 2621 struct sockaddr_in6 *sin6; 2622 uint32_t csum_flags; 2623 2624 NET_EPOCH_ASSERT(); 2625 2626 ifp = sc->vxl_ifp; 2627 srcaddr = &sc->vxl_src_addr.in6.sin6_addr; 2628 srcport = vxlan_pick_source_port(sc, m); 2629 dstaddr = &fvxlsa->in6.sin6_addr; 2630 dstport = fvxlsa->in6.sin6_port; 2631 2632 M_PREPEND(m, sizeof(struct ip6_hdr) + sizeof(struct vxlanudphdr), 2633 M_NOWAIT); 2634 if (m == NULL) { 2635 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 2636 return (ENOBUFS); 2637 } 2638 2639 len = m->m_pkthdr.len; 2640 2641 ip6 = mtod(m, struct ip6_hdr *); 2642 ip6->ip6_flow = 0; /* BMV: Keep in forwarding entry? */ 2643 ip6->ip6_vfc = IPV6_VERSION; 2644 ip6->ip6_plen = 0; 2645 ip6->ip6_nxt = IPPROTO_UDP; 2646 ip6->ip6_hlim = sc->vxl_ttl; 2647 ip6->ip6_src = *srcaddr; 2648 ip6->ip6_dst = *dstaddr; 2649 2650 vxlan_encap_header(sc, m, sizeof(struct ip6_hdr), srcport, dstport); 2651 2652 mcast = (m->m_flags & (M_MCAST | M_BCAST)) ? 1 : 0; 2653 m->m_flags &= ~(M_MCAST | M_BCAST); 2654 2655 ro = NULL; 2656 m->m_pkthdr.csum_flags &= CSUM_FLAGS_TX; 2657 if (m->m_pkthdr.csum_flags != 0) { 2658 /* 2659 * HW checksum (L3 and/or L4) or TSO has been requested. Look 2660 * up the ifnet for the outbound route and verify that the 2661 * outbound ifnet can perform the requested operation on the 2662 * inner frame. 
2663 */ 2664 bzero(&route, sizeof(route)); 2665 ro = &route; 2666 sin6 = (struct sockaddr_in6 *)&ro->ro_dst; 2667 sin6->sin6_family = AF_INET6; 2668 sin6->sin6_len = sizeof(*sin6); 2669 sin6->sin6_addr = ip6->ip6_dst; 2670 ro->ro_nh = fib6_lookup(M_GETFIB(m), &ip6->ip6_dst, 0, 2671 NHR_NONE, 0); 2672 if (ro->ro_nh == NULL) { 2673 m_freem(m); 2674 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 2675 return (EHOSTUNREACH); 2676 } 2677 2678 csum_flags = csum_flags_to_inner_flags(m->m_pkthdr.csum_flags, 2679 CSUM_ENCAP_VXLAN); 2680 if ((csum_flags & ro->ro_nh->nh_ifp->if_hwassist) != 2681 csum_flags) { 2682 if (ppsratecheck(&sc->err_time, &sc->err_pps, 1)) { 2683 const struct ifnet *nh_ifp = ro->ro_nh->nh_ifp; 2684 2685 if_printf(ifp, "interface %s is missing hwcaps " 2686 "0x%08x, csum_flags 0x%08x -> 0x%08x, " 2687 "hwassist 0x%08x\n", nh_ifp->if_xname, 2688 csum_flags & ~(uint32_t)nh_ifp->if_hwassist, 2689 m->m_pkthdr.csum_flags, csum_flags, 2690 (uint32_t)nh_ifp->if_hwassist); 2691 } 2692 m_freem(m); 2693 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 2694 return (ENXIO); 2695 } 2696 m->m_pkthdr.csum_flags = csum_flags; 2697 if (csum_flags & 2698 (CSUM_INNER_IP | CSUM_INNER_IP_UDP | CSUM_INNER_IP6_UDP | 2699 CSUM_INNER_IP_TCP | CSUM_INNER_IP6_TCP)) { 2700 counter_u64_add(sc->vxl_stats.txcsum, 1); 2701 if (csum_flags & CSUM_INNER_TSO) 2702 counter_u64_add(sc->vxl_stats.tso, 1); 2703 } 2704 } else if (ntohs(dstport) != V_zero_checksum_port) { 2705 struct udphdr *hdr = mtodo(m, sizeof(struct ip6_hdr)); 2706 2707 hdr->uh_sum = in6_cksum_pseudo(ip6, 2708 m->m_pkthdr.len - sizeof(struct ip6_hdr), IPPROTO_UDP, 0); 2709 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 2710 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 2711 } 2712 error = ip6_output(m, NULL, ro, 0, sc->vxl_im6o, NULL, NULL); 2713 if (error == 0) { 2714 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); 2715 if_inc_counter(ifp, IFCOUNTER_OBYTES, len); 2716 if (mcast != 0) 2717 if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1); 2718 } else 2719 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 2720 2721 return (error); 2722 #else 2723 m_freem(m); 2724 return (ENOTSUP); 2725 #endif 2726 } 2727 2728 static int 2729 vxlan_transmit(struct ifnet *ifp, struct mbuf *m) 2730 { 2731 struct rm_priotracker tracker; 2732 union vxlan_sockaddr vxlsa; 2733 struct vxlan_softc *sc; 2734 struct vxlan_ftable_entry *fe; 2735 struct ifnet *mcifp; 2736 struct ether_header *eh; 2737 int ipv4, error; 2738 2739 sc = ifp->if_softc; 2740 eh = mtod(m, struct ether_header *); 2741 fe = NULL; 2742 mcifp = NULL; 2743 2744 ETHER_BPF_MTAP(ifp, m); 2745 2746 VXLAN_RLOCK(sc, &tracker); 2747 M_SETFIB(m, sc->vxl_fibnum); 2748 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 2749 VXLAN_RUNLOCK(sc, &tracker); 2750 m_freem(m); 2751 return (ENETDOWN); 2752 } 2753 2754 if ((m->m_flags & (M_BCAST | M_MCAST)) == 0) 2755 fe = vxlan_ftable_entry_lookup(sc, eh->ether_dhost); 2756 if (fe == NULL) 2757 fe = &sc->vxl_default_fe; 2758 vxlan_sockaddr_copy(&vxlsa, &fe->vxlfe_raddr.sa); 2759 2760 ipv4 = VXLAN_SOCKADDR_IS_IPV4(&vxlsa) != 0; 2761 if (vxlan_sockaddr_in_multicast(&vxlsa) != 0) 2762 mcifp = vxlan_multicast_if_ref(sc, ipv4); 2763 2764 VXLAN_ACQUIRE(sc); 2765 VXLAN_RUNLOCK(sc, &tracker); 2766 2767 if (ipv4 != 0) 2768 error = vxlan_encap4(sc, &vxlsa, m); 2769 else 2770 error = vxlan_encap6(sc, &vxlsa, m); 2771 2772 vxlan_release(sc); 2773 if (mcifp != NULL) 2774 if_rele(mcifp); 2775 2776 return (error); 2777 } 2778 2779 static void 2780 vxlan_qflush(struct ifnet *ifp __unused) 2781 { 2782 } 2783 2784 
static bool 2785 vxlan_rcv_udp_packet(struct mbuf *m, int offset, struct inpcb *inpcb, 2786 const struct sockaddr *srcsa, void *xvso) 2787 { 2788 struct vxlan_socket *vso; 2789 struct vxlan_header *vxh, vxlanhdr; 2790 uint32_t vni; 2791 int error __unused; 2792 2793 M_ASSERTPKTHDR(m); 2794 vso = xvso; 2795 offset += sizeof(struct udphdr); 2796 2797 if (m->m_pkthdr.len < offset + sizeof(struct vxlan_header)) 2798 goto out; 2799 2800 if (__predict_false(m->m_len < offset + sizeof(struct vxlan_header))) { 2801 m_copydata(m, offset, sizeof(struct vxlan_header), 2802 (caddr_t) &vxlanhdr); 2803 vxh = &vxlanhdr; 2804 } else 2805 vxh = mtodo(m, offset); 2806 2807 /* 2808 * Drop if there is a reserved bit set in either the flags or VNI 2809 * fields of the header. This goes against the specification, but 2810 * a bit set may indicate an unsupported new feature. This matches 2811 * the behavior of the Linux implementation. 2812 */ 2813 if (vxh->vxlh_flags != htonl(VXLAN_HDR_FLAGS_VALID_VNI) || 2814 vxh->vxlh_vni & ~VXLAN_VNI_MASK) 2815 goto out; 2816 2817 vni = ntohl(vxh->vxlh_vni) >> VXLAN_HDR_VNI_SHIFT; 2818 2819 /* Adjust to the start of the inner Ethernet frame. */ 2820 m_adj_decap(m, offset + sizeof(struct vxlan_header)); 2821 2822 error = vxlan_input(vso, vni, &m, srcsa); 2823 MPASS(error != 0 || m == NULL); 2824 2825 out: 2826 if (m != NULL) 2827 m_freem(m); 2828 2829 return (true); 2830 } 2831 2832 static int 2833 vxlan_input(struct vxlan_socket *vso, uint32_t vni, struct mbuf **m0, 2834 const struct sockaddr *sa) 2835 { 2836 struct vxlan_softc *sc; 2837 struct ifnet *ifp; 2838 struct mbuf *m; 2839 struct ether_header *eh; 2840 int error; 2841 2842 m = *m0; 2843 2844 if (m->m_pkthdr.len < ETHER_HDR_LEN) 2845 return (EINVAL); 2846 2847 sc = vxlan_socket_lookup_softc(vso, vni); 2848 if (sc == NULL) 2849 return (ENOENT); 2850 2851 ifp = sc->vxl_ifp; 2852 if (m->m_len < ETHER_HDR_LEN && 2853 (m = m_pullup(m, ETHER_HDR_LEN)) == NULL) { 2854 *m0 = NULL; 2855 error = ENOBUFS; 2856 goto out; 2857 } 2858 eh = mtod(m, struct ether_header *); 2859 2860 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 2861 error = ENETDOWN; 2862 goto out; 2863 } else if (ifp == m->m_pkthdr.rcvif) { 2864 /* XXX Does not catch more complex loops. 
*/ 2865 error = EDEADLK; 2866 goto out; 2867 } 2868 2869 if (sc->vxl_flags & VXLAN_FLAG_LEARN) 2870 vxlan_ftable_learn(sc, sa, eh->ether_shost); 2871 2872 m_clrprotoflags(m); 2873 m->m_pkthdr.rcvif = ifp; 2874 M_SETFIB(m, ifp->if_fib); 2875 if (((ifp->if_capenable & IFCAP_RXCSUM && 2876 m->m_pkthdr.csum_flags & CSUM_INNER_L3_CALC) || 2877 (ifp->if_capenable & IFCAP_RXCSUM_IPV6 && 2878 !(m->m_pkthdr.csum_flags & CSUM_INNER_L3_CALC)))) { 2879 uint32_t csum_flags = 0; 2880 2881 if (m->m_pkthdr.csum_flags & CSUM_INNER_L3_CALC) 2882 csum_flags |= CSUM_L3_CALC; 2883 if (m->m_pkthdr.csum_flags & CSUM_INNER_L3_VALID) 2884 csum_flags |= CSUM_L3_VALID; 2885 if (m->m_pkthdr.csum_flags & CSUM_INNER_L4_CALC) 2886 csum_flags |= CSUM_L4_CALC; 2887 if (m->m_pkthdr.csum_flags & CSUM_INNER_L4_VALID) 2888 csum_flags |= CSUM_L4_VALID; 2889 m->m_pkthdr.csum_flags = csum_flags; 2890 counter_u64_add(sc->vxl_stats.rxcsum, 1); 2891 } else { 2892 /* clear everything */ 2893 m->m_pkthdr.csum_flags = 0; 2894 m->m_pkthdr.csum_data = 0; 2895 } 2896 2897 (*ifp->if_input)(ifp, m); 2898 *m0 = NULL; 2899 error = 0; 2900 2901 out: 2902 vxlan_release(sc); 2903 return (error); 2904 } 2905 2906 static int 2907 vxlan_stats_alloc(struct vxlan_softc *sc) 2908 { 2909 struct vxlan_statistics *stats = &sc->vxl_stats; 2910 2911 stats->txcsum = counter_u64_alloc(M_WAITOK); 2912 if (stats->txcsum == NULL) 2913 goto failed; 2914 2915 stats->tso = counter_u64_alloc(M_WAITOK); 2916 if (stats->tso == NULL) 2917 goto failed; 2918 2919 stats->rxcsum = counter_u64_alloc(M_WAITOK); 2920 if (stats->rxcsum == NULL) 2921 goto failed; 2922 2923 return (0); 2924 failed: 2925 vxlan_stats_free(sc); 2926 return (ENOMEM); 2927 } 2928 2929 static void 2930 vxlan_stats_free(struct vxlan_softc *sc) 2931 { 2932 struct vxlan_statistics *stats = &sc->vxl_stats; 2933 2934 if (stats->txcsum != NULL) { 2935 counter_u64_free(stats->txcsum); 2936 stats->txcsum = NULL; 2937 } 2938 if (stats->tso != NULL) { 2939 counter_u64_free(stats->tso); 2940 stats->tso = NULL; 2941 } 2942 if (stats->rxcsum != NULL) { 2943 counter_u64_free(stats->rxcsum); 2944 stats->rxcsum = NULL; 2945 } 2946 } 2947 2948 static void 2949 vxlan_set_default_config(struct vxlan_softc *sc) 2950 { 2951 2952 sc->vxl_flags |= VXLAN_FLAG_LEARN; 2953 2954 sc->vxl_vni = VXLAN_VNI_MAX; 2955 sc->vxl_ttl = IPDEFTTL; 2956 2957 if (!vxlan_tunable_int(sc, "legacy_port", vxlan_legacy_port)) { 2958 sc->vxl_src_addr.in4.sin_port = htons(VXLAN_PORT); 2959 sc->vxl_dst_addr.in4.sin_port = htons(VXLAN_PORT); 2960 } else { 2961 sc->vxl_src_addr.in4.sin_port = htons(VXLAN_LEGACY_PORT); 2962 sc->vxl_dst_addr.in4.sin_port = htons(VXLAN_LEGACY_PORT); 2963 } 2964 2965 sc->vxl_min_port = V_ipport_firstauto; 2966 sc->vxl_max_port = V_ipport_lastauto; 2967 2968 sc->vxl_ftable_max = VXLAN_FTABLE_MAX; 2969 sc->vxl_ftable_timeout = VXLAN_FTABLE_TIMEOUT; 2970 } 2971 2972 static int 2973 vxlan_set_user_config(struct vxlan_softc *sc, struct ifvxlanparam *vxlp) 2974 { 2975 2976 #ifndef INET 2977 if (vxlp->vxlp_with & (VXLAN_PARAM_WITH_LOCAL_ADDR4 | 2978 VXLAN_PARAM_WITH_REMOTE_ADDR4)) 2979 return (EAFNOSUPPORT); 2980 #endif 2981 2982 #ifndef INET6 2983 if (vxlp->vxlp_with & (VXLAN_PARAM_WITH_LOCAL_ADDR6 | 2984 VXLAN_PARAM_WITH_REMOTE_ADDR6)) 2985 return (EAFNOSUPPORT); 2986 #else 2987 if (vxlp->vxlp_with & VXLAN_PARAM_WITH_LOCAL_ADDR6) { 2988 int error = vxlan_sockaddr_in6_embedscope(&vxlp->vxlp_local_sa); 2989 if (error) 2990 return (error); 2991 } 2992 if (vxlp->vxlp_with & VXLAN_PARAM_WITH_REMOTE_ADDR6) { 2993 int error = 
vxlan_sockaddr_in6_embedscope( 2994 &vxlp->vxlp_remote_sa); 2995 if (error) 2996 return (error); 2997 } 2998 #endif 2999 3000 if (vxlp->vxlp_with & VXLAN_PARAM_WITH_VNI) { 3001 if (vxlan_check_vni(vxlp->vxlp_vni) == 0) 3002 sc->vxl_vni = vxlp->vxlp_vni; 3003 } 3004 3005 if (vxlp->vxlp_with & VXLAN_PARAM_WITH_LOCAL_ADDR4) { 3006 sc->vxl_src_addr.in4.sin_len = sizeof(struct sockaddr_in); 3007 sc->vxl_src_addr.in4.sin_family = AF_INET; 3008 sc->vxl_src_addr.in4.sin_addr = 3009 vxlp->vxlp_local_sa.in4.sin_addr; 3010 } else if (vxlp->vxlp_with & VXLAN_PARAM_WITH_LOCAL_ADDR6) { 3011 sc->vxl_src_addr.in6.sin6_len = sizeof(struct sockaddr_in6); 3012 sc->vxl_src_addr.in6.sin6_family = AF_INET6; 3013 sc->vxl_src_addr.in6.sin6_addr = 3014 vxlp->vxlp_local_sa.in6.sin6_addr; 3015 } 3016 3017 if (vxlp->vxlp_with & VXLAN_PARAM_WITH_REMOTE_ADDR4) { 3018 sc->vxl_dst_addr.in4.sin_len = sizeof(struct sockaddr_in); 3019 sc->vxl_dst_addr.in4.sin_family = AF_INET; 3020 sc->vxl_dst_addr.in4.sin_addr = 3021 vxlp->vxlp_remote_sa.in4.sin_addr; 3022 } else if (vxlp->vxlp_with & VXLAN_PARAM_WITH_REMOTE_ADDR6) { 3023 sc->vxl_dst_addr.in6.sin6_len = sizeof(struct sockaddr_in6); 3024 sc->vxl_dst_addr.in6.sin6_family = AF_INET6; 3025 sc->vxl_dst_addr.in6.sin6_addr = 3026 vxlp->vxlp_remote_sa.in6.sin6_addr; 3027 } 3028 3029 if (vxlp->vxlp_with & VXLAN_PARAM_WITH_LOCAL_PORT) 3030 sc->vxl_src_addr.in4.sin_port = htons(vxlp->vxlp_local_port); 3031 if (vxlp->vxlp_with & VXLAN_PARAM_WITH_REMOTE_PORT) 3032 sc->vxl_dst_addr.in4.sin_port = htons(vxlp->vxlp_remote_port); 3033 3034 if (vxlp->vxlp_with & VXLAN_PARAM_WITH_PORT_RANGE) { 3035 if (vxlp->vxlp_min_port <= vxlp->vxlp_max_port) { 3036 sc->vxl_min_port = vxlp->vxlp_min_port; 3037 sc->vxl_max_port = vxlp->vxlp_max_port; 3038 } 3039 } 3040 3041 if (vxlp->vxlp_with & VXLAN_PARAM_WITH_MULTICAST_IF) 3042 strlcpy(sc->vxl_mc_ifname, vxlp->vxlp_mc_ifname, IFNAMSIZ); 3043 3044 if (vxlp->vxlp_with & VXLAN_PARAM_WITH_FTABLE_TIMEOUT) { 3045 if (vxlan_check_ftable_timeout(vxlp->vxlp_ftable_timeout) == 0) 3046 sc->vxl_ftable_timeout = vxlp->vxlp_ftable_timeout; 3047 } 3048 3049 if (vxlp->vxlp_with & VXLAN_PARAM_WITH_FTABLE_MAX) { 3050 if (vxlan_check_ftable_max(vxlp->vxlp_ftable_max) == 0) 3051 sc->vxl_ftable_max = vxlp->vxlp_ftable_max; 3052 } 3053 3054 if (vxlp->vxlp_with & VXLAN_PARAM_WITH_TTL) { 3055 if (vxlan_check_ttl(vxlp->vxlp_ttl) == 0) 3056 sc->vxl_ttl = vxlp->vxlp_ttl; 3057 } 3058 3059 if (vxlp->vxlp_with & VXLAN_PARAM_WITH_LEARN) { 3060 if (vxlp->vxlp_learn == 0) 3061 sc->vxl_flags &= ~VXLAN_FLAG_LEARN; 3062 } 3063 3064 return (0); 3065 } 3066 3067 static int 3068 vxlan_set_reqcap(struct vxlan_softc *sc, struct ifnet *ifp, int reqcap) 3069 { 3070 int mask = reqcap ^ ifp->if_capenable; 3071 3072 /* Disable TSO if tx checksums are disabled. */ 3073 if (mask & IFCAP_TXCSUM && !(reqcap & IFCAP_TXCSUM) && 3074 reqcap & IFCAP_TSO4) { 3075 reqcap &= ~IFCAP_TSO4; 3076 if_printf(ifp, "tso4 disabled due to -txcsum.\n"); 3077 } 3078 if (mask & IFCAP_TXCSUM_IPV6 && !(reqcap & IFCAP_TXCSUM_IPV6) && 3079 reqcap & IFCAP_TSO6) { 3080 reqcap &= ~IFCAP_TSO6; 3081 if_printf(ifp, "tso6 disabled due to -txcsum6.\n"); 3082 } 3083 3084 /* Do not enable TSO if tx checksums are disabled. 
*/ 3085 if (mask & IFCAP_TSO4 && reqcap & IFCAP_TSO4 && 3086 !(reqcap & IFCAP_TXCSUM)) { 3087 if_printf(ifp, "enable txcsum first.\n"); 3088 return (EAGAIN); 3089 } 3090 if (mask & IFCAP_TSO6 && reqcap & IFCAP_TSO6 && 3091 !(reqcap & IFCAP_TXCSUM_IPV6)) { 3092 if_printf(ifp, "enable txcsum6 first.\n"); 3093 return (EAGAIN); 3094 } 3095 3096 sc->vxl_reqcap = reqcap; 3097 return (0); 3098 } 3099 3100 /* 3101 * A VXLAN interface inherits the capabilities of the vxlandev or the interface 3102 * hosting the vxlanlocal address. 3103 */ 3104 static void 3105 vxlan_set_hwcaps(struct vxlan_softc *sc) 3106 { 3107 struct epoch_tracker et; 3108 struct ifnet *p; 3109 struct ifaddr *ifa; 3110 u_long hwa; 3111 int cap, ena; 3112 bool rel; 3113 struct ifnet *ifp = sc->vxl_ifp; 3114 3115 /* reset caps */ 3116 ifp->if_capabilities &= VXLAN_BASIC_IFCAPS; 3117 ifp->if_capenable &= VXLAN_BASIC_IFCAPS; 3118 ifp->if_hwassist = 0; 3119 3120 NET_EPOCH_ENTER(et); 3121 CURVNET_SET(ifp->if_vnet); 3122 3123 rel = false; 3124 p = NULL; 3125 if (sc->vxl_mc_ifname[0] != '\0') { 3126 rel = true; 3127 p = ifunit_ref(sc->vxl_mc_ifname); 3128 } else if (vxlan_sockaddr_in_any(&sc->vxl_src_addr) == 0) { 3129 if (sc->vxl_src_addr.sa.sa_family == AF_INET) { 3130 struct sockaddr_in in4 = sc->vxl_src_addr.in4; 3131 3132 in4.sin_port = 0; 3133 ifa = ifa_ifwithaddr((struct sockaddr *)&in4); 3134 if (ifa != NULL) 3135 p = ifa->ifa_ifp; 3136 } else if (sc->vxl_src_addr.sa.sa_family == AF_INET6) { 3137 struct sockaddr_in6 in6 = sc->vxl_src_addr.in6; 3138 3139 in6.sin6_port = 0; 3140 ifa = ifa_ifwithaddr((struct sockaddr *)&in6); 3141 if (ifa != NULL) 3142 p = ifa->ifa_ifp; 3143 } 3144 } 3145 if (p == NULL) 3146 goto done; 3147 3148 cap = ena = hwa = 0; 3149 3150 /* checksum offload */ 3151 if (p->if_capabilities & IFCAP_VXLAN_HWCSUM) 3152 cap |= p->if_capabilities & (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6); 3153 if (p->if_capenable & IFCAP_VXLAN_HWCSUM) { 3154 ena |= sc->vxl_reqcap & p->if_capenable & 3155 (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6); 3156 if (ena & IFCAP_TXCSUM) { 3157 if (p->if_hwassist & CSUM_INNER_IP) 3158 hwa |= CSUM_IP; 3159 if (p->if_hwassist & CSUM_INNER_IP_UDP) 3160 hwa |= CSUM_IP_UDP; 3161 if (p->if_hwassist & CSUM_INNER_IP_TCP) 3162 hwa |= CSUM_IP_TCP; 3163 } 3164 if (ena & IFCAP_TXCSUM_IPV6) { 3165 if (p->if_hwassist & CSUM_INNER_IP6_UDP) 3166 hwa |= CSUM_IP6_UDP; 3167 if (p->if_hwassist & CSUM_INNER_IP6_TCP) 3168 hwa |= CSUM_IP6_TCP; 3169 } 3170 } 3171 3172 /* hardware TSO */ 3173 if (p->if_capabilities & IFCAP_VXLAN_HWTSO) { 3174 cap |= p->if_capabilities & IFCAP_TSO; 3175 if (p->if_hw_tsomax > IP_MAXPACKET - ifp->if_hdrlen) 3176 ifp->if_hw_tsomax = IP_MAXPACKET - ifp->if_hdrlen; 3177 else 3178 ifp->if_hw_tsomax = p->if_hw_tsomax; 3179 /* XXX: tsomaxsegcount decrement is cxgbe specific */ 3180 ifp->if_hw_tsomaxsegcount = p->if_hw_tsomaxsegcount - 1; 3181 ifp->if_hw_tsomaxsegsize = p->if_hw_tsomaxsegsize; 3182 } 3183 if (p->if_capenable & IFCAP_VXLAN_HWTSO) { 3184 ena |= sc->vxl_reqcap & p->if_capenable & IFCAP_TSO; 3185 if (ena & IFCAP_TSO) { 3186 if (p->if_hwassist & CSUM_INNER_IP_TSO) 3187 hwa |= CSUM_IP_TSO; 3188 if (p->if_hwassist & CSUM_INNER_IP6_TSO) 3189 hwa |= CSUM_IP6_TSO; 3190 } 3191 } 3192 3193 ifp->if_capabilities |= cap; 3194 ifp->if_capenable |= ena; 3195 ifp->if_hwassist |= hwa; 3196 if (rel) 3197 if_rele(p); 3198 done: 3199 CURVNET_RESTORE(); 3200 NET_EPOCH_EXIT(et); 3201 } 3202 3203 static int 3204 vxlan_clone_create(struct if_clone *ifc, char *name, size_t len, 3205 struct ifc_data *ifd, struct ifnet 
**ifpp) 3206 { 3207 struct vxlan_softc *sc; 3208 struct ifnet *ifp; 3209 struct ifvxlanparam vxlp; 3210 int error; 3211 3212 sc = malloc(sizeof(struct vxlan_softc), M_VXLAN, M_WAITOK | M_ZERO); 3213 sc->vxl_unit = ifd->unit; 3214 sc->vxl_fibnum = curthread->td_proc->p_fibnum; 3215 vxlan_set_default_config(sc); 3216 error = vxlan_stats_alloc(sc); 3217 if (error != 0) 3218 goto fail; 3219 3220 if (ifd->params != NULL) { 3221 error = ifc_copyin(ifd, &vxlp, sizeof(vxlp)); 3222 if (error) 3223 goto fail; 3224 3225 error = vxlan_set_user_config(sc, &vxlp); 3226 if (error) 3227 goto fail; 3228 } 3229 3230 ifp = if_alloc(IFT_ETHER); 3231 if (ifp == NULL) { 3232 error = ENOSPC; 3233 goto fail; 3234 } 3235 3236 sc->vxl_ifp = ifp; 3237 rm_init(&sc->vxl_lock, "vxlanrm"); 3238 callout_init_rw(&sc->vxl_callout, &sc->vxl_lock, 0); 3239 sc->vxl_port_hash_key = arc4random(); 3240 vxlan_ftable_init(sc); 3241 3242 vxlan_sysctl_setup(sc); 3243 3244 ifp->if_softc = sc; 3245 if_initname(ifp, vxlan_name, ifd->unit); 3246 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 3247 ifp->if_init = vxlan_init; 3248 ifp->if_ioctl = vxlan_ioctl; 3249 ifp->if_transmit = vxlan_transmit; 3250 ifp->if_qflush = vxlan_qflush; 3251 ifp->if_capabilities = VXLAN_BASIC_IFCAPS; 3252 ifp->if_capenable = VXLAN_BASIC_IFCAPS; 3253 sc->vxl_reqcap = -1; 3254 vxlan_set_hwcaps(sc); 3255 3256 ifmedia_init(&sc->vxl_media, 0, vxlan_media_change, vxlan_media_status); 3257 ifmedia_add(&sc->vxl_media, IFM_ETHER | IFM_AUTO, 0, NULL); 3258 ifmedia_set(&sc->vxl_media, IFM_ETHER | IFM_AUTO); 3259 3260 ether_gen_addr(ifp, &sc->vxl_hwaddr); 3261 ether_ifattach(ifp, sc->vxl_hwaddr.octet); 3262 3263 ifp->if_baudrate = 0; 3264 3265 VXLAN_WLOCK(sc); 3266 vxlan_setup_interface_hdrlen(sc); 3267 VXLAN_WUNLOCK(sc); 3268 *ifpp = ifp; 3269 3270 return (0); 3271 3272 fail: 3273 free(sc, M_VXLAN); 3274 return (error); 3275 } 3276 3277 static int 3278 vxlan_clone_destroy(struct if_clone *ifc, struct ifnet *ifp, uint32_t flags) 3279 { 3280 struct vxlan_softc *sc; 3281 3282 sc = ifp->if_softc; 3283 3284 vxlan_teardown(sc); 3285 3286 vxlan_ftable_flush(sc, 1); 3287 3288 ether_ifdetach(ifp); 3289 if_free(ifp); 3290 ifmedia_removeall(&sc->vxl_media); 3291 3292 vxlan_ftable_fini(sc); 3293 3294 vxlan_sysctl_destroy(sc); 3295 rm_destroy(&sc->vxl_lock); 3296 vxlan_stats_free(sc); 3297 free(sc, M_VXLAN); 3298 3299 return (0); 3300 } 3301 3302 /* BMV: Taken from if_bridge. */ 3303 static uint32_t 3304 vxlan_mac_hash(struct vxlan_softc *sc, const uint8_t *addr) 3305 { 3306 uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->vxl_ftable_hash_key; 3307 3308 b += addr[5] << 8; 3309 b += addr[4]; 3310 a += addr[3] << 24; 3311 a += addr[2] << 16; 3312 a += addr[1] << 8; 3313 a += addr[0]; 3314 3315 /* 3316 * The following hash function is adapted from "Hash Functions" by Bob Jenkins 3317 * ("Algorithm Alley", Dr. Dobbs Journal, September 1997). 3318 */ 3319 #define mix(a, b, c) \ 3320 do { \ 3321 a -= b; a -= c; a ^= (c >> 13); \ 3322 b -= c; b -= a; b ^= (a << 8); \ 3323 c -= a; c -= b; c ^= (b >> 13); \ 3324 a -= b; a -= c; a ^= (c >> 12); \ 3325 b -= c; b -= a; b ^= (a << 16); \ 3326 c -= a; c -= b; c ^= (b >> 5); \ 3327 a -= b; a -= c; a ^= (c >> 3); \ 3328 b -= c; b -= a; b ^= (a << 10); \ 3329 c -= a; c -= b; c ^= (b >> 15); \ 3330 } while (0) 3331 3332 mix(a, b, c); 3333 3334 #undef mix 3335 3336 return (c); 3337 } 3338 3339 static int 3340 vxlan_media_change(struct ifnet *ifp) 3341 { 3342 3343 /* Ignore. 
*/ 3344 return (0); 3345 } 3346 3347 static void 3348 vxlan_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) 3349 { 3350 3351 ifmr->ifm_status = IFM_ACTIVE | IFM_AVALID; 3352 ifmr->ifm_active = IFM_ETHER | IFM_FDX; 3353 } 3354 3355 static int 3356 vxlan_sockaddr_cmp(const union vxlan_sockaddr *vxladdr, 3357 const struct sockaddr *sa) 3358 { 3359 3360 return (bcmp(&vxladdr->sa, sa, vxladdr->sa.sa_len)); 3361 } 3362 3363 static void 3364 vxlan_sockaddr_copy(union vxlan_sockaddr *vxladdr, 3365 const struct sockaddr *sa) 3366 { 3367 3368 MPASS(sa->sa_family == AF_INET || sa->sa_family == AF_INET6); 3369 bzero(vxladdr, sizeof(*vxladdr)); 3370 3371 if (sa->sa_family == AF_INET) { 3372 vxladdr->in4 = *satoconstsin(sa); 3373 vxladdr->in4.sin_len = sizeof(struct sockaddr_in); 3374 } else if (sa->sa_family == AF_INET6) { 3375 vxladdr->in6 = *satoconstsin6(sa); 3376 vxladdr->in6.sin6_len = sizeof(struct sockaddr_in6); 3377 } 3378 } 3379 3380 static int 3381 vxlan_sockaddr_in_equal(const union vxlan_sockaddr *vxladdr, 3382 const struct sockaddr *sa) 3383 { 3384 int equal; 3385 3386 if (sa->sa_family == AF_INET) { 3387 const struct in_addr *in4 = &satoconstsin(sa)->sin_addr; 3388 equal = in4->s_addr == vxladdr->in4.sin_addr.s_addr; 3389 } else if (sa->sa_family == AF_INET6) { 3390 const struct in6_addr *in6 = &satoconstsin6(sa)->sin6_addr; 3391 equal = IN6_ARE_ADDR_EQUAL(in6, &vxladdr->in6.sin6_addr); 3392 } else 3393 equal = 0; 3394 3395 return (equal); 3396 } 3397 3398 static void 3399 vxlan_sockaddr_in_copy(union vxlan_sockaddr *vxladdr, 3400 const struct sockaddr *sa) 3401 { 3402 3403 MPASS(sa->sa_family == AF_INET || sa->sa_family == AF_INET6); 3404 3405 if (sa->sa_family == AF_INET) { 3406 const struct in_addr *in4 = &satoconstsin(sa)->sin_addr; 3407 vxladdr->in4.sin_family = AF_INET; 3408 vxladdr->in4.sin_len = sizeof(struct sockaddr_in); 3409 vxladdr->in4.sin_addr = *in4; 3410 } else if (sa->sa_family == AF_INET6) { 3411 const struct in6_addr *in6 = &satoconstsin6(sa)->sin6_addr; 3412 vxladdr->in6.sin6_family = AF_INET6; 3413 vxladdr->in6.sin6_len = sizeof(struct sockaddr_in6); 3414 vxladdr->in6.sin6_addr = *in6; 3415 } 3416 } 3417 3418 static int 3419 vxlan_sockaddr_supported(const union vxlan_sockaddr *vxladdr, int unspec) 3420 { 3421 const struct sockaddr *sa; 3422 int supported; 3423 3424 sa = &vxladdr->sa; 3425 supported = 0; 3426 3427 if (sa->sa_family == AF_UNSPEC && unspec != 0) { 3428 supported = 1; 3429 } else if (sa->sa_family == AF_INET) { 3430 #ifdef INET 3431 supported = 1; 3432 #endif 3433 } else if (sa->sa_family == AF_INET6) { 3434 #ifdef INET6 3435 supported = 1; 3436 #endif 3437 } 3438 3439 return (supported); 3440 } 3441 3442 static int 3443 vxlan_sockaddr_in_any(const union vxlan_sockaddr *vxladdr) 3444 { 3445 const struct sockaddr *sa; 3446 int any; 3447 3448 sa = &vxladdr->sa; 3449 3450 if (sa->sa_family == AF_INET) { 3451 const struct in_addr *in4 = &satoconstsin(sa)->sin_addr; 3452 any = in4->s_addr == INADDR_ANY; 3453 } else if (sa->sa_family == AF_INET6) { 3454 const struct in6_addr *in6 = &satoconstsin6(sa)->sin6_addr; 3455 any = IN6_IS_ADDR_UNSPECIFIED(in6); 3456 } else 3457 any = -1; 3458 3459 return (any); 3460 } 3461 3462 static int 3463 vxlan_sockaddr_in_multicast(const union vxlan_sockaddr *vxladdr) 3464 { 3465 const struct sockaddr *sa; 3466 int mc; 3467 3468 sa = &vxladdr->sa; 3469 3470 if (sa->sa_family == AF_INET) { 3471 const struct in_addr *in4 = &satoconstsin(sa)->sin_addr; 3472 mc = IN_MULTICAST(ntohl(in4->s_addr)); 3473 } else if (sa->sa_family 
== AF_INET6) { 3474 const struct in6_addr *in6 = &satoconstsin6(sa)->sin6_addr; 3475 mc = IN6_IS_ADDR_MULTICAST(in6); 3476 } else 3477 mc = -1; 3478 3479 return (mc); 3480 } 3481 3482 static int 3483 vxlan_sockaddr_in6_embedscope(union vxlan_sockaddr *vxladdr) 3484 { 3485 int error; 3486 3487 MPASS(VXLAN_SOCKADDR_IS_IPV6(vxladdr)); 3488 #ifdef INET6 3489 error = sa6_embedscope(&vxladdr->in6, V_ip6_use_defzone); 3490 #else 3491 error = EAFNOSUPPORT; 3492 #endif 3493 3494 return (error); 3495 } 3496 3497 static int 3498 vxlan_can_change_config(struct vxlan_softc *sc) 3499 { 3500 struct ifnet *ifp; 3501 3502 ifp = sc->vxl_ifp; 3503 VXLAN_LOCK_ASSERT(sc); 3504 3505 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 3506 return (0); 3507 if (sc->vxl_flags & (VXLAN_FLAG_INIT | VXLAN_FLAG_TEARDOWN)) 3508 return (0); 3509 3510 return (1); 3511 } 3512 3513 static int 3514 vxlan_check_vni(uint32_t vni) 3515 { 3516 3517 return (vni >= VXLAN_VNI_MAX); 3518 } 3519 3520 static int 3521 vxlan_check_ttl(int ttl) 3522 { 3523 3524 return (ttl > MAXTTL); 3525 } 3526 3527 static int 3528 vxlan_check_ftable_timeout(uint32_t timeout) 3529 { 3530 3531 return (timeout > VXLAN_FTABLE_MAX_TIMEOUT); 3532 } 3533 3534 static int 3535 vxlan_check_ftable_max(uint32_t max) 3536 { 3537 3538 return (max > VXLAN_FTABLE_MAX); 3539 } 3540 3541 static void 3542 vxlan_sysctl_setup(struct vxlan_softc *sc) 3543 { 3544 struct sysctl_ctx_list *ctx; 3545 struct sysctl_oid *node; 3546 struct vxlan_statistics *stats; 3547 char namebuf[8]; 3548 3549 ctx = &sc->vxl_sysctl_ctx; 3550 stats = &sc->vxl_stats; 3551 snprintf(namebuf, sizeof(namebuf), "%d", sc->vxl_unit); 3552 3553 sysctl_ctx_init(ctx); 3554 sc->vxl_sysctl_node = SYSCTL_ADD_NODE(ctx, 3555 SYSCTL_STATIC_CHILDREN(_net_link_vxlan), OID_AUTO, namebuf, 3556 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, ""); 3557 3558 node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(sc->vxl_sysctl_node), 3559 OID_AUTO, "ftable", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, ""); 3560 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(node), OID_AUTO, "count", 3561 CTLFLAG_RD, &sc->vxl_ftable_cnt, 0, 3562 "Number of entries in forwarding table"); 3563 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(node), OID_AUTO, "max", 3564 CTLFLAG_RD, &sc->vxl_ftable_max, 0, 3565 "Maximum number of entries allowed in forwarding table"); 3566 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(node), OID_AUTO, "timeout", 3567 CTLFLAG_RD, &sc->vxl_ftable_timeout, 0, 3568 "Number of seconds between prunes of the forwarding table"); 3569 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(node), OID_AUTO, "dump", 3570 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE | CTLFLAG_SKIP, 3571 sc, 0, vxlan_ftable_sysctl_dump, "A", 3572 "Dump the forwarding table entries"); 3573 3574 node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(sc->vxl_sysctl_node), 3575 OID_AUTO, "stats", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, ""); 3576 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(node), OID_AUTO, 3577 "ftable_nospace", CTLFLAG_RD, &stats->ftable_nospace, 0, 3578 "Fowarding table reached maximum entries"); 3579 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(node), OID_AUTO, 3580 "ftable_lock_upgrade_failed", CTLFLAG_RD, 3581 &stats->ftable_lock_upgrade_failed, 0, 3582 "Forwarding table update required lock upgrade"); 3583 3584 SYSCTL_ADD_COUNTER_U64(ctx, SYSCTL_CHILDREN(node), OID_AUTO, "txcsum", 3585 CTLFLAG_RD, &stats->txcsum, 3586 "# of times hardware assisted with tx checksum"); 3587 SYSCTL_ADD_COUNTER_U64(ctx, SYSCTL_CHILDREN(node), OID_AUTO, "tso", 3588 CTLFLAG_RD, &stats->tso, "# of times hardware assisted with TSO"); 3589 SYSCTL_ADD_COUNTER_U64(ctx, 
SYSCTL_CHILDREN(node), OID_AUTO, "rxcsum", 3590 CTLFLAG_RD, &stats->rxcsum, 3591 "# of times hardware assisted with rx checksum"); 3592 } 3593 3594 static void 3595 vxlan_sysctl_destroy(struct vxlan_softc *sc) 3596 { 3597 3598 sysctl_ctx_free(&sc->vxl_sysctl_ctx); 3599 sc->vxl_sysctl_node = NULL; 3600 } 3601 3602 static int 3603 vxlan_tunable_int(struct vxlan_softc *sc, const char *knob, int def) 3604 { 3605 char path[64]; 3606 3607 snprintf(path, sizeof(path), "net.link.vxlan.%d.%s", 3608 sc->vxl_unit, knob); 3609 TUNABLE_INT_FETCH(path, &def); 3610 3611 return (def); 3612 } 3613 3614 static void 3615 vxlan_ifdetach_event(void *arg __unused, struct ifnet *ifp) 3616 { 3617 struct vxlan_softc_head list; 3618 struct vxlan_socket *vso; 3619 struct vxlan_softc *sc, *tsc; 3620 3621 LIST_INIT(&list); 3622 3623 if (ifp->if_flags & IFF_RENAMING) 3624 return; 3625 if ((ifp->if_flags & IFF_MULTICAST) == 0) 3626 return; 3627 3628 VXLAN_LIST_LOCK(); 3629 LIST_FOREACH(vso, &vxlan_socket_list, vxlso_entry) 3630 vxlan_socket_ifdetach(vso, ifp, &list); 3631 VXLAN_LIST_UNLOCK(); 3632 3633 LIST_FOREACH_SAFE(sc, &list, vxl_ifdetach_list, tsc) { 3634 LIST_REMOVE(sc, vxl_ifdetach_list); 3635 3636 sx_xlock(&vxlan_sx); 3637 VXLAN_WLOCK(sc); 3638 if (sc->vxl_flags & VXLAN_FLAG_INIT) 3639 vxlan_init_wait(sc); 3640 vxlan_teardown_locked(sc); 3641 sx_xunlock(&vxlan_sx); 3642 } 3643 } 3644 3645 static void 3646 vxlan_load(void) 3647 { 3648 3649 mtx_init(&vxlan_list_mtx, "vxlan list", NULL, MTX_DEF); 3650 LIST_INIT(&vxlan_socket_list); 3651 vxlan_ifdetach_event_tag = EVENTHANDLER_REGISTER(ifnet_departure_event, 3652 vxlan_ifdetach_event, NULL, EVENTHANDLER_PRI_ANY); 3653 3654 struct if_clone_addreq req = { 3655 .create_f = vxlan_clone_create, 3656 .destroy_f = vxlan_clone_destroy, 3657 .flags = IFC_F_AUTOUNIT, 3658 }; 3659 vxlan_cloner = ifc_attach_cloner(vxlan_name, &req); 3660 } 3661 3662 static void 3663 vxlan_unload(void) 3664 { 3665 3666 EVENTHANDLER_DEREGISTER(ifnet_departure_event, 3667 vxlan_ifdetach_event_tag); 3668 ifc_detach_cloner(vxlan_cloner); 3669 mtx_destroy(&vxlan_list_mtx); 3670 MPASS(LIST_EMPTY(&vxlan_socket_list)); 3671 } 3672 3673 static int 3674 vxlan_modevent(module_t mod, int type, void *unused) 3675 { 3676 int error; 3677 3678 error = 0; 3679 3680 switch (type) { 3681 case MOD_LOAD: 3682 vxlan_load(); 3683 break; 3684 case MOD_UNLOAD: 3685 vxlan_unload(); 3686 break; 3687 default: 3688 error = ENOTSUP; 3689 break; 3690 } 3691 3692 return (error); 3693 } 3694 3695 static moduledata_t vxlan_mod = { 3696 "if_vxlan", 3697 vxlan_modevent, 3698 0 3699 }; 3700 3701 DECLARE_MODULE(if_vxlan, vxlan_mod, SI_SUB_PSEUDO, SI_ORDER_ANY); 3702 MODULE_VERSION(if_vxlan, 1); 3703
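/*
 * Usage sketch (illustrative only; see ifconfig(8) for the full list of
 * vxlan parameters).  The cloner registered in vxlan_load() is normally
 * driven from userland, e.g.:
 *
 *	kldload if_vxlan
 *	ifconfig vxlan0 create vxlanid 108 \
 *	    vxlanlocal 192.0.2.1 vxlanremote 192.0.2.2
 *	ifconfig vxlan0 inet 10.0.0.1/24 up
 *
 * The create request reaches vxlan_clone_create() with an optional
 * struct ifvxlanparam describing the initial configuration; later
 * parameter changes go through the SIOC[GS]DRVSPEC controls dispatched
 * by vxlan_ioctl_drvspec().
 */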