xref: /freebsd/sys/net/if_vxlan.c (revision dd3603749cb7f20a628f04d595b105962b21a3d2)
1 /*-
2  * Copyright (c) 2014, Bryan Venteicher <bryanv@FreeBSD.org>
3  * All rights reserved.
4  * Copyright (c) 2020, Chelsio Communications.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice unmodified, this list of conditions, and the following
11  *    disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 #include "opt_inet.h"
29 #include "opt_inet6.h"
30 
31 #include <sys/param.h>
32 #include <sys/eventhandler.h>
33 #include <sys/kernel.h>
34 #include <sys/lock.h>
35 #include <sys/hash.h>
36 #include <sys/malloc.h>
37 #include <sys/mbuf.h>
38 #include <sys/module.h>
39 #include <sys/refcount.h>
40 #include <sys/rmlock.h>
41 #include <sys/priv.h>
42 #include <sys/proc.h>
43 #include <sys/queue.h>
44 #include <sys/sbuf.h>
45 #include <sys/socket.h>
46 #include <sys/socketvar.h>
47 #include <sys/sockio.h>
48 #include <sys/sysctl.h>
49 #include <sys/systm.h>
50 
51 #include <net/bpf.h>
52 #include <net/ethernet.h>
53 #include <net/if.h>
54 #include <net/if_var.h>
55 #include <net/if_private.h>
56 #include <net/if_clone.h>
57 #include <net/if_dl.h>
58 #include <net/if_media.h>
59 #include <net/if_types.h>
60 #include <net/if_vxlan.h>
61 #include <net/netisr.h>
62 #include <net/route.h>
63 #include <net/route/nhop.h>
64 
65 #include <netinet/in.h>
66 #include <netinet/in_systm.h>
67 #include <netinet/in_var.h>
68 #include <netinet/in_pcb.h>
69 #include <netinet/ip.h>
70 #include <netinet/ip6.h>
71 #include <netinet/ip_var.h>
72 #include <netinet/udp.h>
73 #include <netinet/udp_var.h>
74 #include <netinet/in_fib.h>
75 #include <netinet6/in6_fib.h>
76 
77 #include <netinet6/ip6_var.h>
78 #include <netinet6/scope6_var.h>
79 
80 struct vxlan_softc;
81 LIST_HEAD(vxlan_softc_head, vxlan_softc);
82 
83 struct sx vxlan_sx;
84 SX_SYSINIT(vxlan, &vxlan_sx, "VXLAN global start/stop lock");
85 
86 struct vxlan_socket_mc_info {
87 	union vxlan_sockaddr		 vxlsomc_saddr;
88 	union vxlan_sockaddr		 vxlsomc_gaddr;
89 	int				 vxlsomc_ifidx;
90 	int				 vxlsomc_users;
91 };
92 
93 /*
94  * The maximum MTU of an encapsulated Ethernet frame in an IPv4/UDP packet.
95  */
96 #define VXLAN_MAX_MTU	(IP_MAXPACKET - \
97 		60 /* Maximum IPv4 header len */ - \
98 		sizeof(struct udphdr) - \
99 		sizeof(struct vxlan_header) - \
100 		ETHER_HDR_LEN - ETHER_VLAN_ENCAP_LEN)
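/*
 * For reference: with IP_MAXPACKET (65535), the 60-byte maximal IPv4 header,
 * an 8-byte UDP header, an 8-byte VXLAN header, and a 14 + 4 byte Ethernet
 * header with a VLAN tag, VXLAN_MAX_MTU works out to
 * 65535 - 60 - 8 - 8 - 14 - 4 = 65441 bytes.
 */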
101 #define VXLAN_BASIC_IFCAPS (IFCAP_LINKSTATE | IFCAP_JUMBO_MTU)
102 
103 #define VXLAN_SO_MC_MAX_GROUPS		32
104 
105 #define VXLAN_SO_VNI_HASH_SHIFT		6
106 #define VXLAN_SO_VNI_HASH_SIZE		(1 << VXLAN_SO_VNI_HASH_SHIFT)
107 #define VXLAN_SO_VNI_HASH(_vni)		((_vni) % VXLAN_SO_VNI_HASH_SIZE)
108 
109 struct vxlan_socket {
110 	struct socket			*vxlso_sock;
111 	struct rmlock			 vxlso_lock;
112 	u_int				 vxlso_refcnt;
113 	union vxlan_sockaddr		 vxlso_laddr;
114 	LIST_ENTRY(vxlan_socket)	 vxlso_entry;
115 	struct vxlan_softc_head		 vxlso_vni_hash[VXLAN_SO_VNI_HASH_SIZE];
116 	struct vxlan_socket_mc_info	 vxlso_mc[VXLAN_SO_MC_MAX_GROUPS];
117 };
118 
119 #define VXLAN_SO_RLOCK(_vso, _p)	rm_rlock(&(_vso)->vxlso_lock, (_p))
120 #define VXLAN_SO_RUNLOCK(_vso, _p)	rm_runlock(&(_vso)->vxlso_lock, (_p))
121 #define VXLAN_SO_WLOCK(_vso)		rm_wlock(&(_vso)->vxlso_lock)
122 #define VXLAN_SO_WUNLOCK(_vso)		rm_wunlock(&(_vso)->vxlso_lock)
123 #define VXLAN_SO_LOCK_ASSERT(_vso) \
124     rm_assert(&(_vso)->vxlso_lock, RA_LOCKED)
125 #define VXLAN_SO_LOCK_WASSERT(_vso) \
126     rm_assert(&(_vso)->vxlso_lock, RA_WLOCKED)
127 
128 #define VXLAN_SO_ACQUIRE(_vso)		refcount_acquire(&(_vso)->vxlso_refcnt)
129 #define VXLAN_SO_RELEASE(_vso)		refcount_release(&(_vso)->vxlso_refcnt)
130 
131 struct vxlan_ftable_entry {
132 	LIST_ENTRY(vxlan_ftable_entry)	 vxlfe_hash;
133 	uint16_t			 vxlfe_flags;
134 	uint8_t				 vxlfe_mac[ETHER_ADDR_LEN];
135 	union vxlan_sockaddr		 vxlfe_raddr;
136 	time_t				 vxlfe_expire;
137 };
138 
139 #define VXLAN_FE_FLAG_DYNAMIC		0x01
140 #define VXLAN_FE_FLAG_STATIC		0x02
141 
142 #define VXLAN_FE_IS_DYNAMIC(_fe) \
143     ((_fe)->vxlfe_flags & VXLAN_FE_FLAG_DYNAMIC)
144 
145 #define VXLAN_SC_FTABLE_SHIFT		9
146 #define VXLAN_SC_FTABLE_SIZE		(1 << VXLAN_SC_FTABLE_SHIFT)
147 #define VXLAN_SC_FTABLE_MASK		(VXLAN_SC_FTABLE_SIZE - 1)
148 #define VXLAN_SC_FTABLE_HASH(_sc, _mac)	\
149     (vxlan_mac_hash(_sc, _mac) % VXLAN_SC_FTABLE_SIZE)
150 
151 LIST_HEAD(vxlan_ftable_head, vxlan_ftable_entry);
152 
153 struct vxlan_statistics {
154 	uint32_t	ftable_nospace;
155 	uint32_t	ftable_lock_upgrade_failed;
156 	counter_u64_t	txcsum;
157 	counter_u64_t	tso;
158 	counter_u64_t	rxcsum;
159 };
160 
161 struct vxlan_softc {
162 	struct ifnet			*vxl_ifp;
163 	int				 vxl_reqcap;
164 	u_int				 vxl_fibnum;
165 	struct vxlan_socket		*vxl_sock;
166 	uint32_t			 vxl_vni;
167 	union vxlan_sockaddr		 vxl_src_addr;
168 	union vxlan_sockaddr		 vxl_dst_addr;
169 	uint32_t			 vxl_flags;
170 #define VXLAN_FLAG_INIT		0x0001
171 #define VXLAN_FLAG_TEARDOWN	0x0002
172 #define VXLAN_FLAG_LEARN	0x0004
173 #define VXLAN_FLAG_USER_MTU	0x0008
174 
175 	uint32_t			 vxl_port_hash_key;
176 	uint16_t			 vxl_min_port;
177 	uint16_t			 vxl_max_port;
178 	uint8_t				 vxl_ttl;
179 
180 	/* Lookup table from MAC address to forwarding entry. */
181 	uint32_t			 vxl_ftable_cnt;
182 	uint32_t			 vxl_ftable_max;
183 	uint32_t			 vxl_ftable_timeout;
184 	uint32_t			 vxl_ftable_hash_key;
185 	struct vxlan_ftable_head	*vxl_ftable;
186 
187 	/* Derived from vxl_dst_addr. */
188 	struct vxlan_ftable_entry	 vxl_default_fe;
189 
190 	struct ip_moptions		*vxl_im4o;
191 	struct ip6_moptions		*vxl_im6o;
192 
193 	struct rmlock			 vxl_lock;
194 	volatile u_int			 vxl_refcnt;
195 
196 	int				 vxl_unit;
197 	int				 vxl_vso_mc_index;
198 	struct vxlan_statistics		 vxl_stats;
199 	struct sysctl_oid		*vxl_sysctl_node;
200 	struct sysctl_ctx_list		 vxl_sysctl_ctx;
201 	struct callout			 vxl_callout;
202 	struct ether_addr		 vxl_hwaddr;
203 	int				 vxl_mc_ifindex;
204 	struct ifnet			*vxl_mc_ifp;
205 	struct ifmedia 			 vxl_media;
206 	char				 vxl_mc_ifname[IFNAMSIZ];
207 	LIST_ENTRY(vxlan_softc)		 vxl_entry;
208 	LIST_ENTRY(vxlan_softc)		 vxl_ifdetach_list;
209 
210 	/* For rate limiting errors on the tx fast path. */
211 	struct timeval err_time;
212 	int err_pps;
213 };
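/*
 * Locking overview: vxl_lock (an rmlock) protects the softc and its
 * forwarding table; vxl_refcnt keeps the softc alive while it is used
 * without the lock held (e.g. from the receive path), and teardown waits
 * for the count to drain; the global vxlan_sx serializes interface
 * start/stop.
 */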
214 
215 #define VXLAN_RLOCK(_sc, _p)	rm_rlock(&(_sc)->vxl_lock, (_p))
216 #define VXLAN_RUNLOCK(_sc, _p)	rm_runlock(&(_sc)->vxl_lock, (_p))
217 #define VXLAN_WLOCK(_sc)	rm_wlock(&(_sc)->vxl_lock)
218 #define VXLAN_WUNLOCK(_sc)	rm_wunlock(&(_sc)->vxl_lock)
219 #define VXLAN_LOCK_WOWNED(_sc)	rm_wowned(&(_sc)->vxl_lock)
220 #define VXLAN_LOCK_ASSERT(_sc)	rm_assert(&(_sc)->vxl_lock, RA_LOCKED)
221 #define VXLAN_LOCK_WASSERT(_sc) rm_assert(&(_sc)->vxl_lock, RA_WLOCKED)
222 #define VXLAN_UNLOCK(_sc, _p) do {		\
223     if (VXLAN_LOCK_WOWNED(_sc))			\
224 	VXLAN_WUNLOCK(_sc);			\
225     else					\
226 	VXLAN_RUNLOCK(_sc, _p);			\
227 } while (0)
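/*
 * VXLAN_UNLOCK() releases whichever lock mode the caller currently holds.
 * This supports paths such as vxlan_ftable_learn(), which acquires the read
 * lock but may be upgraded to the write lock inside
 * vxlan_ftable_update_locked() before reaching the common unlock.
 */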
228 
229 #define VXLAN_ACQUIRE(_sc)	refcount_acquire(&(_sc)->vxl_refcnt)
230 #define VXLAN_RELEASE(_sc)	refcount_release(&(_sc)->vxl_refcnt)
231 
232 #define	satoconstsin(sa)	((const struct sockaddr_in *)(sa))
233 #define	satoconstsin6(sa)	((const struct sockaddr_in6 *)(sa))
234 
235 struct vxlanudphdr {
236 	struct udphdr		vxlh_udp;
237 	struct vxlan_header	vxlh_hdr;
238 } __packed;
239 
240 static int	vxlan_ftable_addr_cmp(const uint8_t *, const uint8_t *);
241 static void	vxlan_ftable_init(struct vxlan_softc *);
242 static void	vxlan_ftable_fini(struct vxlan_softc *);
243 static void	vxlan_ftable_flush(struct vxlan_softc *, int);
244 static void	vxlan_ftable_expire(struct vxlan_softc *);
245 static int	vxlan_ftable_update_locked(struct vxlan_softc *,
246 		    const union vxlan_sockaddr *, const uint8_t *,
247 		    struct rm_priotracker *);
248 static int	vxlan_ftable_learn(struct vxlan_softc *,
249 		    const struct sockaddr *, const uint8_t *);
250 static int	vxlan_ftable_sysctl_dump(SYSCTL_HANDLER_ARGS);
251 
252 static struct vxlan_ftable_entry *
253 		vxlan_ftable_entry_alloc(void);
254 static void	vxlan_ftable_entry_free(struct vxlan_ftable_entry *);
255 static void	vxlan_ftable_entry_init(struct vxlan_softc *,
256 		    struct vxlan_ftable_entry *, const uint8_t *,
257 		    const struct sockaddr *, uint32_t);
258 static void	vxlan_ftable_entry_destroy(struct vxlan_softc *,
259 		    struct vxlan_ftable_entry *);
260 static int	vxlan_ftable_entry_insert(struct vxlan_softc *,
261 		    struct vxlan_ftable_entry *);
262 static struct vxlan_ftable_entry *
263 		vxlan_ftable_entry_lookup(struct vxlan_softc *,
264 		    const uint8_t *);
265 static void	vxlan_ftable_entry_dump(struct vxlan_ftable_entry *,
266 		    struct sbuf *);
267 
268 static struct vxlan_socket *
269 		vxlan_socket_alloc(const union vxlan_sockaddr *);
270 static void	vxlan_socket_destroy(struct vxlan_socket *);
271 static void	vxlan_socket_release(struct vxlan_socket *);
272 static struct vxlan_socket *
273 		vxlan_socket_lookup(union vxlan_sockaddr *vxlsa);
274 static void	vxlan_socket_insert(struct vxlan_socket *);
275 static int	vxlan_socket_init(struct vxlan_socket *, struct ifnet *);
276 static int	vxlan_socket_bind(struct vxlan_socket *, struct ifnet *);
277 static int	vxlan_socket_create(struct ifnet *, int,
278 		    const union vxlan_sockaddr *, struct vxlan_socket **);
279 static void	vxlan_socket_ifdetach(struct vxlan_socket *,
280 		    struct ifnet *, struct vxlan_softc_head *);
281 
282 static struct vxlan_socket *
283 		vxlan_socket_mc_lookup(const union vxlan_sockaddr *);
284 static int	vxlan_sockaddr_mc_info_match(
285 		    const struct vxlan_socket_mc_info *,
286 		    const union vxlan_sockaddr *,
287 		    const union vxlan_sockaddr *, int);
288 static int	vxlan_socket_mc_join_group(struct vxlan_socket *,
289 		    const union vxlan_sockaddr *, const union vxlan_sockaddr *,
290 		    int *, union vxlan_sockaddr *);
291 static int	vxlan_socket_mc_leave_group(struct vxlan_socket *,
292 		    const union vxlan_sockaddr *,
293 		    const union vxlan_sockaddr *, int);
294 static int	vxlan_socket_mc_add_group(struct vxlan_socket *,
295 		    const union vxlan_sockaddr *, const union vxlan_sockaddr *,
296 		    int, int *);
297 static void	vxlan_socket_mc_release_group_by_idx(struct vxlan_socket *,
298 		    int);
299 
300 static struct vxlan_softc *
301 		vxlan_socket_lookup_softc_locked(struct vxlan_socket *,
302 		    uint32_t);
303 static struct vxlan_softc *
304 		vxlan_socket_lookup_softc(struct vxlan_socket *, uint32_t);
305 static int	vxlan_socket_insert_softc(struct vxlan_socket *,
306 		    struct vxlan_softc *);
307 static void	vxlan_socket_remove_softc(struct vxlan_socket *,
308 		    struct vxlan_softc *);
309 
310 static struct ifnet *
311 		vxlan_multicast_if_ref(struct vxlan_softc *, int);
312 static void	vxlan_free_multicast(struct vxlan_softc *);
313 static int	vxlan_setup_multicast_interface(struct vxlan_softc *);
314 
315 static int	vxlan_setup_multicast(struct vxlan_softc *);
316 static int	vxlan_setup_socket(struct vxlan_softc *);
317 #ifdef INET6
318 static void	vxlan_setup_zero_checksum_port(struct vxlan_softc *);
319 #endif
320 static void	vxlan_setup_interface_hdrlen(struct vxlan_softc *);
321 static int	vxlan_valid_init_config(struct vxlan_softc *);
322 static void	vxlan_init_wait(struct vxlan_softc *);
323 static void	vxlan_init_complete(struct vxlan_softc *);
324 static void	vxlan_init(void *);
325 static void	vxlan_release(struct vxlan_softc *);
326 static void	vxlan_teardown_wait(struct vxlan_softc *);
327 static void	vxlan_teardown_complete(struct vxlan_softc *);
328 static void	vxlan_teardown_locked(struct vxlan_softc *);
329 static void	vxlan_teardown(struct vxlan_softc *);
330 static void	vxlan_ifdetach(struct vxlan_softc *, struct ifnet *,
331 		    struct vxlan_softc_head *);
332 static void	vxlan_timer(void *);
333 
334 static int	vxlan_ctrl_get_config(struct vxlan_softc *, void *);
335 static int	vxlan_ctrl_set_vni(struct vxlan_softc *, void *);
336 static int	vxlan_ctrl_set_local_addr(struct vxlan_softc *, void *);
337 static int	vxlan_ctrl_set_remote_addr(struct vxlan_softc *, void *);
338 static int	vxlan_ctrl_set_local_port(struct vxlan_softc *, void *);
339 static int	vxlan_ctrl_set_remote_port(struct vxlan_softc *, void *);
340 static int	vxlan_ctrl_set_port_range(struct vxlan_softc *, void *);
341 static int	vxlan_ctrl_set_ftable_timeout(struct vxlan_softc *, void *);
342 static int	vxlan_ctrl_set_ftable_max(struct vxlan_softc *, void *);
343 static int	vxlan_ctrl_set_multicast_if(struct vxlan_softc *, void *);
344 static int	vxlan_ctrl_set_ttl(struct vxlan_softc *, void *);
345 static int	vxlan_ctrl_set_learn(struct vxlan_softc *, void *);
346 static int	vxlan_ctrl_ftable_entry_add(struct vxlan_softc *, void *);
347 static int	vxlan_ctrl_ftable_entry_rem(struct vxlan_softc *, void *);
348 static int	vxlan_ctrl_flush(struct vxlan_softc *, void *);
349 static int	vxlan_ioctl_drvspec(struct vxlan_softc *,
350 		    struct ifdrv *, int);
351 static int	vxlan_ioctl_ifflags(struct vxlan_softc *);
352 static int	vxlan_ioctl(struct ifnet *, u_long, caddr_t);
353 
354 #if defined(INET) || defined(INET6)
355 static uint16_t vxlan_pick_source_port(struct vxlan_softc *, struct mbuf *);
356 static void	vxlan_encap_header(struct vxlan_softc *, struct mbuf *,
357 		    int, uint16_t, uint16_t);
358 #endif
359 static int	vxlan_encap4(struct vxlan_softc *,
360 		    const union vxlan_sockaddr *, struct mbuf *);
361 static int	vxlan_encap6(struct vxlan_softc *,
362 		    const union vxlan_sockaddr *, struct mbuf *);
363 static int	vxlan_transmit(struct ifnet *, struct mbuf *);
364 static void	vxlan_qflush(struct ifnet *);
365 static bool	vxlan_rcv_udp_packet(struct mbuf *, int, struct inpcb *,
366 		    const struct sockaddr *, void *);
367 static int	vxlan_input(struct vxlan_socket *, uint32_t, struct mbuf **,
368 		    const struct sockaddr *);
369 
370 static void	vxlan_stats_alloc(struct vxlan_softc *);
371 static void	vxlan_stats_free(struct vxlan_softc *);
372 static void	vxlan_set_default_config(struct vxlan_softc *);
373 static int	vxlan_set_user_config(struct vxlan_softc *,
374 		     struct ifvxlanparam *);
375 static int	vxlan_set_reqcap(struct vxlan_softc *, struct ifnet *, int);
376 static void	vxlan_set_hwcaps(struct vxlan_softc *);
377 static int	vxlan_clone_create(struct if_clone *, char *, size_t,
378 		    struct ifc_data *, struct ifnet **);
379 static int	vxlan_clone_destroy(struct if_clone *, struct ifnet *, uint32_t);
380 
381 static uint32_t vxlan_mac_hash(struct vxlan_softc *, const uint8_t *);
382 static int	vxlan_media_change(struct ifnet *);
383 static void	vxlan_media_status(struct ifnet *, struct ifmediareq *);
384 
385 static int	vxlan_sockaddr_cmp(const union vxlan_sockaddr *,
386 		    const struct sockaddr *);
387 static void	vxlan_sockaddr_copy(union vxlan_sockaddr *,
388 		    const struct sockaddr *);
389 static int	vxlan_sockaddr_in_equal(const union vxlan_sockaddr *,
390 		    const struct sockaddr *);
391 static void	vxlan_sockaddr_in_copy(union vxlan_sockaddr *,
392 		    const struct sockaddr *);
393 static int	vxlan_sockaddr_supported(const union vxlan_sockaddr *, int);
394 static int	vxlan_sockaddr_in_any(const union vxlan_sockaddr *);
395 static int	vxlan_sockaddr_in_multicast(const union vxlan_sockaddr *);
396 static int	vxlan_sockaddr_in6_embedscope(union vxlan_sockaddr *);
397 
398 static int	vxlan_can_change_config(struct vxlan_softc *);
399 static int	vxlan_check_vni(uint32_t);
400 static int	vxlan_check_ttl(int);
401 static int	vxlan_check_ftable_timeout(uint32_t);
402 static int	vxlan_check_ftable_max(uint32_t);
403 
404 static void	vxlan_sysctl_setup(struct vxlan_softc *);
405 static void	vxlan_sysctl_destroy(struct vxlan_softc *);
406 static int	vxlan_tunable_int(struct vxlan_softc *, const char *, int);
407 
408 static void	vxlan_ifdetach_event(void *, struct ifnet *);
409 static void	vxlan_load(void);
410 static void	vxlan_unload(void);
411 static int	vxlan_modevent(module_t, int, void *);
412 
413 static const char vxlan_name[] = "vxlan";
414 static MALLOC_DEFINE(M_VXLAN, vxlan_name,
415     "Virtual eXtensible LAN Interface");
416 static struct if_clone *vxlan_cloner;
417 
418 static struct mtx vxlan_list_mtx;
419 #define VXLAN_LIST_LOCK()	mtx_lock(&vxlan_list_mtx)
420 #define VXLAN_LIST_UNLOCK()	mtx_unlock(&vxlan_list_mtx)
421 
422 static LIST_HEAD(, vxlan_socket) vxlan_socket_list =
423     LIST_HEAD_INITIALIZER(vxlan_socket_list);
424 
425 static eventhandler_tag vxlan_ifdetach_event_tag;
426 
427 SYSCTL_DECL(_net_link);
428 SYSCTL_NODE(_net_link, OID_AUTO, vxlan, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
429     "Virtual eXtensible Local Area Network");
430 
431 static int vxlan_legacy_port = 0;
432 TUNABLE_INT("net.link.vxlan.legacy_port", &vxlan_legacy_port);
433 static int vxlan_reuse_port = 0;
434 TUNABLE_INT("net.link.vxlan.reuse_port", &vxlan_reuse_port);
435 
436 /*
437  * This macro controls the default upper limit on the nesting of vxlan
438  * tunnels. The default is 3: the overhead of an IPv6 vxlan tunnel is 70
439  * bytes, so three levels add at most 210 bytes and leave the innermost
440  * tunnel an MTU of 1290, which still satisfies the IPv6 minimum MTU of
441  * 1280. Configure the tunnels carefully when raising this limit; a large
442  * number of nested tunnels can crash the system.
443  */
444 #ifndef MAX_VXLAN_NEST
445 #define MAX_VXLAN_NEST	3
446 #endif
447 static int max_vxlan_nesting = MAX_VXLAN_NEST;
448 SYSCTL_INT(_net_link_vxlan, OID_AUTO, max_nesting, CTLFLAG_RW,
449     &max_vxlan_nesting, 0, "Max nested tunnels");
450 
451 /* Default maximum number of addresses in the forwarding table. */
452 #ifndef VXLAN_FTABLE_MAX
453 #define VXLAN_FTABLE_MAX	2000
454 #endif
455 
456 /* Timeout (in seconds) of addresses learned in the forwarding table. */
457 #ifndef VXLAN_FTABLE_TIMEOUT
458 #define VXLAN_FTABLE_TIMEOUT	(20 * 60)
459 #endif
460 
461 /*
462  * Maximum timeout (in seconds) of addresses learned in the forwarding
463  * table.
464  */
465 #ifndef VXLAN_FTABLE_MAX_TIMEOUT
466 #define VXLAN_FTABLE_MAX_TIMEOUT	(60 * 60 * 24)
467 #endif
468 
469 /* Number of seconds between pruning attempts of the forwarding table. */
470 #ifndef VXLAN_FTABLE_PRUNE
471 #define VXLAN_FTABLE_PRUNE	(5 * 60)
472 #endif
473 
474 static int vxlan_ftable_prune_period = VXLAN_FTABLE_PRUNE;
475 
476 struct vxlan_control {
477 	int	(*vxlc_func)(struct vxlan_softc *, void *);
478 	int	vxlc_argsize;
479 	int	vxlc_flags;
480 #define VXLAN_CTRL_FLAG_COPYIN	0x01
481 #define VXLAN_CTRL_FLAG_COPYOUT	0x02
482 #define VXLAN_CTRL_FLAG_SUSER	0x04
483 };
484 
485 static const struct vxlan_control vxlan_control_table[] = {
486 	[VXLAN_CMD_GET_CONFIG] =
487 	    {	vxlan_ctrl_get_config, sizeof(struct ifvxlancfg),
488 		VXLAN_CTRL_FLAG_COPYOUT
489 	    },
490 
491 	[VXLAN_CMD_SET_VNI] =
492 	    {   vxlan_ctrl_set_vni, sizeof(struct ifvxlancmd),
493 		VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
494 	    },
495 
496 	[VXLAN_CMD_SET_LOCAL_ADDR] =
497 	    {   vxlan_ctrl_set_local_addr, sizeof(struct ifvxlancmd),
498 		VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
499 	    },
500 
501 	[VXLAN_CMD_SET_REMOTE_ADDR] =
502 	    {   vxlan_ctrl_set_remote_addr, sizeof(struct ifvxlancmd),
503 		VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
504 	    },
505 
506 	[VXLAN_CMD_SET_LOCAL_PORT] =
507 	    {   vxlan_ctrl_set_local_port, sizeof(struct ifvxlancmd),
508 		VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
509 	    },
510 
511 	[VXLAN_CMD_SET_REMOTE_PORT] =
512 	    {   vxlan_ctrl_set_remote_port, sizeof(struct ifvxlancmd),
513 		VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
514 	    },
515 
516 	[VXLAN_CMD_SET_PORT_RANGE] =
517 	    {   vxlan_ctrl_set_port_range, sizeof(struct ifvxlancmd),
518 		VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
519 	    },
520 
521 	[VXLAN_CMD_SET_FTABLE_TIMEOUT] =
522 	    {	vxlan_ctrl_set_ftable_timeout, sizeof(struct ifvxlancmd),
523 		VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
524 	    },
525 
526 	[VXLAN_CMD_SET_FTABLE_MAX] =
527 	    {	vxlan_ctrl_set_ftable_max, sizeof(struct ifvxlancmd),
528 		VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
529 	    },
530 
531 	[VXLAN_CMD_SET_MULTICAST_IF] =
532 	    {	vxlan_ctrl_set_multicast_if, sizeof(struct ifvxlancmd),
533 		VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
534 	    },
535 
536 	[VXLAN_CMD_SET_TTL] =
537 	    {	vxlan_ctrl_set_ttl, sizeof(struct ifvxlancmd),
538 		VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
539 	    },
540 
541 	[VXLAN_CMD_SET_LEARN] =
542 	    {	vxlan_ctrl_set_learn, sizeof(struct ifvxlancmd),
543 		VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
544 	    },
545 
546 	[VXLAN_CMD_FTABLE_ENTRY_ADD] =
547 	    {	vxlan_ctrl_ftable_entry_add, sizeof(struct ifvxlancmd),
548 		VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
549 	    },
550 
551 	[VXLAN_CMD_FTABLE_ENTRY_REM] =
552 	    {	vxlan_ctrl_ftable_entry_rem, sizeof(struct ifvxlancmd),
553 		VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
554 	    },
555 
556 	[VXLAN_CMD_FLUSH] =
557 	    {   vxlan_ctrl_flush, sizeof(struct ifvxlancmd),
558 		VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
559 	    },
560 };
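/*
 * The table above is indexed by the VXLAN_CMD_* value carried in an
 * SIOCGDRVSPEC/SIOCSDRVSPEC request; vxlan_ioctl_drvspec() validates the
 * argument size and the VXLAN_CTRL_FLAG_* bits before copying the argument
 * in or out and invoking vxlc_func.  Roughly (a sketch, not the exact code
 * path):
 *
 *	vc = &vxlan_control_table[ifd->ifd_cmd];
 *	if ((vc->vxlc_flags & VXLAN_CTRL_FLAG_SUSER) != 0)
 *		error = priv_check(curthread, PRIV_NET_VXLAN);
 *	if (vc->vxlc_flags & VXLAN_CTRL_FLAG_COPYIN)
 *		error = copyin(ifd->ifd_data, &arg, vc->vxlc_argsize);
 *	error = vc->vxlc_func(sc, &arg);
 *	if (vc->vxlc_flags & VXLAN_CTRL_FLAG_COPYOUT)
 *		error = copyout(&arg, ifd->ifd_data, vc->vxlc_argsize);
 */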
561 
562 static const int vxlan_control_table_size = nitems(vxlan_control_table);
563 
564 static int
565 vxlan_ftable_addr_cmp(const uint8_t *a, const uint8_t *b)
566 {
567 	int i, d;
568 
569 	for (i = 0, d = 0; i < ETHER_ADDR_LEN && d == 0; i++)
570 		d = ((int)a[i]) - ((int)b[i]);
571 
572 	return (d);
573 }
574 
575 static void
576 vxlan_ftable_init(struct vxlan_softc *sc)
577 {
578 	int i;
579 
580 	sc->vxl_ftable = malloc(sizeof(struct vxlan_ftable_head) *
581 	    VXLAN_SC_FTABLE_SIZE, M_VXLAN, M_ZERO | M_WAITOK);
582 
583 	for (i = 0; i < VXLAN_SC_FTABLE_SIZE; i++)
584 		LIST_INIT(&sc->vxl_ftable[i]);
585 	sc->vxl_ftable_hash_key = arc4random();
586 }
587 
588 static void
589 vxlan_ftable_fini(struct vxlan_softc *sc)
590 {
591 	int i;
592 
593 	for (i = 0; i < VXLAN_SC_FTABLE_SIZE; i++) {
594 		KASSERT(LIST_EMPTY(&sc->vxl_ftable[i]),
595 		    ("%s: vxlan %p ftable[%d] not empty", __func__, sc, i));
596 	}
597 	MPASS(sc->vxl_ftable_cnt == 0);
598 
599 	free(sc->vxl_ftable, M_VXLAN);
600 	sc->vxl_ftable = NULL;
601 }
602 
603 static void
604 vxlan_ftable_flush(struct vxlan_softc *sc, int all)
605 {
606 	struct vxlan_ftable_entry *fe, *tfe;
607 	int i;
608 
609 	for (i = 0; i < VXLAN_SC_FTABLE_SIZE; i++) {
610 		LIST_FOREACH_SAFE(fe, &sc->vxl_ftable[i], vxlfe_hash, tfe) {
611 			if (all || VXLAN_FE_IS_DYNAMIC(fe))
612 				vxlan_ftable_entry_destroy(sc, fe);
613 		}
614 	}
615 }
616 
617 static void
618 vxlan_ftable_expire(struct vxlan_softc *sc)
619 {
620 	struct vxlan_ftable_entry *fe, *tfe;
621 	int i;
622 
623 	VXLAN_LOCK_WASSERT(sc);
624 
625 	for (i = 0; i < VXLAN_SC_FTABLE_SIZE; i++) {
626 		LIST_FOREACH_SAFE(fe, &sc->vxl_ftable[i], vxlfe_hash, tfe) {
627 			if (VXLAN_FE_IS_DYNAMIC(fe) &&
628 			    time_uptime >= fe->vxlfe_expire)
629 				vxlan_ftable_entry_destroy(sc, fe);
630 		}
631 	}
632 }
633 
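/*
 * An rmlock cannot be upgraded in place, so when the update below needs
 * the write lock while only the read lock is held, it drops the read
 * lock, takes the write lock, and retries the lookup from "again" since
 * the table may have changed in between.  Each such retry is counted in
 * the ftable_lock_upgrade_failed statistic.
 */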
634 static int
635 vxlan_ftable_update_locked(struct vxlan_softc *sc,
636     const union vxlan_sockaddr *vxlsa, const uint8_t *mac,
637     struct rm_priotracker *tracker)
638 {
639 	struct vxlan_ftable_entry *fe;
640 	int error __unused;
641 
642 	VXLAN_LOCK_ASSERT(sc);
643 
644 again:
645 	/*
646 	 * A forwarding entry for this MAC address might already exist. If
647 	 * so, update it, otherwise create a new one. We may have to upgrade
648 	 * the lock if we have to change or create an entry.
649 	 */
650 	fe = vxlan_ftable_entry_lookup(sc, mac);
651 	if (fe != NULL) {
652 		fe->vxlfe_expire = time_uptime + sc->vxl_ftable_timeout;
653 
654 		if (!VXLAN_FE_IS_DYNAMIC(fe) ||
655 		    vxlan_sockaddr_in_equal(&fe->vxlfe_raddr, &vxlsa->sa))
656 			return (0);
657 		if (!VXLAN_LOCK_WOWNED(sc)) {
658 			VXLAN_RUNLOCK(sc, tracker);
659 			VXLAN_WLOCK(sc);
660 			sc->vxl_stats.ftable_lock_upgrade_failed++;
661 			goto again;
662 		}
663 		vxlan_sockaddr_in_copy(&fe->vxlfe_raddr, &vxlsa->sa);
664 		return (0);
665 	}
666 
667 	if (!VXLAN_LOCK_WOWNED(sc)) {
668 		VXLAN_RUNLOCK(sc, tracker);
669 		VXLAN_WLOCK(sc);
670 		sc->vxl_stats.ftable_lock_upgrade_failed++;
671 		goto again;
672 	}
673 
674 	if (sc->vxl_ftable_cnt >= sc->vxl_ftable_max) {
675 		sc->vxl_stats.ftable_nospace++;
676 		return (ENOSPC);
677 	}
678 
679 	fe = vxlan_ftable_entry_alloc();
680 	if (fe == NULL)
681 		return (ENOMEM);
682 
683 	vxlan_ftable_entry_init(sc, fe, mac, &vxlsa->sa, VXLAN_FE_FLAG_DYNAMIC);
684 
685 	/* The prior lookup failed, so the insert should not. */
686 	error = vxlan_ftable_entry_insert(sc, fe);
687 	MPASS(error == 0);
688 
689 	return (0);
690 }
691 
692 static int
693 vxlan_ftable_learn(struct vxlan_softc *sc, const struct sockaddr *sa,
694     const uint8_t *mac)
695 {
696 	struct rm_priotracker tracker;
697 	union vxlan_sockaddr vxlsa;
698 	int error;
699 
700 	/*
701 	 * The source port may be randomly selected by the remote host, so
702 	 * use the port of the default destination address.
703 	 */
704 	vxlan_sockaddr_copy(&vxlsa, sa);
705 	vxlsa.in4.sin_port = sc->vxl_dst_addr.in4.sin_port;
706 
707 	if (VXLAN_SOCKADDR_IS_IPV6(&vxlsa)) {
708 		error = vxlan_sockaddr_in6_embedscope(&vxlsa);
709 		if (error)
710 			return (error);
711 	}
712 
713 	VXLAN_RLOCK(sc, &tracker);
714 	error = vxlan_ftable_update_locked(sc, &vxlsa, mac, &tracker);
715 	VXLAN_UNLOCK(sc, &tracker);
716 
717 	return (error);
718 }
719 
720 static int
721 vxlan_ftable_sysctl_dump(SYSCTL_HANDLER_ARGS)
722 {
723 	struct rm_priotracker tracker;
724 	struct sbuf sb;
725 	struct vxlan_softc *sc;
726 	struct vxlan_ftable_entry *fe;
727 	size_t size;
728 	int i, error;
729 
730 	/*
731 	 * This is mostly intended for debugging during development. It is
732 	 * not practical to dump an entire large table this way.
733 	 */
734 
735 	sc = arg1;
736 	size = PAGE_SIZE;	/* Calculate later. */
737 
738 	sbuf_new(&sb, NULL, size, SBUF_FIXEDLEN);
739 	sbuf_putc(&sb, '\n');
740 
741 	VXLAN_RLOCK(sc, &tracker);
742 	for (i = 0; i < VXLAN_SC_FTABLE_SIZE; i++) {
743 		LIST_FOREACH(fe, &sc->vxl_ftable[i], vxlfe_hash) {
744 			if (sbuf_error(&sb) != 0)
745 				break;
746 			vxlan_ftable_entry_dump(fe, &sb);
747 		}
748 	}
749 	VXLAN_RUNLOCK(sc, &tracker);
750 
751 	if (sbuf_len(&sb) == 1)
752 		sbuf_setpos(&sb, 0);
753 
754 	sbuf_finish(&sb);
755 	error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
756 	sbuf_delete(&sb);
757 
758 	return (error);
759 }
760 
761 static struct vxlan_ftable_entry *
762 vxlan_ftable_entry_alloc(void)
763 {
764 	struct vxlan_ftable_entry *fe;
765 
766 	fe = malloc(sizeof(*fe), M_VXLAN, M_ZERO | M_NOWAIT);
767 
768 	return (fe);
769 }
770 
771 static void
772 vxlan_ftable_entry_free(struct vxlan_ftable_entry *fe)
773 {
774 
775 	free(fe, M_VXLAN);
776 }
777 
778 static void
779 vxlan_ftable_entry_init(struct vxlan_softc *sc, struct vxlan_ftable_entry *fe,
780     const uint8_t *mac, const struct sockaddr *sa, uint32_t flags)
781 {
782 
783 	fe->vxlfe_flags = flags;
784 	fe->vxlfe_expire = time_uptime + sc->vxl_ftable_timeout;
785 	memcpy(fe->vxlfe_mac, mac, ETHER_ADDR_LEN);
786 	vxlan_sockaddr_copy(&fe->vxlfe_raddr, sa);
787 }
788 
789 static void
790 vxlan_ftable_entry_destroy(struct vxlan_softc *sc,
791     struct vxlan_ftable_entry *fe)
792 {
793 
794 	sc->vxl_ftable_cnt--;
795 	LIST_REMOVE(fe, vxlfe_hash);
796 	vxlan_ftable_entry_free(fe);
797 }
798 
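/*
 * Each ftable hash chain is kept sorted by MAC address (descending order,
 * per vxlan_ftable_addr_cmp()), which allows vxlan_ftable_entry_lookup()
 * to stop scanning a chain once it has passed the point where the address
 * would appear.
 */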
799 static int
800 vxlan_ftable_entry_insert(struct vxlan_softc *sc,
801     struct vxlan_ftable_entry *fe)
802 {
803 	struct vxlan_ftable_entry *lfe;
804 	uint32_t hash;
805 	int dir;
806 
807 	VXLAN_LOCK_WASSERT(sc);
808 	hash = VXLAN_SC_FTABLE_HASH(sc, fe->vxlfe_mac);
809 
810 	lfe = LIST_FIRST(&sc->vxl_ftable[hash]);
811 	if (lfe == NULL) {
812 		LIST_INSERT_HEAD(&sc->vxl_ftable[hash], fe, vxlfe_hash);
813 		goto out;
814 	}
815 
816 	do {
817 		dir = vxlan_ftable_addr_cmp(fe->vxlfe_mac, lfe->vxlfe_mac);
818 		if (dir == 0)
819 			return (EEXIST);
820 		if (dir > 0) {
821 			LIST_INSERT_BEFORE(lfe, fe, vxlfe_hash);
822 			goto out;
823 		} else if (LIST_NEXT(lfe, vxlfe_hash) == NULL) {
824 			LIST_INSERT_AFTER(lfe, fe, vxlfe_hash);
825 			goto out;
826 		} else
827 			lfe = LIST_NEXT(lfe, vxlfe_hash);
828 	} while (lfe != NULL);
829 
830 out:
831 	sc->vxl_ftable_cnt++;
832 
833 	return (0);
834 }
835 
836 static struct vxlan_ftable_entry *
837 vxlan_ftable_entry_lookup(struct vxlan_softc *sc, const uint8_t *mac)
838 {
839 	struct vxlan_ftable_entry *fe;
840 	uint32_t hash;
841 	int dir;
842 
843 	VXLAN_LOCK_ASSERT(sc);
844 	hash = VXLAN_SC_FTABLE_HASH(sc, mac);
845 
846 	LIST_FOREACH(fe, &sc->vxl_ftable[hash], vxlfe_hash) {
847 		dir = vxlan_ftable_addr_cmp(mac, fe->vxlfe_mac);
848 		if (dir == 0)
849 			return (fe);
850 		if (dir > 0)
851 			break;
852 	}
853 
854 	return (NULL);
855 }
856 
857 static void
858 vxlan_ftable_entry_dump(struct vxlan_ftable_entry *fe, struct sbuf *sb)
859 {
860 	char buf[64];
861 	const union vxlan_sockaddr *sa;
862 	const void *addr;
863 	int i, len, af, width;
864 
865 	sa = &fe->vxlfe_raddr;
866 	af = sa->sa.sa_family;
867 	len = sbuf_len(sb);
868 
869 	sbuf_printf(sb, "%c 0x%02X ", VXLAN_FE_IS_DYNAMIC(fe) ? 'D' : 'S',
870 	    fe->vxlfe_flags);
871 
872 	for (i = 0; i < ETHER_ADDR_LEN - 1; i++)
873 		sbuf_printf(sb, "%02X:", fe->vxlfe_mac[i]);
874 	sbuf_printf(sb, "%02X ", fe->vxlfe_mac[i]);
875 
876 	if (af == AF_INET) {
877 		addr = &sa->in4.sin_addr;
878 		width = INET_ADDRSTRLEN - 1;
879 	} else {
880 		addr = &sa->in6.sin6_addr;
881 		width = INET6_ADDRSTRLEN - 1;
882 	}
883 	inet_ntop(af, addr, buf, sizeof(buf));
884 	sbuf_printf(sb, "%*s ", width, buf);
885 
886 	sbuf_printf(sb, "%08jd", (intmax_t)fe->vxlfe_expire);
887 
888 	sbuf_putc(sb, '\n');
889 
890 	/* Truncate a partial line. */
891 	if (sbuf_error(sb) != 0)
892 		sbuf_setpos(sb, len);
893 }
894 
895 static struct vxlan_socket *
896 vxlan_socket_alloc(const union vxlan_sockaddr *sa)
897 {
898 	struct vxlan_socket *vso;
899 	int i;
900 
901 	vso = malloc(sizeof(*vso), M_VXLAN, M_WAITOK | M_ZERO);
902 	rm_init(&vso->vxlso_lock, "vxlansorm");
903 	refcount_init(&vso->vxlso_refcnt, 0);
904 	for (i = 0; i < VXLAN_SO_VNI_HASH_SIZE; i++)
905 		LIST_INIT(&vso->vxlso_vni_hash[i]);
906 	vso->vxlso_laddr = *sa;
907 
908 	return (vso);
909 }
910 
911 static void
912 vxlan_socket_destroy(struct vxlan_socket *vso)
913 {
914 	struct socket *so;
915 #ifdef INVARIANTS
916 	int i;
917 	struct vxlan_socket_mc_info *mc;
918 
919 	for (i = 0; i < VXLAN_SO_MC_MAX_GROUPS; i++) {
920 		mc = &vso->vxlso_mc[i];
921 		KASSERT(mc->vxlsomc_gaddr.sa.sa_family == AF_UNSPEC,
922 		    ("%s: socket %p mc[%d] still has address",
923 		     __func__, vso, i));
924 	}
925 
926 	for (i = 0; i < VXLAN_SO_VNI_HASH_SIZE; i++) {
927 		KASSERT(LIST_EMPTY(&vso->vxlso_vni_hash[i]),
928 		    ("%s: socket %p vni_hash[%d] not empty",
929 		     __func__, vso, i));
930 	}
931 #endif
932 	so = vso->vxlso_sock;
933 	if (so != NULL) {
934 		vso->vxlso_sock = NULL;
935 		soclose(so);
936 	}
937 
938 	rm_destroy(&vso->vxlso_lock);
939 	free(vso, M_VXLAN);
940 }
941 
942 static void
943 vxlan_socket_release(struct vxlan_socket *vso)
944 {
945 	int destroy;
946 
947 	VXLAN_LIST_LOCK();
948 	destroy = VXLAN_SO_RELEASE(vso);
949 	if (destroy != 0)
950 		LIST_REMOVE(vso, vxlso_entry);
951 	VXLAN_LIST_UNLOCK();
952 
953 	if (destroy != 0)
954 		vxlan_socket_destroy(vso);
955 }
956 
957 static struct vxlan_socket *
958 vxlan_socket_lookup(union vxlan_sockaddr *vxlsa)
959 {
960 	struct vxlan_socket *vso;
961 
962 	VXLAN_LIST_LOCK();
963 	LIST_FOREACH(vso, &vxlan_socket_list, vxlso_entry) {
964 		if (vxlan_sockaddr_cmp(&vso->vxlso_laddr, &vxlsa->sa) == 0) {
965 			VXLAN_SO_ACQUIRE(vso);
966 			break;
967 		}
968 	}
969 	VXLAN_LIST_UNLOCK();
970 
971 	return (vso);
972 }
973 
974 static void
975 vxlan_socket_insert(struct vxlan_socket *vso)
976 {
977 
978 	VXLAN_LIST_LOCK();
979 	VXLAN_SO_ACQUIRE(vso);
980 	LIST_INSERT_HEAD(&vxlan_socket_list, vso, vxlso_entry);
981 	VXLAN_LIST_UNLOCK();
982 }
983 
984 static int
985 vxlan_socket_init(struct vxlan_socket *vso, struct ifnet *ifp)
986 {
987 	struct thread *td;
988 	int error;
989 
990 	td = curthread;
991 
992 	error = socreate(vso->vxlso_laddr.sa.sa_family, &vso->vxlso_sock,
993 	    SOCK_DGRAM, IPPROTO_UDP, td->td_ucred, td);
994 	if (error) {
995 		if_printf(ifp, "cannot create socket: %d\n", error);
996 		return (error);
997 	}
998 
999 	error = udp_set_kernel_tunneling(vso->vxlso_sock,
1000 	    vxlan_rcv_udp_packet, NULL, vso);
1001 	if (error) {
1002 		if_printf(ifp, "cannot set tunneling function: %d\n", error);
1003 		return (error);
1004 	}
1005 
1006 	if (vxlan_reuse_port != 0) {
1007 		struct sockopt sopt;
1008 		int val = 1;
1009 
1010 		bzero(&sopt, sizeof(sopt));
1011 		sopt.sopt_dir = SOPT_SET;
1012 		sopt.sopt_level = IPPROTO_IP;
1013 		sopt.sopt_name = SO_REUSEPORT;
1014 		sopt.sopt_val = &val;
1015 		sopt.sopt_valsize = sizeof(val);
1016 		error = sosetopt(vso->vxlso_sock, &sopt);
1017 		if (error) {
1018 			if_printf(ifp,
1019 			    "cannot set REUSEPORT socket opt: %d\n", error);
1020 			return (error);
1021 		}
1022 	}
1023 
1024 	return (0);
1025 }
1026 
1027 static int
1028 vxlan_socket_bind(struct vxlan_socket *vso, struct ifnet *ifp)
1029 {
1030 	union vxlan_sockaddr laddr;
1031 	struct thread *td;
1032 	int error;
1033 
1034 	td = curthread;
1035 	laddr = vso->vxlso_laddr;
1036 
1037 	error = sobind(vso->vxlso_sock, &laddr.sa, td);
1038 	if (error) {
1039 		if (error != EADDRINUSE)
1040 			if_printf(ifp, "cannot bind socket: %d\n", error);
1041 		return (error);
1042 	}
1043 
1044 	return (0);
1045 }
1046 
1047 static int
1048 vxlan_socket_create(struct ifnet *ifp, int multicast,
1049     const union vxlan_sockaddr *saddr, struct vxlan_socket **vsop)
1050 {
1051 	union vxlan_sockaddr laddr;
1052 	struct vxlan_socket *vso;
1053 	int error;
1054 
1055 	laddr = *saddr;
1056 
1057 	/*
1058 	 * If this socket will be multicast, then only the local port
1059 	 * must be specified when binding.
1060 	 */
1061 	if (multicast != 0) {
1062 		if (VXLAN_SOCKADDR_IS_IPV4(&laddr))
1063 			laddr.in4.sin_addr.s_addr = INADDR_ANY;
1064 #ifdef INET6
1065 		else
1066 			laddr.in6.sin6_addr = in6addr_any;
1067 #endif
1068 	}
1069 
1070 	vso = vxlan_socket_alloc(&laddr);
1071 	if (vso == NULL)
1072 		return (ENOMEM);
1073 
1074 	error = vxlan_socket_init(vso, ifp);
1075 	if (error)
1076 		goto fail;
1077 
1078 	error = vxlan_socket_bind(vso, ifp);
1079 	if (error)
1080 		goto fail;
1081 
1082 	/*
1083 	 * There is a small window between the bind completing and the
1084 	 * socket being inserted, during which a concurrent create may fail.
1085 	 * Let's not worry about that for now.
1086 	 */
1087 	vxlan_socket_insert(vso);
1088 	*vsop = vso;
1089 
1090 	return (0);
1091 
1092 fail:
1093 	vxlan_socket_destroy(vso);
1094 
1095 	return (error);
1096 }
1097 
1098 static void
1099 vxlan_socket_ifdetach(struct vxlan_socket *vso, struct ifnet *ifp,
1100     struct vxlan_softc_head *list)
1101 {
1102 	struct rm_priotracker tracker;
1103 	struct vxlan_softc *sc;
1104 	int i;
1105 
1106 	VXLAN_SO_RLOCK(vso, &tracker);
1107 	for (i = 0; i < VXLAN_SO_VNI_HASH_SIZE; i++) {
1108 		LIST_FOREACH(sc, &vso->vxlso_vni_hash[i], vxl_entry)
1109 			vxlan_ifdetach(sc, ifp, list);
1110 	}
1111 	VXLAN_SO_RUNLOCK(vso, &tracker);
1112 }
1113 
1114 static struct vxlan_socket *
1115 vxlan_socket_mc_lookup(const union vxlan_sockaddr *vxlsa)
1116 {
1117 	union vxlan_sockaddr laddr;
1118 	struct vxlan_socket *vso;
1119 
1120 	laddr = *vxlsa;
1121 
1122 	if (VXLAN_SOCKADDR_IS_IPV4(&laddr))
1123 		laddr.in4.sin_addr.s_addr = INADDR_ANY;
1124 #ifdef INET6
1125 	else
1126 		laddr.in6.sin6_addr = in6addr_any;
1127 #endif
1128 
1129 	vso = vxlan_socket_lookup(&laddr);
1130 
1131 	return (vso);
1132 }
1133 
1134 static int
1135 vxlan_sockaddr_mc_info_match(const struct vxlan_socket_mc_info *mc,
1136     const union vxlan_sockaddr *group, const union vxlan_sockaddr *local,
1137     int ifidx)
1138 {
1139 
1140 	if (!vxlan_sockaddr_in_any(local) &&
1141 	    !vxlan_sockaddr_in_equal(&mc->vxlsomc_saddr, &local->sa))
1142 		return (0);
1143 	if (!vxlan_sockaddr_in_equal(&mc->vxlsomc_gaddr, &group->sa))
1144 		return (0);
1145 	if (ifidx != 0 && ifidx != mc->vxlsomc_ifidx)
1146 		return (0);
1147 
1148 	return (1);
1149 }
1150 
1151 static int
1152 vxlan_socket_mc_join_group(struct vxlan_socket *vso,
1153     const union vxlan_sockaddr *group, const union vxlan_sockaddr *local,
1154     int *ifidx, union vxlan_sockaddr *source)
1155 {
1156 	struct sockopt sopt;
1157 	int error;
1158 
1159 	*source = *local;
1160 
1161 	if (VXLAN_SOCKADDR_IS_IPV4(group)) {
1162 		struct ip_mreq mreq;
1163 
1164 		mreq.imr_multiaddr = group->in4.sin_addr;
1165 		mreq.imr_interface = local->in4.sin_addr;
1166 
1167 		bzero(&sopt, sizeof(sopt));
1168 		sopt.sopt_dir = SOPT_SET;
1169 		sopt.sopt_level = IPPROTO_IP;
1170 		sopt.sopt_name = IP_ADD_MEMBERSHIP;
1171 		sopt.sopt_val = &mreq;
1172 		sopt.sopt_valsize = sizeof(mreq);
1173 		error = sosetopt(vso->vxlso_sock, &sopt);
1174 		if (error)
1175 			return (error);
1176 
1177 		/*
1178 		 * BMV: Ideally, there would be a formal way for us to get
1179 		 * the local interface that was selected based on the
1180 		 * imr_interface address. We could then update *ifidx so
1181 		 * vxlan_sockaddr_mc_info_match() would return a match for
1182 		 * later creates that explicitly set the multicast interface.
1183 		 *
1184 		 * If we really need to, we can of course look in the INP's
1185 		 * membership list:
1186 		 *     sotoinpcb(vso->vxlso_sock)->inp_moptions->
1187 		 *         imo_head[]->imf_inm->inm_ifp
1188 		 * similarly to imo_match_group().
1189 		 */
1190 		source->in4.sin_addr = local->in4.sin_addr;
1191 
1192 	} else if (VXLAN_SOCKADDR_IS_IPV6(group)) {
1193 		struct ipv6_mreq mreq;
1194 
1195 		mreq.ipv6mr_multiaddr = group->in6.sin6_addr;
1196 		mreq.ipv6mr_interface = *ifidx;
1197 
1198 		bzero(&sopt, sizeof(sopt));
1199 		sopt.sopt_dir = SOPT_SET;
1200 		sopt.sopt_level = IPPROTO_IPV6;
1201 		sopt.sopt_name = IPV6_JOIN_GROUP;
1202 		sopt.sopt_val = &mreq;
1203 		sopt.sopt_valsize = sizeof(mreq);
1204 		error = sosetopt(vso->vxlso_sock, &sopt);
1205 		if (error)
1206 			return (error);
1207 
1208 		/*
1209 		 * BMV: As with IPv4, we would really like to know what
1210 		 * interface in6p_lookup_mcast_ifp() selected.
1211 		 */
1212 	} else
1213 		error = EAFNOSUPPORT;
1214 
1215 	return (error);
1216 }
1217 
1218 static int
1219 vxlan_socket_mc_leave_group(struct vxlan_socket *vso,
1220     const union vxlan_sockaddr *group, const union vxlan_sockaddr *source,
1221     int ifidx)
1222 {
1223 	struct sockopt sopt;
1224 	int error;
1225 
1226 	bzero(&sopt, sizeof(sopt));
1227 	sopt.sopt_dir = SOPT_SET;
1228 
1229 	if (VXLAN_SOCKADDR_IS_IPV4(group)) {
1230 		struct ip_mreq mreq;
1231 
1232 		mreq.imr_multiaddr = group->in4.sin_addr;
1233 		mreq.imr_interface = source->in4.sin_addr;
1234 
1235 		sopt.sopt_level = IPPROTO_IP;
1236 		sopt.sopt_name = IP_DROP_MEMBERSHIP;
1237 		sopt.sopt_val = &mreq;
1238 		sopt.sopt_valsize = sizeof(mreq);
1239 		error = sosetopt(vso->vxlso_sock, &sopt);
1240 
1241 	} else if (VXLAN_SOCKADDR_IS_IPV6(group)) {
1242 		struct ipv6_mreq mreq;
1243 
1244 		mreq.ipv6mr_multiaddr = group->in6.sin6_addr;
1245 		mreq.ipv6mr_interface = ifidx;
1246 
1247 		sopt.sopt_level = IPPROTO_IPV6;
1248 		sopt.sopt_name = IPV6_LEAVE_GROUP;
1249 		sopt.sopt_val = &mreq;
1250 		sopt.sopt_valsize = sizeof(mreq);
1251 		error = sosetopt(vso->vxlso_sock, &sopt);
1252 
1253 	} else
1254 		error = EAFNOSUPPORT;
1255 
1256 	return (error);
1257 }
1258 
1259 static int
1260 vxlan_socket_mc_add_group(struct vxlan_socket *vso,
1261     const union vxlan_sockaddr *group, const union vxlan_sockaddr *local,
1262     int ifidx, int *idx)
1263 {
1264 	union vxlan_sockaddr source;
1265 	struct vxlan_socket_mc_info *mc;
1266 	int i, empty, error;
1267 
1268 	/*
1269 	 * Within a socket, the same multicast group may be used by multiple
1270 	 * interfaces, each with a different network identifier. But a socket
1271 	 * may only join a multicast group once, so keep track of the users
1272 	 * here.
1273 	 */
1274 
1275 	VXLAN_SO_WLOCK(vso);
1276 	for (empty = 0, i = 0; i < VXLAN_SO_MC_MAX_GROUPS; i++) {
1277 		mc = &vso->vxlso_mc[i];
1278 
1279 		if (mc->vxlsomc_gaddr.sa.sa_family == AF_UNSPEC) {
1280 			empty++;
1281 			continue;
1282 		}
1283 
1284 		if (vxlan_sockaddr_mc_info_match(mc, group, local, ifidx))
1285 			goto out;
1286 	}
1287 	VXLAN_SO_WUNLOCK(vso);
1288 
1289 	if (empty == 0)
1290 		return (ENOSPC);
1291 
1292 	error = vxlan_socket_mc_join_group(vso, group, local, &ifidx, &source);
1293 	if (error)
1294 		return (error);
1295 
1296 	VXLAN_SO_WLOCK(vso);
1297 	for (i = 0; i < VXLAN_SO_MC_MAX_GROUPS; i++) {
1298 		mc = &vso->vxlso_mc[i];
1299 
1300 		if (mc->vxlsomc_gaddr.sa.sa_family == AF_UNSPEC) {
1301 			vxlan_sockaddr_copy(&mc->vxlsomc_gaddr, &group->sa);
1302 			vxlan_sockaddr_copy(&mc->vxlsomc_saddr, &source.sa);
1303 			mc->vxlsomc_ifidx = ifidx;
1304 			goto out;
1305 		}
1306 	}
1307 	VXLAN_SO_WUNLOCK(vso);
1308 
1309 	error = vxlan_socket_mc_leave_group(vso, group, &source, ifidx);
1310 	MPASS(error == 0);
1311 
1312 	return (ENOSPC);
1313 
1314 out:
1315 	mc->vxlsomc_users++;
1316 	VXLAN_SO_WUNLOCK(vso);
1317 
1318 	*idx = i;
1319 
1320 	return (0);
1321 }
1322 
1323 static void
1324 vxlan_socket_mc_release_group_by_idx(struct vxlan_socket *vso, int idx)
1325 {
1326 	union vxlan_sockaddr group, source;
1327 	struct vxlan_socket_mc_info *mc;
1328 	int ifidx, leave;
1329 
1330 	KASSERT(idx >= 0 && idx < VXLAN_SO_MC_MAX_GROUPS,
1331 	    ("%s: vso %p idx %d out of bounds", __func__, vso, idx));
1332 
1333 	leave = 0;
1334 	mc = &vso->vxlso_mc[idx];
1335 
1336 	VXLAN_SO_WLOCK(vso);
1337 	mc->vxlsomc_users--;
1338 	if (mc->vxlsomc_users == 0) {
1339 		group = mc->vxlsomc_gaddr;
1340 		source = mc->vxlsomc_saddr;
1341 		ifidx = mc->vxlsomc_ifidx;
1342 		bzero(mc, sizeof(*mc));
1343 		leave = 1;
1344 	}
1345 	VXLAN_SO_WUNLOCK(vso);
1346 
1347 	if (leave != 0) {
1348 		/*
1349 		 * Our socket's membership in this group may have already
1350 		 * been removed if we joined through an interface that's
1351 		 * been detached.
1352 		 */
1353 		vxlan_socket_mc_leave_group(vso, &group, &source, ifidx);
1354 	}
1355 }
1356 
1357 static struct vxlan_softc *
1358 vxlan_socket_lookup_softc_locked(struct vxlan_socket *vso, uint32_t vni)
1359 {
1360 	struct vxlan_softc *sc;
1361 	uint32_t hash;
1362 
1363 	VXLAN_SO_LOCK_ASSERT(vso);
1364 	hash = VXLAN_SO_VNI_HASH(vni);
1365 
1366 	LIST_FOREACH(sc, &vso->vxlso_vni_hash[hash], vxl_entry) {
1367 		if (sc->vxl_vni == vni) {
1368 			VXLAN_ACQUIRE(sc);
1369 			break;
1370 		}
1371 	}
1372 
1373 	return (sc);
1374 }
1375 
1376 static struct vxlan_softc *
1377 vxlan_socket_lookup_softc(struct vxlan_socket *vso, uint32_t vni)
1378 {
1379 	struct rm_priotracker tracker;
1380 	struct vxlan_softc *sc;
1381 
1382 	VXLAN_SO_RLOCK(vso, &tracker);
1383 	sc = vxlan_socket_lookup_softc_locked(vso, vni);
1384 	VXLAN_SO_RUNLOCK(vso, &tracker);
1385 
1386 	return (sc);
1387 }
1388 
1389 static int
1390 vxlan_socket_insert_softc(struct vxlan_socket *vso, struct vxlan_softc *sc)
1391 {
1392 	struct vxlan_softc *tsc;
1393 	uint32_t vni, hash;
1394 
1395 	vni = sc->vxl_vni;
1396 	hash = VXLAN_SO_VNI_HASH(vni);
1397 
1398 	VXLAN_SO_WLOCK(vso);
1399 	tsc = vxlan_socket_lookup_softc_locked(vso, vni);
1400 	if (tsc != NULL) {
1401 		VXLAN_SO_WUNLOCK(vso);
1402 		vxlan_release(tsc);
1403 		return (EEXIST);
1404 	}
1405 
1406 	VXLAN_ACQUIRE(sc);
1407 	LIST_INSERT_HEAD(&vso->vxlso_vni_hash[hash], sc, vxl_entry);
1408 	VXLAN_SO_WUNLOCK(vso);
1409 
1410 	return (0);
1411 }
1412 
1413 static void
1414 vxlan_socket_remove_softc(struct vxlan_socket *vso, struct vxlan_softc *sc)
1415 {
1416 
1417 	VXLAN_SO_WLOCK(vso);
1418 	LIST_REMOVE(sc, vxl_entry);
1419 	VXLAN_SO_WUNLOCK(vso);
1420 
1421 	vxlan_release(sc);
1422 }
1423 
1424 static struct ifnet *
1425 vxlan_multicast_if_ref(struct vxlan_softc *sc, int ipv4)
1426 {
1427 	struct ifnet *ifp;
1428 
1429 	VXLAN_LOCK_ASSERT(sc);
1430 
1431 	if (ipv4 && sc->vxl_im4o != NULL)
1432 		ifp = sc->vxl_im4o->imo_multicast_ifp;
1433 	else if (!ipv4 && sc->vxl_im6o != NULL)
1434 		ifp = sc->vxl_im6o->im6o_multicast_ifp;
1435 	else
1436 		ifp = NULL;
1437 
1438 	if (ifp != NULL)
1439 		if_ref(ifp);
1440 
1441 	return (ifp);
1442 }
1443 
1444 static void
1445 vxlan_free_multicast(struct vxlan_softc *sc)
1446 {
1447 
1448 	if (sc->vxl_mc_ifp != NULL) {
1449 		if_rele(sc->vxl_mc_ifp);
1450 		sc->vxl_mc_ifp = NULL;
1451 		sc->vxl_mc_ifindex = 0;
1452 	}
1453 
1454 	if (sc->vxl_im4o != NULL) {
1455 		free(sc->vxl_im4o, M_VXLAN);
1456 		sc->vxl_im4o = NULL;
1457 	}
1458 
1459 	if (sc->vxl_im6o != NULL) {
1460 		free(sc->vxl_im6o, M_VXLAN);
1461 		sc->vxl_im6o = NULL;
1462 	}
1463 }
1464 
1465 static int
1466 vxlan_setup_multicast_interface(struct vxlan_softc *sc)
1467 {
1468 	struct ifnet *ifp;
1469 
1470 	ifp = ifunit_ref(sc->vxl_mc_ifname);
1471 	if (ifp == NULL) {
1472 		if_printf(sc->vxl_ifp, "multicast interface %s does "
1473 		    "not exist\n", sc->vxl_mc_ifname);
1474 		return (ENOENT);
1475 	}
1476 
1477 	if ((ifp->if_flags & IFF_MULTICAST) == 0) {
1478 		if_printf(sc->vxl_ifp, "interface %s does not support "
1479 		     "multicast\n", sc->vxl_mc_ifname);
1480 		if_rele(ifp);
1481 		return (ENOTSUP);
1482 	}
1483 
1484 	sc->vxl_mc_ifp = ifp;
1485 	sc->vxl_mc_ifindex = ifp->if_index;
1486 
1487 	return (0);
1488 }
1489 
1490 static int
1491 vxlan_setup_multicast(struct vxlan_softc *sc)
1492 {
1493 	const union vxlan_sockaddr *group;
1494 	int error;
1495 
1496 	group = &sc->vxl_dst_addr;
1497 	error = 0;
1498 
1499 	if (sc->vxl_mc_ifname[0] != '\0') {
1500 		error = vxlan_setup_multicast_interface(sc);
1501 		if (error)
1502 			return (error);
1503 	}
1504 
1505 	/*
1506 	 * Initialize a multicast options structure that is sufficiently
1507 	 * populated for use in the respective IP output routine. This
1508 	 * structure is typically stored in the socket, but our sockets
1509 	 * may be shared among multiple interfaces.
1510 	 */
1511 	if (VXLAN_SOCKADDR_IS_IPV4(group)) {
1512 		sc->vxl_im4o = malloc(sizeof(struct ip_moptions), M_VXLAN,
1513 		    M_ZERO | M_WAITOK);
1514 		sc->vxl_im4o->imo_multicast_ifp = sc->vxl_mc_ifp;
1515 		sc->vxl_im4o->imo_multicast_ttl = sc->vxl_ttl;
1516 		sc->vxl_im4o->imo_multicast_vif = -1;
1517 	} else if (VXLAN_SOCKADDR_IS_IPV6(group)) {
1518 		sc->vxl_im6o = malloc(sizeof(struct ip6_moptions), M_VXLAN,
1519 		    M_ZERO | M_WAITOK);
1520 		sc->vxl_im6o->im6o_multicast_ifp = sc->vxl_mc_ifp;
1521 		sc->vxl_im6o->im6o_multicast_hlim = sc->vxl_ttl;
1522 	}
1523 
1524 	return (error);
1525 }
1526 
1527 static int
1528 vxlan_setup_socket(struct vxlan_softc *sc)
1529 {
1530 	struct vxlan_socket *vso;
1531 	struct ifnet *ifp;
1532 	union vxlan_sockaddr *saddr, *daddr;
1533 	int multicast, error;
1534 
1535 	vso = NULL;
1536 	ifp = sc->vxl_ifp;
1537 	saddr = &sc->vxl_src_addr;
1538 	daddr = &sc->vxl_dst_addr;
1539 
1540 	multicast = vxlan_sockaddr_in_multicast(daddr);
1541 	MPASS(multicast != -1);
1542 	sc->vxl_vso_mc_index = -1;
1543 
1544 	/*
1545 	 * Try to create the socket. If that fails, attempt to use an
1546 	 * existing socket.
1547 	 */
1548 	error = vxlan_socket_create(ifp, multicast, saddr, &vso);
1549 	if (error) {
1550 		if (multicast != 0)
1551 			vso = vxlan_socket_mc_lookup(saddr);
1552 		else
1553 			vso = vxlan_socket_lookup(saddr);
1554 
1555 		if (vso == NULL) {
1556 			if_printf(ifp, "cannot create socket (error: %d), "
1557 			    "and no existing socket found\n", error);
1558 			goto out;
1559 		}
1560 	}
1561 
1562 	if (multicast != 0) {
1563 		error = vxlan_setup_multicast(sc);
1564 		if (error)
1565 			goto out;
1566 
1567 		error = vxlan_socket_mc_add_group(vso, daddr, saddr,
1568 		    sc->vxl_mc_ifindex, &sc->vxl_vso_mc_index);
1569 		if (error)
1570 			goto out;
1571 	}
1572 
1573 	sc->vxl_sock = vso;
1574 	error = vxlan_socket_insert_softc(vso, sc);
1575 	if (error) {
1576 		sc->vxl_sock = NULL;
1577 		if_printf(ifp, "network identifier %d already exists in "
1578 		    "this socket\n", sc->vxl_vni);
1579 		goto out;
1580 	}
1581 
1582 	return (0);
1583 
1584 out:
1585 	if (vso != NULL) {
1586 		if (sc->vxl_vso_mc_index != -1) {
1587 			vxlan_socket_mc_release_group_by_idx(vso,
1588 			    sc->vxl_vso_mc_index);
1589 			sc->vxl_vso_mc_index = -1;
1590 		}
1591 		if (multicast != 0)
1592 			vxlan_free_multicast(sc);
1593 		vxlan_socket_release(vso);
1594 	}
1595 
1596 	return (error);
1597 }
1598 
1599 #ifdef INET6
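/*
 * RFC 6935 allows a zero UDP checksum for tunneling protocols over IPv6.
 * The function below registers this vxlan's IPv6 UDP port as the single
 * port (V_zero_checksum_port, the rfc6935_port sysctl) for which the UDP
 * stack accepts zero-checksum datagrams, provided the source and
 * destination ports agree and no conflicting port is already set.
 */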
1600 static void
1601 vxlan_setup_zero_checksum_port(struct vxlan_softc *sc)
1602 {
1603 
1604 	if (!VXLAN_SOCKADDR_IS_IPV6(&sc->vxl_src_addr))
1605 		return;
1606 
1607 	MPASS(sc->vxl_src_addr.in6.sin6_port != 0);
1608 	MPASS(sc->vxl_dst_addr.in6.sin6_port != 0);
1609 
1610 	if (sc->vxl_src_addr.in6.sin6_port != sc->vxl_dst_addr.in6.sin6_port) {
1611 		if_printf(sc->vxl_ifp, "port %d in src address does not match "
1612 		    "port %d in dst address, rfc6935_port (%d) not updated.\n",
1613 		    ntohs(sc->vxl_src_addr.in6.sin6_port),
1614 		    ntohs(sc->vxl_dst_addr.in6.sin6_port),
1615 		    V_zero_checksum_port);
1616 		return;
1617 	}
1618 
1619 	if (V_zero_checksum_port != 0) {
1620 		if (V_zero_checksum_port !=
1621 		    ntohs(sc->vxl_src_addr.in6.sin6_port)) {
1622 			if_printf(sc->vxl_ifp, "rfc6935_port is already set to "
1623 			    "%d, cannot set it to %d.\n", V_zero_checksum_port,
1624 			    ntohs(sc->vxl_src_addr.in6.sin6_port));
1625 		}
1626 		return;
1627 	}
1628 
1629 	V_zero_checksum_port = ntohs(sc->vxl_src_addr.in6.sin6_port);
1630 	if_printf(sc->vxl_ifp, "rfc6935_port set to %d\n",
1631 	    V_zero_checksum_port);
1632 }
1633 #endif
1634 
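/*
 * For reference, sizeof(struct vxlanudphdr) is 8 + 8 = 16 bytes, so the
 * function below sets if_hdrlen to 14 + 16 + 20 = 50 bytes for an IPv4
 * destination (default MTU 1500 - 50 = 1450) and to 14 + 16 + 40 = 70
 * bytes for IPv6 (default MTU 1430, the 70-byte overhead cited in the
 * MAX_VXLAN_NEST comment).
 */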
1635 static void
1636 vxlan_setup_interface_hdrlen(struct vxlan_softc *sc)
1637 {
1638 	struct ifnet *ifp;
1639 
1640 	VXLAN_LOCK_WASSERT(sc);
1641 
1642 	ifp = sc->vxl_ifp;
1643 	ifp->if_hdrlen = ETHER_HDR_LEN + sizeof(struct vxlanudphdr);
1644 
1645 	if (VXLAN_SOCKADDR_IS_IPV4(&sc->vxl_dst_addr) != 0)
1646 		ifp->if_hdrlen += sizeof(struct ip);
1647 	else if (VXLAN_SOCKADDR_IS_IPV6(&sc->vxl_dst_addr) != 0)
1648 		ifp->if_hdrlen += sizeof(struct ip6_hdr);
1649 
1650 	if ((sc->vxl_flags & VXLAN_FLAG_USER_MTU) == 0)
1651 		ifp->if_mtu = ETHERMTU - ifp->if_hdrlen;
1652 }
1653 
1654 static int
1655 vxlan_valid_init_config(struct vxlan_softc *sc)
1656 {
1657 	const char *reason;
1658 
1659 	if (vxlan_check_vni(sc->vxl_vni) != 0) {
1660 		reason = "invalid virtual network identifier specified";
1661 		goto fail;
1662 	}
1663 
1664 	if (vxlan_sockaddr_supported(&sc->vxl_src_addr, 1) == 0) {
1665 		reason = "source address type is not supported";
1666 		goto fail;
1667 	}
1668 
1669 	if (vxlan_sockaddr_supported(&sc->vxl_dst_addr, 0) == 0) {
1670 		reason = "destination address type is not supported";
1671 		goto fail;
1672 	}
1673 
1674 	if (vxlan_sockaddr_in_any(&sc->vxl_dst_addr) != 0) {
1675 		reason = "no valid destination address specified";
1676 		goto fail;
1677 	}
1678 
1679 	if (vxlan_sockaddr_in_multicast(&sc->vxl_dst_addr) == 0 &&
1680 	    sc->vxl_mc_ifname[0] != '\0') {
1681 		reason = "can only specify interface with a group address";
1682 		goto fail;
1683 	}
1684 
1685 	if (vxlan_sockaddr_in_any(&sc->vxl_src_addr) == 0) {
1686 		if (VXLAN_SOCKADDR_IS_IPV4(&sc->vxl_src_addr) ^
1687 		    VXLAN_SOCKADDR_IS_IPV4(&sc->vxl_dst_addr)) {
1688 			reason = "source and destination address must both "
1689 			    "be either IPv4 or IPv6";
1690 			goto fail;
1691 		}
1692 	}
1693 
1694 	if (sc->vxl_src_addr.in4.sin_port == 0) {
1695 		reason = "local port not specified";
1696 		goto fail;
1697 	}
1698 
1699 	if (sc->vxl_dst_addr.in4.sin_port == 0) {
1700 		reason = "remote port not specified";
1701 		goto fail;
1702 	}
1703 
1704 	return (0);
1705 
1706 fail:
1707 	if_printf(sc->vxl_ifp, "cannot initialize interface: %s\n", reason);
1708 	return (EINVAL);
1709 }
1710 
1711 static void
1712 vxlan_init_wait(struct vxlan_softc *sc)
1713 {
1714 
1715 	VXLAN_LOCK_WASSERT(sc);
1716 	while (sc->vxl_flags & VXLAN_FLAG_INIT)
1717 		rm_sleep(sc, &sc->vxl_lock, 0, "vxlint", hz);
1718 }
1719 
1720 static void
1721 vxlan_init_complete(struct vxlan_softc *sc)
1722 {
1723 
1724 	VXLAN_WLOCK(sc);
1725 	sc->vxl_flags &= ~VXLAN_FLAG_INIT;
1726 	wakeup(sc);
1727 	VXLAN_WUNLOCK(sc);
1728 }
1729 
1730 static void
1731 vxlan_init(void *xsc)
1732 {
1733 	static const uint8_t empty_mac[ETHER_ADDR_LEN];
1734 	struct vxlan_softc *sc;
1735 	struct ifnet *ifp;
1736 
1737 	sc = xsc;
1738 	ifp = sc->vxl_ifp;
1739 
1740 	sx_xlock(&vxlan_sx);
1741 	VXLAN_WLOCK(sc);
1742 	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1743 		VXLAN_WUNLOCK(sc);
1744 		sx_xunlock(&vxlan_sx);
1745 		return;
1746 	}
1747 	sc->vxl_flags |= VXLAN_FLAG_INIT;
1748 	VXLAN_WUNLOCK(sc);
1749 
1750 	if (vxlan_valid_init_config(sc) != 0)
1751 		goto out;
1752 
1753 	if (vxlan_setup_socket(sc) != 0)
1754 		goto out;
1755 
1756 #ifdef INET6
1757 	vxlan_setup_zero_checksum_port(sc);
1758 #endif
1759 
1760 	/* Initialize the default forwarding entry. */
1761 	vxlan_ftable_entry_init(sc, &sc->vxl_default_fe, empty_mac,
1762 	    &sc->vxl_dst_addr.sa, VXLAN_FE_FLAG_STATIC);
1763 
1764 	VXLAN_WLOCK(sc);
1765 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1766 	callout_reset(&sc->vxl_callout, vxlan_ftable_prune_period * hz,
1767 	    vxlan_timer, sc);
1768 	VXLAN_WUNLOCK(sc);
1769 
1770 	if_link_state_change(ifp, LINK_STATE_UP);
1771 
1772 	EVENTHANDLER_INVOKE(vxlan_start, ifp, sc->vxl_src_addr.in4.sin_family,
1773 	    ntohs(sc->vxl_src_addr.in4.sin_port));
1774 out:
1775 	vxlan_init_complete(sc);
1776 	sx_xunlock(&vxlan_sx);
1777 }
1778 
1779 static void
1780 vxlan_release(struct vxlan_softc *sc)
1781 {
1782 
1783 	/*
1784 	 * The softc may be destroyed as soon as we release our reference,
1785 	 * so we cannot serialize the wakeup with the softc lock. We use a
1786 	 * timeout in our sleeps so a missed wakeup is unfortunate but not
1787 	 * fatal.
1788 	 */
1789 	if (VXLAN_RELEASE(sc) != 0)
1790 		wakeup(sc);
1791 }
1792 
1793 static void
1794 vxlan_teardown_wait(struct vxlan_softc *sc)
1795 {
1796 
1797 	VXLAN_LOCK_WASSERT(sc);
1798 	while (sc->vxl_flags & VXLAN_FLAG_TEARDOWN)
1799 		rm_sleep(sc, &sc->vxl_lock, 0, "vxltrn", hz);
1800 }
1801 
1802 static void
1803 vxlan_teardown_complete(struct vxlan_softc *sc)
1804 {
1805 
1806 	VXLAN_WLOCK(sc);
1807 	sc->vxl_flags &= ~VXLAN_FLAG_TEARDOWN;
1808 	wakeup(sc);
1809 	VXLAN_WUNLOCK(sc);
1810 }
1811 
1812 static void
1813 vxlan_teardown_locked(struct vxlan_softc *sc)
1814 {
1815 	struct ifnet *ifp;
1816 	struct vxlan_socket *vso;
1817 	bool running;
1818 
1819 	sx_assert(&vxlan_sx, SA_XLOCKED);
1820 	VXLAN_LOCK_WASSERT(sc);
1821 	MPASS(sc->vxl_flags & VXLAN_FLAG_TEARDOWN);
1822 
1823 	ifp = sc->vxl_ifp;
1824 	ifp->if_flags &= ~IFF_UP;
1825 	running = (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0;
1826 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1827 	callout_stop(&sc->vxl_callout);
1828 	vso = sc->vxl_sock;
1829 	sc->vxl_sock = NULL;
1830 
1831 	VXLAN_WUNLOCK(sc);
1832 	if_link_state_change(ifp, LINK_STATE_DOWN);
1833 	if (running)
1834 		EVENTHANDLER_INVOKE(vxlan_stop, ifp,
1835 		    sc->vxl_src_addr.in4.sin_family,
1836 		    ntohs(sc->vxl_src_addr.in4.sin_port));
1837 
1838 	if (vso != NULL) {
1839 		vxlan_socket_remove_softc(vso, sc);
1840 
1841 		if (sc->vxl_vso_mc_index != -1) {
1842 			vxlan_socket_mc_release_group_by_idx(vso,
1843 			    sc->vxl_vso_mc_index);
1844 			sc->vxl_vso_mc_index = -1;
1845 		}
1846 	}
1847 
1848 	VXLAN_WLOCK(sc);
1849 	while (sc->vxl_refcnt != 0)
1850 		rm_sleep(sc, &sc->vxl_lock, 0, "vxldrn", hz);
1851 	VXLAN_WUNLOCK(sc);
1852 
1853 	callout_drain(&sc->vxl_callout);
1854 
1855 	vxlan_free_multicast(sc);
1856 	if (vso != NULL)
1857 		vxlan_socket_release(vso);
1858 
1859 	vxlan_teardown_complete(sc);
1860 }
1861 
1862 static void
1863 vxlan_teardown(struct vxlan_softc *sc)
1864 {
1865 
1866 	sx_xlock(&vxlan_sx);
1867 	VXLAN_WLOCK(sc);
1868 	if (sc->vxl_flags & VXLAN_FLAG_TEARDOWN) {
1869 		vxlan_teardown_wait(sc);
1870 		VXLAN_WUNLOCK(sc);
1871 		sx_xunlock(&vxlan_sx);
1872 		return;
1873 	}
1874 
1875 	sc->vxl_flags |= VXLAN_FLAG_TEARDOWN;
1876 	vxlan_teardown_locked(sc);
1877 	sx_xunlock(&vxlan_sx);
1878 }
1879 
1880 static void
1881 vxlan_ifdetach(struct vxlan_softc *sc, struct ifnet *ifp,
1882     struct vxlan_softc_head *list)
1883 {
1884 
1885 	VXLAN_WLOCK(sc);
1886 
1887 	if (sc->vxl_mc_ifp != ifp)
1888 		goto out;
1889 	if (sc->vxl_flags & VXLAN_FLAG_TEARDOWN)
1890 		goto out;
1891 
1892 	sc->vxl_flags |= VXLAN_FLAG_TEARDOWN;
1893 	LIST_INSERT_HEAD(list, sc, vxl_ifdetach_list);
1894 
1895 out:
1896 	VXLAN_WUNLOCK(sc);
1897 }
1898 
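/*
 * Callout handler; runs with the softc lock held since the callout was
 * initialized with it.  Expire stale forwarding table entries and
 * reschedule.
 */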
1899 static void
1900 vxlan_timer(void *xsc)
1901 {
1902 	struct vxlan_softc *sc;
1903 
1904 	sc = xsc;
1905 	VXLAN_LOCK_WASSERT(sc);
1906 
1907 	vxlan_ftable_expire(sc);
1908 	callout_schedule(&sc->vxl_callout, vxlan_ftable_prune_period * hz);
1909 }
1910 
1911 static int
1912 vxlan_ioctl_ifflags(struct vxlan_softc *sc)
1913 {
1914 	struct ifnet *ifp;
1915 
1916 	ifp = sc->vxl_ifp;
1917 
1918 	if (ifp->if_flags & IFF_UP) {
1919 		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1920 			vxlan_init(sc);
1921 	} else {
1922 		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1923 			vxlan_teardown(sc);
1924 	}
1925 
1926 	return (0);
1927 }
1928 
1929 static int
1930 vxlan_ctrl_get_config(struct vxlan_softc *sc, void *arg)
1931 {
1932 	struct rm_priotracker tracker;
1933 	struct ifvxlancfg *cfg;
1934 
1935 	cfg = arg;
1936 	bzero(cfg, sizeof(*cfg));
1937 
1938 	VXLAN_RLOCK(sc, &tracker);
1939 	cfg->vxlc_vni = sc->vxl_vni;
1940 	memcpy(&cfg->vxlc_local_sa, &sc->vxl_src_addr,
1941 	    sizeof(union vxlan_sockaddr));
1942 	memcpy(&cfg->vxlc_remote_sa, &sc->vxl_dst_addr,
1943 	    sizeof(union vxlan_sockaddr));
1944 	cfg->vxlc_mc_ifindex = sc->vxl_mc_ifindex;
1945 	cfg->vxlc_ftable_cnt = sc->vxl_ftable_cnt;
1946 	cfg->vxlc_ftable_max = sc->vxl_ftable_max;
1947 	cfg->vxlc_ftable_timeout = sc->vxl_ftable_timeout;
1948 	cfg->vxlc_port_min = sc->vxl_min_port;
1949 	cfg->vxlc_port_max = sc->vxl_max_port;
1950 	cfg->vxlc_learn = (sc->vxl_flags & VXLAN_FLAG_LEARN) != 0;
1951 	cfg->vxlc_ttl = sc->vxl_ttl;
1952 	VXLAN_RUNLOCK(sc, &tracker);
1953 
1954 #ifdef INET6
1955 	if (VXLAN_SOCKADDR_IS_IPV6(&cfg->vxlc_local_sa))
1956 		sa6_recoverscope(&cfg->vxlc_local_sa.in6);
1957 	if (VXLAN_SOCKADDR_IS_IPV6(&cfg->vxlc_remote_sa))
1958 		sa6_recoverscope(&cfg->vxlc_remote_sa.in6);
1959 #endif
1960 
1961 	return (0);
1962 }
1963 
1964 static int
1965 vxlan_ctrl_set_vni(struct vxlan_softc *sc, void *arg)
1966 {
1967 	struct ifvxlancmd *cmd;
1968 	int error;
1969 
1970 	cmd = arg;
1971 
1972 	if (vxlan_check_vni(cmd->vxlcmd_vni) != 0)
1973 		return (EINVAL);
1974 
1975 	VXLAN_WLOCK(sc);
1976 	if (vxlan_can_change_config(sc)) {
1977 		sc->vxl_vni = cmd->vxlcmd_vni;
1978 		error = 0;
1979 	} else
1980 		error = EBUSY;
1981 	VXLAN_WUNLOCK(sc);
1982 
1983 	return (error);
1984 }
1985 
1986 static int
1987 vxlan_ctrl_set_local_addr(struct vxlan_softc *sc, void *arg)
1988 {
1989 	struct ifvxlancmd *cmd;
1990 	union vxlan_sockaddr *vxlsa;
1991 	int error;
1992 
1993 	cmd = arg;
1994 	vxlsa = &cmd->vxlcmd_sa;
1995 
1996 	if (!VXLAN_SOCKADDR_IS_IPV46(vxlsa))
1997 		return (EINVAL);
1998 	if (vxlan_sockaddr_in_multicast(vxlsa) != 0)
1999 		return (EINVAL);
2000 	if (VXLAN_SOCKADDR_IS_IPV6(vxlsa)) {
2001 		error = vxlan_sockaddr_in6_embedscope(vxlsa);
2002 		if (error)
2003 			return (error);
2004 	}
2005 
2006 	VXLAN_WLOCK(sc);
2007 	if (vxlan_can_change_config(sc)) {
2008 		vxlan_sockaddr_in_copy(&sc->vxl_src_addr, &vxlsa->sa);
2009 		vxlan_set_hwcaps(sc);
2010 		error = 0;
2011 	} else
2012 		error = EBUSY;
2013 	VXLAN_WUNLOCK(sc);
2014 
2015 	return (error);
2016 }
2017 
2018 static int
2019 vxlan_ctrl_set_remote_addr(struct vxlan_softc *sc, void *arg)
2020 {
2021 	struct ifvxlancmd *cmd;
2022 	union vxlan_sockaddr *vxlsa;
2023 	int error;
2024 
2025 	cmd = arg;
2026 	vxlsa = &cmd->vxlcmd_sa;
2027 
2028 	if (!VXLAN_SOCKADDR_IS_IPV46(vxlsa))
2029 		return (EINVAL);
2030 	if (VXLAN_SOCKADDR_IS_IPV6(vxlsa)) {
2031 		error = vxlan_sockaddr_in6_embedscope(vxlsa);
2032 		if (error)
2033 			return (error);
2034 	}
2035 
2036 	VXLAN_WLOCK(sc);
2037 	if (vxlan_can_change_config(sc)) {
2038 		vxlan_sockaddr_in_copy(&sc->vxl_dst_addr, &vxlsa->sa);
2039 		vxlan_setup_interface_hdrlen(sc);
2040 		error = 0;
2041 	} else
2042 		error = EBUSY;
2043 	VXLAN_WUNLOCK(sc);
2044 
2045 	return (error);
2046 }
2047 
2048 static int
2049 vxlan_ctrl_set_local_port(struct vxlan_softc *sc, void *arg)
2050 {
2051 	struct ifvxlancmd *cmd;
2052 	int error;
2053 
2054 	cmd = arg;
2055 
2056 	if (cmd->vxlcmd_port == 0)
2057 		return (EINVAL);
2058 
2059 	VXLAN_WLOCK(sc);
2060 	if (vxlan_can_change_config(sc)) {
2061 		sc->vxl_src_addr.in4.sin_port = htons(cmd->vxlcmd_port);
2062 		error = 0;
2063 	} else
2064 		error = EBUSY;
2065 	VXLAN_WUNLOCK(sc);
2066 
2067 	return (error);
2068 }
2069 
2070 static int
2071 vxlan_ctrl_set_remote_port(struct vxlan_softc *sc, void *arg)
2072 {
2073 	struct ifvxlancmd *cmd;
2074 	int error;
2075 
2076 	cmd = arg;
2077 
2078 	if (cmd->vxlcmd_port == 0)
2079 		return (EINVAL);
2080 
2081 	VXLAN_WLOCK(sc);
2082 	if (vxlan_can_change_config(sc)) {
2083 		sc->vxl_dst_addr.in4.sin_port = htons(cmd->vxlcmd_port);
2084 		error = 0;
2085 	} else
2086 		error = EBUSY;
2087 	VXLAN_WUNLOCK(sc);
2088 
2089 	return (error);
2090 }
2091 
2092 static int
2093 vxlan_ctrl_set_port_range(struct vxlan_softc *sc, void *arg)
2094 {
2095 	struct ifvxlancmd *cmd;
2096 	uint16_t min, max;
2097 	int error;
2098 
2099 	cmd = arg;
2100 	min = cmd->vxlcmd_port_min;
2101 	max = cmd->vxlcmd_port_max;
2102 
2103 	if (max < min)
2104 		return (EINVAL);
2105 
2106 	VXLAN_WLOCK(sc);
2107 	if (vxlan_can_change_config(sc)) {
2108 		sc->vxl_min_port = min;
2109 		sc->vxl_max_port = max;
2110 		error = 0;
2111 	} else
2112 		error = EBUSY;
2113 	VXLAN_WUNLOCK(sc);
2114 
2115 	return (error);
2116 }
2117 
2118 static int
2119 vxlan_ctrl_set_ftable_timeout(struct vxlan_softc *sc, void *arg)
2120 {
2121 	struct ifvxlancmd *cmd;
2122 	int error;
2123 
2124 	cmd = arg;
2125 
2126 	VXLAN_WLOCK(sc);
2127 	if (vxlan_check_ftable_timeout(cmd->vxlcmd_ftable_timeout) == 0) {
2128 		sc->vxl_ftable_timeout = cmd->vxlcmd_ftable_timeout;
2129 		error = 0;
2130 	} else
2131 		error = EINVAL;
2132 	VXLAN_WUNLOCK(sc);
2133 
2134 	return (error);
2135 }
2136 
2137 static int
2138 vxlan_ctrl_set_ftable_max(struct vxlan_softc *sc, void *arg)
2139 {
2140 	struct ifvxlancmd *cmd;
2141 	int error;
2142 
2143 	cmd = arg;
2144 
2145 	VXLAN_WLOCK(sc);
2146 	if (vxlan_check_ftable_max(cmd->vxlcmd_ftable_max) == 0) {
2147 		sc->vxl_ftable_max = cmd->vxlcmd_ftable_max;
2148 		error = 0;
2149 	} else
2150 		error = EINVAL;
2151 	VXLAN_WUNLOCK(sc);
2152 
2153 	return (error);
2154 }
2155 
2156 static int
2157 vxlan_ctrl_set_multicast_if(struct vxlan_softc *sc, void *arg)
2158 {
2159 	struct ifvxlancmd *cmd;
2160 	int error;
2161 
2162 	cmd = arg;
2163 
2164 	VXLAN_WLOCK(sc);
2165 	if (vxlan_can_change_config(sc)) {
2166 		strlcpy(sc->vxl_mc_ifname, cmd->vxlcmd_ifname, IFNAMSIZ);
2167 		vxlan_set_hwcaps(sc);
2168 		error = 0;
2169 	} else
2170 		error = EBUSY;
2171 	VXLAN_WUNLOCK(sc);
2172 
2173 	return (error);
2174 }
2175 
2176 static int
2177 vxlan_ctrl_set_ttl(struct vxlan_softc *sc, void *arg)
2178 {
2179 	struct ifvxlancmd *cmd;
2180 	int error;
2181 
2182 	cmd = arg;
2183 
2184 	VXLAN_WLOCK(sc);
2185 	if (vxlan_check_ttl(cmd->vxlcmd_ttl) == 0) {
2186 		sc->vxl_ttl = cmd->vxlcmd_ttl;
2187 		if (sc->vxl_im4o != NULL)
2188 			sc->vxl_im4o->imo_multicast_ttl = sc->vxl_ttl;
2189 		if (sc->vxl_im6o != NULL)
2190 			sc->vxl_im6o->im6o_multicast_hlim = sc->vxl_ttl;
2191 		error = 0;
2192 	} else
2193 		error = EINVAL;
2194 	VXLAN_WUNLOCK(sc);
2195 
2196 	return (error);
2197 }
2198 
2199 static int
2200 vxlan_ctrl_set_learn(struct vxlan_softc *sc, void *arg)
2201 {
2202 	struct ifvxlancmd *cmd;
2203 
2204 	cmd = arg;
2205 
2206 	VXLAN_WLOCK(sc);
2207 	if (cmd->vxlcmd_flags & VXLAN_CMD_FLAG_LEARN)
2208 		sc->vxl_flags |= VXLAN_FLAG_LEARN;
2209 	else
2210 		sc->vxl_flags &= ~VXLAN_FLAG_LEARN;
2211 	VXLAN_WUNLOCK(sc);
2212 
2213 	return (0);
2214 }
2215 
2216 static int
2217 vxlan_ctrl_ftable_entry_add(struct vxlan_softc *sc, void *arg)
2218 {
2219 	union vxlan_sockaddr vxlsa;
2220 	struct ifvxlancmd *cmd;
2221 	struct vxlan_ftable_entry *fe;
2222 	int error;
2223 
2224 	cmd = arg;
2225 	vxlsa = cmd->vxlcmd_sa;
2226 
2227 	if (!VXLAN_SOCKADDR_IS_IPV46(&vxlsa))
2228 		return (EINVAL);
2229 	if (vxlan_sockaddr_in_any(&vxlsa) != 0)
2230 		return (EINVAL);
2231 	if (vxlan_sockaddr_in_multicast(&vxlsa) != 0)
2232 		return (EINVAL);
2233 	/* BMV: We could support both IPv4 and IPv6 later. */
2234 	if (vxlsa.sa.sa_family != sc->vxl_dst_addr.sa.sa_family)
2235 		return (EAFNOSUPPORT);
2236 
2237 	if (VXLAN_SOCKADDR_IS_IPV6(&vxlsa)) {
2238 		error = vxlan_sockaddr_in6_embedscope(&vxlsa);
2239 		if (error)
2240 			return (error);
2241 	}
2242 
2243 	fe = vxlan_ftable_entry_alloc();
2244 	if (fe == NULL)
2245 		return (ENOMEM);
2246 
2247 	if (vxlsa.in4.sin_port == 0)
2248 		vxlsa.in4.sin_port = sc->vxl_dst_addr.in4.sin_port;
2249 
2250 	vxlan_ftable_entry_init(sc, fe, cmd->vxlcmd_mac, &vxlsa.sa,
2251 	    VXLAN_FE_FLAG_STATIC);
2252 
2253 	VXLAN_WLOCK(sc);
2254 	error = vxlan_ftable_entry_insert(sc, fe);
2255 	VXLAN_WUNLOCK(sc);
2256 
2257 	if (error)
2258 		vxlan_ftable_entry_free(fe);
2259 
2260 	return (error);
2261 }
2262 
2263 static int
2264 vxlan_ctrl_ftable_entry_rem(struct vxlan_softc *sc, void *arg)
2265 {
2266 	struct ifvxlancmd *cmd;
2267 	struct vxlan_ftable_entry *fe;
2268 	int error;
2269 
2270 	cmd = arg;
2271 
2272 	VXLAN_WLOCK(sc);
2273 	fe = vxlan_ftable_entry_lookup(sc, cmd->vxlcmd_mac);
2274 	if (fe != NULL) {
2275 		vxlan_ftable_entry_destroy(sc, fe);
2276 		error = 0;
2277 	} else
2278 		error = ENOENT;
2279 	VXLAN_WUNLOCK(sc);
2280 
2281 	return (error);
2282 }
2283 
2284 static int
2285 vxlan_ctrl_flush(struct vxlan_softc *sc, void *arg)
2286 {
2287 	struct ifvxlancmd *cmd;
2288 	int all;
2289 
2290 	cmd = arg;
2291 	all = cmd->vxlcmd_flags & VXLAN_CMD_FLAG_FLUSH_ALL;
2292 
2293 	VXLAN_WLOCK(sc);
2294 	vxlan_ftable_flush(sc, all);
2295 	VXLAN_WUNLOCK(sc);
2296 
2297 	return (0);
2298 }
2299 
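/*
 * Dispatch SIOCGDRVSPEC/SIOCSDRVSPEC subcommands through the control
 * table: validate the command index, direction and argument size, check
 * privileges when required, and copy the arguments in and out as needed.
 */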
2300 static int
2301 vxlan_ioctl_drvspec(struct vxlan_softc *sc, struct ifdrv *ifd, int get)
2302 {
2303 	const struct vxlan_control *vc;
2304 	union {
2305 		struct ifvxlancfg	cfg;
2306 		struct ifvxlancmd	cmd;
2307 	} args;
2308 	int out, error;
2309 
2310 	if (ifd->ifd_cmd >= vxlan_control_table_size)
2311 		return (EINVAL);
2312 
2313 	bzero(&args, sizeof(args));
2314 	vc = &vxlan_control_table[ifd->ifd_cmd];
2315 	out = (vc->vxlc_flags & VXLAN_CTRL_FLAG_COPYOUT) != 0;
2316 
2317 	if ((get != 0 && out == 0) || (get == 0 && out != 0))
2318 		return (EINVAL);
2319 
2320 	if (vc->vxlc_flags & VXLAN_CTRL_FLAG_SUSER) {
2321 		error = priv_check(curthread, PRIV_NET_VXLAN);
2322 		if (error)
2323 			return (error);
2324 	}
2325 
2326 	if (ifd->ifd_len != vc->vxlc_argsize ||
2327 	    ifd->ifd_len > sizeof(args))
2328 		return (EINVAL);
2329 
2330 	if (vc->vxlc_flags & VXLAN_CTRL_FLAG_COPYIN) {
2331 		error = copyin(ifd->ifd_data, &args, ifd->ifd_len);
2332 		if (error)
2333 			return (error);
2334 	}
2335 
2336 	error = vc->vxlc_func(sc, &args);
2337 	if (error)
2338 		return (error);
2339 
2340 	if (vc->vxlc_flags & VXLAN_CTRL_FLAG_COPYOUT) {
2341 		error = copyout(&args, ifd->ifd_data, ifd->ifd_len);
2342 		if (error)
2343 			return (error);
2344 	}
2345 
2346 	return (0);
2347 }
2348 
2349 static int
2350 vxlan_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
2351 {
2352 	struct rm_priotracker tracker;
2353 	struct vxlan_softc *sc;
2354 	struct ifreq *ifr;
2355 	struct ifdrv *ifd;
2356 	int error;
2357 
2358 	sc = ifp->if_softc;
2359 	ifr = (struct ifreq *) data;
2360 	ifd = (struct ifdrv *) data;
2361 
2362 	error = 0;
2363 
2364 	switch (cmd) {
2365 	case SIOCADDMULTI:
2366 	case SIOCDELMULTI:
2367 		break;
2368 
2369 	case SIOCGDRVSPEC:
2370 	case SIOCSDRVSPEC:
2371 		error = vxlan_ioctl_drvspec(sc, ifd, cmd == SIOCGDRVSPEC);
2372 		break;
2373 
2374 	case SIOCSIFFLAGS:
2375 		error = vxlan_ioctl_ifflags(sc);
2376 		break;
2377 
2378 	case SIOCSIFMEDIA:
2379 	case SIOCGIFMEDIA:
2380 		error = ifmedia_ioctl(ifp, ifr, &sc->vxl_media, cmd);
2381 		break;
2382 
2383 	case SIOCSIFMTU:
2384 		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > VXLAN_MAX_MTU) {
2385 			error = EINVAL;
2386 		} else {
2387 			VXLAN_WLOCK(sc);
2388 			ifp->if_mtu = ifr->ifr_mtu;
2389 			sc->vxl_flags |= VXLAN_FLAG_USER_MTU;
2390 			VXLAN_WUNLOCK(sc);
2391 		}
2392 		break;
2393 
2394 	case SIOCSIFCAP:
2395 		VXLAN_WLOCK(sc);
2396 		error = vxlan_set_reqcap(sc, ifp, ifr->ifr_reqcap);
2397 		if (error == 0)
2398 			vxlan_set_hwcaps(sc);
2399 		VXLAN_WUNLOCK(sc);
2400 		break;
2401 
2402 	case SIOCGTUNFIB:
2403 		VXLAN_RLOCK(sc, &tracker);
2404 		ifr->ifr_fib = sc->vxl_fibnum;
2405 		VXLAN_RUNLOCK(sc, &tracker);
2406 		break;
2407 
2408 	case SIOCSTUNFIB:
2409 		if ((error = priv_check(curthread, PRIV_NET_VXLAN)) != 0)
2410 			break;
2411 
2412 		if (ifr->ifr_fib >= rt_numfibs)
2413 			error = EINVAL;
2414 		else {
2415 			VXLAN_WLOCK(sc);
2416 			sc->vxl_fibnum = ifr->ifr_fib;
2417 			VXLAN_WUNLOCK(sc);
2418 		}
2419 		break;
2420 
2421 	default:
2422 		error = ether_ioctl(ifp, cmd, data);
2423 		break;
2424 	}
2425 
2426 	return (error);
2427 }
2428 
2429 #if defined(INET) || defined(INET6)
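/*
 * Derive the outer UDP source port from the inner frame so the underlay
 * network can spread flows across paths: prefer the mbuf flow id, else
 * hash the inner Ethernet header, and map the result into the configured
 * port range.
 */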
2430 static uint16_t
2431 vxlan_pick_source_port(struct vxlan_softc *sc, struct mbuf *m)
2432 {
2433 	int range;
2434 	uint32_t hash;
2435 
2436 	range = sc->vxl_max_port - sc->vxl_min_port + 1;
2437 
2438 	if (M_HASHTYPE_ISHASH(m))
2439 		hash = m->m_pkthdr.flowid;
2440 	else
2441 		hash = jenkins_hash(m->m_data, ETHER_HDR_LEN,
2442 		    sc->vxl_port_hash_key);
2443 
2444 	return (sc->vxl_min_port + (hash % range));
2445 }
2446 
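/*
 * Write the outer UDP and VXLAN headers at the given offset.  The UDP
 * checksum is left at zero here; the IPv6 transmit path arranges for a
 * checksum later when one is required.
 */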
2447 static void
2448 vxlan_encap_header(struct vxlan_softc *sc, struct mbuf *m, int ipoff,
2449     uint16_t srcport, uint16_t dstport)
2450 {
2451 	struct vxlanudphdr *hdr;
2452 	struct udphdr *udph;
2453 	struct vxlan_header *vxh;
2454 	int len;
2455 
2456 	len = m->m_pkthdr.len - ipoff;
2457 	MPASS(len >= sizeof(struct vxlanudphdr));
2458 	hdr = mtodo(m, ipoff);
2459 
2460 	udph = &hdr->vxlh_udp;
2461 	udph->uh_sport = srcport;
2462 	udph->uh_dport = dstport;
2463 	udph->uh_ulen = htons(len);
2464 	udph->uh_sum = 0;
2465 
2466 	vxh = &hdr->vxlh_hdr;
2467 	vxh->vxlh_flags = htonl(VXLAN_HDR_FLAGS_VALID_VNI);
2468 	vxh->vxlh_vni = htonl(sc->vxl_vni << VXLAN_HDR_VNI_SHIFT);
2469 }
2470 #endif
2471 
2472 #if defined(INET6) || defined(INET)
2473 /*
2474  * Return the CSUM_INNER_* equivalent of CSUM_* caps.
2475  */
2476 static uint32_t
2477 csum_flags_to_inner_flags(uint32_t csum_flags_in, const uint32_t encap)
2478 {
2479 	uint32_t csum_flags = encap;
2480 	const uint32_t v4 = CSUM_IP | CSUM_IP_UDP | CSUM_IP_TCP;
2481 
2482 	/*
2483 	 * csum_flags can request either v4 or v6 offload but not both.
2484 	 * tcp_output always sets CSUM_TSO (both CSUM_IP_TSO and CSUM_IP6_TSO),
2485 	 * so those bits cannot be used to detect the IP version.  The other
2486 	 * checksum bits are always set together with CSUM_TSO, and we use
2487 	 * them to determine the IP version.
2488 	 */
2489 	if (csum_flags_in & v4) {
2490 		if (csum_flags_in & CSUM_IP)
2491 			csum_flags |= CSUM_INNER_IP;
2492 		if (csum_flags_in & CSUM_IP_UDP)
2493 			csum_flags |= CSUM_INNER_IP_UDP;
2494 		if (csum_flags_in & CSUM_IP_TCP)
2495 			csum_flags |= CSUM_INNER_IP_TCP;
2496 		if (csum_flags_in & CSUM_IP_TSO)
2497 			csum_flags |= CSUM_INNER_IP_TSO;
2498 	} else {
2499 #ifdef INVARIANTS
2500 		const uint32_t v6 = CSUM_IP6_UDP | CSUM_IP6_TCP;
2501 
2502 		MPASS((csum_flags_in & v6) != 0);
2503 #endif
2504 		if (csum_flags_in & CSUM_IP6_UDP)
2505 			csum_flags |= CSUM_INNER_IP6_UDP;
2506 		if (csum_flags_in & CSUM_IP6_TCP)
2507 			csum_flags |= CSUM_INNER_IP6_TCP;
2508 		if (csum_flags_in & CSUM_IP6_TSO)
2509 			csum_flags |= CSUM_INNER_IP6_TSO;
2510 	}
2511 
2512 	return (csum_flags);
2513 }
2514 #endif
2515 
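/*
 * Encapsulate the frame in IPv4/UDP/VXLAN and send it with ip_output().
 * If the inner frame requests checksum or TSO offload, verify that the
 * outbound interface advertises the matching CSUM_INNER_* assists and
 * drop the packet otherwise.
 */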
2516 static int
2517 vxlan_encap4(struct vxlan_softc *sc, const union vxlan_sockaddr *fvxlsa,
2518     struct mbuf *m)
2519 {
2520 #ifdef INET
2521 	struct ifnet *ifp;
2522 	struct ip *ip;
2523 	struct in_addr srcaddr, dstaddr;
2524 	uint16_t srcport, dstport;
2525 	int plen, mcast, error;
2526 	struct route route, *ro;
2527 	struct sockaddr_in *sin;
2528 	uint32_t csum_flags;
2529 
2530 	NET_EPOCH_ASSERT();
2531 
2532 	ifp = sc->vxl_ifp;
2533 	srcaddr = sc->vxl_src_addr.in4.sin_addr;
2534 	srcport = vxlan_pick_source_port(sc, m);
2535 	dstaddr = fvxlsa->in4.sin_addr;
2536 	dstport = fvxlsa->in4.sin_port;
2537 
2538 	plen = m->m_pkthdr.len;
2539 	M_PREPEND(m, sizeof(struct ip) + sizeof(struct vxlanudphdr),
2540 	    M_NOWAIT);
2541 	if (m == NULL) {
2542 		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2543 		return (ENOBUFS);
2544 	}
2545 
2546 	ip = mtod(m, struct ip *);
2547 	ip->ip_tos = 0;
2548 	ip->ip_len = htons(m->m_pkthdr.len);
2549 	ip->ip_off = 0;
2550 	ip->ip_ttl = sc->vxl_ttl;
2551 	ip->ip_p = IPPROTO_UDP;
2552 	ip->ip_sum = 0;
2553 	ip->ip_src = srcaddr;
2554 	ip->ip_dst = dstaddr;
2555 
2556 	vxlan_encap_header(sc, m, sizeof(struct ip), srcport, dstport);
2557 
2558 	mcast = (m->m_flags & (M_MCAST | M_BCAST)) ? 1 : 0;
2559 	m->m_flags &= ~(M_MCAST | M_BCAST);
2560 
2561 	m->m_pkthdr.csum_flags &= CSUM_FLAGS_TX;
2562 	if (m->m_pkthdr.csum_flags != 0) {
2563 		/*
2564 		 * HW checksum (L3 and/or L4) or TSO has been requested.  Look
2565 		 * up the ifnet for the outbound route and verify that the
2566 		 * outbound ifnet can perform the requested operation on the
2567 		 * inner frame.
2568 		 */
2569 		bzero(&route, sizeof(route));
2570 		ro = &route;
2571 		sin = (struct sockaddr_in *)&ro->ro_dst;
2572 		sin->sin_family = AF_INET;
2573 		sin->sin_len = sizeof(*sin);
2574 		sin->sin_addr = ip->ip_dst;
2575 		ro->ro_nh = fib4_lookup(M_GETFIB(m), ip->ip_dst, 0, NHR_NONE,
2576 		    0);
2577 		if (ro->ro_nh == NULL) {
2578 			m_freem(m);
2579 			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2580 			return (EHOSTUNREACH);
2581 		}
2582 
2583 		csum_flags = csum_flags_to_inner_flags(m->m_pkthdr.csum_flags,
2584 		    CSUM_ENCAP_VXLAN);
2585 		if ((csum_flags & ro->ro_nh->nh_ifp->if_hwassist) !=
2586 		    csum_flags) {
2587 			if (ppsratecheck(&sc->err_time, &sc->err_pps, 1)) {
2588 				const struct ifnet *nh_ifp = ro->ro_nh->nh_ifp;
2589 
2590 				if_printf(ifp, "interface %s is missing hwcaps "
2591 				    "0x%08x, csum_flags 0x%08x -> 0x%08x, "
2592 				    "hwassist 0x%08x\n", nh_ifp->if_xname,
2593 				    csum_flags & ~(uint32_t)nh_ifp->if_hwassist,
2594 				    m->m_pkthdr.csum_flags, csum_flags,
2595 				    (uint32_t)nh_ifp->if_hwassist);
2596 			}
2597 			m_freem(m);
2598 			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2599 			return (ENXIO);
2600 		}
2601 		m->m_pkthdr.csum_flags = csum_flags;
2602 		if (csum_flags &
2603 		    (CSUM_INNER_IP | CSUM_INNER_IP_UDP | CSUM_INNER_IP6_UDP |
2604 		    CSUM_INNER_IP_TCP | CSUM_INNER_IP6_TCP)) {
2605 			counter_u64_add(sc->vxl_stats.txcsum, 1);
2606 			if (csum_flags & CSUM_INNER_TSO)
2607 				counter_u64_add(sc->vxl_stats.tso, 1);
2608 		}
2609 	} else
2610 		ro = NULL;
2611 	error = ip_output(m, NULL, ro, 0, sc->vxl_im4o, NULL);
2612 	if (error == 0) {
2613 		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
2614 		if_inc_counter(ifp, IFCOUNTER_OBYTES, plen);
2615 		if (mcast != 0)
2616 			if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
2617 	} else
2618 		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2619 
2620 	return (error);
2621 #else
2622 	m_freem(m);
2623 	return (ENOTSUP);
2624 #endif
2625 }
2626 
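/*
 * IPv6 counterpart of vxlan_encap4().  Additionally, when no offload is
 * requested, compute the outer UDP checksum in software unless the
 * destination port is the configured zero-checksum port.
 */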
2627 static int
2628 vxlan_encap6(struct vxlan_softc *sc, const union vxlan_sockaddr *fvxlsa,
2629     struct mbuf *m)
2630 {
2631 #ifdef INET6
2632 	struct ifnet *ifp;
2633 	struct ip6_hdr *ip6;
2634 	const struct in6_addr *srcaddr, *dstaddr;
2635 	uint16_t srcport, dstport;
2636 	int plen, mcast, error;
2637 	struct route_in6 route, *ro;
2638 	struct sockaddr_in6 *sin6;
2639 	uint32_t csum_flags;
2640 
2641 	NET_EPOCH_ASSERT();
2642 
2643 	ifp = sc->vxl_ifp;
2644 	srcaddr = &sc->vxl_src_addr.in6.sin6_addr;
2645 	srcport = vxlan_pick_source_port(sc, m);
2646 	dstaddr = &fvxlsa->in6.sin6_addr;
2647 	dstport = fvxlsa->in6.sin6_port;
2648 
2649 	plen = m->m_pkthdr.len;
2650 	M_PREPEND(m, sizeof(struct ip6_hdr) + sizeof(struct vxlanudphdr),
2651 	    M_NOWAIT);
2652 	if (m == NULL) {
2653 		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2654 		return (ENOBUFS);
2655 	}
2656 
2657 	ip6 = mtod(m, struct ip6_hdr *);
2658 	ip6->ip6_flow = 0;		/* BMV: Keep in forwarding entry? */
2659 	ip6->ip6_vfc = IPV6_VERSION;
2660 	ip6->ip6_plen = 0;
2661 	ip6->ip6_nxt = IPPROTO_UDP;
2662 	ip6->ip6_hlim = sc->vxl_ttl;
2663 	ip6->ip6_src = *srcaddr;
2664 	ip6->ip6_dst = *dstaddr;
2665 
2666 	vxlan_encap_header(sc, m, sizeof(struct ip6_hdr), srcport, dstport);
2667 
2668 	mcast = (m->m_flags & (M_MCAST | M_BCAST)) ? 1 : 0;
2669 	m->m_flags &= ~(M_MCAST | M_BCAST);
2670 
2671 	ro = NULL;
2672 	m->m_pkthdr.csum_flags &= CSUM_FLAGS_TX;
2673 	if (m->m_pkthdr.csum_flags != 0) {
2674 		/*
2675 		 * HW checksum (L3 and/or L4) or TSO has been requested.  Look
2676 		 * up the ifnet for the outbound route and verify that the
2677 		 * outbound ifnet can perform the requested operation on the
2678 		 * inner frame.
2679 		 */
2680 		bzero(&route, sizeof(route));
2681 		ro = &route;
2682 		sin6 = (struct sockaddr_in6 *)&ro->ro_dst;
2683 		sin6->sin6_family = AF_INET6;
2684 		sin6->sin6_len = sizeof(*sin6);
2685 		sin6->sin6_addr = ip6->ip6_dst;
2686 		ro->ro_nh = fib6_lookup(M_GETFIB(m), &ip6->ip6_dst, 0,
2687 		    NHR_NONE, 0);
2688 		if (ro->ro_nh == NULL) {
2689 			m_freem(m);
2690 			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2691 			return (EHOSTUNREACH);
2692 		}
2693 
2694 		csum_flags = csum_flags_to_inner_flags(m->m_pkthdr.csum_flags,
2695 		    CSUM_ENCAP_VXLAN);
2696 		if ((csum_flags & ro->ro_nh->nh_ifp->if_hwassist) !=
2697 		    csum_flags) {
2698 			if (ppsratecheck(&sc->err_time, &sc->err_pps, 1)) {
2699 				const struct ifnet *nh_ifp = ro->ro_nh->nh_ifp;
2700 
2701 				if_printf(ifp, "interface %s is missing hwcaps "
2702 				    "0x%08x, csum_flags 0x%08x -> 0x%08x, "
2703 				    "hwassist 0x%08x\n", nh_ifp->if_xname,
2704 				    csum_flags & ~(uint32_t)nh_ifp->if_hwassist,
2705 				    m->m_pkthdr.csum_flags, csum_flags,
2706 				    (uint32_t)nh_ifp->if_hwassist);
2707 			}
2708 			m_freem(m);
2709 			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2710 			return (ENXIO);
2711 		}
2712 		m->m_pkthdr.csum_flags = csum_flags;
2713 		if (csum_flags &
2714 		    (CSUM_INNER_IP | CSUM_INNER_IP_UDP | CSUM_INNER_IP6_UDP |
2715 		    CSUM_INNER_IP_TCP | CSUM_INNER_IP6_TCP)) {
2716 			counter_u64_add(sc->vxl_stats.txcsum, 1);
2717 			if (csum_flags & CSUM_INNER_TSO)
2718 				counter_u64_add(sc->vxl_stats.tso, 1);
2719 		}
2720 	} else if (ntohs(dstport) != V_zero_checksum_port) {
2721 		struct udphdr *hdr = mtodo(m, sizeof(struct ip6_hdr));
2722 
2723 		hdr->uh_sum = in6_cksum_pseudo(ip6,
2724 		    m->m_pkthdr.len - sizeof(struct ip6_hdr), IPPROTO_UDP, 0);
2725 		m->m_pkthdr.csum_flags = CSUM_UDP_IPV6;
2726 		m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
2727 	}
2728 	error = ip6_output(m, NULL, ro, 0, sc->vxl_im6o, NULL, NULL);
2729 	if (error == 0) {
2730 		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
2731 		if_inc_counter(ifp, IFCOUNTER_OBYTES, plen);
2732 		if (mcast != 0)
2733 			if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
2734 	} else
2735 		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2736 
2737 	return (error);
2738 #else
2739 	m_freem(m);
2740 	return (ENOTSUP);
2741 #endif
2742 }
2743 
2744 #define MTAG_VXLAN_LOOP	0x7876706c /* vxlp */
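/*
 * if_transmit method: look up the destination VTEP in the forwarding
 * table (falling back to the default entry for broadcast, multicast and
 * unknown destinations), guard against nested encapsulation, and
 * encapsulate over IPv4 or IPv6 as appropriate.
 */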
2745 static int
2746 vxlan_transmit(struct ifnet *ifp, struct mbuf *m)
2747 {
2748 	struct rm_priotracker tracker;
2749 	union vxlan_sockaddr vxlsa;
2750 	struct vxlan_softc *sc;
2751 	struct vxlan_ftable_entry *fe;
2752 	struct ifnet *mcifp;
2753 	struct ether_header *eh;
2754 	int ipv4, error;
2755 
2756 	sc = ifp->if_softc;
2757 	eh = mtod(m, struct ether_header *);
2758 	fe = NULL;
2759 	mcifp = NULL;
2760 
2761 	ETHER_BPF_MTAP(ifp, m);
2762 
2763 	VXLAN_RLOCK(sc, &tracker);
2764 	M_SETFIB(m, sc->vxl_fibnum);
2765 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2766 		VXLAN_RUNLOCK(sc, &tracker);
2767 		m_freem(m);
2768 		return (ENETDOWN);
2769 	}
2770 	if (__predict_false(if_tunnel_check_nesting(ifp, m, MTAG_VXLAN_LOOP,
2771 	    max_vxlan_nesting) != 0)) {
2772 		VXLAN_RUNLOCK(sc, &tracker);
2773 		m_freem(m);
2774 		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2775 		return (ELOOP);
2776 	}
2777 
2778 	if ((m->m_flags & (M_BCAST | M_MCAST)) == 0)
2779 		fe = vxlan_ftable_entry_lookup(sc, eh->ether_dhost);
2780 	if (fe == NULL)
2781 		fe = &sc->vxl_default_fe;
2782 	vxlan_sockaddr_copy(&vxlsa, &fe->vxlfe_raddr.sa);
2783 
2784 	ipv4 = VXLAN_SOCKADDR_IS_IPV4(&vxlsa) != 0;
2785 	if (vxlan_sockaddr_in_multicast(&vxlsa) != 0)
2786 		mcifp = vxlan_multicast_if_ref(sc, ipv4);
2787 
2788 	VXLAN_ACQUIRE(sc);
2789 	VXLAN_RUNLOCK(sc, &tracker);
2790 
2791 	if (ipv4 != 0)
2792 		error = vxlan_encap4(sc, &vxlsa, m);
2793 	else
2794 		error = vxlan_encap6(sc, &vxlsa, m);
2795 
2796 	vxlan_release(sc);
2797 	if (mcifp != NULL)
2798 		if_rele(mcifp);
2799 
2800 	return (error);
2801 }
2802 
2803 static void
2804 vxlan_qflush(struct ifnet *ifp __unused)
2805 {
2806 }
2807 
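/*
 * UDP tunneling callback.  Validate the VXLAN header (valid-VNI flag set
 * and no reserved bits), strip the outer headers, and hand the inner
 * frame to vxlan_input().  The mbuf is always consumed.
 */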
2808 static bool
2809 vxlan_rcv_udp_packet(struct mbuf *m, int offset, struct inpcb *inpcb,
2810     const struct sockaddr *srcsa, void *xvso)
2811 {
2812 	struct vxlan_socket *vso;
2813 	struct vxlan_header *vxh, vxlanhdr;
2814 	uint32_t vni;
2815 	int error __unused;
2816 
2817 	M_ASSERTPKTHDR(m);
2818 	vso = xvso;
2819 	offset += sizeof(struct udphdr);
2820 
2821 	if (m->m_pkthdr.len < offset + sizeof(struct vxlan_header))
2822 		goto out;
2823 
2824 	if (__predict_false(m->m_len < offset + sizeof(struct vxlan_header))) {
2825 		m_copydata(m, offset, sizeof(struct vxlan_header),
2826 		    (caddr_t) &vxlanhdr);
2827 		vxh = &vxlanhdr;
2828 	} else
2829 		vxh = mtodo(m, offset);
2830 
2831 	/*
2832 	 * Drop the packet if a reserved bit is set in either the flags or
2833 	 * VNI field of the header.  The specification says reserved bits
2834 	 * must be ignored on receipt, but a set bit may indicate an
2835 	 * unsupported new feature.  This matches the Linux implementation.
2836 	 */
2837 	if (vxh->vxlh_flags != htonl(VXLAN_HDR_FLAGS_VALID_VNI) ||
2838 	    vxh->vxlh_vni & ~VXLAN_VNI_MASK)
2839 		goto out;
2840 
2841 	vni = ntohl(vxh->vxlh_vni) >> VXLAN_HDR_VNI_SHIFT;
2842 
2843 	/* Adjust to the start of the inner Ethernet frame. */
2844 	m_adj_decap(m, offset + sizeof(struct vxlan_header));
2845 
2846 	error = vxlan_input(vso, vni, &m, srcsa);
2847 	MPASS(error != 0 || m == NULL);
2848 
2849 out:
2850 	if (m != NULL)
2851 		m_freem(m);
2852 
2853 	return (true);
2854 }
2855 
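/*
 * Deliver a decapsulated frame to the interface configured with the
 * given VNI.  Optionally learn the source MAC to VTEP mapping and
 * translate inner hardware checksum results into the standard flags
 * before passing the frame to if_input.
 */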
2856 static int
2857 vxlan_input(struct vxlan_socket *vso, uint32_t vni, struct mbuf **m0,
2858     const struct sockaddr *sa)
2859 {
2860 	struct vxlan_softc *sc;
2861 	struct ifnet *ifp;
2862 	struct mbuf *m;
2863 	struct ether_header *eh;
2864 	int error;
2865 
2866 	m = *m0;
2867 
2868 	if (m->m_pkthdr.len < ETHER_HDR_LEN)
2869 		return (EINVAL);
2870 
2871 	sc = vxlan_socket_lookup_softc(vso, vni);
2872 	if (sc == NULL)
2873 		return (ENOENT);
2874 
2875 	ifp = sc->vxl_ifp;
2876 	if (m->m_len < ETHER_HDR_LEN &&
2877 	    (m = m_pullup(m, ETHER_HDR_LEN)) == NULL) {
2878 		*m0 = NULL;
2879 		error = ENOBUFS;
2880 		goto out;
2881 	}
2882 	eh = mtod(m, struct ether_header *);
2883 
2884 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2885 		error = ENETDOWN;
2886 		goto out;
2887 	} else if (ifp == m->m_pkthdr.rcvif) {
2888 		/* XXX Does not catch more complex loops. */
2889 		error = EDEADLK;
2890 		goto out;
2891 	}
2892 
2893 	if (sc->vxl_flags & VXLAN_FLAG_LEARN)
2894 		vxlan_ftable_learn(sc, sa, eh->ether_shost);
2895 
2896 	m_clrprotoflags(m);
2897 	m->m_pkthdr.rcvif = ifp;
2898 	M_SETFIB(m, ifp->if_fib);
2899 	if (((ifp->if_capenable & IFCAP_RXCSUM &&
2900 	    m->m_pkthdr.csum_flags & CSUM_INNER_L3_CALC) ||
2901 	    (ifp->if_capenable & IFCAP_RXCSUM_IPV6 &&
2902 	    !(m->m_pkthdr.csum_flags & CSUM_INNER_L3_CALC)))) {
2903 		uint32_t csum_flags = 0;
2904 
2905 		if (m->m_pkthdr.csum_flags & CSUM_INNER_L3_CALC)
2906 			csum_flags |= CSUM_L3_CALC;
2907 		if (m->m_pkthdr.csum_flags & CSUM_INNER_L3_VALID)
2908 			csum_flags |= CSUM_L3_VALID;
2909 		if (m->m_pkthdr.csum_flags & CSUM_INNER_L4_CALC)
2910 			csum_flags |= CSUM_L4_CALC;
2911 		if (m->m_pkthdr.csum_flags & CSUM_INNER_L4_VALID)
2912 			csum_flags |= CSUM_L4_VALID;
2913 		m->m_pkthdr.csum_flags = csum_flags;
2914 		counter_u64_add(sc->vxl_stats.rxcsum, 1);
2915 	} else {
2916 		/* RXCSUM is disabled; discard the inner checksum results. */
2917 		m->m_pkthdr.csum_flags = 0;
2918 		m->m_pkthdr.csum_data = 0;
2919 	}
2920 
2921 	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
2922 	(*ifp->if_input)(ifp, m);
2923 	*m0 = NULL;
2924 	error = 0;
2925 
2926 out:
2927 	vxlan_release(sc);
2928 	return (error);
2929 }
2930 
2931 static void
2932 vxlan_stats_alloc(struct vxlan_softc *sc)
2933 {
2934 	struct vxlan_statistics *stats = &sc->vxl_stats;
2935 
2936 	stats->txcsum = counter_u64_alloc(M_WAITOK);
2937 	stats->tso = counter_u64_alloc(M_WAITOK);
2938 	stats->rxcsum = counter_u64_alloc(M_WAITOK);
2939 }
2940 
2941 static void
2942 vxlan_stats_free(struct vxlan_softc *sc)
2943 {
2944 	struct vxlan_statistics *stats = &sc->vxl_stats;
2945 
2946 	counter_u64_free(stats->txcsum);
2947 	counter_u64_free(stats->tso);
2948 	counter_u64_free(stats->rxcsum);
2949 }
2950 
2951 static void
2952 vxlan_set_default_config(struct vxlan_softc *sc)
2953 {
2954 
2955 	sc->vxl_flags |= VXLAN_FLAG_LEARN;
2956 
2957 	sc->vxl_vni = VXLAN_VNI_MAX;
2958 	sc->vxl_ttl = IPDEFTTL;
2959 
2960 	if (!vxlan_tunable_int(sc, "legacy_port", vxlan_legacy_port)) {
2961 		sc->vxl_src_addr.in4.sin_port = htons(VXLAN_PORT);
2962 		sc->vxl_dst_addr.in4.sin_port = htons(VXLAN_PORT);
2963 	} else {
2964 		sc->vxl_src_addr.in4.sin_port = htons(VXLAN_LEGACY_PORT);
2965 		sc->vxl_dst_addr.in4.sin_port = htons(VXLAN_LEGACY_PORT);
2966 	}
2967 
2968 	sc->vxl_min_port = V_ipport_firstauto;
2969 	sc->vxl_max_port = V_ipport_lastauto;
2970 
2971 	sc->vxl_ftable_max = VXLAN_FTABLE_MAX;
2972 	sc->vxl_ftable_timeout = VXLAN_FTABLE_TIMEOUT;
2973 }
2974 
2975 static int
2976 vxlan_set_user_config(struct vxlan_softc *sc, struct ifvxlanparam *vxlp)
2977 {
2978 
2979 #ifndef INET
2980 	if (vxlp->vxlp_with & (VXLAN_PARAM_WITH_LOCAL_ADDR4 |
2981 	    VXLAN_PARAM_WITH_REMOTE_ADDR4))
2982 		return (EAFNOSUPPORT);
2983 #endif
2984 
2985 #ifndef INET6
2986 	if (vxlp->vxlp_with & (VXLAN_PARAM_WITH_LOCAL_ADDR6 |
2987 	    VXLAN_PARAM_WITH_REMOTE_ADDR6))
2988 		return (EAFNOSUPPORT);
2989 #else
2990 	if (vxlp->vxlp_with & VXLAN_PARAM_WITH_LOCAL_ADDR6) {
2991 		int error = vxlan_sockaddr_in6_embedscope(&vxlp->vxlp_local_sa);
2992 		if (error)
2993 			return (error);
2994 	}
2995 	if (vxlp->vxlp_with & VXLAN_PARAM_WITH_REMOTE_ADDR6) {
2996 		int error = vxlan_sockaddr_in6_embedscope(
2997 		   &vxlp->vxlp_remote_sa);
2998 		if (error)
2999 			return (error);
3000 	}
3001 #endif
3002 
3003 	if (vxlp->vxlp_with & VXLAN_PARAM_WITH_VNI) {
3004 		if (vxlan_check_vni(vxlp->vxlp_vni) == 0)
3005 			sc->vxl_vni = vxlp->vxlp_vni;
3006 	}
3007 
3008 	if (vxlp->vxlp_with & VXLAN_PARAM_WITH_LOCAL_ADDR4) {
3009 		sc->vxl_src_addr.in4.sin_len = sizeof(struct sockaddr_in);
3010 		sc->vxl_src_addr.in4.sin_family = AF_INET;
3011 		sc->vxl_src_addr.in4.sin_addr =
3012 		    vxlp->vxlp_local_sa.in4.sin_addr;
3013 	} else if (vxlp->vxlp_with & VXLAN_PARAM_WITH_LOCAL_ADDR6) {
3014 		sc->vxl_src_addr.in6.sin6_len = sizeof(struct sockaddr_in6);
3015 		sc->vxl_src_addr.in6.sin6_family = AF_INET6;
3016 		sc->vxl_src_addr.in6.sin6_addr =
3017 		    vxlp->vxlp_local_sa.in6.sin6_addr;
3018 	}
3019 
3020 	if (vxlp->vxlp_with & VXLAN_PARAM_WITH_REMOTE_ADDR4) {
3021 		sc->vxl_dst_addr.in4.sin_len = sizeof(struct sockaddr_in);
3022 		sc->vxl_dst_addr.in4.sin_family = AF_INET;
3023 		sc->vxl_dst_addr.in4.sin_addr =
3024 		    vxlp->vxlp_remote_sa.in4.sin_addr;
3025 	} else if (vxlp->vxlp_with & VXLAN_PARAM_WITH_REMOTE_ADDR6) {
3026 		sc->vxl_dst_addr.in6.sin6_len = sizeof(struct sockaddr_in6);
3027 		sc->vxl_dst_addr.in6.sin6_family = AF_INET6;
3028 		sc->vxl_dst_addr.in6.sin6_addr =
3029 		    vxlp->vxlp_remote_sa.in6.sin6_addr;
3030 	}
3031 
3032 	if (vxlp->vxlp_with & VXLAN_PARAM_WITH_LOCAL_PORT)
3033 		sc->vxl_src_addr.in4.sin_port = htons(vxlp->vxlp_local_port);
3034 	if (vxlp->vxlp_with & VXLAN_PARAM_WITH_REMOTE_PORT)
3035 		sc->vxl_dst_addr.in4.sin_port = htons(vxlp->vxlp_remote_port);
3036 
3037 	if (vxlp->vxlp_with & VXLAN_PARAM_WITH_PORT_RANGE) {
3038 		if (vxlp->vxlp_min_port <= vxlp->vxlp_max_port) {
3039 			sc->vxl_min_port = vxlp->vxlp_min_port;
3040 			sc->vxl_max_port = vxlp->vxlp_max_port;
3041 		}
3042 	}
3043 
3044 	if (vxlp->vxlp_with & VXLAN_PARAM_WITH_MULTICAST_IF)
3045 		strlcpy(sc->vxl_mc_ifname, vxlp->vxlp_mc_ifname, IFNAMSIZ);
3046 
3047 	if (vxlp->vxlp_with & VXLAN_PARAM_WITH_FTABLE_TIMEOUT) {
3048 		if (vxlan_check_ftable_timeout(vxlp->vxlp_ftable_timeout) == 0)
3049 			sc->vxl_ftable_timeout = vxlp->vxlp_ftable_timeout;
3050 	}
3051 
3052 	if (vxlp->vxlp_with & VXLAN_PARAM_WITH_FTABLE_MAX) {
3053 		if (vxlan_check_ftable_max(vxlp->vxlp_ftable_max) == 0)
3054 			sc->vxl_ftable_max = vxlp->vxlp_ftable_max;
3055 	}
3056 
3057 	if (vxlp->vxlp_with & VXLAN_PARAM_WITH_TTL) {
3058 		if (vxlan_check_ttl(vxlp->vxlp_ttl) == 0)
3059 			sc->vxl_ttl = vxlp->vxlp_ttl;
3060 	}
3061 
3062 	if (vxlp->vxlp_with & VXLAN_PARAM_WITH_LEARN) {
3063 		if (vxlp->vxlp_learn == 0)
3064 			sc->vxl_flags &= ~VXLAN_FLAG_LEARN;
3065 	}
3066 
3067 	return (0);
3068 }
3069 
3070 static int
3071 vxlan_set_reqcap(struct vxlan_softc *sc, struct ifnet *ifp, int reqcap)
3072 {
3073 	int mask = reqcap ^ ifp->if_capenable;
3074 
3075 	/* Disable TSO if tx checksums are disabled. */
3076 	if (mask & IFCAP_TXCSUM && !(reqcap & IFCAP_TXCSUM) &&
3077 	    reqcap & IFCAP_TSO4) {
3078 		reqcap &= ~IFCAP_TSO4;
3079 		if_printf(ifp, "tso4 disabled due to -txcsum.\n");
3080 	}
3081 	if (mask & IFCAP_TXCSUM_IPV6 && !(reqcap & IFCAP_TXCSUM_IPV6) &&
3082 	    reqcap & IFCAP_TSO6) {
3083 		reqcap &= ~IFCAP_TSO6;
3084 		if_printf(ifp, "tso6 disabled due to -txcsum6.\n");
3085 	}
3086 
3087 	/* Do not enable TSO if tx checksums are disabled. */
3088 	if (mask & IFCAP_TSO4 && reqcap & IFCAP_TSO4 &&
3089 	    !(reqcap & IFCAP_TXCSUM)) {
3090 		if_printf(ifp, "enable txcsum first.\n");
3091 		return (EAGAIN);
3092 	}
3093 	if (mask & IFCAP_TSO6 && reqcap & IFCAP_TSO6 &&
3094 	    !(reqcap & IFCAP_TXCSUM_IPV6)) {
3095 		if_printf(ifp, "enable txcsum6 first.\n");
3096 		return (EAGAIN);
3097 	}
3098 
3099 	sc->vxl_reqcap = reqcap;
3100 	return (0);
3101 }
3102 
3103 /*
3104  * A VXLAN interface inherits the capabilities of the vxlandev interface or,
3105  * if none is configured, of the interface hosting the vxlanlocal address.
3106  */
3107 static void
3108 vxlan_set_hwcaps(struct vxlan_softc *sc)
3109 {
3110 	struct epoch_tracker et;
3111 	struct ifnet *p;
3112 	struct ifaddr *ifa;
3113 	u_long hwa;
3114 	int cap, ena;
3115 	bool rel;
3116 	struct ifnet *ifp = sc->vxl_ifp;
3117 
3118 	/* Reset to the basic VXLAN capabilities before re-deriving them. */
3119 	ifp->if_capabilities &= VXLAN_BASIC_IFCAPS;
3120 	ifp->if_capenable &= VXLAN_BASIC_IFCAPS;
3121 	ifp->if_hwassist = 0;
3122 
3123 	NET_EPOCH_ENTER(et);
3124 	CURVNET_SET(ifp->if_vnet);
3125 
3126 	rel = false;
3127 	p = NULL;
3128 	if (sc->vxl_mc_ifname[0] != '\0') {
3129 		rel = true;
3130 		p = ifunit_ref(sc->vxl_mc_ifname);
3131 	} else if (vxlan_sockaddr_in_any(&sc->vxl_src_addr) == 0) {
3132 		if (sc->vxl_src_addr.sa.sa_family == AF_INET) {
3133 			struct sockaddr_in in4 = sc->vxl_src_addr.in4;
3134 
3135 			in4.sin_port = 0;
3136 			ifa = ifa_ifwithaddr((struct sockaddr *)&in4);
3137 			if (ifa != NULL)
3138 				p = ifa->ifa_ifp;
3139 		} else if (sc->vxl_src_addr.sa.sa_family == AF_INET6) {
3140 			struct sockaddr_in6 in6 = sc->vxl_src_addr.in6;
3141 
3142 			in6.sin6_port = 0;
3143 			ifa = ifa_ifwithaddr((struct sockaddr *)&in6);
3144 			if (ifa != NULL)
3145 				p = ifa->ifa_ifp;
3146 		}
3147 	}
3148 	if (p == NULL)
3149 		goto done;
3150 
3151 	cap = ena = hwa = 0;
3152 
3153 	/* checksum offload */
3154 	if (p->if_capabilities & IFCAP_VXLAN_HWCSUM)
3155 		cap |= p->if_capabilities & (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6);
3156 	if (p->if_capenable & IFCAP_VXLAN_HWCSUM) {
3157 		ena |= sc->vxl_reqcap & p->if_capenable &
3158 		    (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6);
3159 		if (ena & IFCAP_TXCSUM) {
3160 			if (p->if_hwassist & CSUM_INNER_IP)
3161 				hwa |= CSUM_IP;
3162 			if (p->if_hwassist & CSUM_INNER_IP_UDP)
3163 				hwa |= CSUM_IP_UDP;
3164 			if (p->if_hwassist & CSUM_INNER_IP_TCP)
3165 				hwa |= CSUM_IP_TCP;
3166 		}
3167 		if (ena & IFCAP_TXCSUM_IPV6) {
3168 			if (p->if_hwassist & CSUM_INNER_IP6_UDP)
3169 				hwa |= CSUM_IP6_UDP;
3170 			if (p->if_hwassist & CSUM_INNER_IP6_TCP)
3171 				hwa |= CSUM_IP6_TCP;
3172 		}
3173 	}
3174 
3175 	/* hardware TSO */
3176 	if (p->if_capabilities & IFCAP_VXLAN_HWTSO) {
3177 		cap |= p->if_capabilities & IFCAP_TSO;
3178 		if (p->if_hw_tsomax > IP_MAXPACKET - ifp->if_hdrlen)
3179 			ifp->if_hw_tsomax = IP_MAXPACKET - ifp->if_hdrlen;
3180 		else
3181 			ifp->if_hw_tsomax = p->if_hw_tsomax;
3182 	/* XXX: tsomaxsegcount decrement is cxgbe specific. */
3183 		ifp->if_hw_tsomaxsegcount = p->if_hw_tsomaxsegcount - 1;
3184 		ifp->if_hw_tsomaxsegsize = p->if_hw_tsomaxsegsize;
3185 	}
3186 	if (p->if_capenable & IFCAP_VXLAN_HWTSO) {
3187 		ena |= sc->vxl_reqcap & p->if_capenable & IFCAP_TSO;
3188 		if (ena & IFCAP_TSO) {
3189 			if (p->if_hwassist & CSUM_INNER_IP_TSO)
3190 				hwa |= CSUM_IP_TSO;
3191 			if (p->if_hwassist & CSUM_INNER_IP6_TSO)
3192 				hwa |= CSUM_IP6_TSO;
3193 		}
3194 	}
3195 
3196 	ifp->if_capabilities |= cap;
3197 	ifp->if_capenable |= ena;
3198 	ifp->if_hwassist |= hwa;
3199 	if (rel)
3200 		if_rele(p);
3201 done:
3202 	CURVNET_RESTORE();
3203 	NET_EPOCH_EXIT(et);
3204 }
3205 
3206 static int
3207 vxlan_clone_create(struct if_clone *ifc, char *name, size_t len,
3208     struct ifc_data *ifd, struct ifnet **ifpp)
3209 {
3210 	struct vxlan_softc *sc;
3211 	struct ifnet *ifp;
3212 	struct ifvxlanparam vxlp;
3213 	int error;
3214 
3215 	sc = malloc(sizeof(struct vxlan_softc), M_VXLAN, M_WAITOK | M_ZERO);
3216 	sc->vxl_unit = ifd->unit;
3217 	sc->vxl_fibnum = curthread->td_proc->p_fibnum;
3218 	vxlan_set_default_config(sc);
3219 
3220 	if (ifd->params != NULL) {
3221 		error = ifc_copyin(ifd, &vxlp, sizeof(vxlp));
3222 		if (error)
3223 			goto fail;
3224 
3225 		error = vxlan_set_user_config(sc, &vxlp);
3226 		if (error)
3227 			goto fail;
3228 	}
3229 
3230 	vxlan_stats_alloc(sc);
3231 	ifp = if_alloc(IFT_ETHER);
3232 	sc->vxl_ifp = ifp;
3233 	rm_init(&sc->vxl_lock, "vxlanrm");
3234 	callout_init_rw(&sc->vxl_callout, &sc->vxl_lock, 0);
3235 	sc->vxl_port_hash_key = arc4random();
3236 	vxlan_ftable_init(sc);
3237 
3238 	vxlan_sysctl_setup(sc);
3239 
3240 	ifp->if_softc = sc;
3241 	if_initname(ifp, vxlan_name, ifd->unit);
3242 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
3243 	ifp->if_init = vxlan_init;
3244 	ifp->if_ioctl = vxlan_ioctl;
3245 	ifp->if_transmit = vxlan_transmit;
3246 	ifp->if_qflush = vxlan_qflush;
3247 	ifp->if_capabilities = VXLAN_BASIC_IFCAPS;
3248 	ifp->if_capenable = VXLAN_BASIC_IFCAPS;
3249 	sc->vxl_reqcap = -1;
3250 	vxlan_set_hwcaps(sc);
3251 
3252 	ifmedia_init(&sc->vxl_media, 0, vxlan_media_change, vxlan_media_status);
3253 	ifmedia_add(&sc->vxl_media, IFM_ETHER | IFM_AUTO, 0, NULL);
3254 	ifmedia_set(&sc->vxl_media, IFM_ETHER | IFM_AUTO);
3255 
3256 	ether_gen_addr(ifp, &sc->vxl_hwaddr);
3257 	ether_ifattach(ifp, sc->vxl_hwaddr.octet);
3258 
3259 	ifp->if_baudrate = 0;
3260 
3261 	VXLAN_WLOCK(sc);
3262 	vxlan_setup_interface_hdrlen(sc);
3263 	VXLAN_WUNLOCK(sc);
3264 	*ifpp = ifp;
3265 
3266 	return (0);
3267 
3268 fail:
3269 	free(sc, M_VXLAN);
3270 	return (error);
3271 }
3272 
3273 static int
3274 vxlan_clone_destroy(struct if_clone *ifc, struct ifnet *ifp, uint32_t flags)
3275 {
3276 	struct vxlan_softc *sc;
3277 
3278 	sc = ifp->if_softc;
3279 
3280 	vxlan_teardown(sc);
3281 
3282 	vxlan_ftable_flush(sc, 1);
3283 
3284 	ether_ifdetach(ifp);
3285 	if_free(ifp);
3286 	ifmedia_removeall(&sc->vxl_media);
3287 
3288 	vxlan_ftable_fini(sc);
3289 
3290 	vxlan_sysctl_destroy(sc);
3291 	rm_destroy(&sc->vxl_lock);
3292 	vxlan_stats_free(sc);
3293 	free(sc, M_VXLAN);
3294 
3295 	return (0);
3296 }
3297 
3298 /* BMV: Taken from if_bridge. */
3299 static uint32_t
3300 vxlan_mac_hash(struct vxlan_softc *sc, const uint8_t *addr)
3301 {
3302 	uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->vxl_ftable_hash_key;
3303 
3304 	b += addr[5] << 8;
3305 	b += addr[4];
3306 	a += addr[3] << 24;
3307 	a += addr[2] << 16;
3308 	a += addr[1] << 8;
3309 	a += addr[0];
3310 
3311 /*
3312  * The following hash function is adapted from "Hash Functions" by Bob Jenkins
3313  * ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
3314  */
3315 #define	mix(a, b, c)							\
3316 do {									\
3317 	a -= b; a -= c; a ^= (c >> 13);					\
3318 	b -= c; b -= a; b ^= (a << 8);					\
3319 	c -= a; c -= b; c ^= (b >> 13);					\
3320 	a -= b; a -= c; a ^= (c >> 12);					\
3321 	b -= c; b -= a; b ^= (a << 16);					\
3322 	c -= a; c -= b; c ^= (b >> 5);					\
3323 	a -= b; a -= c; a ^= (c >> 3);					\
3324 	b -= c; b -= a; b ^= (a << 10);					\
3325 	c -= a; c -= b; c ^= (b >> 15);					\
3326 } while (0)
3327 
3328 	mix(a, b, c);
3329 
3330 #undef mix
3331 
3332 	return (c);
3333 }
3334 
3335 static int
3336 vxlan_media_change(struct ifnet *ifp)
3337 {
3338 
3339 	/* Ignore. */
3340 	return (0);
3341 }
3342 
3343 static void
3344 vxlan_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
3345 {
3346 
3347 	ifmr->ifm_status = IFM_ACTIVE | IFM_AVALID;
3348 	ifmr->ifm_active = IFM_ETHER | IFM_FDX;
3349 }
3350 
3351 static int
3352 vxlan_sockaddr_cmp(const union vxlan_sockaddr *vxladdr,
3353     const struct sockaddr *sa)
3354 {
3355 
3356 	return (bcmp(&vxladdr->sa, sa, vxladdr->sa.sa_len));
3357 }
3358 
3359 static void
3360 vxlan_sockaddr_copy(union vxlan_sockaddr *vxladdr,
3361     const struct sockaddr *sa)
3362 {
3363 
3364 	MPASS(sa->sa_family == AF_INET || sa->sa_family == AF_INET6);
3365 	bzero(vxladdr, sizeof(*vxladdr));
3366 
3367 	if (sa->sa_family == AF_INET) {
3368 		vxladdr->in4 = *satoconstsin(sa);
3369 		vxladdr->in4.sin_len = sizeof(struct sockaddr_in);
3370 	} else if (sa->sa_family == AF_INET6) {
3371 		vxladdr->in6 = *satoconstsin6(sa);
3372 		vxladdr->in6.sin6_len = sizeof(struct sockaddr_in6);
3373 	}
3374 }
3375 
3376 static int
3377 vxlan_sockaddr_in_equal(const union vxlan_sockaddr *vxladdr,
3378     const struct sockaddr *sa)
3379 {
3380 	int equal;
3381 
3382 	if (sa->sa_family == AF_INET) {
3383 		const struct in_addr *in4 = &satoconstsin(sa)->sin_addr;
3384 		equal = in4->s_addr == vxladdr->in4.sin_addr.s_addr;
3385 	} else if (sa->sa_family == AF_INET6) {
3386 		const struct in6_addr *in6 = &satoconstsin6(sa)->sin6_addr;
3387 		equal = IN6_ARE_ADDR_EQUAL(in6, &vxladdr->in6.sin6_addr);
3388 	} else
3389 		equal = 0;
3390 
3391 	return (equal);
3392 }
3393 
3394 static void
3395 vxlan_sockaddr_in_copy(union vxlan_sockaddr *vxladdr,
3396     const struct sockaddr *sa)
3397 {
3398 
3399 	MPASS(sa->sa_family == AF_INET || sa->sa_family == AF_INET6);
3400 
3401 	if (sa->sa_family == AF_INET) {
3402 		const struct in_addr *in4 = &satoconstsin(sa)->sin_addr;
3403 		vxladdr->in4.sin_family = AF_INET;
3404 		vxladdr->in4.sin_len = sizeof(struct sockaddr_in);
3405 		vxladdr->in4.sin_addr = *in4;
3406 	} else if (sa->sa_family == AF_INET6) {
3407 		const struct in6_addr *in6 = &satoconstsin6(sa)->sin6_addr;
3408 		vxladdr->in6.sin6_family = AF_INET6;
3409 		vxladdr->in6.sin6_len = sizeof(struct sockaddr_in6);
3410 		vxladdr->in6.sin6_addr = *in6;
3411 	}
3412 }
3413 
3414 static int
3415 vxlan_sockaddr_supported(const union vxlan_sockaddr *vxladdr, int unspec)
3416 {
3417 	const struct sockaddr *sa;
3418 	int supported;
3419 
3420 	sa = &vxladdr->sa;
3421 	supported = 0;
3422 
3423 	if (sa->sa_family == AF_UNSPEC && unspec != 0) {
3424 		supported = 1;
3425 	} else if (sa->sa_family == AF_INET) {
3426 #ifdef INET
3427 		supported = 1;
3428 #endif
3429 	} else if (sa->sa_family == AF_INET6) {
3430 #ifdef INET6
3431 		supported = 1;
3432 #endif
3433 	}
3434 
3435 	return (supported);
3436 }
3437 
3438 static int
3439 vxlan_sockaddr_in_any(const union vxlan_sockaddr *vxladdr)
3440 {
3441 	const struct sockaddr *sa;
3442 	int any;
3443 
3444 	sa = &vxladdr->sa;
3445 
3446 	if (sa->sa_family == AF_INET) {
3447 		const struct in_addr *in4 = &satoconstsin(sa)->sin_addr;
3448 		any = in4->s_addr == INADDR_ANY;
3449 	} else if (sa->sa_family == AF_INET6) {
3450 		const struct in6_addr *in6 = &satoconstsin6(sa)->sin6_addr;
3451 		any = IN6_IS_ADDR_UNSPECIFIED(in6);
3452 	} else
3453 		any = -1;
3454 
3455 	return (any);
3456 }
3457 
3458 static int
3459 vxlan_sockaddr_in_multicast(const union vxlan_sockaddr *vxladdr)
3460 {
3461 	const struct sockaddr *sa;
3462 	int mc;
3463 
3464 	sa = &vxladdr->sa;
3465 
3466 	if (sa->sa_family == AF_INET) {
3467 		const struct in_addr *in4 = &satoconstsin(sa)->sin_addr;
3468 		mc = IN_MULTICAST(ntohl(in4->s_addr));
3469 	} else if (sa->sa_family == AF_INET6) {
3470 		const struct in6_addr *in6 = &satoconstsin6(sa)->sin6_addr;
3471 		mc = IN6_IS_ADDR_MULTICAST(in6);
3472 	} else
3473 		mc = -1;
3474 
3475 	return (mc);
3476 }
3477 
3478 static int
3479 vxlan_sockaddr_in6_embedscope(union vxlan_sockaddr *vxladdr)
3480 {
3481 	int error;
3482 
3483 	MPASS(VXLAN_SOCKADDR_IS_IPV6(vxladdr));
3484 #ifdef INET6
3485 	error = sa6_embedscope(&vxladdr->in6, V_ip6_use_defzone);
3486 #else
3487 	error = EAFNOSUPPORT;
3488 #endif
3489 
3490 	return (error);
3491 }
3492 
3493 static int
3494 vxlan_can_change_config(struct vxlan_softc *sc)
3495 {
3496 	struct ifnet *ifp;
3497 
3498 	ifp = sc->vxl_ifp;
3499 	VXLAN_LOCK_ASSERT(sc);
3500 
3501 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3502 		return (0);
3503 	if (sc->vxl_flags & (VXLAN_FLAG_INIT | VXLAN_FLAG_TEARDOWN))
3504 		return (0);
3505 
3506 	return (1);
3507 }
3508 
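/*
 * The vxlan_check_* helpers return zero when the supplied value is
 * acceptable and nonzero otherwise.
 */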
3509 static int
3510 vxlan_check_vni(uint32_t vni)
3511 {
3512 
3513 	return (vni >= VXLAN_VNI_MAX);
3514 }
3515 
3516 static int
3517 vxlan_check_ttl(int ttl)
3518 {
3519 
3520 	return (ttl > MAXTTL);
3521 }
3522 
3523 static int
3524 vxlan_check_ftable_timeout(uint32_t timeout)
3525 {
3526 
3527 	return (timeout > VXLAN_FTABLE_MAX_TIMEOUT);
3528 }
3529 
3530 static int
3531 vxlan_check_ftable_max(uint32_t max)
3532 {
3533 
3534 	return (max > VXLAN_FTABLE_MAX);
3535 }
3536 
3537 static void
3538 vxlan_sysctl_setup(struct vxlan_softc *sc)
3539 {
3540 	struct sysctl_ctx_list *ctx;
3541 	struct sysctl_oid *node;
3542 	struct vxlan_statistics *stats;
3543 	char namebuf[8];
3544 
3545 	ctx = &sc->vxl_sysctl_ctx;
3546 	stats = &sc->vxl_stats;
3547 	snprintf(namebuf, sizeof(namebuf), "%d", sc->vxl_unit);
3548 
3549 	sysctl_ctx_init(ctx);
3550 	sc->vxl_sysctl_node = SYSCTL_ADD_NODE(ctx,
3551 	    SYSCTL_STATIC_CHILDREN(_net_link_vxlan), OID_AUTO, namebuf,
3552 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");
3553 
3554 	node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(sc->vxl_sysctl_node),
3555 	    OID_AUTO, "ftable", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");
3556 	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(node), OID_AUTO, "count",
3557 	    CTLFLAG_RD, &sc->vxl_ftable_cnt, 0,
3558 	    "Number of entries in forwarding table");
3559 	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(node), OID_AUTO, "max",
3560 	    CTLFLAG_RD, &sc->vxl_ftable_max, 0,
3561 	    "Maximum number of entries allowed in forwarding table");
3562 	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(node), OID_AUTO, "timeout",
3563 	    CTLFLAG_RD, &sc->vxl_ftable_timeout, 0,
3564 	    "Number of seconds between prunes of the forwarding table");
3565 	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(node), OID_AUTO, "dump",
3566 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE | CTLFLAG_SKIP,
3567 	    sc, 0, vxlan_ftable_sysctl_dump, "A",
3568 	    "Dump the forwarding table entries");
3569 
3570 	node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(sc->vxl_sysctl_node),
3571 	    OID_AUTO, "stats", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");
3572 	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(node), OID_AUTO,
3573 	    "ftable_nospace", CTLFLAG_RD, &stats->ftable_nospace, 0,
3574 	    "Fowarding table reached maximum entries");
3575 	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(node), OID_AUTO,
3576 	    "ftable_lock_upgrade_failed", CTLFLAG_RD,
3577 	    &stats->ftable_lock_upgrade_failed, 0,
3578 	    "Forwarding table update required lock upgrade");
3579 
3580 	SYSCTL_ADD_COUNTER_U64(ctx, SYSCTL_CHILDREN(node), OID_AUTO, "txcsum",
3581 	    CTLFLAG_RD, &stats->txcsum,
3582 	    "# of times hardware assisted with tx checksum");
3583 	SYSCTL_ADD_COUNTER_U64(ctx, SYSCTL_CHILDREN(node), OID_AUTO, "tso",
3584 	    CTLFLAG_RD, &stats->tso, "# of times hardware assisted with TSO");
3585 	SYSCTL_ADD_COUNTER_U64(ctx, SYSCTL_CHILDREN(node), OID_AUTO, "rxcsum",
3586 	    CTLFLAG_RD, &stats->rxcsum,
3587 	    "# of times hardware assisted with rx checksum");
3588 }
3589 
3590 static void
3591 vxlan_sysctl_destroy(struct vxlan_softc *sc)
3592 {
3593 
3594 	sysctl_ctx_free(&sc->vxl_sysctl_ctx);
3595 	sc->vxl_sysctl_node = NULL;
3596 }
3597 
3598 static int
3599 vxlan_tunable_int(struct vxlan_softc *sc, const char *knob, int def)
3600 {
3601 	char path[64];
3602 
3603 	snprintf(path, sizeof(path), "net.link.vxlan.%d.%s",
3604 	    sc->vxl_unit, knob);
3605 	TUNABLE_INT_FETCH(path, &def);
3606 
3607 	return (def);
3608 }
3609 
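/*
 * ifnet departure handler: collect the vxlan interfaces whose multicast
 * interface is going away and tear them down.
 */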
3610 static void
3611 vxlan_ifdetach_event(void *arg __unused, struct ifnet *ifp)
3612 {
3613 	struct vxlan_softc_head list = LIST_HEAD_INITIALIZER(list);
3614 	struct vxlan_socket *vso;
3615 	struct vxlan_softc *sc, *tsc;
3616 
3617 	if (ifp->if_flags & IFF_RENAMING)
3618 		return;
3619 	if ((ifp->if_flags & IFF_MULTICAST) == 0)
3620 		return;
3621 
3622 	VXLAN_LIST_LOCK();
3623 	LIST_FOREACH(vso, &vxlan_socket_list, vxlso_entry)
3624 		vxlan_socket_ifdetach(vso, ifp, &list);
3625 	VXLAN_LIST_UNLOCK();
3626 
3627 	LIST_FOREACH_SAFE(sc, &list, vxl_ifdetach_list, tsc) {
3628 		LIST_REMOVE(sc, vxl_ifdetach_list);
3629 
3630 		sx_xlock(&vxlan_sx);
3631 		VXLAN_WLOCK(sc);
3632 		if (sc->vxl_flags & VXLAN_FLAG_INIT)
3633 			vxlan_init_wait(sc);
3634 		vxlan_teardown_locked(sc);
3635 		sx_xunlock(&vxlan_sx);
3636 	}
3637 }
3638 
3639 static void
3640 vxlan_load(void)
3641 {
3642 
3643 	mtx_init(&vxlan_list_mtx, "vxlan list", NULL, MTX_DEF);
3644 	vxlan_ifdetach_event_tag = EVENTHANDLER_REGISTER(ifnet_departure_event,
3645 	    vxlan_ifdetach_event, NULL, EVENTHANDLER_PRI_ANY);
3646 
3647 	struct if_clone_addreq req = {
3648 		.create_f = vxlan_clone_create,
3649 		.destroy_f = vxlan_clone_destroy,
3650 		.flags = IFC_F_AUTOUNIT,
3651 	};
3652 	vxlan_cloner = ifc_attach_cloner(vxlan_name, &req);
3653 }
3654 
3655 static void
3656 vxlan_unload(void)
3657 {
3658 
3659 	EVENTHANDLER_DEREGISTER(ifnet_departure_event,
3660 	    vxlan_ifdetach_event_tag);
3661 	ifc_detach_cloner(vxlan_cloner);
3662 	mtx_destroy(&vxlan_list_mtx);
3663 	MPASS(LIST_EMPTY(&vxlan_socket_list));
3664 }
3665 
3666 static int
3667 vxlan_modevent(module_t mod, int type, void *unused)
3668 {
3669 	int error;
3670 
3671 	error = 0;
3672 
3673 	switch (type) {
3674 	case MOD_LOAD:
3675 		vxlan_load();
3676 		break;
3677 	case MOD_UNLOAD:
3678 		vxlan_unload();
3679 		break;
3680 	default:
3681 		error = ENOTSUP;
3682 		break;
3683 	}
3684 
3685 	return (error);
3686 }
3687 
3688 static moduledata_t vxlan_mod = {
3689 	"if_vxlan",
3690 	vxlan_modevent,
3691 	0
3692 };
3693 
3694 DECLARE_MODULE(if_vxlan, vxlan_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
3695 MODULE_VERSION(if_vxlan, 1);
3696