xref: /freebsd/sys/net/if_vxlan.c (revision 657729a89dd578d8cfc70d6616f5c65a48a8b33a)
1 /*-
2  * Copyright (c) 2014, Bryan Venteicher <bryanv@FreeBSD.org>
3  * All rights reserved.
4  * Copyright (c) 2020, Chelsio Communications.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice unmodified, this list of conditions, and the following
11  *    disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 #include "opt_inet.h"
29 #include "opt_inet6.h"
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 #include <sys/param.h>
35 #include <sys/eventhandler.h>
36 #include <sys/kernel.h>
37 #include <sys/lock.h>
38 #include <sys/hash.h>
39 #include <sys/malloc.h>
40 #include <sys/mbuf.h>
41 #include <sys/module.h>
42 #include <sys/refcount.h>
43 #include <sys/rmlock.h>
44 #include <sys/priv.h>
45 #include <sys/proc.h>
46 #include <sys/queue.h>
47 #include <sys/sbuf.h>
48 #include <sys/socket.h>
49 #include <sys/socketvar.h>
50 #include <sys/sockio.h>
51 #include <sys/sysctl.h>
52 #include <sys/systm.h>
53 
54 #include <net/bpf.h>
55 #include <net/ethernet.h>
56 #include <net/if.h>
57 #include <net/if_var.h>
58 #include <net/if_clone.h>
59 #include <net/if_dl.h>
60 #include <net/if_media.h>
61 #include <net/if_types.h>
62 #include <net/if_vxlan.h>
63 #include <net/netisr.h>
64 #include <net/route.h>
65 #include <net/route/nhop.h>
66 
67 #include <netinet/in.h>
68 #include <netinet/in_systm.h>
69 #include <netinet/in_var.h>
70 #include <netinet/in_pcb.h>
71 #include <netinet/ip.h>
72 #include <netinet/ip6.h>
73 #include <netinet/ip_var.h>
74 #include <netinet/udp.h>
75 #include <netinet/udp_var.h>
76 #include <netinet/in_fib.h>
77 #include <netinet6/in6_fib.h>
78 
79 #include <netinet6/ip6_var.h>
80 #include <netinet6/scope6_var.h>
81 
82 struct vxlan_softc;
83 LIST_HEAD(vxlan_softc_head, vxlan_softc);
84 
85 struct sx vxlan_sx;
86 SX_SYSINIT(vxlan, &vxlan_sx, "VXLAN global start/stop lock");
87 
88 struct vxlan_socket_mc_info {
89 	union vxlan_sockaddr		 vxlsomc_saddr;
90 	union vxlan_sockaddr		 vxlsomc_gaddr;
91 	int				 vxlsomc_ifidx;
92 	int				 vxlsomc_users;
93 };
94 
95 /*
96  * The maximum MTU of an encapsulated Ethernet frame within an IPv4/UDP packet.
97  */
98 #define VXLAN_MAX_MTU	(IP_MAXPACKET - \
99 		60 /* Maximum IPv4 header len */ - \
100 		sizeof(struct udphdr) - \
101 		sizeof(struct vxlan_header) - \
102 		ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN)
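/*
 * For reference (assuming the usual 8-byte struct vxlan_header): with
 * IP_MAXPACKET = 65535, a maximal 60-byte IPv4 header, an 8-byte UDP
 * header, and 14 + 4 + 4 bytes of Ethernet framing, this works out to
 * 65535 - 60 - 8 - 8 - 14 - 4 - 4 = 65437 bytes.
 */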
103 #define VXLAN_BASIC_IFCAPS (IFCAP_LINKSTATE | IFCAP_JUMBO_MTU)
104 
105 #define VXLAN_SO_MC_MAX_GROUPS		32
106 
107 #define VXLAN_SO_VNI_HASH_SHIFT		6
108 #define VXLAN_SO_VNI_HASH_SIZE		(1 << VXLAN_SO_VNI_HASH_SHIFT)
109 #define VXLAN_SO_VNI_HASH(_vni)		((_vni) % VXLAN_SO_VNI_HASH_SIZE)
110 
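/*
 * A single UDP socket may be shared by several vxlan interfaces: the
 * softcs attached to a socket are hashed by VNI, and multicast group
 * memberships are tracked per socket with a user count so each group is
 * joined and left only once.
 */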
111 struct vxlan_socket {
112 	struct socket			*vxlso_sock;
113 	struct rmlock			 vxlso_lock;
114 	u_int				 vxlso_refcnt;
115 	union vxlan_sockaddr		 vxlso_laddr;
116 	LIST_ENTRY(vxlan_socket)	 vxlso_entry;
117 	struct vxlan_softc_head		 vxlso_vni_hash[VXLAN_SO_VNI_HASH_SIZE];
118 	struct vxlan_socket_mc_info	 vxlso_mc[VXLAN_SO_MC_MAX_GROUPS];
119 };
120 
121 #define VXLAN_SO_RLOCK(_vso, _p)	rm_rlock(&(_vso)->vxlso_lock, (_p))
122 #define VXLAN_SO_RUNLOCK(_vso, _p)	rm_runlock(&(_vso)->vxlso_lock, (_p))
123 #define VXLAN_SO_WLOCK(_vso)		rm_wlock(&(_vso)->vxlso_lock)
124 #define VXLAN_SO_WUNLOCK(_vso)		rm_wunlock(&(_vso)->vxlso_lock)
125 #define VXLAN_SO_LOCK_ASSERT(_vso) \
126     rm_assert(&(_vso)->vxlso_lock, RA_LOCKED)
127 #define VXLAN_SO_LOCK_WASSERT(_vso) \
128     rm_assert(&(_vso)->vxlso_lock, RA_WLOCKED)
129 
130 #define VXLAN_SO_ACQUIRE(_vso)		refcount_acquire(&(_vso)->vxlso_refcnt)
131 #define VXLAN_SO_RELEASE(_vso)		refcount_release(&(_vso)->vxlso_refcnt)
132 
133 struct vxlan_ftable_entry {
134 	LIST_ENTRY(vxlan_ftable_entry)	 vxlfe_hash;
135 	uint16_t			 vxlfe_flags;
136 	uint8_t				 vxlfe_mac[ETHER_ADDR_LEN];
137 	union vxlan_sockaddr		 vxlfe_raddr;
138 	time_t				 vxlfe_expire;
139 };
140 
141 #define VXLAN_FE_FLAG_DYNAMIC		0x01
142 #define VXLAN_FE_FLAG_STATIC		0x02
143 
144 #define VXLAN_FE_IS_DYNAMIC(_fe) \
145     ((_fe)->vxlfe_flags & VXLAN_FE_FLAG_DYNAMIC)
146 
147 #define VXLAN_SC_FTABLE_SHIFT		9
148 #define VXLAN_SC_FTABLE_SIZE		(1 << VXLAN_SC_FTABLE_SHIFT)
149 #define VXLAN_SC_FTABLE_MASK		(VXLAN_SC_FTABLE_SIZE - 1)
150 #define VXLAN_SC_FTABLE_HASH(_sc, _mac)	\
151     (vxlan_mac_hash(_sc, _mac) % VXLAN_SC_FTABLE_SIZE)
152 
153 LIST_HEAD(vxlan_ftable_head, vxlan_ftable_entry);
154 
155 struct vxlan_statistics {
156 	uint32_t	ftable_nospace;
157 	uint32_t	ftable_lock_upgrade_failed;
158 	counter_u64_t	txcsum;
159 	counter_u64_t	tso;
160 	counter_u64_t	rxcsum;
161 };
162 
163 struct vxlan_softc {
164 	struct ifnet			*vxl_ifp;
165 	int				 vxl_reqcap;
166 	u_int				 vxl_fibnum;
167 	struct vxlan_socket		*vxl_sock;
168 	uint32_t			 vxl_vni;
169 	union vxlan_sockaddr		 vxl_src_addr;
170 	union vxlan_sockaddr		 vxl_dst_addr;
171 	uint32_t			 vxl_flags;
172 #define VXLAN_FLAG_INIT		0x0001
173 #define VXLAN_FLAG_TEARDOWN	0x0002
174 #define VXLAN_FLAG_LEARN	0x0004
175 #define VXLAN_FLAG_USER_MTU	0x0008
176 
177 	uint32_t			 vxl_port_hash_key;
178 	uint16_t			 vxl_min_port;
179 	uint16_t			 vxl_max_port;
180 	uint8_t				 vxl_ttl;
181 
182 	/* Lookup table from MAC address to forwarding entry. */
183 	uint32_t			 vxl_ftable_cnt;
184 	uint32_t			 vxl_ftable_max;
185 	uint32_t			 vxl_ftable_timeout;
186 	uint32_t			 vxl_ftable_hash_key;
187 	struct vxlan_ftable_head	*vxl_ftable;
188 
189 	/* Derived from vxl_dst_addr. */
190 	struct vxlan_ftable_entry	 vxl_default_fe;
191 
192 	struct ip_moptions		*vxl_im4o;
193 	struct ip6_moptions		*vxl_im6o;
194 
195 	struct rmlock			 vxl_lock;
196 	volatile u_int			 vxl_refcnt;
197 
198 	int				 vxl_unit;
199 	int				 vxl_vso_mc_index;
200 	struct vxlan_statistics		 vxl_stats;
201 	struct sysctl_oid		*vxl_sysctl_node;
202 	struct sysctl_ctx_list		 vxl_sysctl_ctx;
203 	struct callout			 vxl_callout;
204 	struct ether_addr		 vxl_hwaddr;
205 	int				 vxl_mc_ifindex;
206 	struct ifnet			*vxl_mc_ifp;
207 	struct ifmedia 			 vxl_media;
208 	char				 vxl_mc_ifname[IFNAMSIZ];
209 	LIST_ENTRY(vxlan_softc)		 vxl_entry;
210 	LIST_ENTRY(vxlan_softc)		 vxl_ifdetach_list;
211 
212 	/* For rate limiting errors on the tx fast path. */
213 	struct timeval err_time;
214 	int err_pps;
215 };
216 
217 #define VXLAN_RLOCK(_sc, _p)	rm_rlock(&(_sc)->vxl_lock, (_p))
218 #define VXLAN_RUNLOCK(_sc, _p)	rm_runlock(&(_sc)->vxl_lock, (_p))
219 #define VXLAN_WLOCK(_sc)	rm_wlock(&(_sc)->vxl_lock)
220 #define VXLAN_WUNLOCK(_sc)	rm_wunlock(&(_sc)->vxl_lock)
221 #define VXLAN_LOCK_WOWNED(_sc)	rm_wowned(&(_sc)->vxl_lock)
222 #define VXLAN_LOCK_ASSERT(_sc)	rm_assert(&(_sc)->vxl_lock, RA_LOCKED)
223 #define VXLAN_LOCK_WASSERT(_sc) rm_assert(&(_sc)->vxl_lock, RA_WLOCKED)
224 #define VXLAN_UNLOCK(_sc, _p) do {		\
225     if (VXLAN_LOCK_WOWNED(_sc))			\
226 	VXLAN_WUNLOCK(_sc);			\
227     else					\
228 	VXLAN_RUNLOCK(_sc, _p);			\
229 } while (0)
230 
231 #define VXLAN_ACQUIRE(_sc)	refcount_acquire(&(_sc)->vxl_refcnt)
232 #define VXLAN_RELEASE(_sc)	refcount_release(&(_sc)->vxl_refcnt)
233 
234 #define	satoconstsin(sa)	((const struct sockaddr_in *)(sa))
235 #define	satoconstsin6(sa)	((const struct sockaddr_in6 *)(sa))
236 
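/*
 * Wire format of the UDP payload: the 8-byte VXLAN header immediately
 * follows the UDP header (RFC 7348), ahead of the encapsulated Ethernet
 * frame.
 */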
237 struct vxlanudphdr {
238 	struct udphdr		vxlh_udp;
239 	struct vxlan_header	vxlh_hdr;
240 } __packed;
241 
242 static int	vxlan_ftable_addr_cmp(const uint8_t *, const uint8_t *);
243 static void	vxlan_ftable_init(struct vxlan_softc *);
244 static void	vxlan_ftable_fini(struct vxlan_softc *);
245 static void	vxlan_ftable_flush(struct vxlan_softc *, int);
246 static void	vxlan_ftable_expire(struct vxlan_softc *);
247 static int	vxlan_ftable_update_locked(struct vxlan_softc *,
248 		    const union vxlan_sockaddr *, const uint8_t *,
249 		    struct rm_priotracker *);
250 static int	vxlan_ftable_learn(struct vxlan_softc *,
251 		    const struct sockaddr *, const uint8_t *);
252 static int	vxlan_ftable_sysctl_dump(SYSCTL_HANDLER_ARGS);
253 
254 static struct vxlan_ftable_entry *
255 		vxlan_ftable_entry_alloc(void);
256 static void	vxlan_ftable_entry_free(struct vxlan_ftable_entry *);
257 static void	vxlan_ftable_entry_init(struct vxlan_softc *,
258 		    struct vxlan_ftable_entry *, const uint8_t *,
259 		    const struct sockaddr *, uint32_t);
260 static void	vxlan_ftable_entry_destroy(struct vxlan_softc *,
261 		    struct vxlan_ftable_entry *);
262 static int	vxlan_ftable_entry_insert(struct vxlan_softc *,
263 		    struct vxlan_ftable_entry *);
264 static struct vxlan_ftable_entry *
265 		vxlan_ftable_entry_lookup(struct vxlan_softc *,
266 		    const uint8_t *);
267 static void	vxlan_ftable_entry_dump(struct vxlan_ftable_entry *,
268 		    struct sbuf *);
269 
270 static struct vxlan_socket *
271 		vxlan_socket_alloc(const union vxlan_sockaddr *);
272 static void	vxlan_socket_destroy(struct vxlan_socket *);
273 static void	vxlan_socket_release(struct vxlan_socket *);
274 static struct vxlan_socket *
275 		vxlan_socket_lookup(union vxlan_sockaddr *vxlsa);
276 static void	vxlan_socket_insert(struct vxlan_socket *);
277 static int	vxlan_socket_init(struct vxlan_socket *, struct ifnet *);
278 static int	vxlan_socket_bind(struct vxlan_socket *, struct ifnet *);
279 static int	vxlan_socket_create(struct ifnet *, int,
280 		    const union vxlan_sockaddr *, struct vxlan_socket **);
281 static void	vxlan_socket_ifdetach(struct vxlan_socket *,
282 		    struct ifnet *, struct vxlan_softc_head *);
283 
284 static struct vxlan_socket *
285 		vxlan_socket_mc_lookup(const union vxlan_sockaddr *);
286 static int	vxlan_sockaddr_mc_info_match(
287 		    const struct vxlan_socket_mc_info *,
288 		    const union vxlan_sockaddr *,
289 		    const union vxlan_sockaddr *, int);
290 static int	vxlan_socket_mc_join_group(struct vxlan_socket *,
291 		    const union vxlan_sockaddr *, const union vxlan_sockaddr *,
292 		    int *, union vxlan_sockaddr *);
293 static int	vxlan_socket_mc_leave_group(struct vxlan_socket *,
294 		    const union vxlan_sockaddr *,
295 		    const union vxlan_sockaddr *, int);
296 static int	vxlan_socket_mc_add_group(struct vxlan_socket *,
297 		    const union vxlan_sockaddr *, const union vxlan_sockaddr *,
298 		    int, int *);
299 static void	vxlan_socket_mc_release_group_by_idx(struct vxlan_socket *,
300 		    int);
301 
302 static struct vxlan_softc *
303 		vxlan_socket_lookup_softc_locked(struct vxlan_socket *,
304 		    uint32_t);
305 static struct vxlan_softc *
306 		vxlan_socket_lookup_softc(struct vxlan_socket *, uint32_t);
307 static int	vxlan_socket_insert_softc(struct vxlan_socket *,
308 		    struct vxlan_softc *);
309 static void	vxlan_socket_remove_softc(struct vxlan_socket *,
310 		    struct vxlan_softc *);
311 
312 static struct ifnet *
313 		vxlan_multicast_if_ref(struct vxlan_softc *, int);
314 static void	vxlan_free_multicast(struct vxlan_softc *);
315 static int	vxlan_setup_multicast_interface(struct vxlan_softc *);
316 
317 static int	vxlan_setup_multicast(struct vxlan_softc *);
318 static int	vxlan_setup_socket(struct vxlan_softc *);
319 #ifdef INET6
320 static void	vxlan_setup_zero_checksum_port(struct vxlan_softc *);
321 #endif
322 static void	vxlan_setup_interface_hdrlen(struct vxlan_softc *);
323 static int	vxlan_valid_init_config(struct vxlan_softc *);
324 static void	vxlan_init_wait(struct vxlan_softc *);
325 static void	vxlan_init_complete(struct vxlan_softc *);
326 static void	vxlan_init(void *);
327 static void	vxlan_release(struct vxlan_softc *);
328 static void	vxlan_teardown_wait(struct vxlan_softc *);
329 static void	vxlan_teardown_complete(struct vxlan_softc *);
330 static void	vxlan_teardown_locked(struct vxlan_softc *);
331 static void	vxlan_teardown(struct vxlan_softc *);
332 static void	vxlan_ifdetach(struct vxlan_softc *, struct ifnet *,
333 		    struct vxlan_softc_head *);
334 static void	vxlan_timer(void *);
335 
336 static int	vxlan_ctrl_get_config(struct vxlan_softc *, void *);
337 static int	vxlan_ctrl_set_vni(struct vxlan_softc *, void *);
338 static int	vxlan_ctrl_set_local_addr(struct vxlan_softc *, void *);
339 static int	vxlan_ctrl_set_remote_addr(struct vxlan_softc *, void *);
340 static int	vxlan_ctrl_set_local_port(struct vxlan_softc *, void *);
341 static int	vxlan_ctrl_set_remote_port(struct vxlan_softc *, void *);
342 static int	vxlan_ctrl_set_port_range(struct vxlan_softc *, void *);
343 static int	vxlan_ctrl_set_ftable_timeout(struct vxlan_softc *, void *);
344 static int	vxlan_ctrl_set_ftable_max(struct vxlan_softc *, void *);
345 static int	vxlan_ctrl_set_multicast_if(struct vxlan_softc *, void *);
346 static int	vxlan_ctrl_set_ttl(struct vxlan_softc *, void *);
347 static int	vxlan_ctrl_set_learn(struct vxlan_softc *, void *);
348 static int	vxlan_ctrl_ftable_entry_add(struct vxlan_softc *, void *);
349 static int	vxlan_ctrl_ftable_entry_rem(struct vxlan_softc *, void *);
350 static int	vxlan_ctrl_flush(struct vxlan_softc *, void *);
351 static int	vxlan_ioctl_drvspec(struct vxlan_softc *,
352 		    struct ifdrv *, int);
353 static int	vxlan_ioctl_ifflags(struct vxlan_softc *);
354 static int	vxlan_ioctl(struct ifnet *, u_long, caddr_t);
355 
356 #if defined(INET) || defined(INET6)
357 static uint16_t vxlan_pick_source_port(struct vxlan_softc *, struct mbuf *);
358 static void	vxlan_encap_header(struct vxlan_softc *, struct mbuf *,
359 		    int, uint16_t, uint16_t);
360 #endif
361 static int	vxlan_encap4(struct vxlan_softc *,
362 		    const union vxlan_sockaddr *, struct mbuf *);
363 static int	vxlan_encap6(struct vxlan_softc *,
364 		    const union vxlan_sockaddr *, struct mbuf *);
365 static int	vxlan_transmit(struct ifnet *, struct mbuf *);
366 static void	vxlan_qflush(struct ifnet *);
367 static bool	vxlan_rcv_udp_packet(struct mbuf *, int, struct inpcb *,
368 		    const struct sockaddr *, void *);
369 static int	vxlan_input(struct vxlan_socket *, uint32_t, struct mbuf **,
370 		    const struct sockaddr *);
371 
372 static int	vxlan_stats_alloc(struct vxlan_softc *);
373 static void	vxlan_stats_free(struct vxlan_softc *);
374 static void	vxlan_set_default_config(struct vxlan_softc *);
375 static int	vxlan_set_user_config(struct vxlan_softc *,
376 		     struct ifvxlanparam *);
377 static int	vxlan_set_reqcap(struct vxlan_softc *, struct ifnet *, int);
378 static void	vxlan_set_hwcaps(struct vxlan_softc *);
379 static int	vxlan_clone_create(struct if_clone *, char *, size_t,
380 		    struct ifc_data *, struct ifnet **);
381 static int	vxlan_clone_destroy(struct if_clone *, struct ifnet *, uint32_t);
382 
383 static uint32_t vxlan_mac_hash(struct vxlan_softc *, const uint8_t *);
384 static int	vxlan_media_change(struct ifnet *);
385 static void	vxlan_media_status(struct ifnet *, struct ifmediareq *);
386 
387 static int	vxlan_sockaddr_cmp(const union vxlan_sockaddr *,
388 		    const struct sockaddr *);
389 static void	vxlan_sockaddr_copy(union vxlan_sockaddr *,
390 		    const struct sockaddr *);
391 static int	vxlan_sockaddr_in_equal(const union vxlan_sockaddr *,
392 		    const struct sockaddr *);
393 static void	vxlan_sockaddr_in_copy(union vxlan_sockaddr *,
394 		    const struct sockaddr *);
395 static int	vxlan_sockaddr_supported(const union vxlan_sockaddr *, int);
396 static int	vxlan_sockaddr_in_any(const union vxlan_sockaddr *);
397 static int	vxlan_sockaddr_in_multicast(const union vxlan_sockaddr *);
398 static int	vxlan_sockaddr_in6_embedscope(union vxlan_sockaddr *);
399 
400 static int	vxlan_can_change_config(struct vxlan_softc *);
401 static int	vxlan_check_vni(uint32_t);
402 static int	vxlan_check_ttl(int);
403 static int	vxlan_check_ftable_timeout(uint32_t);
404 static int	vxlan_check_ftable_max(uint32_t);
405 
406 static void	vxlan_sysctl_setup(struct vxlan_softc *);
407 static void	vxlan_sysctl_destroy(struct vxlan_softc *);
408 static int	vxlan_tunable_int(struct vxlan_softc *, const char *, int);
409 
410 static void	vxlan_ifdetach_event(void *, struct ifnet *);
411 static void	vxlan_load(void);
412 static void	vxlan_unload(void);
413 static int	vxlan_modevent(module_t, int, void *);
414 
415 static const char vxlan_name[] = "vxlan";
416 static MALLOC_DEFINE(M_VXLAN, vxlan_name,
417     "Virtual eXtensible LAN Interface");
418 static struct if_clone *vxlan_cloner;
419 
420 static struct mtx vxlan_list_mtx;
421 #define VXLAN_LIST_LOCK()	mtx_lock(&vxlan_list_mtx)
422 #define VXLAN_LIST_UNLOCK()	mtx_unlock(&vxlan_list_mtx)
423 
424 static LIST_HEAD(, vxlan_socket) vxlan_socket_list;
425 
426 static eventhandler_tag vxlan_ifdetach_event_tag;
427 
428 SYSCTL_DECL(_net_link);
429 SYSCTL_NODE(_net_link, OID_AUTO, vxlan, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
430     "Virtual eXtensible Local Area Network");
431 
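/*
 * net.link.vxlan.legacy_port selects the pre-IANA UDP port (8472) rather
 * than the IANA-assigned port 4789 as the default VXLAN port, and
 * net.link.vxlan.reuse_port requests SO_REUSEPORT on newly created
 * sockets so that multiple sockets may bind the same local port.
 */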
432 static int vxlan_legacy_port = 0;
433 TUNABLE_INT("net.link.vxlan.legacy_port", &vxlan_legacy_port);
434 static int vxlan_reuse_port = 0;
435 TUNABLE_INT("net.link.vxlan.reuse_port", &vxlan_reuse_port);
436 
437 /* Default maximum number of addresses in the forwarding table. */
438 #ifndef VXLAN_FTABLE_MAX
439 #define VXLAN_FTABLE_MAX	2000
440 #endif
441 
442 /* Timeout (in seconds) of addresses learned in the forwarding table. */
443 #ifndef VXLAN_FTABLE_TIMEOUT
444 #define VXLAN_FTABLE_TIMEOUT	(20 * 60)
445 #endif
446 
447 /*
448  * Maximum timeout (in seconds) of addresses learned in the forwarding
449  * table.
450  */
451 #ifndef VXLAN_FTABLE_MAX_TIMEOUT
452 #define VXLAN_FTABLE_MAX_TIMEOUT	(60 * 60 * 24)
453 #endif
454 
455 /* Number of seconds between pruning attempts of the forwarding table. */
456 #ifndef VXLAN_FTABLE_PRUNE
457 #define VXLAN_FTABLE_PRUNE	(5 * 60)
458 #endif
459 
460 static int vxlan_ftable_prune_period = VXLAN_FTABLE_PRUNE;
461 
462 struct vxlan_control {
463 	int	(*vxlc_func)(struct vxlan_softc *, void *);
464 	int	vxlc_argsize;
465 	int	vxlc_flags;
466 #define VXLAN_CTRL_FLAG_COPYIN	0x01
467 #define VXLAN_CTRL_FLAG_COPYOUT	0x02
468 #define VXLAN_CTRL_FLAG_SUSER	0x04
469 };
470 
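/*
 * Dispatch table for the SIOCGDRVSPEC/SIOCSDRVSPEC subcommands handled by
 * vxlan_ioctl_drvspec(): the command number indexes the table,
 * vxlc_argsize bytes are copied in and/or out as indicated by the flags,
 * and commands flagged VXLAN_CTRL_FLAG_SUSER require privilege.
 */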
471 static const struct vxlan_control vxlan_control_table[] = {
472 	[VXLAN_CMD_GET_CONFIG] =
473 	    {	vxlan_ctrl_get_config, sizeof(struct ifvxlancfg),
474 		VXLAN_CTRL_FLAG_COPYOUT
475 	    },
476 
477 	[VXLAN_CMD_SET_VNI] =
478 	    {   vxlan_ctrl_set_vni, sizeof(struct ifvxlancmd),
479 		VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
480 	    },
481 
482 	[VXLAN_CMD_SET_LOCAL_ADDR] =
483 	    {   vxlan_ctrl_set_local_addr, sizeof(struct ifvxlancmd),
484 		VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
485 	    },
486 
487 	[VXLAN_CMD_SET_REMOTE_ADDR] =
488 	    {   vxlan_ctrl_set_remote_addr, sizeof(struct ifvxlancmd),
489 		VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
490 	    },
491 
492 	[VXLAN_CMD_SET_LOCAL_PORT] =
493 	    {   vxlan_ctrl_set_local_port, sizeof(struct ifvxlancmd),
494 		VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
495 	    },
496 
497 	[VXLAN_CMD_SET_REMOTE_PORT] =
498 	    {   vxlan_ctrl_set_remote_port, sizeof(struct ifvxlancmd),
499 		VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
500 	    },
501 
502 	[VXLAN_CMD_SET_PORT_RANGE] =
503 	    {   vxlan_ctrl_set_port_range, sizeof(struct ifvxlancmd),
504 		VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
505 	    },
506 
507 	[VXLAN_CMD_SET_FTABLE_TIMEOUT] =
508 	    {	vxlan_ctrl_set_ftable_timeout, sizeof(struct ifvxlancmd),
509 		VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
510 	    },
511 
512 	[VXLAN_CMD_SET_FTABLE_MAX] =
513 	    {	vxlan_ctrl_set_ftable_max, sizeof(struct ifvxlancmd),
514 		VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
515 	    },
516 
517 	[VXLAN_CMD_SET_MULTICAST_IF] =
518 	    {	vxlan_ctrl_set_multicast_if, sizeof(struct ifvxlancmd),
519 		VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
520 	    },
521 
522 	[VXLAN_CMD_SET_TTL] =
523 	    {	vxlan_ctrl_set_ttl, sizeof(struct ifvxlancmd),
524 		VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
525 	    },
526 
527 	[VXLAN_CMD_SET_LEARN] =
528 	    {	vxlan_ctrl_set_learn, sizeof(struct ifvxlancmd),
529 		VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
530 	    },
531 
532 	[VXLAN_CMD_FTABLE_ENTRY_ADD] =
533 	    {	vxlan_ctrl_ftable_entry_add, sizeof(struct ifvxlancmd),
534 		VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
535 	    },
536 
537 	[VXLAN_CMD_FTABLE_ENTRY_REM] =
538 	    {	vxlan_ctrl_ftable_entry_rem, sizeof(struct ifvxlancmd),
539 		VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
540 	    },
541 
542 	[VXLAN_CMD_FLUSH] =
543 	    {   vxlan_ctrl_flush, sizeof(struct ifvxlancmd),
544 		VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
545 	    },
546 };
547 
548 static const int vxlan_control_table_size = nitems(vxlan_control_table);
549 
550 static int
551 vxlan_ftable_addr_cmp(const uint8_t *a, const uint8_t *b)
552 {
553 	int i, d;
554 
555 	for (i = 0, d = 0; i < ETHER_ADDR_LEN && d == 0; i++)
556 		d = ((int)a[i]) - ((int)b[i]);
557 
558 	return (d);
559 }
560 
561 static void
562 vxlan_ftable_init(struct vxlan_softc *sc)
563 {
564 	int i;
565 
566 	sc->vxl_ftable = malloc(sizeof(struct vxlan_ftable_head) *
567 	    VXLAN_SC_FTABLE_SIZE, M_VXLAN, M_ZERO | M_WAITOK);
568 
569 	for (i = 0; i < VXLAN_SC_FTABLE_SIZE; i++)
570 		LIST_INIT(&sc->vxl_ftable[i]);
571 	sc->vxl_ftable_hash_key = arc4random();
572 }
573 
574 static void
575 vxlan_ftable_fini(struct vxlan_softc *sc)
576 {
577 	int i;
578 
579 	for (i = 0; i < VXLAN_SC_FTABLE_SIZE; i++) {
580 		KASSERT(LIST_EMPTY(&sc->vxl_ftable[i]),
581 		    ("%s: vxlan %p ftable[%d] not empty", __func__, sc, i));
582 	}
583 	MPASS(sc->vxl_ftable_cnt == 0);
584 
585 	free(sc->vxl_ftable, M_VXLAN);
586 	sc->vxl_ftable = NULL;
587 }
588 
589 static void
590 vxlan_ftable_flush(struct vxlan_softc *sc, int all)
591 {
592 	struct vxlan_ftable_entry *fe, *tfe;
593 	int i;
594 
595 	for (i = 0; i < VXLAN_SC_FTABLE_SIZE; i++) {
596 		LIST_FOREACH_SAFE(fe, &sc->vxl_ftable[i], vxlfe_hash, tfe) {
597 			if (all || VXLAN_FE_IS_DYNAMIC(fe))
598 				vxlan_ftable_entry_destroy(sc, fe);
599 		}
600 	}
601 }
602 
603 static void
604 vxlan_ftable_expire(struct vxlan_softc *sc)
605 {
606 	struct vxlan_ftable_entry *fe, *tfe;
607 	int i;
608 
609 	VXLAN_LOCK_WASSERT(sc);
610 
611 	for (i = 0; i < VXLAN_SC_FTABLE_SIZE; i++) {
612 		LIST_FOREACH_SAFE(fe, &sc->vxl_ftable[i], vxlfe_hash, tfe) {
613 			if (VXLAN_FE_IS_DYNAMIC(fe) &&
614 			    time_uptime >= fe->vxlfe_expire)
615 				vxlan_ftable_entry_destroy(sc, fe);
616 		}
617 	}
618 }
619 
620 static int
621 vxlan_ftable_update_locked(struct vxlan_softc *sc,
622     const union vxlan_sockaddr *vxlsa, const uint8_t *mac,
623     struct rm_priotracker *tracker)
624 {
625 	struct vxlan_ftable_entry *fe;
626 	int error __unused;
627 
628 	VXLAN_LOCK_ASSERT(sc);
629 
630 again:
631 	/*
632 	 * A forwarding entry for this MAC address might already exist. If
633 	 * so, update it, otherwise create a new one. We may have to upgrade
634 	 * the lock if we have to change or create an entry.
635 	 */
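	/*
	 * Note that an rm(9) read lock cannot be upgraded in place: when a
	 * write is required below, the read lock is dropped, the write lock
	 * taken, and the lookup restarted, since the table may have changed
	 * in the meantime.
	 */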
636 	fe = vxlan_ftable_entry_lookup(sc, mac);
637 	if (fe != NULL) {
638 		fe->vxlfe_expire = time_uptime + sc->vxl_ftable_timeout;
639 
640 		if (!VXLAN_FE_IS_DYNAMIC(fe) ||
641 		    vxlan_sockaddr_in_equal(&fe->vxlfe_raddr, &vxlsa->sa))
642 			return (0);
643 		if (!VXLAN_LOCK_WOWNED(sc)) {
644 			VXLAN_RUNLOCK(sc, tracker);
645 			VXLAN_WLOCK(sc);
646 			sc->vxl_stats.ftable_lock_upgrade_failed++;
647 			goto again;
648 		}
649 		vxlan_sockaddr_in_copy(&fe->vxlfe_raddr, &vxlsa->sa);
650 		return (0);
651 	}
652 
653 	if (!VXLAN_LOCK_WOWNED(sc)) {
654 		VXLAN_RUNLOCK(sc, tracker);
655 		VXLAN_WLOCK(sc);
656 		sc->vxl_stats.ftable_lock_upgrade_failed++;
657 		goto again;
658 	}
659 
660 	if (sc->vxl_ftable_cnt >= sc->vxl_ftable_max) {
661 		sc->vxl_stats.ftable_nospace++;
662 		return (ENOSPC);
663 	}
664 
665 	fe = vxlan_ftable_entry_alloc();
666 	if (fe == NULL)
667 		return (ENOMEM);
668 
669 	vxlan_ftable_entry_init(sc, fe, mac, &vxlsa->sa, VXLAN_FE_FLAG_DYNAMIC);
670 
671 	/* The prior lookup failed, so the insert should not. */
672 	error = vxlan_ftable_entry_insert(sc, fe);
673 	MPASS(error == 0);
674 
675 	return (0);
676 }
677 
678 static int
679 vxlan_ftable_learn(struct vxlan_softc *sc, const struct sockaddr *sa,
680     const uint8_t *mac)
681 {
682 	struct rm_priotracker tracker;
683 	union vxlan_sockaddr vxlsa;
684 	int error;
685 
686 	/*
687 	 * The source port may be randomly selected by the remote host, so
688 	 * use the port of the default destination address.
689 	 */
690 	vxlan_sockaddr_copy(&vxlsa, sa);
691 	vxlsa.in4.sin_port = sc->vxl_dst_addr.in4.sin_port;
692 
693 	if (VXLAN_SOCKADDR_IS_IPV6(&vxlsa)) {
694 		error = vxlan_sockaddr_in6_embedscope(&vxlsa);
695 		if (error)
696 			return (error);
697 	}
698 
699 	VXLAN_RLOCK(sc, &tracker);
700 	error = vxlan_ftable_update_locked(sc, &vxlsa, mac, &tracker);
701 	VXLAN_UNLOCK(sc, &tracker);
702 
703 	return (error);
704 }
705 
706 static int
707 vxlan_ftable_sysctl_dump(SYSCTL_HANDLER_ARGS)
708 {
709 	struct rm_priotracker tracker;
710 	struct sbuf sb;
711 	struct vxlan_softc *sc;
712 	struct vxlan_ftable_entry *fe;
713 	size_t size;
714 	int i, error;
715 
716 	/*
717 	 * This is mostly intended for debugging during development. It is
718 	 * not practical to dump an entire large table this way.
719 	 */
720 
721 	sc = arg1;
722 	size = PAGE_SIZE;	/* Calculate later. */
723 
724 	sbuf_new(&sb, NULL, size, SBUF_FIXEDLEN);
725 	sbuf_putc(&sb, '\n');
726 
727 	VXLAN_RLOCK(sc, &tracker);
728 	for (i = 0; i < VXLAN_SC_FTABLE_SIZE; i++) {
729 		LIST_FOREACH(fe, &sc->vxl_ftable[i], vxlfe_hash) {
730 			if (sbuf_error(&sb) != 0)
731 				break;
732 			vxlan_ftable_entry_dump(fe, &sb);
733 		}
734 	}
735 	VXLAN_RUNLOCK(sc, &tracker);
736 
737 	if (sbuf_len(&sb) == 1)
738 		sbuf_setpos(&sb, 0);
739 
740 	sbuf_finish(&sb);
741 	error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
742 	sbuf_delete(&sb);
743 
744 	return (error);
745 }
746 
747 static struct vxlan_ftable_entry *
748 vxlan_ftable_entry_alloc(void)
749 {
750 	struct vxlan_ftable_entry *fe;
751 
752 	fe = malloc(sizeof(*fe), M_VXLAN, M_ZERO | M_NOWAIT);
753 
754 	return (fe);
755 }
756 
757 static void
758 vxlan_ftable_entry_free(struct vxlan_ftable_entry *fe)
759 {
760 
761 	free(fe, M_VXLAN);
762 }
763 
764 static void
765 vxlan_ftable_entry_init(struct vxlan_softc *sc, struct vxlan_ftable_entry *fe,
766     const uint8_t *mac, const struct sockaddr *sa, uint32_t flags)
767 {
768 
769 	fe->vxlfe_flags = flags;
770 	fe->vxlfe_expire = time_uptime + sc->vxl_ftable_timeout;
771 	memcpy(fe->vxlfe_mac, mac, ETHER_ADDR_LEN);
772 	vxlan_sockaddr_copy(&fe->vxlfe_raddr, sa);
773 }
774 
775 static void
776 vxlan_ftable_entry_destroy(struct vxlan_softc *sc,
777     struct vxlan_ftable_entry *fe)
778 {
779 
780 	sc->vxl_ftable_cnt--;
781 	LIST_REMOVE(fe, vxlfe_hash);
782 	vxlan_ftable_entry_free(fe);
783 }
784 
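/*
 * Entries within a hash bucket are kept sorted (descending by MAC address,
 * per vxlan_ftable_addr_cmp()), which lets vxlan_ftable_entry_lookup()
 * stop searching as soon as the key would sort after the current entry.
 */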
785 static int
786 vxlan_ftable_entry_insert(struct vxlan_softc *sc,
787     struct vxlan_ftable_entry *fe)
788 {
789 	struct vxlan_ftable_entry *lfe;
790 	uint32_t hash;
791 	int dir;
792 
793 	VXLAN_LOCK_WASSERT(sc);
794 	hash = VXLAN_SC_FTABLE_HASH(sc, fe->vxlfe_mac);
795 
796 	lfe = LIST_FIRST(&sc->vxl_ftable[hash]);
797 	if (lfe == NULL) {
798 		LIST_INSERT_HEAD(&sc->vxl_ftable[hash], fe, vxlfe_hash);
799 		goto out;
800 	}
801 
802 	do {
803 		dir = vxlan_ftable_addr_cmp(fe->vxlfe_mac, lfe->vxlfe_mac);
804 		if (dir == 0)
805 			return (EEXIST);
806 		if (dir > 0) {
807 			LIST_INSERT_BEFORE(lfe, fe, vxlfe_hash);
808 			goto out;
809 		} else if (LIST_NEXT(lfe, vxlfe_hash) == NULL) {
810 			LIST_INSERT_AFTER(lfe, fe, vxlfe_hash);
811 			goto out;
812 		} else
813 			lfe = LIST_NEXT(lfe, vxlfe_hash);
814 	} while (lfe != NULL);
815 
816 out:
817 	sc->vxl_ftable_cnt++;
818 
819 	return (0);
820 }
821 
822 static struct vxlan_ftable_entry *
823 vxlan_ftable_entry_lookup(struct vxlan_softc *sc, const uint8_t *mac)
824 {
825 	struct vxlan_ftable_entry *fe;
826 	uint32_t hash;
827 	int dir;
828 
829 	VXLAN_LOCK_ASSERT(sc);
830 	hash = VXLAN_SC_FTABLE_HASH(sc, mac);
831 
832 	LIST_FOREACH(fe, &sc->vxl_ftable[hash], vxlfe_hash) {
833 		dir = vxlan_ftable_addr_cmp(mac, fe->vxlfe_mac);
834 		if (dir == 0)
835 			return (fe);
836 		if (dir > 0)
837 			break;
838 	}
839 
840 	return (NULL);
841 }
842 
843 static void
844 vxlan_ftable_entry_dump(struct vxlan_ftable_entry *fe, struct sbuf *sb)
845 {
846 	char buf[64];
847 	const union vxlan_sockaddr *sa;
848 	const void *addr;
849 	int i, len, af, width;
850 
851 	sa = &fe->vxlfe_raddr;
852 	af = sa->sa.sa_family;
853 	len = sbuf_len(sb);
854 
855 	sbuf_printf(sb, "%c 0x%02X ", VXLAN_FE_IS_DYNAMIC(fe) ? 'D' : 'S',
856 	    fe->vxlfe_flags);
857 
858 	for (i = 0; i < ETHER_ADDR_LEN - 1; i++)
859 		sbuf_printf(sb, "%02X:", fe->vxlfe_mac[i]);
860 	sbuf_printf(sb, "%02X ", fe->vxlfe_mac[i]);
861 
862 	if (af == AF_INET) {
863 		addr = &sa->in4.sin_addr;
864 		width = INET_ADDRSTRLEN - 1;
865 	} else {
866 		addr = &sa->in6.sin6_addr;
867 		width = INET6_ADDRSTRLEN - 1;
868 	}
869 	inet_ntop(af, addr, buf, sizeof(buf));
870 	sbuf_printf(sb, "%*s ", width, buf);
871 
872 	sbuf_printf(sb, "%08jd", (intmax_t)fe->vxlfe_expire);
873 
874 	sbuf_putc(sb, '\n');
875 
876 	/* Truncate a partial line. */
877 	if (sbuf_error(sb) != 0)
878 		sbuf_setpos(sb, len);
879 }
880 
881 static struct vxlan_socket *
882 vxlan_socket_alloc(const union vxlan_sockaddr *sa)
883 {
884 	struct vxlan_socket *vso;
885 	int i;
886 
887 	vso = malloc(sizeof(*vso), M_VXLAN, M_WAITOK | M_ZERO);
888 	rm_init(&vso->vxlso_lock, "vxlansorm");
889 	refcount_init(&vso->vxlso_refcnt, 0);
890 	for (i = 0; i < VXLAN_SO_VNI_HASH_SIZE; i++)
891 		LIST_INIT(&vso->vxlso_vni_hash[i]);
892 	vso->vxlso_laddr = *sa;
893 
894 	return (vso);
895 }
896 
897 static void
898 vxlan_socket_destroy(struct vxlan_socket *vso)
899 {
900 	struct socket *so;
901 #ifdef INVARIANTS
902 	int i;
903 	struct vxlan_socket_mc_info *mc;
904 
905 	for (i = 0; i < VXLAN_SO_MC_MAX_GROUPS; i++) {
906 		mc = &vso->vxlso_mc[i];
907 		KASSERT(mc->vxlsomc_gaddr.sa.sa_family == AF_UNSPEC,
908 		    ("%s: socket %p mc[%d] still has address",
909 		     __func__, vso, i));
910 	}
911 
912 	for (i = 0; i < VXLAN_SO_VNI_HASH_SIZE; i++) {
913 		KASSERT(LIST_EMPTY(&vso->vxlso_vni_hash[i]),
914 		    ("%s: socket %p vni_hash[%d] not empty",
915 		     __func__, vso, i));
916 	}
917 #endif
918 	so = vso->vxlso_sock;
919 	if (so != NULL) {
920 		vso->vxlso_sock = NULL;
921 		soclose(so);
922 	}
923 
924 	rm_destroy(&vso->vxlso_lock);
925 	free(vso, M_VXLAN);
926 }
927 
928 static void
929 vxlan_socket_release(struct vxlan_socket *vso)
930 {
931 	int destroy;
932 
933 	VXLAN_LIST_LOCK();
934 	destroy = VXLAN_SO_RELEASE(vso);
935 	if (destroy != 0)
936 		LIST_REMOVE(vso, vxlso_entry);
937 	VXLAN_LIST_UNLOCK();
938 
939 	if (destroy != 0)
940 		vxlan_socket_destroy(vso);
941 }
942 
943 static struct vxlan_socket *
944 vxlan_socket_lookup(union vxlan_sockaddr *vxlsa)
945 {
946 	struct vxlan_socket *vso;
947 
948 	VXLAN_LIST_LOCK();
949 	LIST_FOREACH(vso, &vxlan_socket_list, vxlso_entry) {
950 		if (vxlan_sockaddr_cmp(&vso->vxlso_laddr, &vxlsa->sa) == 0) {
951 			VXLAN_SO_ACQUIRE(vso);
952 			break;
953 		}
954 	}
955 	VXLAN_LIST_UNLOCK();
956 
957 	return (vso);
958 }
959 
960 static void
961 vxlan_socket_insert(struct vxlan_socket *vso)
962 {
963 
964 	VXLAN_LIST_LOCK();
965 	VXLAN_SO_ACQUIRE(vso);
966 	LIST_INSERT_HEAD(&vxlan_socket_list, vso, vxlso_entry);
967 	VXLAN_LIST_UNLOCK();
968 }
969 
970 static int
971 vxlan_socket_init(struct vxlan_socket *vso, struct ifnet *ifp)
972 {
973 	struct thread *td;
974 	int error;
975 
976 	td = curthread;
977 
978 	error = socreate(vso->vxlso_laddr.sa.sa_family, &vso->vxlso_sock,
979 	    SOCK_DGRAM, IPPROTO_UDP, td->td_ucred, td);
980 	if (error) {
981 		if_printf(ifp, "cannot create socket: %d\n", error);
982 		return (error);
983 	}
984 
985 	error = udp_set_kernel_tunneling(vso->vxlso_sock,
986 	    vxlan_rcv_udp_packet, NULL, vso);
987 	if (error) {
988 		if_printf(ifp, "cannot set tunneling function: %d\n", error);
989 		return (error);
990 	}
991 
992 	if (vxlan_reuse_port != 0) {
993 		struct sockopt sopt;
994 		int val = 1;
995 
996 		bzero(&sopt, sizeof(sopt));
997 		sopt.sopt_dir = SOPT_SET;
998 		sopt.sopt_level = IPPROTO_IP;
999 		sopt.sopt_name = SO_REUSEPORT;
1000 		sopt.sopt_val = &val;
1001 		sopt.sopt_valsize = sizeof(val);
1002 		error = sosetopt(vso->vxlso_sock, &sopt);
1003 		if (error) {
1004 			if_printf(ifp,
1005 			    "cannot set SO_REUSEPORT socket opt: %d\n", error);
1006 			return (error);
1007 		}
1008 	}
1009 
1010 	return (0);
1011 }
1012 
1013 static int
1014 vxlan_socket_bind(struct vxlan_socket *vso, struct ifnet *ifp)
1015 {
1016 	union vxlan_sockaddr laddr;
1017 	struct thread *td;
1018 	int error;
1019 
1020 	td = curthread;
1021 	laddr = vso->vxlso_laddr;
1022 
1023 	error = sobind(vso->vxlso_sock, &laddr.sa, td);
1024 	if (error) {
1025 		if (error != EADDRINUSE)
1026 			if_printf(ifp, "cannot bind socket: %d\n", error);
1027 		return (error);
1028 	}
1029 
1030 	return (0);
1031 }
1032 
1033 static int
1034 vxlan_socket_create(struct ifnet *ifp, int multicast,
1035     const union vxlan_sockaddr *saddr, struct vxlan_socket **vsop)
1036 {
1037 	union vxlan_sockaddr laddr;
1038 	struct vxlan_socket *vso;
1039 	int error;
1040 
1041 	laddr = *saddr;
1042 
1043 	/*
1044 	 * If this socket will be multicast, then only the local port
1045 	 * must be specified when binding.
1046 	 */
1047 	if (multicast != 0) {
1048 		if (VXLAN_SOCKADDR_IS_IPV4(&laddr))
1049 			laddr.in4.sin_addr.s_addr = INADDR_ANY;
1050 #ifdef INET6
1051 		else
1052 			laddr.in6.sin6_addr = in6addr_any;
1053 #endif
1054 	}
1055 
1056 	vso = vxlan_socket_alloc(&laddr);
1057 	if (vso == NULL)
1058 		return (ENOMEM);
1059 
1060 	error = vxlan_socket_init(vso, ifp);
1061 	if (error)
1062 		goto fail;
1063 
1064 	error = vxlan_socket_bind(vso, ifp);
1065 	if (error)
1066 		goto fail;
1067 
1068 	/*
1069 	 * There is a small window between the bind completing and
1070 	 * inserting the socket during which a concurrent create may fail.
1071 	 * Let's not worry about that for now.
1072 	 */
1073 	vxlan_socket_insert(vso);
1074 	*vsop = vso;
1075 
1076 	return (0);
1077 
1078 fail:
1079 	vxlan_socket_destroy(vso);
1080 
1081 	return (error);
1082 }
1083 
1084 static void
1085 vxlan_socket_ifdetach(struct vxlan_socket *vso, struct ifnet *ifp,
1086     struct vxlan_softc_head *list)
1087 {
1088 	struct rm_priotracker tracker;
1089 	struct vxlan_softc *sc;
1090 	int i;
1091 
1092 	VXLAN_SO_RLOCK(vso, &tracker);
1093 	for (i = 0; i < VXLAN_SO_VNI_HASH_SIZE; i++) {
1094 		LIST_FOREACH(sc, &vso->vxlso_vni_hash[i], vxl_entry)
1095 			vxlan_ifdetach(sc, ifp, list);
1096 	}
1097 	VXLAN_SO_RUNLOCK(vso, &tracker);
1098 }
1099 
1100 static struct vxlan_socket *
1101 vxlan_socket_mc_lookup(const union vxlan_sockaddr *vxlsa)
1102 {
1103 	union vxlan_sockaddr laddr;
1104 	struct vxlan_socket *vso;
1105 
1106 	laddr = *vxlsa;
1107 
1108 	if (VXLAN_SOCKADDR_IS_IPV4(&laddr))
1109 		laddr.in4.sin_addr.s_addr = INADDR_ANY;
1110 #ifdef INET6
1111 	else
1112 		laddr.in6.sin6_addr = in6addr_any;
1113 #endif
1114 
1115 	vso = vxlan_socket_lookup(&laddr);
1116 
1117 	return (vso);
1118 }
1119 
1120 static int
1121 vxlan_sockaddr_mc_info_match(const struct vxlan_socket_mc_info *mc,
1122     const union vxlan_sockaddr *group, const union vxlan_sockaddr *local,
1123     int ifidx)
1124 {
1125 
1126 	if (!vxlan_sockaddr_in_any(local) &&
1127 	    !vxlan_sockaddr_in_equal(&mc->vxlsomc_saddr, &local->sa))
1128 		return (0);
1129 	if (!vxlan_sockaddr_in_equal(&mc->vxlsomc_gaddr, &group->sa))
1130 		return (0);
1131 	if (ifidx != 0 && ifidx != mc->vxlsomc_ifidx)
1132 		return (0);
1133 
1134 	return (1);
1135 }
1136 
1137 static int
1138 vxlan_socket_mc_join_group(struct vxlan_socket *vso,
1139     const union vxlan_sockaddr *group, const union vxlan_sockaddr *local,
1140     int *ifidx, union vxlan_sockaddr *source)
1141 {
1142 	struct sockopt sopt;
1143 	int error;
1144 
1145 	*source = *local;
1146 
1147 	if (VXLAN_SOCKADDR_IS_IPV4(group)) {
1148 		struct ip_mreq mreq;
1149 
1150 		mreq.imr_multiaddr = group->in4.sin_addr;
1151 		mreq.imr_interface = local->in4.sin_addr;
1152 
1153 		bzero(&sopt, sizeof(sopt));
1154 		sopt.sopt_dir = SOPT_SET;
1155 		sopt.sopt_level = IPPROTO_IP;
1156 		sopt.sopt_name = IP_ADD_MEMBERSHIP;
1157 		sopt.sopt_val = &mreq;
1158 		sopt.sopt_valsize = sizeof(mreq);
1159 		error = sosetopt(vso->vxlso_sock, &sopt);
1160 		if (error)
1161 			return (error);
1162 
1163 		/*
1164 		 * BMV: Ideally, there would be a formal way for us to get
1165 		 * the local interface that was selected based on the
1166 		 * imr_interface address. We could then update *ifidx so
1167 		 * vxlan_sockaddr_mc_info_match() would return a match for
1168 		 * later creates that explicitly set the multicast interface.
1169 		 *
1170 		 * If we really need to, we can of course look in the INP's
1171 		 * membership list:
1172 		 *     sotoinpcb(vso->vxlso_sock)->inp_moptions->
1173 		 *         imo_head[]->imf_inm->inm_ifp
1174 		 * similarly to imo_match_group().
1175 		 */
1176 		source->in4.sin_addr = local->in4.sin_addr;
1177 
1178 	} else if (VXLAN_SOCKADDR_IS_IPV6(group)) {
1179 		struct ipv6_mreq mreq;
1180 
1181 		mreq.ipv6mr_multiaddr = group->in6.sin6_addr;
1182 		mreq.ipv6mr_interface = *ifidx;
1183 
1184 		bzero(&sopt, sizeof(sopt));
1185 		sopt.sopt_dir = SOPT_SET;
1186 		sopt.sopt_level = IPPROTO_IPV6;
1187 		sopt.sopt_name = IPV6_JOIN_GROUP;
1188 		sopt.sopt_val = &mreq;
1189 		sopt.sopt_valsize = sizeof(mreq);
1190 		error = sosetopt(vso->vxlso_sock, &sopt);
1191 		if (error)
1192 			return (error);
1193 
1194 		/*
1195 		 * BMV: As with IPv4, we would really like to know what
1196 		 * interface in6p_lookup_mcast_ifp() selected.
1197 		 */
1198 	} else
1199 		error = EAFNOSUPPORT;
1200 
1201 	return (error);
1202 }
1203 
1204 static int
1205 vxlan_socket_mc_leave_group(struct vxlan_socket *vso,
1206     const union vxlan_sockaddr *group, const union vxlan_sockaddr *source,
1207     int ifidx)
1208 {
1209 	struct sockopt sopt;
1210 	int error;
1211 
1212 	bzero(&sopt, sizeof(sopt));
1213 	sopt.sopt_dir = SOPT_SET;
1214 
1215 	if (VXLAN_SOCKADDR_IS_IPV4(group)) {
1216 		struct ip_mreq mreq;
1217 
1218 		mreq.imr_multiaddr = group->in4.sin_addr;
1219 		mreq.imr_interface = source->in4.sin_addr;
1220 
1221 		sopt.sopt_level = IPPROTO_IP;
1222 		sopt.sopt_name = IP_DROP_MEMBERSHIP;
1223 		sopt.sopt_val = &mreq;
1224 		sopt.sopt_valsize = sizeof(mreq);
1225 		error = sosetopt(vso->vxlso_sock, &sopt);
1226 
1227 	} else if (VXLAN_SOCKADDR_IS_IPV6(group)) {
1228 		struct ipv6_mreq mreq;
1229 
1230 		mreq.ipv6mr_multiaddr = group->in6.sin6_addr;
1231 		mreq.ipv6mr_interface = ifidx;
1232 
1233 		sopt.sopt_level = IPPROTO_IPV6;
1234 		sopt.sopt_name = IPV6_LEAVE_GROUP;
1235 		sopt.sopt_val = &mreq;
1236 		sopt.sopt_valsize = sizeof(mreq);
1237 		error = sosetopt(vso->vxlso_sock, &sopt);
1238 
1239 	} else
1240 		error = EAFNOSUPPORT;
1241 
1242 	return (error);
1243 }
1244 
1245 static int
1246 vxlan_socket_mc_add_group(struct vxlan_socket *vso,
1247     const union vxlan_sockaddr *group, const union vxlan_sockaddr *local,
1248     int ifidx, int *idx)
1249 {
1250 	union vxlan_sockaddr source;
1251 	struct vxlan_socket_mc_info *mc;
1252 	int i, empty, error;
1253 
1254 	/*
1255 	 * Within a socket, the same multicast group may be used by multiple
1256 	 * interfaces, each with a different network identifier. But a socket
1257 	 * may only join a multicast group once, so keep track of the users
1258 	 * here.
1259 	 */
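	/*
	 * The group join itself is performed without the socket lock held,
	 * so a second pass below re-scans for a free slot and leaves the
	 * group again if the table filled up in the meantime.
	 */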
1260 
1261 	VXLAN_SO_WLOCK(vso);
1262 	for (empty = 0, i = 0; i < VXLAN_SO_MC_MAX_GROUPS; i++) {
1263 		mc = &vso->vxlso_mc[i];
1264 
1265 		if (mc->vxlsomc_gaddr.sa.sa_family == AF_UNSPEC) {
1266 			empty++;
1267 			continue;
1268 		}
1269 
1270 		if (vxlan_sockaddr_mc_info_match(mc, group, local, ifidx))
1271 			goto out;
1272 	}
1273 	VXLAN_SO_WUNLOCK(vso);
1274 
1275 	if (empty == 0)
1276 		return (ENOSPC);
1277 
1278 	error = vxlan_socket_mc_join_group(vso, group, local, &ifidx, &source);
1279 	if (error)
1280 		return (error);
1281 
1282 	VXLAN_SO_WLOCK(vso);
1283 	for (i = 0; i < VXLAN_SO_MC_MAX_GROUPS; i++) {
1284 		mc = &vso->vxlso_mc[i];
1285 
1286 		if (mc->vxlsomc_gaddr.sa.sa_family == AF_UNSPEC) {
1287 			vxlan_sockaddr_copy(&mc->vxlsomc_gaddr, &group->sa);
1288 			vxlan_sockaddr_copy(&mc->vxlsomc_saddr, &source.sa);
1289 			mc->vxlsomc_ifidx = ifidx;
1290 			goto out;
1291 		}
1292 	}
1293 	VXLAN_SO_WUNLOCK(vso);
1294 
1295 	error = vxlan_socket_mc_leave_group(vso, group, &source, ifidx);
1296 	MPASS(error == 0);
1297 
1298 	return (ENOSPC);
1299 
1300 out:
1301 	mc->vxlsomc_users++;
1302 	VXLAN_SO_WUNLOCK(vso);
1303 
1304 	*idx = i;
1305 
1306 	return (0);
1307 }
1308 
1309 static void
1310 vxlan_socket_mc_release_group_by_idx(struct vxlan_socket *vso, int idx)
1311 {
1312 	union vxlan_sockaddr group, source;
1313 	struct vxlan_socket_mc_info *mc;
1314 	int ifidx, leave;
1315 
1316 	KASSERT(idx >= 0 && idx < VXLAN_SO_MC_MAX_GROUPS,
1317 	    ("%s: vso %p idx %d out of bounds", __func__, vso, idx));
1318 
1319 	leave = 0;
1320 	mc = &vso->vxlso_mc[idx];
1321 
1322 	VXLAN_SO_WLOCK(vso);
1323 	mc->vxlsomc_users--;
1324 	if (mc->vxlsomc_users == 0) {
1325 		group = mc->vxlsomc_gaddr;
1326 		source = mc->vxlsomc_saddr;
1327 		ifidx = mc->vxlsomc_ifidx;
1328 		bzero(mc, sizeof(*mc));
1329 		leave = 1;
1330 	}
1331 	VXLAN_SO_WUNLOCK(vso);
1332 
1333 	if (leave != 0) {
1334 		/*
1335 		 * Our socket's membership in this group may have already
1336 		 * been removed if we joined through an interface that's
1337 		 * been detached.
1338 		 */
1339 		vxlan_socket_mc_leave_group(vso, &group, &source, ifidx);
1340 	}
1341 }
1342 
1343 static struct vxlan_softc *
1344 vxlan_socket_lookup_softc_locked(struct vxlan_socket *vso, uint32_t vni)
1345 {
1346 	struct vxlan_softc *sc;
1347 	uint32_t hash;
1348 
1349 	VXLAN_SO_LOCK_ASSERT(vso);
1350 	hash = VXLAN_SO_VNI_HASH(vni);
1351 
1352 	LIST_FOREACH(sc, &vso->vxlso_vni_hash[hash], vxl_entry) {
1353 		if (sc->vxl_vni == vni) {
1354 			VXLAN_ACQUIRE(sc);
1355 			break;
1356 		}
1357 	}
1358 
1359 	return (sc);
1360 }
1361 
1362 static struct vxlan_softc *
1363 vxlan_socket_lookup_softc(struct vxlan_socket *vso, uint32_t vni)
1364 {
1365 	struct rm_priotracker tracker;
1366 	struct vxlan_softc *sc;
1367 
1368 	VXLAN_SO_RLOCK(vso, &tracker);
1369 	sc = vxlan_socket_lookup_softc_locked(vso, vni);
1370 	VXLAN_SO_RUNLOCK(vso, &tracker);
1371 
1372 	return (sc);
1373 }
1374 
1375 static int
1376 vxlan_socket_insert_softc(struct vxlan_socket *vso, struct vxlan_softc *sc)
1377 {
1378 	struct vxlan_softc *tsc;
1379 	uint32_t vni, hash;
1380 
1381 	vni = sc->vxl_vni;
1382 	hash = VXLAN_SO_VNI_HASH(vni);
1383 
1384 	VXLAN_SO_WLOCK(vso);
1385 	tsc = vxlan_socket_lookup_softc_locked(vso, vni);
1386 	if (tsc != NULL) {
1387 		VXLAN_SO_WUNLOCK(vso);
1388 		vxlan_release(tsc);
1389 		return (EEXIST);
1390 	}
1391 
1392 	VXLAN_ACQUIRE(sc);
1393 	LIST_INSERT_HEAD(&vso->vxlso_vni_hash[hash], sc, vxl_entry);
1394 	VXLAN_SO_WUNLOCK(vso);
1395 
1396 	return (0);
1397 }
1398 
1399 static void
1400 vxlan_socket_remove_softc(struct vxlan_socket *vso, struct vxlan_softc *sc)
1401 {
1402 
1403 	VXLAN_SO_WLOCK(vso);
1404 	LIST_REMOVE(sc, vxl_entry);
1405 	VXLAN_SO_WUNLOCK(vso);
1406 
1407 	vxlan_release(sc);
1408 }
1409 
1410 static struct ifnet *
1411 vxlan_multicast_if_ref(struct vxlan_softc *sc, int ipv4)
1412 {
1413 	struct ifnet *ifp;
1414 
1415 	VXLAN_LOCK_ASSERT(sc);
1416 
1417 	if (ipv4 && sc->vxl_im4o != NULL)
1418 		ifp = sc->vxl_im4o->imo_multicast_ifp;
1419 	else if (!ipv4 && sc->vxl_im6o != NULL)
1420 		ifp = sc->vxl_im6o->im6o_multicast_ifp;
1421 	else
1422 		ifp = NULL;
1423 
1424 	if (ifp != NULL)
1425 		if_ref(ifp);
1426 
1427 	return (ifp);
1428 }
1429 
1430 static void
1431 vxlan_free_multicast(struct vxlan_softc *sc)
1432 {
1433 
1434 	if (sc->vxl_mc_ifp != NULL) {
1435 		if_rele(sc->vxl_mc_ifp);
1436 		sc->vxl_mc_ifp = NULL;
1437 		sc->vxl_mc_ifindex = 0;
1438 	}
1439 
1440 	if (sc->vxl_im4o != NULL) {
1441 		free(sc->vxl_im4o, M_VXLAN);
1442 		sc->vxl_im4o = NULL;
1443 	}
1444 
1445 	if (sc->vxl_im6o != NULL) {
1446 		free(sc->vxl_im6o, M_VXLAN);
1447 		sc->vxl_im6o = NULL;
1448 	}
1449 }
1450 
1451 static int
1452 vxlan_setup_multicast_interface(struct vxlan_softc *sc)
1453 {
1454 	struct ifnet *ifp;
1455 
1456 	ifp = ifunit_ref(sc->vxl_mc_ifname);
1457 	if (ifp == NULL) {
1458 		if_printf(sc->vxl_ifp, "multicast interface %s does "
1459 		    "not exist\n", sc->vxl_mc_ifname);
1460 		return (ENOENT);
1461 	}
1462 
1463 	if ((ifp->if_flags & IFF_MULTICAST) == 0) {
1464 		if_printf(sc->vxl_ifp, "interface %s does not support "
1465 		     "multicast\n", sc->vxl_mc_ifname);
1466 		if_rele(ifp);
1467 		return (ENOTSUP);
1468 	}
1469 
1470 	sc->vxl_mc_ifp = ifp;
1471 	sc->vxl_mc_ifindex = ifp->if_index;
1472 
1473 	return (0);
1474 }
1475 
1476 static int
1477 vxlan_setup_multicast(struct vxlan_softc *sc)
1478 {
1479 	const union vxlan_sockaddr *group;
1480 	int error;
1481 
1482 	group = &sc->vxl_dst_addr;
1483 	error = 0;
1484 
1485 	if (sc->vxl_mc_ifname[0] != '\0') {
1486 		error = vxlan_setup_multicast_interface(sc);
1487 		if (error)
1488 			return (error);
1489 	}
1490 
1491 	/*
1492 	 * Initialize a multicast options structure that is sufficiently
1493 	 * populated for use in the respective IP output routine. This
1494 	 * structure is typically stored in the socket, but our sockets
1495 	 * may be shared among multiple interfaces.
1496 	 */
1497 	if (VXLAN_SOCKADDR_IS_IPV4(group)) {
1498 		sc->vxl_im4o = malloc(sizeof(struct ip_moptions), M_VXLAN,
1499 		    M_ZERO | M_WAITOK);
1500 		sc->vxl_im4o->imo_multicast_ifp = sc->vxl_mc_ifp;
1501 		sc->vxl_im4o->imo_multicast_ttl = sc->vxl_ttl;
1502 		sc->vxl_im4o->imo_multicast_vif = -1;
1503 	} else if (VXLAN_SOCKADDR_IS_IPV6(group)) {
1504 		sc->vxl_im6o = malloc(sizeof(struct ip6_moptions), M_VXLAN,
1505 		    M_ZERO | M_WAITOK);
1506 		sc->vxl_im6o->im6o_multicast_ifp = sc->vxl_mc_ifp;
1507 		sc->vxl_im6o->im6o_multicast_hlim = sc->vxl_ttl;
1508 	}
1509 
1510 	return (error);
1511 }
1512 
1513 static int
1514 vxlan_setup_socket(struct vxlan_softc *sc)
1515 {
1516 	struct vxlan_socket *vso;
1517 	struct ifnet *ifp;
1518 	union vxlan_sockaddr *saddr, *daddr;
1519 	int multicast, error;
1520 
1521 	vso = NULL;
1522 	ifp = sc->vxl_ifp;
1523 	saddr = &sc->vxl_src_addr;
1524 	daddr = &sc->vxl_dst_addr;
1525 
1526 	multicast = vxlan_sockaddr_in_multicast(daddr);
1527 	MPASS(multicast != -1);
1528 	sc->vxl_vso_mc_index = -1;
1529 
1530 	/*
1531 	 * Try to create the socket. If that fails, attempt to use an
1532 	 * existing socket.
1533 	 */
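	/*
	 * A bind failure (typically EADDRINUSE) means another vxlan
	 * interface already owns a socket bound to this local address, in
	 * which case that existing socket is shared.
	 */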
1534 	error = vxlan_socket_create(ifp, multicast, saddr, &vso);
1535 	if (error) {
1536 		if (multicast != 0)
1537 			vso = vxlan_socket_mc_lookup(saddr);
1538 		else
1539 			vso = vxlan_socket_lookup(saddr);
1540 
1541 		if (vso == NULL) {
1542 			if_printf(ifp, "cannot create socket (error: %d), "
1543 			    "and no existing socket found\n", error);
1544 			goto out;
1545 		}
1546 	}
1547 
1548 	if (multicast != 0) {
1549 		error = vxlan_setup_multicast(sc);
1550 		if (error)
1551 			goto out;
1552 
1553 		error = vxlan_socket_mc_add_group(vso, daddr, saddr,
1554 		    sc->vxl_mc_ifindex, &sc->vxl_vso_mc_index);
1555 		if (error)
1556 			goto out;
1557 	}
1558 
1559 	sc->vxl_sock = vso;
1560 	error = vxlan_socket_insert_softc(vso, sc);
1561 	if (error) {
1562 		sc->vxl_sock = NULL;
1563 		if_printf(ifp, "network identifier %d already exists in "
1564 		    "this socket\n", sc->vxl_vni);
1565 		goto out;
1566 	}
1567 
1568 	return (0);
1569 
1570 out:
1571 	if (vso != NULL) {
1572 		if (sc->vxl_vso_mc_index != -1) {
1573 			vxlan_socket_mc_release_group_by_idx(vso,
1574 			    sc->vxl_vso_mc_index);
1575 			sc->vxl_vso_mc_index = -1;
1576 		}
1577 		if (multicast != 0)
1578 			vxlan_free_multicast(sc);
1579 		vxlan_socket_release(vso);
1580 	}
1581 
1582 	return (error);
1583 }
1584 
1585 #ifdef INET6
1586 static void
1587 vxlan_setup_zero_checksum_port(struct vxlan_softc *sc)
1588 {
1589 
1590 	if (!VXLAN_SOCKADDR_IS_IPV6(&sc->vxl_src_addr))
1591 		return;
1592 
1593 	MPASS(sc->vxl_src_addr.in6.sin6_port != 0);
1594 	MPASS(sc->vxl_dst_addr.in6.sin6_port != 0);
1595 
1596 	if (sc->vxl_src_addr.in6.sin6_port != sc->vxl_dst_addr.in6.sin6_port) {
1597 		if_printf(sc->vxl_ifp, "port %d in src address does not match "
1598 		    "port %d in dst address, rfc6935_port (%d) not updated.\n",
1599 		    ntohs(sc->vxl_src_addr.in6.sin6_port),
1600 		    ntohs(sc->vxl_dst_addr.in6.sin6_port),
1601 		    V_zero_checksum_port);
1602 		return;
1603 	}
1604 
1605 	if (V_zero_checksum_port != 0) {
1606 		if (V_zero_checksum_port !=
1607 		    ntohs(sc->vxl_src_addr.in6.sin6_port)) {
1608 			if_printf(sc->vxl_ifp, "rfc6935_port is already set to "
1609 			    "%d, cannot set it to %d.\n", V_zero_checksum_port,
1610 			    ntohs(sc->vxl_src_addr.in6.sin6_port));
1611 		}
1612 		return;
1613 	}
1614 
1615 	V_zero_checksum_port = ntohs(sc->vxl_src_addr.in6.sin6_port);
1616 	if_printf(sc->vxl_ifp, "rfc6935_port set to %d\n",
1617 	    V_zero_checksum_port);
1618 }
1619 #endif
1620 
1621 static void
1622 vxlan_setup_interface_hdrlen(struct vxlan_softc *sc)
1623 {
1624 	struct ifnet *ifp;
1625 
1626 	VXLAN_LOCK_WASSERT(sc);
1627 
1628 	ifp = sc->vxl_ifp;
1629 	ifp->if_hdrlen = ETHER_HDR_LEN + sizeof(struct vxlanudphdr);
1630 
1631 	if (VXLAN_SOCKADDR_IS_IPV4(&sc->vxl_dst_addr) != 0)
1632 		ifp->if_hdrlen += sizeof(struct ip);
1633 	else if (VXLAN_SOCKADDR_IS_IPV6(&sc->vxl_dst_addr) != 0)
1634 		ifp->if_hdrlen += sizeof(struct ip6_hdr);
1635 
1636 	if ((sc->vxl_flags & VXLAN_FLAG_USER_MTU) == 0)
1637 		ifp->if_mtu = ETHERMTU - ifp->if_hdrlen;
1638 }
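/*
 * For example, with an IPv4 unicast configuration the header length is
 * 14 (Ethernet) + 20 (IP) + 8 (UDP) + 8 (VXLAN) = 50 bytes, so the default
 * MTU becomes 1500 - 50 = 1450 unless the user set one explicitly.
 */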
1639 
1640 static int
1641 vxlan_valid_init_config(struct vxlan_softc *sc)
1642 {
1643 	const char *reason;
1644 
1645 	if (vxlan_check_vni(sc->vxl_vni) != 0) {
1646 		reason = "invalid virtual network identifier specified";
1647 		goto fail;
1648 	}
1649 
1650 	if (vxlan_sockaddr_supported(&sc->vxl_src_addr, 1) == 0) {
1651 		reason = "source address type is not supported";
1652 		goto fail;
1653 	}
1654 
1655 	if (vxlan_sockaddr_supported(&sc->vxl_dst_addr, 0) == 0) {
1656 		reason = "destination address type is not supported";
1657 		goto fail;
1658 	}
1659 
1660 	if (vxlan_sockaddr_in_any(&sc->vxl_dst_addr) != 0) {
1661 		reason = "no valid destination address specified";
1662 		goto fail;
1663 	}
1664 
1665 	if (vxlan_sockaddr_in_multicast(&sc->vxl_dst_addr) == 0 &&
1666 	    sc->vxl_mc_ifname[0] != '\0') {
1667 		reason = "can only specify interface with a group address";
1668 		goto fail;
1669 	}
1670 
1671 	if (vxlan_sockaddr_in_any(&sc->vxl_src_addr) == 0) {
1672 		if (VXLAN_SOCKADDR_IS_IPV4(&sc->vxl_src_addr) ^
1673 		    VXLAN_SOCKADDR_IS_IPV4(&sc->vxl_dst_addr)) {
1674 			reason = "source and destination address must both "
1675 			    "be either IPv4 or IPv6";
1676 			goto fail;
1677 		}
1678 	}
1679 
1680 	if (sc->vxl_src_addr.in4.sin_port == 0) {
1681 		reason = "local port not specified";
1682 		goto fail;
1683 	}
1684 
1685 	if (sc->vxl_dst_addr.in4.sin_port == 0) {
1686 		reason = "remote port not specified";
1687 		goto fail;
1688 	}
1689 
1690 	return (0);
1691 
1692 fail:
1693 	if_printf(sc->vxl_ifp, "cannot initialize interface: %s\n", reason);
1694 	return (EINVAL);
1695 }
1696 
1697 static void
1698 vxlan_init_wait(struct vxlan_softc *sc)
1699 {
1700 
1701 	VXLAN_LOCK_WASSERT(sc);
1702 	while (sc->vxl_flags & VXLAN_FLAG_INIT)
1703 		rm_sleep(sc, &sc->vxl_lock, 0, "vxlint", hz);
1704 }
1705 
1706 static void
1707 vxlan_init_complete(struct vxlan_softc *sc)
1708 {
1709 
1710 	VXLAN_WLOCK(sc);
1711 	sc->vxl_flags &= ~VXLAN_FLAG_INIT;
1712 	wakeup(sc);
1713 	VXLAN_WUNLOCK(sc);
1714 }
1715 
1716 static void
1717 vxlan_init(void *xsc)
1718 {
1719 	static const uint8_t empty_mac[ETHER_ADDR_LEN];
1720 	struct vxlan_softc *sc;
1721 	struct ifnet *ifp;
1722 
1723 	sc = xsc;
1724 	ifp = sc->vxl_ifp;
1725 
1726 	sx_xlock(&vxlan_sx);
1727 	VXLAN_WLOCK(sc);
1728 	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1729 		VXLAN_WUNLOCK(sc);
1730 		sx_xunlock(&vxlan_sx);
1731 		return;
1732 	}
1733 	sc->vxl_flags |= VXLAN_FLAG_INIT;
1734 	VXLAN_WUNLOCK(sc);
1735 
1736 	if (vxlan_valid_init_config(sc) != 0)
1737 		goto out;
1738 
1739 	if (vxlan_setup_socket(sc) != 0)
1740 		goto out;
1741 
1742 #ifdef INET6
1743 	vxlan_setup_zero_checksum_port(sc);
1744 #endif
1745 
1746 	/* Initialize the default forwarding entry. */
1747 	vxlan_ftable_entry_init(sc, &sc->vxl_default_fe, empty_mac,
1748 	    &sc->vxl_dst_addr.sa, VXLAN_FE_FLAG_STATIC);
1749 
1750 	VXLAN_WLOCK(sc);
1751 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1752 	callout_reset(&sc->vxl_callout, vxlan_ftable_prune_period * hz,
1753 	    vxlan_timer, sc);
1754 	VXLAN_WUNLOCK(sc);
1755 
1756 	if_link_state_change(ifp, LINK_STATE_UP);
1757 
1758 	EVENTHANDLER_INVOKE(vxlan_start, ifp, sc->vxl_src_addr.in4.sin_family,
1759 	    ntohs(sc->vxl_src_addr.in4.sin_port));
1760 out:
1761 	vxlan_init_complete(sc);
1762 	sx_xunlock(&vxlan_sx);
1763 }
1764 
1765 static void
1766 vxlan_release(struct vxlan_softc *sc)
1767 {
1768 
1769 	/*
1770 	 * The softc may be destroyed as soon as we release our reference,
1771 	 * so we cannot serialize the wakeup with the softc lock. We use a
1772 	 * timeout in our sleeps so a missed wakeup is unfortunate but not
1773 	 * fatal.
1774 	 */
1775 	if (VXLAN_RELEASE(sc) != 0)
1776 		wakeup(sc);
1777 }
1778 
1779 static void
1780 vxlan_teardown_wait(struct vxlan_softc *sc)
1781 {
1782 
1783 	VXLAN_LOCK_WASSERT(sc);
1784 	while (sc->vxl_flags & VXLAN_FLAG_TEARDOWN)
1785 		rm_sleep(sc, &sc->vxl_lock, 0, "vxltrn", hz);
1786 }
1787 
1788 static void
1789 vxlan_teardown_complete(struct vxlan_softc *sc)
1790 {
1791 
1792 	VXLAN_WLOCK(sc);
1793 	sc->vxl_flags &= ~VXLAN_FLAG_TEARDOWN;
1794 	wakeup(sc);
1795 	VXLAN_WUNLOCK(sc);
1796 }
1797 
1798 static void
1799 vxlan_teardown_locked(struct vxlan_softc *sc)
1800 {
1801 	struct ifnet *ifp;
1802 	struct vxlan_socket *vso;
1803 
1804 	sx_assert(&vxlan_sx, SA_XLOCKED);
1805 	VXLAN_LOCK_WASSERT(sc);
1806 	MPASS(sc->vxl_flags & VXLAN_FLAG_TEARDOWN);
1807 
1808 	ifp = sc->vxl_ifp;
1809 	ifp->if_flags &= ~IFF_UP;
1810 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1811 	callout_stop(&sc->vxl_callout);
1812 	vso = sc->vxl_sock;
1813 	sc->vxl_sock = NULL;
1814 
1815 	VXLAN_WUNLOCK(sc);
1816 	if_link_state_change(ifp, LINK_STATE_DOWN);
1817 	EVENTHANDLER_INVOKE(vxlan_stop, ifp, sc->vxl_src_addr.in4.sin_family,
1818 	    ntohs(sc->vxl_src_addr.in4.sin_port));
1819 
1820 	if (vso != NULL) {
1821 		vxlan_socket_remove_softc(vso, sc);
1822 
1823 		if (sc->vxl_vso_mc_index != -1) {
1824 			vxlan_socket_mc_release_group_by_idx(vso,
1825 			    sc->vxl_vso_mc_index);
1826 			sc->vxl_vso_mc_index = -1;
1827 		}
1828 	}
1829 
1830 	VXLAN_WLOCK(sc);
1831 	while (sc->vxl_refcnt != 0)
1832 		rm_sleep(sc, &sc->vxl_lock, 0, "vxldrn", hz);
1833 	VXLAN_WUNLOCK(sc);
1834 
1835 	callout_drain(&sc->vxl_callout);
1836 
1837 	vxlan_free_multicast(sc);
1838 	if (vso != NULL)
1839 		vxlan_socket_release(vso);
1840 
1841 	vxlan_teardown_complete(sc);
1842 }
1843 
1844 static void
1845 vxlan_teardown(struct vxlan_softc *sc)
1846 {
1847 
1848 	sx_xlock(&vxlan_sx);
1849 	VXLAN_WLOCK(sc);
1850 	if (sc->vxl_flags & VXLAN_FLAG_TEARDOWN) {
1851 		vxlan_teardown_wait(sc);
1852 		VXLAN_WUNLOCK(sc);
1853 		sx_xunlock(&vxlan_sx);
1854 		return;
1855 	}
1856 
1857 	sc->vxl_flags |= VXLAN_FLAG_TEARDOWN;
1858 	vxlan_teardown_locked(sc);
1859 	sx_xunlock(&vxlan_sx);
1860 }
1861 
1862 static void
1863 vxlan_ifdetach(struct vxlan_softc *sc, struct ifnet *ifp,
1864     struct vxlan_softc_head *list)
1865 {
1866 
1867 	VXLAN_WLOCK(sc);
1868 
1869 	if (sc->vxl_mc_ifp != ifp)
1870 		goto out;
1871 	if (sc->vxl_flags & VXLAN_FLAG_TEARDOWN)
1872 		goto out;
1873 
1874 	sc->vxl_flags |= VXLAN_FLAG_TEARDOWN;
1875 	LIST_INSERT_HEAD(list, sc, vxl_ifdetach_list);
1876 
1877 out:
1878 	VXLAN_WUNLOCK(sc);
1879 }
1880 
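/*
 * Periodic callout, run with the softc lock held: expire stale forwarding
 * table entries and reschedule the next prune.
 */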
1881 static void
1882 vxlan_timer(void *xsc)
1883 {
1884 	struct vxlan_softc *sc;
1885 
1886 	sc = xsc;
1887 	VXLAN_LOCK_WASSERT(sc);
1888 
1889 	vxlan_ftable_expire(sc);
1890 	callout_schedule(&sc->vxl_callout, vxlan_ftable_prune_period * hz);
1891 }
1892 
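/*
 * Bring the interface up or down so that the driver state matches the
 * administrative IFF_UP flag.
 */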
1893 static int
1894 vxlan_ioctl_ifflags(struct vxlan_softc *sc)
1895 {
1896 	struct ifnet *ifp;
1897 
1898 	ifp = sc->vxl_ifp;
1899 
1900 	if (ifp->if_flags & IFF_UP) {
1901 		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1902 			vxlan_init(sc);
1903 	} else {
1904 		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1905 			vxlan_teardown(sc);
1906 	}
1907 
1908 	return (0);
1909 }
1910 
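/*
 * SIOCGDRVSPEC/SIOCSDRVSPEC command handlers, dispatched through
 * vxlan_control_table by vxlan_ioctl_drvspec() and typically issued via
 * ifconfig(8).  Settings that affect the tunnel endpoints may only be
 * changed while the interface is not running; see vxlan_can_change_config().
 */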
1911 static int
1912 vxlan_ctrl_get_config(struct vxlan_softc *sc, void *arg)
1913 {
1914 	struct rm_priotracker tracker;
1915 	struct ifvxlancfg *cfg;
1916 
1917 	cfg = arg;
1918 	bzero(cfg, sizeof(*cfg));
1919 
1920 	VXLAN_RLOCK(sc, &tracker);
1921 	cfg->vxlc_vni = sc->vxl_vni;
1922 	memcpy(&cfg->vxlc_local_sa, &sc->vxl_src_addr,
1923 	    sizeof(union vxlan_sockaddr));
1924 	memcpy(&cfg->vxlc_remote_sa, &sc->vxl_dst_addr,
1925 	    sizeof(union vxlan_sockaddr));
1926 	cfg->vxlc_mc_ifindex = sc->vxl_mc_ifindex;
1927 	cfg->vxlc_ftable_cnt = sc->vxl_ftable_cnt;
1928 	cfg->vxlc_ftable_max = sc->vxl_ftable_max;
1929 	cfg->vxlc_ftable_timeout = sc->vxl_ftable_timeout;
1930 	cfg->vxlc_port_min = sc->vxl_min_port;
1931 	cfg->vxlc_port_max = sc->vxl_max_port;
1932 	cfg->vxlc_learn = (sc->vxl_flags & VXLAN_FLAG_LEARN) != 0;
1933 	cfg->vxlc_ttl = sc->vxl_ttl;
1934 	VXLAN_RUNLOCK(sc, &tracker);
1935 
1936 #ifdef INET6
1937 	if (VXLAN_SOCKADDR_IS_IPV6(&cfg->vxlc_local_sa))
1938 		sa6_recoverscope(&cfg->vxlc_local_sa.in6);
1939 	if (VXLAN_SOCKADDR_IS_IPV6(&cfg->vxlc_remote_sa))
1940 		sa6_recoverscope(&cfg->vxlc_remote_sa.in6);
1941 #endif
1942 
1943 	return (0);
1944 }
1945 
1946 static int
1947 vxlan_ctrl_set_vni(struct vxlan_softc *sc, void *arg)
1948 {
1949 	struct ifvxlancmd *cmd;
1950 	int error;
1951 
1952 	cmd = arg;
1953 
1954 	if (vxlan_check_vni(cmd->vxlcmd_vni) != 0)
1955 		return (EINVAL);
1956 
1957 	VXLAN_WLOCK(sc);
1958 	if (vxlan_can_change_config(sc)) {
1959 		sc->vxl_vni = cmd->vxlcmd_vni;
1960 		error = 0;
1961 	} else
1962 		error = EBUSY;
1963 	VXLAN_WUNLOCK(sc);
1964 
1965 	return (error);
1966 }
1967 
1968 static int
1969 vxlan_ctrl_set_local_addr(struct vxlan_softc *sc, void *arg)
1970 {
1971 	struct ifvxlancmd *cmd;
1972 	union vxlan_sockaddr *vxlsa;
1973 	int error;
1974 
1975 	cmd = arg;
1976 	vxlsa = &cmd->vxlcmd_sa;
1977 
1978 	if (!VXLAN_SOCKADDR_IS_IPV46(vxlsa))
1979 		return (EINVAL);
1980 	if (vxlan_sockaddr_in_multicast(vxlsa) != 0)
1981 		return (EINVAL);
1982 	if (VXLAN_SOCKADDR_IS_IPV6(vxlsa)) {
1983 		error = vxlan_sockaddr_in6_embedscope(vxlsa);
1984 		if (error)
1985 			return (error);
1986 	}
1987 
1988 	VXLAN_WLOCK(sc);
1989 	if (vxlan_can_change_config(sc)) {
1990 		vxlan_sockaddr_in_copy(&sc->vxl_src_addr, &vxlsa->sa);
1991 		vxlan_set_hwcaps(sc);
1992 		error = 0;
1993 	} else
1994 		error = EBUSY;
1995 	VXLAN_WUNLOCK(sc);
1996 
1997 	return (error);
1998 }
1999 
2000 static int
2001 vxlan_ctrl_set_remote_addr(struct vxlan_softc *sc, void *arg)
2002 {
2003 	struct ifvxlancmd *cmd;
2004 	union vxlan_sockaddr *vxlsa;
2005 	int error;
2006 
2007 	cmd = arg;
2008 	vxlsa = &cmd->vxlcmd_sa;
2009 
2010 	if (!VXLAN_SOCKADDR_IS_IPV46(vxlsa))
2011 		return (EINVAL);
2012 	if (VXLAN_SOCKADDR_IS_IPV6(vxlsa)) {
2013 		error = vxlan_sockaddr_in6_embedscope(vxlsa);
2014 		if (error)
2015 			return (error);
2016 	}
2017 
2018 	VXLAN_WLOCK(sc);
2019 	if (vxlan_can_change_config(sc)) {
2020 		vxlan_sockaddr_in_copy(&sc->vxl_dst_addr, &vxlsa->sa);
2021 		vxlan_setup_interface_hdrlen(sc);
2022 		error = 0;
2023 	} else
2024 		error = EBUSY;
2025 	VXLAN_WUNLOCK(sc);
2026 
2027 	return (error);
2028 }
2029 
2030 static int
2031 vxlan_ctrl_set_local_port(struct vxlan_softc *sc, void *arg)
2032 {
2033 	struct ifvxlancmd *cmd;
2034 	int error;
2035 
2036 	cmd = arg;
2037 
2038 	if (cmd->vxlcmd_port == 0)
2039 		return (EINVAL);
2040 
2041 	VXLAN_WLOCK(sc);
2042 	if (vxlan_can_change_config(sc)) {
2043 		sc->vxl_src_addr.in4.sin_port = htons(cmd->vxlcmd_port);
2044 		error = 0;
2045 	} else
2046 		error = EBUSY;
2047 	VXLAN_WUNLOCK(sc);
2048 
2049 	return (error);
2050 }
2051 
2052 static int
2053 vxlan_ctrl_set_remote_port(struct vxlan_softc *sc, void *arg)
2054 {
2055 	struct ifvxlancmd *cmd;
2056 	int error;
2057 
2058 	cmd = arg;
2059 
2060 	if (cmd->vxlcmd_port == 0)
2061 		return (EINVAL);
2062 
2063 	VXLAN_WLOCK(sc);
2064 	if (vxlan_can_change_config(sc)) {
2065 		sc->vxl_dst_addr.in4.sin_port = htons(cmd->vxlcmd_port);
2066 		error = 0;
2067 	} else
2068 		error = EBUSY;
2069 	VXLAN_WUNLOCK(sc);
2070 
2071 	return (error);
2072 }
2073 
2074 static int
2075 vxlan_ctrl_set_port_range(struct vxlan_softc *sc, void *arg)
2076 {
2077 	struct ifvxlancmd *cmd;
2078 	uint16_t min, max;
2079 	int error;
2080 
2081 	cmd = arg;
2082 	min = cmd->vxlcmd_port_min;
2083 	max = cmd->vxlcmd_port_max;
2084 
2085 	if (max < min)
2086 		return (EINVAL);
2087 
2088 	VXLAN_WLOCK(sc);
2089 	if (vxlan_can_change_config(sc)) {
2090 		sc->vxl_min_port = min;
2091 		sc->vxl_max_port = max;
2092 		error = 0;
2093 	} else
2094 		error = EBUSY;
2095 	VXLAN_WUNLOCK(sc);
2096 
2097 	return (error);
2098 }
2099 
2100 static int
2101 vxlan_ctrl_set_ftable_timeout(struct vxlan_softc *sc, void *arg)
2102 {
2103 	struct ifvxlancmd *cmd;
2104 	int error;
2105 
2106 	cmd = arg;
2107 
2108 	VXLAN_WLOCK(sc);
2109 	if (vxlan_check_ftable_timeout(cmd->vxlcmd_ftable_timeout) == 0) {
2110 		sc->vxl_ftable_timeout = cmd->vxlcmd_ftable_timeout;
2111 		error = 0;
2112 	} else
2113 		error = EINVAL;
2114 	VXLAN_WUNLOCK(sc);
2115 
2116 	return (error);
2117 }
2118 
2119 static int
2120 vxlan_ctrl_set_ftable_max(struct vxlan_softc *sc, void *arg)
2121 {
2122 	struct ifvxlancmd *cmd;
2123 	int error;
2124 
2125 	cmd = arg;
2126 
2127 	VXLAN_WLOCK(sc);
2128 	if (vxlan_check_ftable_max(cmd->vxlcmd_ftable_max) == 0) {
2129 		sc->vxl_ftable_max = cmd->vxlcmd_ftable_max;
2130 		error = 0;
2131 	} else
2132 		error = EINVAL;
2133 	VXLAN_WUNLOCK(sc);
2134 
2135 	return (error);
2136 }
2137 
2138 static int
2139 vxlan_ctrl_set_multicast_if(struct vxlan_softc *sc, void *arg)
2140 {
2141 	struct ifvxlancmd *cmd;
2142 	int error;
2143 
2144 	cmd = arg;
2145 
2146 	VXLAN_WLOCK(sc);
2147 	if (vxlan_can_change_config(sc)) {
2148 		strlcpy(sc->vxl_mc_ifname, cmd->vxlcmd_ifname, IFNAMSIZ);
2149 		vxlan_set_hwcaps(sc);
2150 		error = 0;
2151 	} else
2152 		error = EBUSY;
2153 	VXLAN_WUNLOCK(sc);
2154 
2155 	return (error);
2156 }
2157 
2158 static int
2159 vxlan_ctrl_set_ttl(struct vxlan_softc *sc, void *arg)
2160 {
2161 	struct ifvxlancmd *cmd;
2162 	int error;
2163 
2164 	cmd = arg;
2165 
2166 	VXLAN_WLOCK(sc);
2167 	if (vxlan_check_ttl(cmd->vxlcmd_ttl) == 0) {
2168 		sc->vxl_ttl = cmd->vxlcmd_ttl;
2169 		if (sc->vxl_im4o != NULL)
2170 			sc->vxl_im4o->imo_multicast_ttl = sc->vxl_ttl;
2171 		if (sc->vxl_im6o != NULL)
2172 			sc->vxl_im6o->im6o_multicast_hlim = sc->vxl_ttl;
2173 		error = 0;
2174 	} else
2175 		error = EINVAL;
2176 	VXLAN_WUNLOCK(sc);
2177 
2178 	return (error);
2179 }
2180 
2181 static int
2182 vxlan_ctrl_set_learn(struct vxlan_softc *sc, void *arg)
2183 {
2184 	struct ifvxlancmd *cmd;
2185 
2186 	cmd = arg;
2187 
2188 	VXLAN_WLOCK(sc);
2189 	if (cmd->vxlcmd_flags & VXLAN_CMD_FLAG_LEARN)
2190 		sc->vxl_flags |= VXLAN_FLAG_LEARN;
2191 	else
2192 		sc->vxl_flags &= ~VXLAN_FLAG_LEARN;
2193 	VXLAN_WUNLOCK(sc);
2194 
2195 	return (0);
2196 }
2197 
2198 static int
2199 vxlan_ctrl_ftable_entry_add(struct vxlan_softc *sc, void *arg)
2200 {
2201 	union vxlan_sockaddr vxlsa;
2202 	struct ifvxlancmd *cmd;
2203 	struct vxlan_ftable_entry *fe;
2204 	int error;
2205 
2206 	cmd = arg;
2207 	vxlsa = cmd->vxlcmd_sa;
2208 
2209 	if (!VXLAN_SOCKADDR_IS_IPV46(&vxlsa))
2210 		return (EINVAL);
2211 	if (vxlan_sockaddr_in_any(&vxlsa) != 0)
2212 		return (EINVAL);
2213 	if (vxlan_sockaddr_in_multicast(&vxlsa) != 0)
2214 		return (EINVAL);
2215 	/* BMV: We could support both IPv4 and IPv6 later. */
2216 	if (vxlsa.sa.sa_family != sc->vxl_dst_addr.sa.sa_family)
2217 		return (EAFNOSUPPORT);
2218 
2219 	if (VXLAN_SOCKADDR_IS_IPV6(&vxlsa)) {
2220 		error = vxlan_sockaddr_in6_embedscope(&vxlsa);
2221 		if (error)
2222 			return (error);
2223 	}
2224 
2225 	fe = vxlan_ftable_entry_alloc();
2226 	if (fe == NULL)
2227 		return (ENOMEM);
2228 
2229 	if (vxlsa.in4.sin_port == 0)
2230 		vxlsa.in4.sin_port = sc->vxl_dst_addr.in4.sin_port;
2231 
2232 	vxlan_ftable_entry_init(sc, fe, cmd->vxlcmd_mac, &vxlsa.sa,
2233 	    VXLAN_FE_FLAG_STATIC);
2234 
2235 	VXLAN_WLOCK(sc);
2236 	error = vxlan_ftable_entry_insert(sc, fe);
2237 	VXLAN_WUNLOCK(sc);
2238 
2239 	if (error)
2240 		vxlan_ftable_entry_free(fe);
2241 
2242 	return (error);
2243 }
2244 
2245 static int
2246 vxlan_ctrl_ftable_entry_rem(struct vxlan_softc *sc, void *arg)
2247 {
2248 	struct ifvxlancmd *cmd;
2249 	struct vxlan_ftable_entry *fe;
2250 	int error;
2251 
2252 	cmd = arg;
2253 
2254 	VXLAN_WLOCK(sc);
2255 	fe = vxlan_ftable_entry_lookup(sc, cmd->vxlcmd_mac);
2256 	if (fe != NULL) {
2257 		vxlan_ftable_entry_destroy(sc, fe);
2258 		error = 0;
2259 	} else
2260 		error = ENOENT;
2261 	VXLAN_WUNLOCK(sc);
2262 
2263 	return (error);
2264 }
2265 
2266 static int
2267 vxlan_ctrl_flush(struct vxlan_softc *sc, void *arg)
2268 {
2269 	struct ifvxlancmd *cmd;
2270 	int all;
2271 
2272 	cmd = arg;
2273 	all = cmd->vxlcmd_flags & VXLAN_CMD_FLAG_FLUSH_ALL;
2274 
2275 	VXLAN_WLOCK(sc);
2276 	vxlan_ftable_flush(sc, all);
2277 	VXLAN_WUNLOCK(sc);
2278 
2279 	return (0);
2280 }
2281 
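/*
 * Validate and dispatch a driver-specific ioctl: check the command index,
 * transfer direction, privilege and argument size against the control
 * table entry, copy the argument in if needed, invoke the handler and copy
 * the result back out.
 */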
2282 static int
2283 vxlan_ioctl_drvspec(struct vxlan_softc *sc, struct ifdrv *ifd, int get)
2284 {
2285 	const struct vxlan_control *vc;
2286 	union {
2287 		struct ifvxlancfg	cfg;
2288 		struct ifvxlancmd	cmd;
2289 	} args;
2290 	int out, error;
2291 
2292 	if (ifd->ifd_cmd >= vxlan_control_table_size)
2293 		return (EINVAL);
2294 
2295 	bzero(&args, sizeof(args));
2296 	vc = &vxlan_control_table[ifd->ifd_cmd];
2297 	out = (vc->vxlc_flags & VXLAN_CTRL_FLAG_COPYOUT) != 0;
2298 
2299 	if ((get != 0 && out == 0) || (get == 0 && out != 0))
2300 		return (EINVAL);
2301 
2302 	if (vc->vxlc_flags & VXLAN_CTRL_FLAG_SUSER) {
2303 		error = priv_check(curthread, PRIV_NET_VXLAN);
2304 		if (error)
2305 			return (error);
2306 	}
2307 
2308 	if (ifd->ifd_len != vc->vxlc_argsize ||
2309 	    ifd->ifd_len > sizeof(args))
2310 		return (EINVAL);
2311 
2312 	if (vc->vxlc_flags & VXLAN_CTRL_FLAG_COPYIN) {
2313 		error = copyin(ifd->ifd_data, &args, ifd->ifd_len);
2314 		if (error)
2315 			return (error);
2316 	}
2317 
2318 	error = vc->vxlc_func(sc, &args);
2319 	if (error)
2320 		return (error);
2321 
2322 	if (vc->vxlc_flags & VXLAN_CTRL_FLAG_COPYOUT) {
2323 		error = copyout(&args, ifd->ifd_data, ifd->ifd_len);
2324 		if (error)
2325 			return (error);
2326 	}
2327 
2328 	return (0);
2329 }
2330 
2331 static int
2332 vxlan_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
2333 {
2334 	struct rm_priotracker tracker;
2335 	struct vxlan_softc *sc;
2336 	struct ifreq *ifr;
2337 	struct ifdrv *ifd;
2338 	int error;
2339 
2340 	sc = ifp->if_softc;
2341 	ifr = (struct ifreq *) data;
2342 	ifd = (struct ifdrv *) data;
2343 
2344 	error = 0;
2345 
2346 	switch (cmd) {
2347 	case SIOCADDMULTI:
2348 	case SIOCDELMULTI:
2349 		break;
2350 
2351 	case SIOCGDRVSPEC:
2352 	case SIOCSDRVSPEC:
2353 		error = vxlan_ioctl_drvspec(sc, ifd, cmd == SIOCGDRVSPEC);
2354 		break;
2355 
2356 	case SIOCSIFFLAGS:
2357 		error = vxlan_ioctl_ifflags(sc);
2358 		break;
2359 
2360 	case SIOCSIFMEDIA:
2361 	case SIOCGIFMEDIA:
2362 		error = ifmedia_ioctl(ifp, ifr, &sc->vxl_media, cmd);
2363 		break;
2364 
2365 	case SIOCSIFMTU:
2366 		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > VXLAN_MAX_MTU) {
2367 			error = EINVAL;
2368 		} else {
2369 			VXLAN_WLOCK(sc);
2370 			ifp->if_mtu = ifr->ifr_mtu;
2371 			sc->vxl_flags |= VXLAN_FLAG_USER_MTU;
2372 			VXLAN_WUNLOCK(sc);
2373 		}
2374 		break;
2375 
2376 	case SIOCSIFCAP:
2377 		VXLAN_WLOCK(sc);
2378 		error = vxlan_set_reqcap(sc, ifp, ifr->ifr_reqcap);
2379 		if (error == 0)
2380 			vxlan_set_hwcaps(sc);
2381 		VXLAN_WUNLOCK(sc);
2382 		break;
2383 
2384 	case SIOCGTUNFIB:
2385 		VXLAN_RLOCK(sc, &tracker);
2386 		ifr->ifr_fib = sc->vxl_fibnum;
2387 		VXLAN_RUNLOCK(sc, &tracker);
2388 		break;
2389 
2390 	case SIOCSTUNFIB:
2391 		if ((error = priv_check(curthread, PRIV_NET_VXLAN)) != 0)
2392 			break;
2393 
2394 		if (ifr->ifr_fib >= rt_numfibs)
2395 			error = EINVAL;
2396 		else {
2397 			VXLAN_WLOCK(sc);
2398 			sc->vxl_fibnum = ifr->ifr_fib;
2399 			VXLAN_WUNLOCK(sc);
2400 		}
2401 		break;
2402 
2403 	default:
2404 		error = ether_ioctl(ifp, cmd, data);
2405 		break;
2406 	}
2407 
2408 	return (error);
2409 }
2410 
2411 #if defined(INET) || defined(INET6)
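/*
 * Select the outer UDP source port from the configured range, using the
 * mbuf's flow ID when available and otherwise a hash of the inner Ethernet
 * header.  Varying the source port gives intermediate routers entropy for
 * load balancing the encapsulated flows.
 */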
2412 static uint16_t
2413 vxlan_pick_source_port(struct vxlan_softc *sc, struct mbuf *m)
2414 {
2415 	int range;
2416 	uint32_t hash;
2417 
2418 	range = sc->vxl_max_port - sc->vxl_min_port + 1;
2419 
2420 	if (M_HASHTYPE_ISHASH(m))
2421 		hash = m->m_pkthdr.flowid;
2422 	else
2423 		hash = jenkins_hash(m->m_data, ETHER_HDR_LEN,
2424 		    sc->vxl_port_hash_key);
2425 
2426 	return (sc->vxl_min_port + (hash % range));
2427 }
2428 
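/*
 * Write the outer UDP header and the VXLAN header at the given offset into
 * the mbuf.  The UDP checksum is left as zero here and filled in later when
 * required.
 */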
2429 static void
2430 vxlan_encap_header(struct vxlan_softc *sc, struct mbuf *m, int ipoff,
2431     uint16_t srcport, uint16_t dstport)
2432 {
2433 	struct vxlanudphdr *hdr;
2434 	struct udphdr *udph;
2435 	struct vxlan_header *vxh;
2436 	int len;
2437 
2438 	len = m->m_pkthdr.len - ipoff;
2439 	MPASS(len >= sizeof(struct vxlanudphdr));
2440 	hdr = mtodo(m, ipoff);
2441 
2442 	udph = &hdr->vxlh_udp;
2443 	udph->uh_sport = srcport;
2444 	udph->uh_dport = dstport;
2445 	udph->uh_ulen = htons(len);
2446 	udph->uh_sum = 0;
2447 
2448 	vxh = &hdr->vxlh_hdr;
2449 	vxh->vxlh_flags = htonl(VXLAN_HDR_FLAGS_VALID_VNI);
2450 	vxh->vxlh_vni = htonl(sc->vxl_vni << VXLAN_HDR_VNI_SHIFT);
2451 }
2452 #endif
2453 
2454 #if defined(INET6) || defined(INET)
2455 /*
2456  * Return the CSUM_INNER_* equivalent of CSUM_* caps.
2457  */
2458 static uint32_t
2459 csum_flags_to_inner_flags(uint32_t csum_flags_in, const uint32_t encap)
2460 {
2461 	uint32_t csum_flags = encap;
2462 	const uint32_t v4 = CSUM_IP | CSUM_IP_UDP | CSUM_IP_TCP;
2463 
2464 	/*
2465 	 * csum_flags can request either v4 or v6 offload but not both.
2466 	 * tcp_output always sets CSUM_TSO (both CSUM_IP_TSO and CSUM_IP6_TSO)
2467 	 * so those bits are no good to detect the IP version.  Other bits are
2468 	 * always set with CSUM_TSO and we use those to figure out the IP
2469 	 * version.
2470 	 */
2471 	if (csum_flags_in & v4) {
2472 		if (csum_flags_in & CSUM_IP)
2473 			csum_flags |= CSUM_INNER_IP;
2474 		if (csum_flags_in & CSUM_IP_UDP)
2475 			csum_flags |= CSUM_INNER_IP_UDP;
2476 		if (csum_flags_in & CSUM_IP_TCP)
2477 			csum_flags |= CSUM_INNER_IP_TCP;
2478 		if (csum_flags_in & CSUM_IP_TSO)
2479 			csum_flags |= CSUM_INNER_IP_TSO;
2480 	} else {
2481 #ifdef INVARIANTS
2482 		const uint32_t v6 = CSUM_IP6_UDP | CSUM_IP6_TCP;
2483 
2484 		MPASS((csum_flags_in & v6) != 0);
2485 #endif
2486 		if (csum_flags_in & CSUM_IP6_UDP)
2487 			csum_flags |= CSUM_INNER_IP6_UDP;
2488 		if (csum_flags_in & CSUM_IP6_TCP)
2489 			csum_flags |= CSUM_INNER_IP6_TCP;
2490 		if (csum_flags_in & CSUM_IP6_TSO)
2491 			csum_flags |= CSUM_INNER_IP6_TSO;
2492 	}
2493 
2494 	return (csum_flags);
2495 }
2496 #endif
2497 
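/*
 * Encapsulate and transmit a frame over IPv4: prepend the outer IP, UDP and
 * VXLAN headers and send the packet with ip_output().  When the inner frame
 * requests checksum or TSO offload, verify that the outbound interface can
 * offload the encapsulated frame and convert the CSUM_* flags to their
 * CSUM_INNER_* equivalents.
 */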
2498 static int
2499 vxlan_encap4(struct vxlan_softc *sc, const union vxlan_sockaddr *fvxlsa,
2500     struct mbuf *m)
2501 {
2502 #ifdef INET
2503 	struct ifnet *ifp;
2504 	struct ip *ip;
2505 	struct in_addr srcaddr, dstaddr;
2506 	uint16_t srcport, dstport;
2507 	int plen, mcast, error;
2508 	struct route route, *ro;
2509 	struct sockaddr_in *sin;
2510 	uint32_t csum_flags;
2511 
2512 	NET_EPOCH_ASSERT();
2513 
2514 	ifp = sc->vxl_ifp;
2515 	srcaddr = sc->vxl_src_addr.in4.sin_addr;
2516 	srcport = vxlan_pick_source_port(sc, m);
2517 	dstaddr = fvxlsa->in4.sin_addr;
2518 	dstport = fvxlsa->in4.sin_port;
2519 
2520 	plen = m->m_pkthdr.len;
2521 	M_PREPEND(m, sizeof(struct ip) + sizeof(struct vxlanudphdr),
2522 	    M_NOWAIT);
2523 	if (m == NULL) {
2524 		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2525 		return (ENOBUFS);
2526 	}
2527 
2528 	ip = mtod(m, struct ip *);
2529 	ip->ip_tos = 0;
2530 	ip->ip_len = htons(m->m_pkthdr.len);
2531 	ip->ip_off = 0;
2532 	ip->ip_ttl = sc->vxl_ttl;
2533 	ip->ip_p = IPPROTO_UDP;
2534 	ip->ip_sum = 0;
2535 	ip->ip_src = srcaddr;
2536 	ip->ip_dst = dstaddr;
2537 
2538 	vxlan_encap_header(sc, m, sizeof(struct ip), srcport, dstport);
2539 
2540 	mcast = (m->m_flags & (M_MCAST | M_BCAST)) ? 1 : 0;
2541 	m->m_flags &= ~(M_MCAST | M_BCAST);
2542 
2543 	m->m_pkthdr.csum_flags &= CSUM_FLAGS_TX;
2544 	if (m->m_pkthdr.csum_flags != 0) {
2545 		/*
2546 		 * HW checksum (L3 and/or L4) or TSO has been requested.  Look
2547 		 * up the ifnet for the outbound route and verify that the
2548 		 * outbound ifnet can perform the requested operation on the
2549 		 * inner frame.
2550 		 */
2551 		bzero(&route, sizeof(route));
2552 		ro = &route;
2553 		sin = (struct sockaddr_in *)&ro->ro_dst;
2554 		sin->sin_family = AF_INET;
2555 		sin->sin_len = sizeof(*sin);
2556 		sin->sin_addr = ip->ip_dst;
2557 		ro->ro_nh = fib4_lookup(M_GETFIB(m), ip->ip_dst, 0, NHR_NONE,
2558 		    0);
2559 		if (ro->ro_nh == NULL) {
2560 			m_freem(m);
2561 			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2562 			return (EHOSTUNREACH);
2563 		}
2564 
2565 		csum_flags = csum_flags_to_inner_flags(m->m_pkthdr.csum_flags,
2566 		    CSUM_ENCAP_VXLAN);
2567 		if ((csum_flags & ro->ro_nh->nh_ifp->if_hwassist) !=
2568 		    csum_flags) {
2569 			if (ppsratecheck(&sc->err_time, &sc->err_pps, 1)) {
2570 				const struct ifnet *nh_ifp = ro->ro_nh->nh_ifp;
2571 
2572 				if_printf(ifp, "interface %s is missing hwcaps "
2573 				    "0x%08x, csum_flags 0x%08x -> 0x%08x, "
2574 				    "hwassist 0x%08x\n", nh_ifp->if_xname,
2575 				    csum_flags & ~(uint32_t)nh_ifp->if_hwassist,
2576 				    m->m_pkthdr.csum_flags, csum_flags,
2577 				    (uint32_t)nh_ifp->if_hwassist);
2578 			}
2579 			m_freem(m);
2580 			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2581 			return (ENXIO);
2582 		}
2583 		m->m_pkthdr.csum_flags = csum_flags;
2584 		if (csum_flags &
2585 		    (CSUM_INNER_IP | CSUM_INNER_IP_UDP | CSUM_INNER_IP6_UDP |
2586 		    CSUM_INNER_IP_TCP | CSUM_INNER_IP6_TCP)) {
2587 			counter_u64_add(sc->vxl_stats.txcsum, 1);
2588 			if (csum_flags & CSUM_INNER_TSO)
2589 				counter_u64_add(sc->vxl_stats.tso, 1);
2590 		}
2591 	} else
2592 		ro = NULL;
2593 	error = ip_output(m, NULL, ro, 0, sc->vxl_im4o, NULL);
2594 	if (error == 0) {
2595 		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
2596 		if_inc_counter(ifp, IFCOUNTER_OBYTES, plen);
2597 		if (mcast != 0)
2598 			if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
2599 	} else
2600 		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2601 
2602 	return (error);
2603 #else
2604 	m_freem(m);
2605 	return (ENOTSUP);
2606 #endif
2607 }
2608 
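/*
 * IPv6 counterpart of vxlan_encap4().  In addition to the offload handling,
 * a UDP checksum is requested for the outer header unless the destination
 * port matches the configured zero-UDP-checksum port.
 */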
2609 static int
2610 vxlan_encap6(struct vxlan_softc *sc, const union vxlan_sockaddr *fvxlsa,
2611     struct mbuf *m)
2612 {
2613 #ifdef INET6
2614 	struct ifnet *ifp;
2615 	struct ip6_hdr *ip6;
2616 	const struct in6_addr *srcaddr, *dstaddr;
2617 	uint16_t srcport, dstport;
2618 	int plen, mcast, error;
2619 	struct route_in6 route, *ro;
2620 	struct sockaddr_in6 *sin6;
2621 	uint32_t csum_flags;
2622 
2623 	NET_EPOCH_ASSERT();
2624 
2625 	ifp = sc->vxl_ifp;
2626 	srcaddr = &sc->vxl_src_addr.in6.sin6_addr;
2627 	srcport = vxlan_pick_source_port(sc, m);
2628 	dstaddr = &fvxlsa->in6.sin6_addr;
2629 	dstport = fvxlsa->in6.sin6_port;
2630 
2631 	plen = m->m_pkthdr.len;
2632 	M_PREPEND(m, sizeof(struct ip6_hdr) + sizeof(struct vxlanudphdr),
2633 	    M_NOWAIT);
2634 	if (m == NULL) {
2635 		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2636 		return (ENOBUFS);
2637 	}
2638 
2639 	ip6 = mtod(m, struct ip6_hdr *);
2640 	ip6->ip6_flow = 0;		/* BMV: Keep in forwarding entry? */
2641 	ip6->ip6_vfc = IPV6_VERSION;
2642 	ip6->ip6_plen = 0;
2643 	ip6->ip6_nxt = IPPROTO_UDP;
2644 	ip6->ip6_hlim = sc->vxl_ttl;
2645 	ip6->ip6_src = *srcaddr;
2646 	ip6->ip6_dst = *dstaddr;
2647 
2648 	vxlan_encap_header(sc, m, sizeof(struct ip6_hdr), srcport, dstport);
2649 
2650 	mcast = (m->m_flags & (M_MCAST | M_BCAST)) ? 1 : 0;
2651 	m->m_flags &= ~(M_MCAST | M_BCAST);
2652 
2653 	ro = NULL;
2654 	m->m_pkthdr.csum_flags &= CSUM_FLAGS_TX;
2655 	if (m->m_pkthdr.csum_flags != 0) {
2656 		/*
2657 		 * HW checksum (L3 and/or L4) or TSO has been requested.  Look
2658 		 * up the ifnet for the outbound route and verify that the
2659 		 * outbound ifnet can perform the requested operation on the
2660 		 * inner frame.
2661 		 */
2662 		bzero(&route, sizeof(route));
2663 		ro = &route;
2664 		sin6 = (struct sockaddr_in6 *)&ro->ro_dst;
2665 		sin6->sin6_family = AF_INET6;
2666 		sin6->sin6_len = sizeof(*sin6);
2667 		sin6->sin6_addr = ip6->ip6_dst;
2668 		ro->ro_nh = fib6_lookup(M_GETFIB(m), &ip6->ip6_dst, 0,
2669 		    NHR_NONE, 0);
2670 		if (ro->ro_nh == NULL) {
2671 			m_freem(m);
2672 			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2673 			return (EHOSTUNREACH);
2674 		}
2675 
2676 		csum_flags = csum_flags_to_inner_flags(m->m_pkthdr.csum_flags,
2677 		    CSUM_ENCAP_VXLAN);
2678 		if ((csum_flags & ro->ro_nh->nh_ifp->if_hwassist) !=
2679 		    csum_flags) {
2680 			if (ppsratecheck(&sc->err_time, &sc->err_pps, 1)) {
2681 				const struct ifnet *nh_ifp = ro->ro_nh->nh_ifp;
2682 
2683 				if_printf(ifp, "interface %s is missing hwcaps "
2684 				    "0x%08x, csum_flags 0x%08x -> 0x%08x, "
2685 				    "hwassist 0x%08x\n", nh_ifp->if_xname,
2686 				    csum_flags & ~(uint32_t)nh_ifp->if_hwassist,
2687 				    m->m_pkthdr.csum_flags, csum_flags,
2688 				    (uint32_t)nh_ifp->if_hwassist);
2689 			}
2690 			m_freem(m);
2691 			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2692 			return (ENXIO);
2693 		}
2694 		m->m_pkthdr.csum_flags = csum_flags;
2695 		if (csum_flags &
2696 		    (CSUM_INNER_IP | CSUM_INNER_IP_UDP | CSUM_INNER_IP6_UDP |
2697 		    CSUM_INNER_IP_TCP | CSUM_INNER_IP6_TCP)) {
2698 			counter_u64_add(sc->vxl_stats.txcsum, 1);
2699 			if (csum_flags & CSUM_INNER_TSO)
2700 				counter_u64_add(sc->vxl_stats.tso, 1);
2701 		}
2702 	} else if (ntohs(dstport) != V_zero_checksum_port) {
2703 		struct udphdr *hdr = mtodo(m, sizeof(struct ip6_hdr));
2704 
2705 		hdr->uh_sum = in6_cksum_pseudo(ip6,
2706 		    m->m_pkthdr.len - sizeof(struct ip6_hdr), IPPROTO_UDP, 0);
2707 		m->m_pkthdr.csum_flags = CSUM_UDP_IPV6;
2708 		m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
2709 	}
2710 	error = ip6_output(m, NULL, ro, 0, sc->vxl_im6o, NULL, NULL);
2711 	if (error == 0) {
2712 		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
2713 		if_inc_counter(ifp, IFCOUNTER_OBYTES, plen);
2714 		if (mcast != 0)
2715 			if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
2716 	} else
2717 		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2718 
2719 	return (error);
2720 #else
2721 	m_freem(m);
2722 	return (ENOTSUP);
2723 #endif
2724 }
2725 
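/*
 * if_transmit method: look up the remote tunnel endpoint for the inner
 * destination MAC in the forwarding table (falling back to the default
 * entry for unknown, broadcast or multicast destinations) and encapsulate
 * the frame over IPv4 or IPv6 as appropriate.
 */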
2726 static int
2727 vxlan_transmit(struct ifnet *ifp, struct mbuf *m)
2728 {
2729 	struct rm_priotracker tracker;
2730 	union vxlan_sockaddr vxlsa;
2731 	struct vxlan_softc *sc;
2732 	struct vxlan_ftable_entry *fe;
2733 	struct ifnet *mcifp;
2734 	struct ether_header *eh;
2735 	int ipv4, error;
2736 
2737 	sc = ifp->if_softc;
2738 	eh = mtod(m, struct ether_header *);
2739 	fe = NULL;
2740 	mcifp = NULL;
2741 
2742 	ETHER_BPF_MTAP(ifp, m);
2743 
2744 	VXLAN_RLOCK(sc, &tracker);
2745 	M_SETFIB(m, sc->vxl_fibnum);
2746 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2747 		VXLAN_RUNLOCK(sc, &tracker);
2748 		m_freem(m);
2749 		return (ENETDOWN);
2750 	}
2751 
2752 	if ((m->m_flags & (M_BCAST | M_MCAST)) == 0)
2753 		fe = vxlan_ftable_entry_lookup(sc, eh->ether_dhost);
2754 	if (fe == NULL)
2755 		fe = &sc->vxl_default_fe;
2756 	vxlan_sockaddr_copy(&vxlsa, &fe->vxlfe_raddr.sa);
2757 
2758 	ipv4 = VXLAN_SOCKADDR_IS_IPV4(&vxlsa) != 0;
2759 	if (vxlan_sockaddr_in_multicast(&vxlsa) != 0)
2760 		mcifp = vxlan_multicast_if_ref(sc, ipv4);
2761 
2762 	VXLAN_ACQUIRE(sc);
2763 	VXLAN_RUNLOCK(sc, &tracker);
2764 
2765 	if (ipv4 != 0)
2766 		error = vxlan_encap4(sc, &vxlsa, m);
2767 	else
2768 		error = vxlan_encap6(sc, &vxlsa, m);
2769 
2770 	vxlan_release(sc);
2771 	if (mcifp != NULL)
2772 		if_rele(mcifp);
2773 
2774 	return (error);
2775 }
2776 
2777 static void
2778 vxlan_qflush(struct ifnet *ifp __unused)
2779 {
2780 }
2781 
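/*
 * UDP tunnel receive callback.  Validate the VXLAN header, extract the VNI,
 * strip the encapsulation and pass the inner frame to vxlan_input().  The
 * mbuf is always consumed.
 */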
2782 static bool
2783 vxlan_rcv_udp_packet(struct mbuf *m, int offset, struct inpcb *inpcb,
2784     const struct sockaddr *srcsa, void *xvso)
2785 {
2786 	struct vxlan_socket *vso;
2787 	struct vxlan_header *vxh, vxlanhdr;
2788 	uint32_t vni;
2789 	int error __unused;
2790 
2791 	M_ASSERTPKTHDR(m);
2792 	vso = xvso;
2793 	offset += sizeof(struct udphdr);
2794 
2795 	if (m->m_pkthdr.len < offset + sizeof(struct vxlan_header))
2796 		goto out;
2797 
2798 	if (__predict_false(m->m_len < offset + sizeof(struct vxlan_header))) {
2799 		m_copydata(m, offset, sizeof(struct vxlan_header),
2800 		    (caddr_t) &vxlanhdr);
2801 		vxh = &vxlanhdr;
2802 	} else
2803 		vxh = mtodo(m, offset);
2804 
2805 	/*
2806 	 * Drop if there is a reserved bit set in either the flags or VNI
2807 	 * fields of the header. This goes against the specification, but
2808 	 * a bit set may indicate an unsupported new feature. This matches
2809 	 * the behavior of the Linux implementation.
2810 	 */
2811 	if (vxh->vxlh_flags != htonl(VXLAN_HDR_FLAGS_VALID_VNI) ||
2812 	    vxh->vxlh_vni & ~VXLAN_VNI_MASK)
2813 		goto out;
2814 
2815 	vni = ntohl(vxh->vxlh_vni) >> VXLAN_HDR_VNI_SHIFT;
2816 
2817 	/* Adjust to the start of the inner Ethernet frame. */
2818 	m_adj_decap(m, offset + sizeof(struct vxlan_header));
2819 
2820 	error = vxlan_input(vso, vni, &m, srcsa);
2821 	MPASS(error != 0 || m == NULL);
2822 
2823 out:
2824 	if (m != NULL)
2825 		m_freem(m);
2826 
2827 	return (true);
2828 }
2829 
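/*
 * Hand a decapsulated frame to the vxlan interface bound to the given VNI.
 * Optionally learn the source MAC address and tunnel endpoint, and translate
 * any inner checksum offload results to their outer equivalents before
 * passing the packet to if_input.
 */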
2830 static int
2831 vxlan_input(struct vxlan_socket *vso, uint32_t vni, struct mbuf **m0,
2832     const struct sockaddr *sa)
2833 {
2834 	struct vxlan_softc *sc;
2835 	struct ifnet *ifp;
2836 	struct mbuf *m;
2837 	struct ether_header *eh;
2838 	int error;
2839 
2840 	m = *m0;
2841 
2842 	if (m->m_pkthdr.len < ETHER_HDR_LEN)
2843 		return (EINVAL);
2844 
2845 	sc = vxlan_socket_lookup_softc(vso, vni);
2846 	if (sc == NULL)
2847 		return (ENOENT);
2848 
2849 	ifp = sc->vxl_ifp;
2850 	if (m->m_len < ETHER_HDR_LEN &&
2851 	    (m = m_pullup(m, ETHER_HDR_LEN)) == NULL) {
2852 		*m0 = NULL;
2853 		error = ENOBUFS;
2854 		goto out;
2855 	}
2856 	eh = mtod(m, struct ether_header *);
2857 
2858 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2859 		error = ENETDOWN;
2860 		goto out;
2861 	} else if (ifp == m->m_pkthdr.rcvif) {
2862 		/* XXX Does not catch more complex loops. */
2863 		error = EDEADLK;
2864 		goto out;
2865 	}
2866 
2867 	if (sc->vxl_flags & VXLAN_FLAG_LEARN)
2868 		vxlan_ftable_learn(sc, sa, eh->ether_shost);
2869 
2870 	m_clrprotoflags(m);
2871 	m->m_pkthdr.rcvif = ifp;
2872 	M_SETFIB(m, ifp->if_fib);
2873 	if (((ifp->if_capenable & IFCAP_RXCSUM &&
2874 	    m->m_pkthdr.csum_flags & CSUM_INNER_L3_CALC) ||
2875 	    (ifp->if_capenable & IFCAP_RXCSUM_IPV6 &&
2876 	    !(m->m_pkthdr.csum_flags & CSUM_INNER_L3_CALC)))) {
2877 		uint32_t csum_flags = 0;
2878 
2879 		if (m->m_pkthdr.csum_flags & CSUM_INNER_L3_CALC)
2880 			csum_flags |= CSUM_L3_CALC;
2881 		if (m->m_pkthdr.csum_flags & CSUM_INNER_L3_VALID)
2882 			csum_flags |= CSUM_L3_VALID;
2883 		if (m->m_pkthdr.csum_flags & CSUM_INNER_L4_CALC)
2884 			csum_flags |= CSUM_L4_CALC;
2885 		if (m->m_pkthdr.csum_flags & CSUM_INNER_L4_VALID)
2886 			csum_flags |= CSUM_L4_VALID;
2887 		m->m_pkthdr.csum_flags = csum_flags;
2888 		counter_u64_add(sc->vxl_stats.rxcsum, 1);
2889 	} else {
2890 		/* clear everything */
2891 		m->m_pkthdr.csum_flags = 0;
2892 		m->m_pkthdr.csum_data = 0;
2893 	}
2894 
2895 	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
2896 	(*ifp->if_input)(ifp, m);
2897 	*m0 = NULL;
2898 	error = 0;
2899 
2900 out:
2901 	vxlan_release(sc);
2902 	return (error);
2903 }
2904 
2905 static int
2906 vxlan_stats_alloc(struct vxlan_softc *sc)
2907 {
2908 	struct vxlan_statistics *stats = &sc->vxl_stats;
2909 
2910 	stats->txcsum = counter_u64_alloc(M_WAITOK);
2911 	if (stats->txcsum == NULL)
2912 		goto failed;
2913 
2914 	stats->tso = counter_u64_alloc(M_WAITOK);
2915 	if (stats->tso == NULL)
2916 		goto failed;
2917 
2918 	stats->rxcsum = counter_u64_alloc(M_WAITOK);
2919 	if (stats->rxcsum == NULL)
2920 		goto failed;
2921 
2922 	return (0);
2923 failed:
2924 	vxlan_stats_free(sc);
2925 	return (ENOMEM);
2926 }
2927 
2928 static void
2929 vxlan_stats_free(struct vxlan_softc *sc)
2930 {
2931 	struct vxlan_statistics *stats = &sc->vxl_stats;
2932 
2933 	if (stats->txcsum != NULL) {
2934 		counter_u64_free(stats->txcsum);
2935 		stats->txcsum = NULL;
2936 	}
2937 	if (stats->tso != NULL) {
2938 		counter_u64_free(stats->tso);
2939 		stats->tso = NULL;
2940 	}
2941 	if (stats->rxcsum != NULL) {
2942 		counter_u64_free(stats->rxcsum);
2943 		stats->rxcsum = NULL;
2944 	}
2945 }
2946 
2947 static void
2948 vxlan_set_default_config(struct vxlan_softc *sc)
2949 {
2950 
2951 	sc->vxl_flags |= VXLAN_FLAG_LEARN;
2952 
2953 	sc->vxl_vni = VXLAN_VNI_MAX;
2954 	sc->vxl_ttl = IPDEFTTL;
2955 
2956 	if (!vxlan_tunable_int(sc, "legacy_port", vxlan_legacy_port)) {
2957 		sc->vxl_src_addr.in4.sin_port = htons(VXLAN_PORT);
2958 		sc->vxl_dst_addr.in4.sin_port = htons(VXLAN_PORT);
2959 	} else {
2960 		sc->vxl_src_addr.in4.sin_port = htons(VXLAN_LEGACY_PORT);
2961 		sc->vxl_dst_addr.in4.sin_port = htons(VXLAN_LEGACY_PORT);
2962 	}
2963 
2964 	sc->vxl_min_port = V_ipport_firstauto;
2965 	sc->vxl_max_port = V_ipport_lastauto;
2966 
2967 	sc->vxl_ftable_max = VXLAN_FTABLE_MAX;
2968 	sc->vxl_ftable_timeout = VXLAN_FTABLE_TIMEOUT;
2969 }
2970 
2971 static int
2972 vxlan_set_user_config(struct vxlan_softc *sc, struct ifvxlanparam *vxlp)
2973 {
2974 
2975 #ifndef INET
2976 	if (vxlp->vxlp_with & (VXLAN_PARAM_WITH_LOCAL_ADDR4 |
2977 	    VXLAN_PARAM_WITH_REMOTE_ADDR4))
2978 		return (EAFNOSUPPORT);
2979 #endif
2980 
2981 #ifndef INET6
2982 	if (vxlp->vxlp_with & (VXLAN_PARAM_WITH_LOCAL_ADDR6 |
2983 	    VXLAN_PARAM_WITH_REMOTE_ADDR6))
2984 		return (EAFNOSUPPORT);
2985 #else
2986 	if (vxlp->vxlp_with & VXLAN_PARAM_WITH_LOCAL_ADDR6) {
2987 		int error = vxlan_sockaddr_in6_embedscope(&vxlp->vxlp_local_sa);
2988 		if (error)
2989 			return (error);
2990 	}
2991 	if (vxlp->vxlp_with & VXLAN_PARAM_WITH_REMOTE_ADDR6) {
2992 		int error = vxlan_sockaddr_in6_embedscope(
2993 		   &vxlp->vxlp_remote_sa);
2994 		if (error)
2995 			return (error);
2996 	}
2997 #endif
2998 
2999 	if (vxlp->vxlp_with & VXLAN_PARAM_WITH_VNI) {
3000 		if (vxlan_check_vni(vxlp->vxlp_vni) == 0)
3001 			sc->vxl_vni = vxlp->vxlp_vni;
3002 	}
3003 
3004 	if (vxlp->vxlp_with & VXLAN_PARAM_WITH_LOCAL_ADDR4) {
3005 		sc->vxl_src_addr.in4.sin_len = sizeof(struct sockaddr_in);
3006 		sc->vxl_src_addr.in4.sin_family = AF_INET;
3007 		sc->vxl_src_addr.in4.sin_addr =
3008 		    vxlp->vxlp_local_sa.in4.sin_addr;
3009 	} else if (vxlp->vxlp_with & VXLAN_PARAM_WITH_LOCAL_ADDR6) {
3010 		sc->vxl_src_addr.in6.sin6_len = sizeof(struct sockaddr_in6);
3011 		sc->vxl_src_addr.in6.sin6_family = AF_INET6;
3012 		sc->vxl_src_addr.in6.sin6_addr =
3013 		    vxlp->vxlp_local_sa.in6.sin6_addr;
3014 	}
3015 
3016 	if (vxlp->vxlp_with & VXLAN_PARAM_WITH_REMOTE_ADDR4) {
3017 		sc->vxl_dst_addr.in4.sin_len = sizeof(struct sockaddr_in);
3018 		sc->vxl_dst_addr.in4.sin_family = AF_INET;
3019 		sc->vxl_dst_addr.in4.sin_addr =
3020 		    vxlp->vxlp_remote_sa.in4.sin_addr;
3021 	} else if (vxlp->vxlp_with & VXLAN_PARAM_WITH_REMOTE_ADDR6) {
3022 		sc->vxl_dst_addr.in6.sin6_len = sizeof(struct sockaddr_in6);
3023 		sc->vxl_dst_addr.in6.sin6_family = AF_INET6;
3024 		sc->vxl_dst_addr.in6.sin6_addr =
3025 		    vxlp->vxlp_remote_sa.in6.sin6_addr;
3026 	}
3027 
3028 	if (vxlp->vxlp_with & VXLAN_PARAM_WITH_LOCAL_PORT)
3029 		sc->vxl_src_addr.in4.sin_port = htons(vxlp->vxlp_local_port);
3030 	if (vxlp->vxlp_with & VXLAN_PARAM_WITH_REMOTE_PORT)
3031 		sc->vxl_dst_addr.in4.sin_port = htons(vxlp->vxlp_remote_port);
3032 
3033 	if (vxlp->vxlp_with & VXLAN_PARAM_WITH_PORT_RANGE) {
3034 		if (vxlp->vxlp_min_port <= vxlp->vxlp_max_port) {
3035 			sc->vxl_min_port = vxlp->vxlp_min_port;
3036 			sc->vxl_max_port = vxlp->vxlp_max_port;
3037 		}
3038 	}
3039 
3040 	if (vxlp->vxlp_with & VXLAN_PARAM_WITH_MULTICAST_IF)
3041 		strlcpy(sc->vxl_mc_ifname, vxlp->vxlp_mc_ifname, IFNAMSIZ);
3042 
3043 	if (vxlp->vxlp_with & VXLAN_PARAM_WITH_FTABLE_TIMEOUT) {
3044 		if (vxlan_check_ftable_timeout(vxlp->vxlp_ftable_timeout) == 0)
3045 			sc->vxl_ftable_timeout = vxlp->vxlp_ftable_timeout;
3046 	}
3047 
3048 	if (vxlp->vxlp_with & VXLAN_PARAM_WITH_FTABLE_MAX) {
3049 		if (vxlan_check_ftable_max(vxlp->vxlp_ftable_max) == 0)
3050 			sc->vxl_ftable_max = vxlp->vxlp_ftable_max;
3051 	}
3052 
3053 	if (vxlp->vxlp_with & VXLAN_PARAM_WITH_TTL) {
3054 		if (vxlan_check_ttl(vxlp->vxlp_ttl) == 0)
3055 			sc->vxl_ttl = vxlp->vxlp_ttl;
3056 	}
3057 
3058 	if (vxlp->vxlp_with & VXLAN_PARAM_WITH_LEARN) {
3059 		if (vxlp->vxlp_learn == 0)
3060 			sc->vxl_flags &= ~VXLAN_FLAG_LEARN;
3061 	}
3062 
3063 	return (0);
3064 }
3065 
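/*
 * Apply a user capability request (SIOCSIFCAP), ensuring that TSO is only
 * enabled together with the corresponding transmit checksum offload.
 */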
3066 static int
3067 vxlan_set_reqcap(struct vxlan_softc *sc, struct ifnet *ifp, int reqcap)
3068 {
3069 	int mask = reqcap ^ ifp->if_capenable;
3070 
3071 	/* Disable TSO if tx checksums are disabled. */
3072 	if (mask & IFCAP_TXCSUM && !(reqcap & IFCAP_TXCSUM) &&
3073 	    reqcap & IFCAP_TSO4) {
3074 		reqcap &= ~IFCAP_TSO4;
3075 		if_printf(ifp, "tso4 disabled due to -txcsum.\n");
3076 	}
3077 	if (mask & IFCAP_TXCSUM_IPV6 && !(reqcap & IFCAP_TXCSUM_IPV6) &&
3078 	    reqcap & IFCAP_TSO6) {
3079 		reqcap &= ~IFCAP_TSO6;
3080 		if_printf(ifp, "tso6 disabled due to -txcsum6.\n");
3081 	}
3082 
3083 	/* Do not enable TSO if tx checksums are disabled. */
3084 	if (mask & IFCAP_TSO4 && reqcap & IFCAP_TSO4 &&
3085 	    !(reqcap & IFCAP_TXCSUM)) {
3086 		if_printf(ifp, "enable txcsum first.\n");
3087 		return (EAGAIN);
3088 	}
3089 	if (mask & IFCAP_TSO6 && reqcap & IFCAP_TSO6 &&
3090 	    !(reqcap & IFCAP_TXCSUM_IPV6)) {
3091 		if_printf(ifp, "enable txcsum6 first.\n");
3092 		return (EAGAIN);
3093 	}
3094 
3095 	sc->vxl_reqcap = reqcap;
3096 	return (0);
3097 }
3098 
3099 /*
3100  * A VXLAN interface inherits the capabilities of the vxlandev or the interface
3101  * hosting the vxlanlocal address.
3102  */
3103 static void
3104 vxlan_set_hwcaps(struct vxlan_softc *sc)
3105 {
3106 	struct epoch_tracker et;
3107 	struct ifnet *p;
3108 	struct ifaddr *ifa;
3109 	u_long hwa;
3110 	int cap, ena;
3111 	bool rel;
3112 	struct ifnet *ifp = sc->vxl_ifp;
3113 
3114 	/* reset caps */
3115 	ifp->if_capabilities &= VXLAN_BASIC_IFCAPS;
3116 	ifp->if_capenable &= VXLAN_BASIC_IFCAPS;
3117 	ifp->if_hwassist = 0;
3118 
3119 	NET_EPOCH_ENTER(et);
3120 	CURVNET_SET(ifp->if_vnet);
3121 
3122 	rel = false;
3123 	p = NULL;
3124 	if (sc->vxl_mc_ifname[0] != '\0') {
3125 		rel = true;
3126 		p = ifunit_ref(sc->vxl_mc_ifname);
3127 	} else if (vxlan_sockaddr_in_any(&sc->vxl_src_addr) == 0) {
3128 		if (sc->vxl_src_addr.sa.sa_family == AF_INET) {
3129 			struct sockaddr_in in4 = sc->vxl_src_addr.in4;
3130 
3131 			in4.sin_port = 0;
3132 			ifa = ifa_ifwithaddr((struct sockaddr *)&in4);
3133 			if (ifa != NULL)
3134 				p = ifa->ifa_ifp;
3135 		} else if (sc->vxl_src_addr.sa.sa_family == AF_INET6) {
3136 			struct sockaddr_in6 in6 = sc->vxl_src_addr.in6;
3137 
3138 			in6.sin6_port = 0;
3139 			ifa = ifa_ifwithaddr((struct sockaddr *)&in6);
3140 			if (ifa != NULL)
3141 				p = ifa->ifa_ifp;
3142 		}
3143 	}
3144 	if (p == NULL)
3145 		goto done;
3146 
3147 	cap = ena = hwa = 0;
3148 
3149 	/* checksum offload */
3150 	if (p->if_capabilities & IFCAP_VXLAN_HWCSUM)
3151 		cap |= p->if_capabilities & (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6);
3152 	if (p->if_capenable & IFCAP_VXLAN_HWCSUM) {
3153 		ena |= sc->vxl_reqcap & p->if_capenable &
3154 		    (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6);
3155 		if (ena & IFCAP_TXCSUM) {
3156 			if (p->if_hwassist & CSUM_INNER_IP)
3157 				hwa |= CSUM_IP;
3158 			if (p->if_hwassist & CSUM_INNER_IP_UDP)
3159 				hwa |= CSUM_IP_UDP;
3160 			if (p->if_hwassist & CSUM_INNER_IP_TCP)
3161 				hwa |= CSUM_IP_TCP;
3162 		}
3163 		if (ena & IFCAP_TXCSUM_IPV6) {
3164 			if (p->if_hwassist & CSUM_INNER_IP6_UDP)
3165 				hwa |= CSUM_IP6_UDP;
3166 			if (p->if_hwassist & CSUM_INNER_IP6_TCP)
3167 				hwa |= CSUM_IP6_TCP;
3168 		}
3169 	}
3170 
3171 	/* hardware TSO */
3172 	if (p->if_capabilities & IFCAP_VXLAN_HWTSO) {
3173 		cap |= p->if_capabilities & IFCAP_TSO;
3174 		if (p->if_hw_tsomax > IP_MAXPACKET - ifp->if_hdrlen)
3175 			ifp->if_hw_tsomax = IP_MAXPACKET - ifp->if_hdrlen;
3176 		else
3177 			ifp->if_hw_tsomax = p->if_hw_tsomax;
3178 		/* XXX: tsomaxsegcount decrement is cxgbe-specific. */
3179 		ifp->if_hw_tsomaxsegcount = p->if_hw_tsomaxsegcount - 1;
3180 		ifp->if_hw_tsomaxsegsize = p->if_hw_tsomaxsegsize;
3181 	}
3182 	if (p->if_capenable & IFCAP_VXLAN_HWTSO) {
3183 		ena |= sc->vxl_reqcap & p->if_capenable & IFCAP_TSO;
3184 		if (ena & IFCAP_TSO) {
3185 			if (p->if_hwassist & CSUM_INNER_IP_TSO)
3186 				hwa |= CSUM_IP_TSO;
3187 			if (p->if_hwassist & CSUM_INNER_IP6_TSO)
3188 				hwa |= CSUM_IP6_TSO;
3189 		}
3190 	}
3191 
3192 	ifp->if_capabilities |= cap;
3193 	ifp->if_capenable |= ena;
3194 	ifp->if_hwassist |= hwa;
3195 	if (rel)
3196 		if_rele(p);
3197 done:
3198 	CURVNET_RESTORE();
3199 	NET_EPOCH_EXIT(et);
3200 }
3201 
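/*
 * if_clone create method: allocate and initialize the softc, apply any
 * parameters supplied at creation time, and attach the Ethernet interface.
 */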
3202 static int
3203 vxlan_clone_create(struct if_clone *ifc, char *name, size_t len,
3204     struct ifc_data *ifd, struct ifnet **ifpp)
3205 {
3206 	struct vxlan_softc *sc;
3207 	struct ifnet *ifp;
3208 	struct ifvxlanparam vxlp;
3209 	int error;
3210 
3211 	sc = malloc(sizeof(struct vxlan_softc), M_VXLAN, M_WAITOK | M_ZERO);
3212 	sc->vxl_unit = ifd->unit;
3213 	sc->vxl_fibnum = curthread->td_proc->p_fibnum;
3214 	vxlan_set_default_config(sc);
3215 	error = vxlan_stats_alloc(sc);
3216 	if (error != 0)
3217 		goto fail;
3218 
3219 	if (ifd->params != NULL) {
3220 		error = ifc_copyin(ifd, &vxlp, sizeof(vxlp));
3221 		if (error)
3222 			goto fail;
3223 
3224 		error = vxlan_set_user_config(sc, &vxlp);
3225 		if (error)
3226 			goto fail;
3227 	}
3228 
3229 	ifp = if_alloc(IFT_ETHER);
3230 	if (ifp == NULL) {
3231 		error = ENOSPC;
3232 		goto fail;
3233 	}
3234 
3235 	sc->vxl_ifp = ifp;
3236 	rm_init(&sc->vxl_lock, "vxlanrm");
3237 	callout_init_rw(&sc->vxl_callout, &sc->vxl_lock, 0);
3238 	sc->vxl_port_hash_key = arc4random();
3239 	vxlan_ftable_init(sc);
3240 
3241 	vxlan_sysctl_setup(sc);
3242 
3243 	ifp->if_softc = sc;
3244 	if_initname(ifp, vxlan_name, ifd->unit);
3245 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
3246 	ifp->if_init = vxlan_init;
3247 	ifp->if_ioctl = vxlan_ioctl;
3248 	ifp->if_transmit = vxlan_transmit;
3249 	ifp->if_qflush = vxlan_qflush;
3250 	ifp->if_capabilities = VXLAN_BASIC_IFCAPS;
3251 	ifp->if_capenable = VXLAN_BASIC_IFCAPS;
3252 	sc->vxl_reqcap = -1;
3253 	vxlan_set_hwcaps(sc);
3254 
3255 	ifmedia_init(&sc->vxl_media, 0, vxlan_media_change, vxlan_media_status);
3256 	ifmedia_add(&sc->vxl_media, IFM_ETHER | IFM_AUTO, 0, NULL);
3257 	ifmedia_set(&sc->vxl_media, IFM_ETHER | IFM_AUTO);
3258 
3259 	ether_gen_addr(ifp, &sc->vxl_hwaddr);
3260 	ether_ifattach(ifp, sc->vxl_hwaddr.octet);
3261 
3262 	ifp->if_baudrate = 0;
3263 
3264 	VXLAN_WLOCK(sc);
3265 	vxlan_setup_interface_hdrlen(sc);
3266 	VXLAN_WUNLOCK(sc);
3267 	*ifpp = ifp;
3268 
3269 	return (0);
3270 
3271 fail:
3272 	free(sc, M_VXLAN);
3273 	return (error);
3274 }
3275 
3276 static int
3277 vxlan_clone_destroy(struct if_clone *ifc, struct ifnet *ifp, uint32_t flags)
3278 {
3279 	struct vxlan_softc *sc;
3280 
3281 	sc = ifp->if_softc;
3282 
3283 	vxlan_teardown(sc);
3284 
3285 	vxlan_ftable_flush(sc, 1);
3286 
3287 	ether_ifdetach(ifp);
3288 	if_free(ifp);
3289 	ifmedia_removeall(&sc->vxl_media);
3290 
3291 	vxlan_ftable_fini(sc);
3292 
3293 	vxlan_sysctl_destroy(sc);
3294 	rm_destroy(&sc->vxl_lock);
3295 	vxlan_stats_free(sc);
3296 	free(sc, M_VXLAN);
3297 
3298 	return (0);
3299 }
3300 
3301 /* BMV: Taken from if_bridge. */
3302 static uint32_t
3303 vxlan_mac_hash(struct vxlan_softc *sc, const uint8_t *addr)
3304 {
3305 	uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->vxl_ftable_hash_key;
3306 
3307 	b += addr[5] << 8;
3308 	b += addr[4];
3309 	a += addr[3] << 24;
3310 	a += addr[2] << 16;
3311 	a += addr[1] << 8;
3312 	a += addr[0];
3313 
3314 /*
3315  * The following hash function is adapted from "Hash Functions" by Bob Jenkins
3316  * ("Algorithm Alley", Dr. Dobb's Journal, September 1997).
3317  */
3318 #define	mix(a, b, c)							\
3319 do {									\
3320 	a -= b; a -= c; a ^= (c >> 13);					\
3321 	b -= c; b -= a; b ^= (a << 8);					\
3322 	c -= a; c -= b; c ^= (b >> 13);					\
3323 	a -= b; a -= c; a ^= (c >> 12);					\
3324 	b -= c; b -= a; b ^= (a << 16);					\
3325 	c -= a; c -= b; c ^= (b >> 5);					\
3326 	a -= b; a -= c; a ^= (c >> 3);					\
3327 	b -= c; b -= a; b ^= (a << 10);					\
3328 	c -= a; c -= b; c ^= (b >> 15);					\
3329 } while (0)
3330 
3331 	mix(a, b, c);
3332 
3333 #undef mix
3334 
3335 	return (c);
3336 }
3337 
3338 static int
3339 vxlan_media_change(struct ifnet *ifp)
3340 {
3341 
3342 	/* Ignore. */
3343 	return (0);
3344 }
3345 
3346 static void
3347 vxlan_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
3348 {
3349 
3350 	ifmr->ifm_status = IFM_ACTIVE | IFM_AVALID;
3351 	ifmr->ifm_active = IFM_ETHER | IFM_FDX;
3352 }
3353 
3354 static int
3355 vxlan_sockaddr_cmp(const union vxlan_sockaddr *vxladdr,
3356     const struct sockaddr *sa)
3357 {
3358 
3359 	return (bcmp(&vxladdr->sa, sa, vxladdr->sa.sa_len));
3360 }
3361 
3362 static void
3363 vxlan_sockaddr_copy(union vxlan_sockaddr *vxladdr,
3364     const struct sockaddr *sa)
3365 {
3366 
3367 	MPASS(sa->sa_family == AF_INET || sa->sa_family == AF_INET6);
3368 	bzero(vxladdr, sizeof(*vxladdr));
3369 
3370 	if (sa->sa_family == AF_INET) {
3371 		vxladdr->in4 = *satoconstsin(sa);
3372 		vxladdr->in4.sin_len = sizeof(struct sockaddr_in);
3373 	} else if (sa->sa_family == AF_INET6) {
3374 		vxladdr->in6 = *satoconstsin6(sa);
3375 		vxladdr->in6.sin6_len = sizeof(struct sockaddr_in6);
3376 	}
3377 }
3378 
3379 static int
3380 vxlan_sockaddr_in_equal(const union vxlan_sockaddr *vxladdr,
3381     const struct sockaddr *sa)
3382 {
3383 	int equal;
3384 
3385 	if (sa->sa_family == AF_INET) {
3386 		const struct in_addr *in4 = &satoconstsin(sa)->sin_addr;
3387 		equal = in4->s_addr == vxladdr->in4.sin_addr.s_addr;
3388 	} else if (sa->sa_family == AF_INET6) {
3389 		const struct in6_addr *in6 = &satoconstsin6(sa)->sin6_addr;
3390 		equal = IN6_ARE_ADDR_EQUAL(in6, &vxladdr->in6.sin6_addr);
3391 	} else
3392 		equal = 0;
3393 
3394 	return (equal);
3395 }
3396 
3397 static void
3398 vxlan_sockaddr_in_copy(union vxlan_sockaddr *vxladdr,
3399     const struct sockaddr *sa)
3400 {
3401 
3402 	MPASS(sa->sa_family == AF_INET || sa->sa_family == AF_INET6);
3403 
3404 	if (sa->sa_family == AF_INET) {
3405 		const struct in_addr *in4 = &satoconstsin(sa)->sin_addr;
3406 		vxladdr->in4.sin_family = AF_INET;
3407 		vxladdr->in4.sin_len = sizeof(struct sockaddr_in);
3408 		vxladdr->in4.sin_addr = *in4;
3409 	} else if (sa->sa_family == AF_INET6) {
3410 		const struct in6_addr *in6 = &satoconstsin6(sa)->sin6_addr;
3411 		vxladdr->in6.sin6_family = AF_INET6;
3412 		vxladdr->in6.sin6_len = sizeof(struct sockaddr_in6);
3413 		vxladdr->in6.sin6_addr = *in6;
3414 	}
3415 }
3416 
3417 static int
3418 vxlan_sockaddr_supported(const union vxlan_sockaddr *vxladdr, int unspec)
3419 {
3420 	const struct sockaddr *sa;
3421 	int supported;
3422 
3423 	sa = &vxladdr->sa;
3424 	supported = 0;
3425 
3426 	if (sa->sa_family == AF_UNSPEC && unspec != 0) {
3427 		supported = 1;
3428 	} else if (sa->sa_family == AF_INET) {
3429 #ifdef INET
3430 		supported = 1;
3431 #endif
3432 	} else if (sa->sa_family == AF_INET6) {
3433 #ifdef INET6
3434 		supported = 1;
3435 #endif
3436 	}
3437 
3438 	return (supported);
3439 }
3440 
3441 static int
3442 vxlan_sockaddr_in_any(const union vxlan_sockaddr *vxladdr)
3443 {
3444 	const struct sockaddr *sa;
3445 	int any;
3446 
3447 	sa = &vxladdr->sa;
3448 
3449 	if (sa->sa_family == AF_INET) {
3450 		const struct in_addr *in4 = &satoconstsin(sa)->sin_addr;
3451 		any = in4->s_addr == INADDR_ANY;
3452 	} else if (sa->sa_family == AF_INET6) {
3453 		const struct in6_addr *in6 = &satoconstsin6(sa)->sin6_addr;
3454 		any = IN6_IS_ADDR_UNSPECIFIED(in6);
3455 	} else
3456 		any = -1;
3457 
3458 	return (any);
3459 }
3460 
3461 static int
3462 vxlan_sockaddr_in_multicast(const union vxlan_sockaddr *vxladdr)
3463 {
3464 	const struct sockaddr *sa;
3465 	int mc;
3466 
3467 	sa = &vxladdr->sa;
3468 
3469 	if (sa->sa_family == AF_INET) {
3470 		const struct in_addr *in4 = &satoconstsin(sa)->sin_addr;
3471 		mc = IN_MULTICAST(ntohl(in4->s_addr));
3472 	} else if (sa->sa_family == AF_INET6) {
3473 		const struct in6_addr *in6 = &satoconstsin6(sa)->sin6_addr;
3474 		mc = IN6_IS_ADDR_MULTICAST(in6);
3475 	} else
3476 		mc = -1;
3477 
3478 	return (mc);
3479 }
3480 
3481 static int
3482 vxlan_sockaddr_in6_embedscope(union vxlan_sockaddr *vxladdr)
3483 {
3484 	int error;
3485 
3486 	MPASS(VXLAN_SOCKADDR_IS_IPV6(vxladdr));
3487 #ifdef INET6
3488 	error = sa6_embedscope(&vxladdr->in6, V_ip6_use_defzone);
3489 #else
3490 	error = EAFNOSUPPORT;
3491 #endif
3492 
3493 	return (error);
3494 }
3495 
3496 static int
3497 vxlan_can_change_config(struct vxlan_softc *sc)
3498 {
3499 	struct ifnet *ifp;
3500 
3501 	ifp = sc->vxl_ifp;
3502 	VXLAN_LOCK_ASSERT(sc);
3503 
3504 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3505 		return (0);
3506 	if (sc->vxl_flags & (VXLAN_FLAG_INIT | VXLAN_FLAG_TEARDOWN))
3507 		return (0);
3508 
3509 	return (1);
3510 }
3511 
3512 static int
3513 vxlan_check_vni(uint32_t vni)
3514 {
3515 
3516 	return (vni >= VXLAN_VNI_MAX);
3517 }
3518 
3519 static int
3520 vxlan_check_ttl(int ttl)
3521 {
3522 
3523 	return (ttl > MAXTTL);
3524 }
3525 
3526 static int
3527 vxlan_check_ftable_timeout(uint32_t timeout)
3528 {
3529 
3530 	return (timeout > VXLAN_FTABLE_MAX_TIMEOUT);
3531 }
3532 
3533 static int
3534 vxlan_check_ftable_max(uint32_t max)
3535 {
3536 
3537 	return (max > VXLAN_FTABLE_MAX);
3538 }
3539 
3540 static void
3541 vxlan_sysctl_setup(struct vxlan_softc *sc)
3542 {
3543 	struct sysctl_ctx_list *ctx;
3544 	struct sysctl_oid *node;
3545 	struct vxlan_statistics *stats;
3546 	char namebuf[8];
3547 
3548 	ctx = &sc->vxl_sysctl_ctx;
3549 	stats = &sc->vxl_stats;
3550 	snprintf(namebuf, sizeof(namebuf), "%d", sc->vxl_unit);
3551 
3552 	sysctl_ctx_init(ctx);
3553 	sc->vxl_sysctl_node = SYSCTL_ADD_NODE(ctx,
3554 	    SYSCTL_STATIC_CHILDREN(_net_link_vxlan), OID_AUTO, namebuf,
3555 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");
3556 
3557 	node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(sc->vxl_sysctl_node),
3558 	    OID_AUTO, "ftable", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");
3559 	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(node), OID_AUTO, "count",
3560 	    CTLFLAG_RD, &sc->vxl_ftable_cnt, 0,
3561 	    "Number of entries in forwarding table");
3562 	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(node), OID_AUTO, "max",
3563 	     CTLFLAG_RD, &sc->vxl_ftable_max, 0,
3564 	    "Maximum number of entries allowed in forwarding table");
3565 	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(node), OID_AUTO, "timeout",
3566 	    CTLFLAG_RD, &sc->vxl_ftable_timeout, 0,
3567 	    "Number of seconds between prunes of the forwarding table");
3568 	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(node), OID_AUTO, "dump",
3569 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE | CTLFLAG_SKIP,
3570 	    sc, 0, vxlan_ftable_sysctl_dump, "A",
3571 	    "Dump the forwarding table entries");
3572 
3573 	node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(sc->vxl_sysctl_node),
3574 	    OID_AUTO, "stats", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");
3575 	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(node), OID_AUTO,
3576 	    "ftable_nospace", CTLFLAG_RD, &stats->ftable_nospace, 0,
3577 	    "Forwarding table reached maximum entries");
3578 	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(node), OID_AUTO,
3579 	    "ftable_lock_upgrade_failed", CTLFLAG_RD,
3580 	    &stats->ftable_lock_upgrade_failed, 0,
3581 	    "Forwarding table update required lock upgrade");
3582 
3583 	SYSCTL_ADD_COUNTER_U64(ctx, SYSCTL_CHILDREN(node), OID_AUTO, "txcsum",
3584 	    CTLFLAG_RD, &stats->txcsum,
3585 	    "# of times hardware assisted with tx checksum");
3586 	SYSCTL_ADD_COUNTER_U64(ctx, SYSCTL_CHILDREN(node), OID_AUTO, "tso",
3587 	    CTLFLAG_RD, &stats->tso, "# of times hardware assisted with TSO");
3588 	SYSCTL_ADD_COUNTER_U64(ctx, SYSCTL_CHILDREN(node), OID_AUTO, "rxcsum",
3589 	    CTLFLAG_RD, &stats->rxcsum,
3590 	    "# of times hardware assisted with rx checksum");
3591 }
3592 
3593 static void
3594 vxlan_sysctl_destroy(struct vxlan_softc *sc)
3595 {
3596 
3597 	sysctl_ctx_free(&sc->vxl_sysctl_ctx);
3598 	sc->vxl_sysctl_node = NULL;
3599 }
3600 
3601 static int
3602 vxlan_tunable_int(struct vxlan_softc *sc, const char *knob, int def)
3603 {
3604 	char path[64];
3605 
3606 	snprintf(path, sizeof(path), "net.link.vxlan.%d.%s",
3607 	    sc->vxl_unit, knob);
3608 	TUNABLE_INT_FETCH(path, &def);
3609 
3610 	return (def);
3611 }
3612 
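/*
 * ifnet departure event handler: tear down any vxlan interfaces whose
 * multicast interface is going away.  Interface renames are ignored.
 */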
3613 static void
3614 vxlan_ifdetach_event(void *arg __unused, struct ifnet *ifp)
3615 {
3616 	struct vxlan_softc_head list;
3617 	struct vxlan_socket *vso;
3618 	struct vxlan_softc *sc, *tsc;
3619 
3620 	LIST_INIT(&list);
3621 
3622 	if (ifp->if_flags & IFF_RENAMING)
3623 		return;
3624 	if ((ifp->if_flags & IFF_MULTICAST) == 0)
3625 		return;
3626 
3627 	VXLAN_LIST_LOCK();
3628 	LIST_FOREACH(vso, &vxlan_socket_list, vxlso_entry)
3629 		vxlan_socket_ifdetach(vso, ifp, &list);
3630 	VXLAN_LIST_UNLOCK();
3631 
3632 	LIST_FOREACH_SAFE(sc, &list, vxl_ifdetach_list, tsc) {
3633 		LIST_REMOVE(sc, vxl_ifdetach_list);
3634 
3635 		sx_xlock(&vxlan_sx);
3636 		VXLAN_WLOCK(sc);
3637 		if (sc->vxl_flags & VXLAN_FLAG_INIT)
3638 			vxlan_init_wait(sc);
3639 		vxlan_teardown_locked(sc);
3640 		sx_xunlock(&vxlan_sx);
3641 	}
3642 }
3643 
3644 static void
3645 vxlan_load(void)
3646 {
3647 
3648 	mtx_init(&vxlan_list_mtx, "vxlan list", NULL, MTX_DEF);
3649 	LIST_INIT(&vxlan_socket_list);
3650 	vxlan_ifdetach_event_tag = EVENTHANDLER_REGISTER(ifnet_departure_event,
3651 	    vxlan_ifdetach_event, NULL, EVENTHANDLER_PRI_ANY);
3652 
3653 	struct if_clone_addreq req = {
3654 		.create_f = vxlan_clone_create,
3655 		.destroy_f = vxlan_clone_destroy,
3656 		.flags = IFC_F_AUTOUNIT,
3657 	};
3658 	vxlan_cloner = ifc_attach_cloner(vxlan_name, &req);
3659 }
3660 
3661 static void
3662 vxlan_unload(void)
3663 {
3664 
3665 	EVENTHANDLER_DEREGISTER(ifnet_departure_event,
3666 	    vxlan_ifdetach_event_tag);
3667 	ifc_detach_cloner(vxlan_cloner);
3668 	mtx_destroy(&vxlan_list_mtx);
3669 	MPASS(LIST_EMPTY(&vxlan_socket_list));
3670 }
3671 
3672 static int
3673 vxlan_modevent(module_t mod, int type, void *unused)
3674 {
3675 	int error;
3676 
3677 	error = 0;
3678 
3679 	switch (type) {
3680 	case MOD_LOAD:
3681 		vxlan_load();
3682 		break;
3683 	case MOD_UNLOAD:
3684 		vxlan_unload();
3685 		break;
3686 	default:
3687 		error = ENOTSUP;
3688 		break;
3689 	}
3690 
3691 	return (error);
3692 }
3693 
3694 static moduledata_t vxlan_mod = {
3695 	"if_vxlan",
3696 	vxlan_modevent,
3697 	0
3698 };
3699 
3700 DECLARE_MODULE(if_vxlan, vxlan_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
3701 MODULE_VERSION(if_vxlan, 1);
3702