xref: /freebsd/sys/net/if_vxlan.c (revision 5ca8e32633c4ffbbcd6762e5888b6a4ba0708c6c)
1 /*-
2  * Copyright (c) 2014, Bryan Venteicher <bryanv@FreeBSD.org>
3  * All rights reserved.
4  * Copyright (c) 2020, Chelsio Communications.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice unmodified, this list of conditions, and the following
11  *    disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 #include "opt_inet.h"
29 #include "opt_inet6.h"
30 
31 #include <sys/param.h>
32 #include <sys/eventhandler.h>
33 #include <sys/kernel.h>
34 #include <sys/lock.h>
35 #include <sys/hash.h>
36 #include <sys/malloc.h>
37 #include <sys/mbuf.h>
38 #include <sys/module.h>
39 #include <sys/refcount.h>
40 #include <sys/rmlock.h>
41 #include <sys/priv.h>
42 #include <sys/proc.h>
43 #include <sys/queue.h>
44 #include <sys/sbuf.h>
45 #include <sys/socket.h>
46 #include <sys/socketvar.h>
47 #include <sys/sockio.h>
48 #include <sys/sysctl.h>
49 #include <sys/systm.h>
50 
51 #include <net/bpf.h>
52 #include <net/ethernet.h>
53 #include <net/if.h>
54 #include <net/if_var.h>
55 #include <net/if_private.h>
56 #include <net/if_clone.h>
57 #include <net/if_dl.h>
58 #include <net/if_media.h>
59 #include <net/if_types.h>
60 #include <net/if_vxlan.h>
61 #include <net/netisr.h>
62 #include <net/route.h>
63 #include <net/route/nhop.h>
64 
65 #include <netinet/in.h>
66 #include <netinet/in_systm.h>
67 #include <netinet/in_var.h>
68 #include <netinet/in_pcb.h>
69 #include <netinet/ip.h>
70 #include <netinet/ip6.h>
71 #include <netinet/ip_var.h>
72 #include <netinet/udp.h>
73 #include <netinet/udp_var.h>
74 #include <netinet/in_fib.h>
75 #include <netinet6/in6_fib.h>
76 
77 #include <netinet6/ip6_var.h>
78 #include <netinet6/scope6_var.h>
79 
80 struct vxlan_softc;
81 LIST_HEAD(vxlan_softc_head, vxlan_softc);
82 
83 struct sx vxlan_sx;
84 SX_SYSINIT(vxlan, &vxlan_sx, "VXLAN global start/stop lock");
85 
86 struct vxlan_socket_mc_info {
87 	union vxlan_sockaddr		 vxlsomc_saddr;
88 	union vxlan_sockaddr		 vxlsomc_gaddr;
89 	int				 vxlsomc_ifidx;
90 	int				 vxlsomc_users;
91 };
92 
93 /*
94  * The maximum MTU of an encapsulated Ethernet frame in an IPv4/UDP packet.
95  */
96 #define VXLAN_MAX_MTU	(IP_MAXPACKET - \
97 		60 /* Maximum IPv4 header len */ - \
98 		sizeof(struct udphdr) - \
99 		sizeof(struct vxlan_header) - \
100 		ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN)
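/*
 * For reference: IP_MAXPACKET is 65535, struct udphdr and struct
 * vxlan_header are 8 bytes each, and the Ethernet header, CRC and VLAN
 * tag account for 14 + 4 + 4 bytes, so VXLAN_MAX_MTU works out to
 * 65535 - 60 - 8 - 8 - 22 = 65437 bytes.
 */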
101 #define VXLAN_BASIC_IFCAPS (IFCAP_LINKSTATE | IFCAP_JUMBO_MTU)
102 
103 #define VXLAN_SO_MC_MAX_GROUPS		32
104 
105 #define VXLAN_SO_VNI_HASH_SHIFT		6
106 #define VXLAN_SO_VNI_HASH_SIZE		(1 << VXLAN_SO_VNI_HASH_SHIFT)
107 #define VXLAN_SO_VNI_HASH(_vni)		((_vni) % VXLAN_SO_VNI_HASH_SIZE)
108 
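/*
 * A vxlan_socket wraps a kernel UDP socket bound to a local address and
 * port.  One socket may be shared by several vxlan interfaces; the attached
 * softcs are hashed by their 24-bit network identifier (VNI) in
 * vxlso_vni_hash, and vxlso_mc tracks the multicast groups joined on
 * behalf of those interfaces.
 */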
109 struct vxlan_socket {
110 	struct socket			*vxlso_sock;
111 	struct rmlock			 vxlso_lock;
112 	u_int				 vxlso_refcnt;
113 	union vxlan_sockaddr		 vxlso_laddr;
114 	LIST_ENTRY(vxlan_socket)	 vxlso_entry;
115 	struct vxlan_softc_head		 vxlso_vni_hash[VXLAN_SO_VNI_HASH_SIZE];
116 	struct vxlan_socket_mc_info	 vxlso_mc[VXLAN_SO_MC_MAX_GROUPS];
117 };
118 
119 #define VXLAN_SO_RLOCK(_vso, _p)	rm_rlock(&(_vso)->vxlso_lock, (_p))
120 #define VXLAN_SO_RUNLOCK(_vso, _p)	rm_runlock(&(_vso)->vxlso_lock, (_p))
121 #define VXLAN_SO_WLOCK(_vso)		rm_wlock(&(_vso)->vxlso_lock)
122 #define VXLAN_SO_WUNLOCK(_vso)		rm_wunlock(&(_vso)->vxlso_lock)
123 #define VXLAN_SO_LOCK_ASSERT(_vso) \
124     rm_assert(&(_vso)->vxlso_lock, RA_LOCKED)
125 #define VXLAN_SO_LOCK_WASSERT(_vso) \
126     rm_assert(&(_vso)->vxlso_lock, RA_WLOCKED)
127 
128 #define VXLAN_SO_ACQUIRE(_vso)		refcount_acquire(&(_vso)->vxlso_refcnt)
129 #define VXLAN_SO_RELEASE(_vso)		refcount_release(&(_vso)->vxlso_refcnt)
130 
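/*
 * A forwarding table entry maps an inner Ethernet MAC address to the outer
 * IP address and UDP port of the remote tunnel endpoint.  Entries learned
 * from received traffic are dynamic and expire; entries installed
 * administratively, such as the default entry derived from the destination
 * address, are static.
 */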
131 struct vxlan_ftable_entry {
132 	LIST_ENTRY(vxlan_ftable_entry)	 vxlfe_hash;
133 	uint16_t			 vxlfe_flags;
134 	uint8_t				 vxlfe_mac[ETHER_ADDR_LEN];
135 	union vxlan_sockaddr		 vxlfe_raddr;
136 	time_t				 vxlfe_expire;
137 };
138 
139 #define VXLAN_FE_FLAG_DYNAMIC		0x01
140 #define VXLAN_FE_FLAG_STATIC		0x02
141 
142 #define VXLAN_FE_IS_DYNAMIC(_fe) \
143     ((_fe)->vxlfe_flags & VXLAN_FE_FLAG_DYNAMIC)
144 
145 #define VXLAN_SC_FTABLE_SHIFT		9
146 #define VXLAN_SC_FTABLE_SIZE		(1 << VXLAN_SC_FTABLE_SHIFT)
147 #define VXLAN_SC_FTABLE_MASK		(VXLAN_SC_FTABLE_SIZE - 1)
148 #define VXLAN_SC_FTABLE_HASH(_sc, _mac)	\
149     (vxlan_mac_hash(_sc, _mac) % VXLAN_SC_FTABLE_SIZE)
150 
151 LIST_HEAD(vxlan_ftable_head, vxlan_ftable_entry);
152 
153 struct vxlan_statistics {
154 	uint32_t	ftable_nospace;
155 	uint32_t	ftable_lock_upgrade_failed;
156 	counter_u64_t	txcsum;
157 	counter_u64_t	tso;
158 	counter_u64_t	rxcsum;
159 };
160 
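/*
 * Per-interface state: the configured VNI, local and remote addresses,
 * source port range and TTL, a reference to the (possibly shared)
 * vxlan_socket, the MAC forwarding table, and the multicast state used
 * when the destination is a group address.
 */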
161 struct vxlan_softc {
162 	struct ifnet			*vxl_ifp;
163 	int				 vxl_reqcap;
164 	u_int				 vxl_fibnum;
165 	struct vxlan_socket		*vxl_sock;
166 	uint32_t			 vxl_vni;
167 	union vxlan_sockaddr		 vxl_src_addr;
168 	union vxlan_sockaddr		 vxl_dst_addr;
169 	uint32_t			 vxl_flags;
170 #define VXLAN_FLAG_INIT		0x0001
171 #define VXLAN_FLAG_TEARDOWN	0x0002
172 #define VXLAN_FLAG_LEARN	0x0004
173 #define VXLAN_FLAG_USER_MTU	0x0008
174 
175 	uint32_t			 vxl_port_hash_key;
176 	uint16_t			 vxl_min_port;
177 	uint16_t			 vxl_max_port;
178 	uint8_t				 vxl_ttl;
179 
180 	/* Lookup table from MAC address to forwarding entry. */
181 	uint32_t			 vxl_ftable_cnt;
182 	uint32_t			 vxl_ftable_max;
183 	uint32_t			 vxl_ftable_timeout;
184 	uint32_t			 vxl_ftable_hash_key;
185 	struct vxlan_ftable_head	*vxl_ftable;
186 
187 	/* Derived from vxl_dst_addr. */
188 	struct vxlan_ftable_entry	 vxl_default_fe;
189 
190 	struct ip_moptions		*vxl_im4o;
191 	struct ip6_moptions		*vxl_im6o;
192 
193 	struct rmlock			 vxl_lock;
194 	volatile u_int			 vxl_refcnt;
195 
196 	int				 vxl_unit;
197 	int				 vxl_vso_mc_index;
198 	struct vxlan_statistics		 vxl_stats;
199 	struct sysctl_oid		*vxl_sysctl_node;
200 	struct sysctl_ctx_list		 vxl_sysctl_ctx;
201 	struct callout			 vxl_callout;
202 	struct ether_addr		 vxl_hwaddr;
203 	int				 vxl_mc_ifindex;
204 	struct ifnet			*vxl_mc_ifp;
205 	struct ifmedia 			 vxl_media;
206 	char				 vxl_mc_ifname[IFNAMSIZ];
207 	LIST_ENTRY(vxlan_softc)		 vxl_entry;
208 	LIST_ENTRY(vxlan_softc)		 vxl_ifdetach_list;
209 
210 	/* For rate limiting errors on the tx fast path. */
211 	struct timeval err_time;
212 	int err_pps;
213 };
214 
215 #define VXLAN_RLOCK(_sc, _p)	rm_rlock(&(_sc)->vxl_lock, (_p))
216 #define VXLAN_RUNLOCK(_sc, _p)	rm_runlock(&(_sc)->vxl_lock, (_p))
217 #define VXLAN_WLOCK(_sc)	rm_wlock(&(_sc)->vxl_lock)
218 #define VXLAN_WUNLOCK(_sc)	rm_wunlock(&(_sc)->vxl_lock)
219 #define VXLAN_LOCK_WOWNED(_sc)	rm_wowned(&(_sc)->vxl_lock)
220 #define VXLAN_LOCK_ASSERT(_sc)	rm_assert(&(_sc)->vxl_lock, RA_LOCKED)
221 #define VXLAN_LOCK_WASSERT(_sc) rm_assert(&(_sc)->vxl_lock, RA_WLOCKED)
222 #define VXLAN_UNLOCK(_sc, _p) do {		\
223     if (VXLAN_LOCK_WOWNED(_sc))			\
224 	VXLAN_WUNLOCK(_sc);			\
225     else					\
226 	VXLAN_RUNLOCK(_sc, _p);			\
227 } while (0)
228 
229 #define VXLAN_ACQUIRE(_sc)	refcount_acquire(&(_sc)->vxl_refcnt)
230 #define VXLAN_RELEASE(_sc)	refcount_release(&(_sc)->vxl_refcnt)
231 
232 #define	satoconstsin(sa)	((const struct sockaddr_in *)(sa))
233 #define	satoconstsin6(sa)	((const struct sockaddr_in6 *)(sa))
234 
235 struct vxlanudphdr {
236 	struct udphdr		vxlh_udp;
237 	struct vxlan_header	vxlh_hdr;
238 } __packed;
239 
240 static int	vxlan_ftable_addr_cmp(const uint8_t *, const uint8_t *);
241 static void	vxlan_ftable_init(struct vxlan_softc *);
242 static void	vxlan_ftable_fini(struct vxlan_softc *);
243 static void	vxlan_ftable_flush(struct vxlan_softc *, int);
244 static void	vxlan_ftable_expire(struct vxlan_softc *);
245 static int	vxlan_ftable_update_locked(struct vxlan_softc *,
246 		    const union vxlan_sockaddr *, const uint8_t *,
247 		    struct rm_priotracker *);
248 static int	vxlan_ftable_learn(struct vxlan_softc *,
249 		    const struct sockaddr *, const uint8_t *);
250 static int	vxlan_ftable_sysctl_dump(SYSCTL_HANDLER_ARGS);
251 
252 static struct vxlan_ftable_entry *
253 		vxlan_ftable_entry_alloc(void);
254 static void	vxlan_ftable_entry_free(struct vxlan_ftable_entry *);
255 static void	vxlan_ftable_entry_init(struct vxlan_softc *,
256 		    struct vxlan_ftable_entry *, const uint8_t *,
257 		    const struct sockaddr *, uint32_t);
258 static void	vxlan_ftable_entry_destroy(struct vxlan_softc *,
259 		    struct vxlan_ftable_entry *);
260 static int	vxlan_ftable_entry_insert(struct vxlan_softc *,
261 		    struct vxlan_ftable_entry *);
262 static struct vxlan_ftable_entry *
263 		vxlan_ftable_entry_lookup(struct vxlan_softc *,
264 		    const uint8_t *);
265 static void	vxlan_ftable_entry_dump(struct vxlan_ftable_entry *,
266 		    struct sbuf *);
267 
268 static struct vxlan_socket *
269 		vxlan_socket_alloc(const union vxlan_sockaddr *);
270 static void	vxlan_socket_destroy(struct vxlan_socket *);
271 static void	vxlan_socket_release(struct vxlan_socket *);
272 static struct vxlan_socket *
273 		vxlan_socket_lookup(union vxlan_sockaddr *vxlsa);
274 static void	vxlan_socket_insert(struct vxlan_socket *);
275 static int	vxlan_socket_init(struct vxlan_socket *, struct ifnet *);
276 static int	vxlan_socket_bind(struct vxlan_socket *, struct ifnet *);
277 static int	vxlan_socket_create(struct ifnet *, int,
278 		    const union vxlan_sockaddr *, struct vxlan_socket **);
279 static void	vxlan_socket_ifdetach(struct vxlan_socket *,
280 		    struct ifnet *, struct vxlan_softc_head *);
281 
282 static struct vxlan_socket *
283 		vxlan_socket_mc_lookup(const union vxlan_sockaddr *);
284 static int	vxlan_sockaddr_mc_info_match(
285 		    const struct vxlan_socket_mc_info *,
286 		    const union vxlan_sockaddr *,
287 		    const union vxlan_sockaddr *, int);
288 static int	vxlan_socket_mc_join_group(struct vxlan_socket *,
289 		    const union vxlan_sockaddr *, const union vxlan_sockaddr *,
290 		    int *, union vxlan_sockaddr *);
291 static int	vxlan_socket_mc_leave_group(struct vxlan_socket *,
292 		    const union vxlan_sockaddr *,
293 		    const union vxlan_sockaddr *, int);
294 static int	vxlan_socket_mc_add_group(struct vxlan_socket *,
295 		    const union vxlan_sockaddr *, const union vxlan_sockaddr *,
296 		    int, int *);
297 static void	vxlan_socket_mc_release_group_by_idx(struct vxlan_socket *,
298 		    int);
299 
300 static struct vxlan_softc *
301 		vxlan_socket_lookup_softc_locked(struct vxlan_socket *,
302 		    uint32_t);
303 static struct vxlan_softc *
304 		vxlan_socket_lookup_softc(struct vxlan_socket *, uint32_t);
305 static int	vxlan_socket_insert_softc(struct vxlan_socket *,
306 		    struct vxlan_softc *);
307 static void	vxlan_socket_remove_softc(struct vxlan_socket *,
308 		    struct vxlan_softc *);
309 
310 static struct ifnet *
311 		vxlan_multicast_if_ref(struct vxlan_softc *, int);
312 static void	vxlan_free_multicast(struct vxlan_softc *);
313 static int	vxlan_setup_multicast_interface(struct vxlan_softc *);
314 
315 static int	vxlan_setup_multicast(struct vxlan_softc *);
316 static int	vxlan_setup_socket(struct vxlan_softc *);
317 #ifdef INET6
318 static void	vxlan_setup_zero_checksum_port(struct vxlan_softc *);
319 #endif
320 static void	vxlan_setup_interface_hdrlen(struct vxlan_softc *);
321 static int	vxlan_valid_init_config(struct vxlan_softc *);
322 static void	vxlan_init_wait(struct vxlan_softc *);
323 static void	vxlan_init_complete(struct vxlan_softc *);
324 static void	vxlan_init(void *);
325 static void	vxlan_release(struct vxlan_softc *);
326 static void	vxlan_teardown_wait(struct vxlan_softc *);
327 static void	vxlan_teardown_complete(struct vxlan_softc *);
328 static void	vxlan_teardown_locked(struct vxlan_softc *);
329 static void	vxlan_teardown(struct vxlan_softc *);
330 static void	vxlan_ifdetach(struct vxlan_softc *, struct ifnet *,
331 		    struct vxlan_softc_head *);
332 static void	vxlan_timer(void *);
333 
334 static int	vxlan_ctrl_get_config(struct vxlan_softc *, void *);
335 static int	vxlan_ctrl_set_vni(struct vxlan_softc *, void *);
336 static int	vxlan_ctrl_set_local_addr(struct vxlan_softc *, void *);
337 static int	vxlan_ctrl_set_remote_addr(struct vxlan_softc *, void *);
338 static int	vxlan_ctrl_set_local_port(struct vxlan_softc *, void *);
339 static int	vxlan_ctrl_set_remote_port(struct vxlan_softc *, void *);
340 static int	vxlan_ctrl_set_port_range(struct vxlan_softc *, void *);
341 static int	vxlan_ctrl_set_ftable_timeout(struct vxlan_softc *, void *);
342 static int	vxlan_ctrl_set_ftable_max(struct vxlan_softc *, void *);
343 static int	vxlan_ctrl_set_multicast_if(struct vxlan_softc *, void *);
344 static int	vxlan_ctrl_set_ttl(struct vxlan_softc *, void *);
345 static int	vxlan_ctrl_set_learn(struct vxlan_softc *, void *);
346 static int	vxlan_ctrl_ftable_entry_add(struct vxlan_softc *, void *);
347 static int	vxlan_ctrl_ftable_entry_rem(struct vxlan_softc *, void *);
348 static int	vxlan_ctrl_flush(struct vxlan_softc *, void *);
349 static int	vxlan_ioctl_drvspec(struct vxlan_softc *,
350 		    struct ifdrv *, int);
351 static int	vxlan_ioctl_ifflags(struct vxlan_softc *);
352 static int	vxlan_ioctl(struct ifnet *, u_long, caddr_t);
353 
354 #if defined(INET) || defined(INET6)
355 static uint16_t vxlan_pick_source_port(struct vxlan_softc *, struct mbuf *);
356 static void	vxlan_encap_header(struct vxlan_softc *, struct mbuf *,
357 		    int, uint16_t, uint16_t);
358 #endif
359 static int	vxlan_encap4(struct vxlan_softc *,
360 		    const union vxlan_sockaddr *, struct mbuf *);
361 static int	vxlan_encap6(struct vxlan_softc *,
362 		    const union vxlan_sockaddr *, struct mbuf *);
363 static int	vxlan_transmit(struct ifnet *, struct mbuf *);
364 static void	vxlan_qflush(struct ifnet *);
365 static bool	vxlan_rcv_udp_packet(struct mbuf *, int, struct inpcb *,
366 		    const struct sockaddr *, void *);
367 static int	vxlan_input(struct vxlan_socket *, uint32_t, struct mbuf **,
368 		    const struct sockaddr *);
369 
370 static int	vxlan_stats_alloc(struct vxlan_softc *);
371 static void	vxlan_stats_free(struct vxlan_softc *);
372 static void	vxlan_set_default_config(struct vxlan_softc *);
373 static int	vxlan_set_user_config(struct vxlan_softc *,
374 		     struct ifvxlanparam *);
375 static int	vxlan_set_reqcap(struct vxlan_softc *, struct ifnet *, int);
376 static void	vxlan_set_hwcaps(struct vxlan_softc *);
377 static int	vxlan_clone_create(struct if_clone *, char *, size_t,
378 		    struct ifc_data *, struct ifnet **);
379 static int	vxlan_clone_destroy(struct if_clone *, struct ifnet *, uint32_t);
380 
381 static uint32_t vxlan_mac_hash(struct vxlan_softc *, const uint8_t *);
382 static int	vxlan_media_change(struct ifnet *);
383 static void	vxlan_media_status(struct ifnet *, struct ifmediareq *);
384 
385 static int	vxlan_sockaddr_cmp(const union vxlan_sockaddr *,
386 		    const struct sockaddr *);
387 static void	vxlan_sockaddr_copy(union vxlan_sockaddr *,
388 		    const struct sockaddr *);
389 static int	vxlan_sockaddr_in_equal(const union vxlan_sockaddr *,
390 		    const struct sockaddr *);
391 static void	vxlan_sockaddr_in_copy(union vxlan_sockaddr *,
392 		    const struct sockaddr *);
393 static int	vxlan_sockaddr_supported(const union vxlan_sockaddr *, int);
394 static int	vxlan_sockaddr_in_any(const union vxlan_sockaddr *);
395 static int	vxlan_sockaddr_in_multicast(const union vxlan_sockaddr *);
396 static int	vxlan_sockaddr_in6_embedscope(union vxlan_sockaddr *);
397 
398 static int	vxlan_can_change_config(struct vxlan_softc *);
399 static int	vxlan_check_vni(uint32_t);
400 static int	vxlan_check_ttl(int);
401 static int	vxlan_check_ftable_timeout(uint32_t);
402 static int	vxlan_check_ftable_max(uint32_t);
403 
404 static void	vxlan_sysctl_setup(struct vxlan_softc *);
405 static void	vxlan_sysctl_destroy(struct vxlan_softc *);
406 static int	vxlan_tunable_int(struct vxlan_softc *, const char *, int);
407 
408 static void	vxlan_ifdetach_event(void *, struct ifnet *);
409 static void	vxlan_load(void);
410 static void	vxlan_unload(void);
411 static int	vxlan_modevent(module_t, int, void *);
412 
413 static const char vxlan_name[] = "vxlan";
414 static MALLOC_DEFINE(M_VXLAN, vxlan_name,
415     "Virtual eXtensible LAN Interface");
416 static struct if_clone *vxlan_cloner;
417 
418 static struct mtx vxlan_list_mtx;
419 #define VXLAN_LIST_LOCK()	mtx_lock(&vxlan_list_mtx)
420 #define VXLAN_LIST_UNLOCK()	mtx_unlock(&vxlan_list_mtx)
421 
422 static LIST_HEAD(, vxlan_socket) vxlan_socket_list;
423 
424 static eventhandler_tag vxlan_ifdetach_event_tag;
425 
426 SYSCTL_DECL(_net_link);
427 SYSCTL_NODE(_net_link, OID_AUTO, vxlan, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
428     "Virtual eXtensible Local Area Network");
429 
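/*
 * Loader tunables.  vxlan_legacy_port selects the historical (pre-IANA)
 * default UDP port for newly created interfaces (consumed by
 * vxlan_set_default_config() later in this file); vxlan_reuse_port causes
 * SO_REUSEPORT to be set on tunnel sockets (see vxlan_socket_init()).
 */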
430 static int vxlan_legacy_port = 0;
431 TUNABLE_INT("net.link.vxlan.legacy_port", &vxlan_legacy_port);
432 static int vxlan_reuse_port = 0;
433 TUNABLE_INT("net.link.vxlan.reuse_port", &vxlan_reuse_port);
434 
435 /* Default maximum number of addresses in the forwarding table. */
436 #ifndef VXLAN_FTABLE_MAX
437 #define VXLAN_FTABLE_MAX	2000
438 #endif
439 
440 /* Timeout (in seconds) of addresses learned in the forwarding table. */
441 #ifndef VXLAN_FTABLE_TIMEOUT
442 #define VXLAN_FTABLE_TIMEOUT	(20 * 60)
443 #endif
444 
445 /*
446  * Maximum timeout (in seconds) of addresses learned in the forwarding
447  * table.
448  */
449 #ifndef VXLAN_FTABLE_MAX_TIMEOUT
450 #define VXLAN_FTABLE_MAX_TIMEOUT	(60 * 60 * 24)
451 #endif
452 
453 /* Number of seconds between pruning attempts of the forwarding table. */
454 #ifndef VXLAN_FTABLE_PRUNE
455 #define VXLAN_FTABLE_PRUNE	(5 * 60)
456 #endif
457 
458 static int vxlan_ftable_prune_period = VXLAN_FTABLE_PRUNE;
459 
460 struct vxlan_control {
461 	int	(*vxlc_func)(struct vxlan_softc *, void *);
462 	int	vxlc_argsize;
463 	int	vxlc_flags;
464 #define VXLAN_CTRL_FLAG_COPYIN	0x01
465 #define VXLAN_CTRL_FLAG_COPYOUT	0x02
466 #define VXLAN_CTRL_FLAG_SUSER	0x04
467 };
468 
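/*
 * Dispatch table for the driver-specific ioctls (SIOCGDRVSPEC/SIOCSDRVSPEC,
 * typically issued through ifconfig(8)'s vxlan options) handled by
 * vxlan_ioctl_drvspec().  Each entry names the handler, the expected
 * argument size, and flags selecting copyin/copyout of the argument and
 * whether the caller must be privileged.  For example, changing the VNI of
 * an existing interface with "ifconfig vxlan0 vxlanid 100" ends up in the
 * VXLAN_CMD_SET_VNI handler below.
 */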
469 static const struct vxlan_control vxlan_control_table[] = {
470 	[VXLAN_CMD_GET_CONFIG] =
471 	    {	vxlan_ctrl_get_config, sizeof(struct ifvxlancfg),
472 		VXLAN_CTRL_FLAG_COPYOUT
473 	    },
474 
475 	[VXLAN_CMD_SET_VNI] =
476 	    {   vxlan_ctrl_set_vni, sizeof(struct ifvxlancmd),
477 		VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
478 	    },
479 
480 	[VXLAN_CMD_SET_LOCAL_ADDR] =
481 	    {   vxlan_ctrl_set_local_addr, sizeof(struct ifvxlancmd),
482 		VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
483 	    },
484 
485 	[VXLAN_CMD_SET_REMOTE_ADDR] =
486 	    {   vxlan_ctrl_set_remote_addr, sizeof(struct ifvxlancmd),
487 		VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
488 	    },
489 
490 	[VXLAN_CMD_SET_LOCAL_PORT] =
491 	    {   vxlan_ctrl_set_local_port, sizeof(struct ifvxlancmd),
492 		VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
493 	    },
494 
495 	[VXLAN_CMD_SET_REMOTE_PORT] =
496 	    {   vxlan_ctrl_set_remote_port, sizeof(struct ifvxlancmd),
497 		VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
498 	    },
499 
500 	[VXLAN_CMD_SET_PORT_RANGE] =
501 	    {   vxlan_ctrl_set_port_range, sizeof(struct ifvxlancmd),
502 		VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
503 	    },
504 
505 	[VXLAN_CMD_SET_FTABLE_TIMEOUT] =
506 	    {	vxlan_ctrl_set_ftable_timeout, sizeof(struct ifvxlancmd),
507 		VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
508 	    },
509 
510 	[VXLAN_CMD_SET_FTABLE_MAX] =
511 	    {	vxlan_ctrl_set_ftable_max, sizeof(struct ifvxlancmd),
512 		VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
513 	    },
514 
515 	[VXLAN_CMD_SET_MULTICAST_IF] =
516 	    {	vxlan_ctrl_set_multicast_if, sizeof(struct ifvxlancmd),
517 		VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
518 	    },
519 
520 	[VXLAN_CMD_SET_TTL] =
521 	    {	vxlan_ctrl_set_ttl, sizeof(struct ifvxlancmd),
522 		VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
523 	    },
524 
525 	[VXLAN_CMD_SET_LEARN] =
526 	    {	vxlan_ctrl_set_learn, sizeof(struct ifvxlancmd),
527 		VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
528 	    },
529 
530 	[VXLAN_CMD_FTABLE_ENTRY_ADD] =
531 	    {	vxlan_ctrl_ftable_entry_add, sizeof(struct ifvxlancmd),
532 		VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
533 	    },
534 
535 	[VXLAN_CMD_FTABLE_ENTRY_REM] =
536 	    {	vxlan_ctrl_ftable_entry_rem, sizeof(struct ifvxlancmd),
537 		VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
538 	    },
539 
540 	[VXLAN_CMD_FLUSH] =
541 	    {   vxlan_ctrl_flush, sizeof(struct ifvxlancmd),
542 		VXLAN_CTRL_FLAG_COPYIN | VXLAN_CTRL_FLAG_SUSER,
543 	    },
544 };
545 
546 static const int vxlan_control_table_size = nitems(vxlan_control_table);
547 
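/*
 * Byte-wise comparison of two MAC addresses.  The result is used to keep
 * each forwarding table hash bucket sorted (largest address first), which
 * lets vxlan_ftable_entry_lookup() stop early once it has passed the point
 * where the address would be.
 */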
548 static int
549 vxlan_ftable_addr_cmp(const uint8_t *a, const uint8_t *b)
550 {
551 	int i, d;
552 
553 	for (i = 0, d = 0; i < ETHER_ADDR_LEN && d == 0; i++)
554 		d = ((int)a[i]) - ((int)b[i]);
555 
556 	return (d);
557 }
558 
559 static void
560 vxlan_ftable_init(struct vxlan_softc *sc)
561 {
562 	int i;
563 
564 	sc->vxl_ftable = malloc(sizeof(struct vxlan_ftable_head) *
565 	    VXLAN_SC_FTABLE_SIZE, M_VXLAN, M_ZERO | M_WAITOK);
566 
567 	for (i = 0; i < VXLAN_SC_FTABLE_SIZE; i++)
568 		LIST_INIT(&sc->vxl_ftable[i]);
569 	sc->vxl_ftable_hash_key = arc4random();
570 }
571 
572 static void
573 vxlan_ftable_fini(struct vxlan_softc *sc)
574 {
575 	int i;
576 
577 	for (i = 0; i < VXLAN_SC_FTABLE_SIZE; i++) {
578 		KASSERT(LIST_EMPTY(&sc->vxl_ftable[i]),
579 		    ("%s: vxlan %p ftable[%d] not empty", __func__, sc, i));
580 	}
581 	MPASS(sc->vxl_ftable_cnt == 0);
582 
583 	free(sc->vxl_ftable, M_VXLAN);
584 	sc->vxl_ftable = NULL;
585 }
586 
587 static void
588 vxlan_ftable_flush(struct vxlan_softc *sc, int all)
589 {
590 	struct vxlan_ftable_entry *fe, *tfe;
591 	int i;
592 
593 	for (i = 0; i < VXLAN_SC_FTABLE_SIZE; i++) {
594 		LIST_FOREACH_SAFE(fe, &sc->vxl_ftable[i], vxlfe_hash, tfe) {
595 			if (all || VXLAN_FE_IS_DYNAMIC(fe))
596 				vxlan_ftable_entry_destroy(sc, fe);
597 		}
598 	}
599 }
600 
601 static void
602 vxlan_ftable_expire(struct vxlan_softc *sc)
603 {
604 	struct vxlan_ftable_entry *fe, *tfe;
605 	int i;
606 
607 	VXLAN_LOCK_WASSERT(sc);
608 
609 	for (i = 0; i < VXLAN_SC_FTABLE_SIZE; i++) {
610 		LIST_FOREACH_SAFE(fe, &sc->vxl_ftable[i], vxlfe_hash, tfe) {
611 			if (VXLAN_FE_IS_DYNAMIC(fe) &&
612 			    time_uptime >= fe->vxlfe_expire)
613 				vxlan_ftable_entry_destroy(sc, fe);
614 		}
615 	}
616 }
617 
618 static int
619 vxlan_ftable_update_locked(struct vxlan_softc *sc,
620     const union vxlan_sockaddr *vxlsa, const uint8_t *mac,
621     struct rm_priotracker *tracker)
622 {
623 	struct vxlan_ftable_entry *fe;
624 	int error __unused;
625 
626 	VXLAN_LOCK_ASSERT(sc);
627 
628 again:
629 	/*
630 	 * A forwarding entry for this MAC address might already exist. If
631 	 * so, update it, otherwise create a new one. We may have to upgrade
632 	 * the lock if we have to change or create an entry.
633 	 */
634 	fe = vxlan_ftable_entry_lookup(sc, mac);
635 	if (fe != NULL) {
636 		fe->vxlfe_expire = time_uptime + sc->vxl_ftable_timeout;
637 
638 		if (!VXLAN_FE_IS_DYNAMIC(fe) ||
639 		    vxlan_sockaddr_in_equal(&fe->vxlfe_raddr, &vxlsa->sa))
640 			return (0);
641 		if (!VXLAN_LOCK_WOWNED(sc)) {
642 			VXLAN_RUNLOCK(sc, tracker);
643 			VXLAN_WLOCK(sc);
644 			sc->vxl_stats.ftable_lock_upgrade_failed++;
645 			goto again;
646 		}
647 		vxlan_sockaddr_in_copy(&fe->vxlfe_raddr, &vxlsa->sa);
648 		return (0);
649 	}
650 
651 	if (!VXLAN_LOCK_WOWNED(sc)) {
652 		VXLAN_RUNLOCK(sc, tracker);
653 		VXLAN_WLOCK(sc);
654 		sc->vxl_stats.ftable_lock_upgrade_failed++;
655 		goto again;
656 	}
657 
658 	if (sc->vxl_ftable_cnt >= sc->vxl_ftable_max) {
659 		sc->vxl_stats.ftable_nospace++;
660 		return (ENOSPC);
661 	}
662 
663 	fe = vxlan_ftable_entry_alloc();
664 	if (fe == NULL)
665 		return (ENOMEM);
666 
667 	vxlan_ftable_entry_init(sc, fe, mac, &vxlsa->sa, VXLAN_FE_FLAG_DYNAMIC);
668 
669 	/* The prior lookup failed, so the insert should not. */
670 	error = vxlan_ftable_entry_insert(sc, fe);
671 	MPASS(error == 0);
672 
673 	return (0);
674 }
675 
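/*
 * Learn or refresh the mapping from an inner source MAC address to the
 * outer address of the remote endpoint that sent it; called from the
 * receive path when learning is enabled on the interface.
 */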
676 static int
677 vxlan_ftable_learn(struct vxlan_softc *sc, const struct sockaddr *sa,
678     const uint8_t *mac)
679 {
680 	struct rm_priotracker tracker;
681 	union vxlan_sockaddr vxlsa;
682 	int error;
683 
684 	/*
685 	 * The source port may be randomly selected by the remote host, so
686 	 * use the port of the default destination address.
687 	 */
688 	vxlan_sockaddr_copy(&vxlsa, sa);
689 	vxlsa.in4.sin_port = sc->vxl_dst_addr.in4.sin_port;
690 
691 	if (VXLAN_SOCKADDR_IS_IPV6(&vxlsa)) {
692 		error = vxlan_sockaddr_in6_embedscope(&vxlsa);
693 		if (error)
694 			return (error);
695 	}
696 
697 	VXLAN_RLOCK(sc, &tracker);
698 	error = vxlan_ftable_update_locked(sc, &vxlsa, mac, &tracker);
699 	VXLAN_UNLOCK(sc, &tracker);
700 
701 	return (error);
702 }
703 
704 static int
705 vxlan_ftable_sysctl_dump(SYSCTL_HANDLER_ARGS)
706 {
707 	struct rm_priotracker tracker;
708 	struct sbuf sb;
709 	struct vxlan_softc *sc;
710 	struct vxlan_ftable_entry *fe;
711 	size_t size;
712 	int i, error;
713 
714 	/*
715 	 * This is mostly intended for debugging during development. It is
716 	 * not practical to dump an entire large table this way.
717 	 */
718 
719 	sc = arg1;
720 	size = PAGE_SIZE;	/* Calculate later. */
721 
722 	sbuf_new(&sb, NULL, size, SBUF_FIXEDLEN);
723 	sbuf_putc(&sb, '\n');
724 
725 	VXLAN_RLOCK(sc, &tracker);
726 	for (i = 0; i < VXLAN_SC_FTABLE_SIZE; i++) {
727 		LIST_FOREACH(fe, &sc->vxl_ftable[i], vxlfe_hash) {
728 			if (sbuf_error(&sb) != 0)
729 				break;
730 			vxlan_ftable_entry_dump(fe, &sb);
731 		}
732 	}
733 	VXLAN_RUNLOCK(sc, &tracker);
734 
735 	if (sbuf_len(&sb) == 1)
736 		sbuf_setpos(&sb, 0);
737 
738 	sbuf_finish(&sb);
739 	error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
740 	sbuf_delete(&sb);
741 
742 	return (error);
743 }
744 
745 static struct vxlan_ftable_entry *
746 vxlan_ftable_entry_alloc(void)
747 {
748 	struct vxlan_ftable_entry *fe;
749 
750 	fe = malloc(sizeof(*fe), M_VXLAN, M_ZERO | M_NOWAIT);
751 
752 	return (fe);
753 }
754 
755 static void
756 vxlan_ftable_entry_free(struct vxlan_ftable_entry *fe)
757 {
758 
759 	free(fe, M_VXLAN);
760 }
761 
762 static void
763 vxlan_ftable_entry_init(struct vxlan_softc *sc, struct vxlan_ftable_entry *fe,
764     const uint8_t *mac, const struct sockaddr *sa, uint32_t flags)
765 {
766 
767 	fe->vxlfe_flags = flags;
768 	fe->vxlfe_expire = time_uptime + sc->vxl_ftable_timeout;
769 	memcpy(fe->vxlfe_mac, mac, ETHER_ADDR_LEN);
770 	vxlan_sockaddr_copy(&fe->vxlfe_raddr, sa);
771 }
772 
773 static void
774 vxlan_ftable_entry_destroy(struct vxlan_softc *sc,
775     struct vxlan_ftable_entry *fe)
776 {
777 
778 	sc->vxl_ftable_cnt--;
779 	LIST_REMOVE(fe, vxlfe_hash);
780 	vxlan_ftable_entry_free(fe);
781 }
782 
783 static int
784 vxlan_ftable_entry_insert(struct vxlan_softc *sc,
785     struct vxlan_ftable_entry *fe)
786 {
787 	struct vxlan_ftable_entry *lfe;
788 	uint32_t hash;
789 	int dir;
790 
791 	VXLAN_LOCK_WASSERT(sc);
792 	hash = VXLAN_SC_FTABLE_HASH(sc, fe->vxlfe_mac);
793 
794 	lfe = LIST_FIRST(&sc->vxl_ftable[hash]);
795 	if (lfe == NULL) {
796 		LIST_INSERT_HEAD(&sc->vxl_ftable[hash], fe, vxlfe_hash);
797 		goto out;
798 	}
799 
800 	do {
801 		dir = vxlan_ftable_addr_cmp(fe->vxlfe_mac, lfe->vxlfe_mac);
802 		if (dir == 0)
803 			return (EEXIST);
804 		if (dir > 0) {
805 			LIST_INSERT_BEFORE(lfe, fe, vxlfe_hash);
806 			goto out;
807 		} else if (LIST_NEXT(lfe, vxlfe_hash) == NULL) {
808 			LIST_INSERT_AFTER(lfe, fe, vxlfe_hash);
809 			goto out;
810 		} else
811 			lfe = LIST_NEXT(lfe, vxlfe_hash);
812 	} while (lfe != NULL);
813 
814 out:
815 	sc->vxl_ftable_cnt++;
816 
817 	return (0);
818 }
819 
820 static struct vxlan_ftable_entry *
821 vxlan_ftable_entry_lookup(struct vxlan_softc *sc, const uint8_t *mac)
822 {
823 	struct vxlan_ftable_entry *fe;
824 	uint32_t hash;
825 	int dir;
826 
827 	VXLAN_LOCK_ASSERT(sc);
828 	hash = VXLAN_SC_FTABLE_HASH(sc, mac);
829 
830 	LIST_FOREACH(fe, &sc->vxl_ftable[hash], vxlfe_hash) {
831 		dir = vxlan_ftable_addr_cmp(mac, fe->vxlfe_mac);
832 		if (dir == 0)
833 			return (fe);
834 		if (dir > 0)
835 			break;
836 	}
837 
838 	return (NULL);
839 }
840 
841 static void
842 vxlan_ftable_entry_dump(struct vxlan_ftable_entry *fe, struct sbuf *sb)
843 {
844 	char buf[64];
845 	const union vxlan_sockaddr *sa;
846 	const void *addr;
847 	int i, len, af, width;
848 
849 	sa = &fe->vxlfe_raddr;
850 	af = sa->sa.sa_family;
851 	len = sbuf_len(sb);
852 
853 	sbuf_printf(sb, "%c 0x%02X ", VXLAN_FE_IS_DYNAMIC(fe) ? 'D' : 'S',
854 	    fe->vxlfe_flags);
855 
856 	for (i = 0; i < ETHER_ADDR_LEN - 1; i++)
857 		sbuf_printf(sb, "%02X:", fe->vxlfe_mac[i]);
858 	sbuf_printf(sb, "%02X ", fe->vxlfe_mac[i]);
859 
860 	if (af == AF_INET) {
861 		addr = &sa->in4.sin_addr;
862 		width = INET_ADDRSTRLEN - 1;
863 	} else {
864 		addr = &sa->in6.sin6_addr;
865 		width = INET6_ADDRSTRLEN - 1;
866 	}
867 	inet_ntop(af, addr, buf, sizeof(buf));
868 	sbuf_printf(sb, "%*s ", width, buf);
869 
870 	sbuf_printf(sb, "%08jd", (intmax_t)fe->vxlfe_expire);
871 
872 	sbuf_putc(sb, '\n');
873 
874 	/* Truncate a partial line. */
875 	if (sbuf_error(sb) != 0)
876 		sbuf_setpos(sb, len);
877 }
878 
879 static struct vxlan_socket *
880 vxlan_socket_alloc(const union vxlan_sockaddr *sa)
881 {
882 	struct vxlan_socket *vso;
883 	int i;
884 
885 	vso = malloc(sizeof(*vso), M_VXLAN, M_WAITOK | M_ZERO);
886 	rm_init(&vso->vxlso_lock, "vxlansorm");
887 	refcount_init(&vso->vxlso_refcnt, 0);
888 	for (i = 0; i < VXLAN_SO_VNI_HASH_SIZE; i++)
889 		LIST_INIT(&vso->vxlso_vni_hash[i]);
890 	vso->vxlso_laddr = *sa;
891 
892 	return (vso);
893 }
894 
895 static void
896 vxlan_socket_destroy(struct vxlan_socket *vso)
897 {
898 	struct socket *so;
899 #ifdef INVARIANTS
900 	int i;
901 	struct vxlan_socket_mc_info *mc;
902 
903 	for (i = 0; i < VXLAN_SO_MC_MAX_GROUPS; i++) {
904 		mc = &vso->vxlso_mc[i];
905 		KASSERT(mc->vxlsomc_gaddr.sa.sa_family == AF_UNSPEC,
906 		    ("%s: socket %p mc[%d] still has address",
907 		     __func__, vso, i));
908 	}
909 
910 	for (i = 0; i < VXLAN_SO_VNI_HASH_SIZE; i++) {
911 		KASSERT(LIST_EMPTY(&vso->vxlso_vni_hash[i]),
912 		    ("%s: socket %p vni_hash[%d] not empty",
913 		     __func__, vso, i));
914 	}
915 #endif
916 	so = vso->vxlso_sock;
917 	if (so != NULL) {
918 		vso->vxlso_sock = NULL;
919 		soclose(so);
920 	}
921 
922 	rm_destroy(&vso->vxlso_lock);
923 	free(vso, M_VXLAN);
924 }
925 
926 static void
927 vxlan_socket_release(struct vxlan_socket *vso)
928 {
929 	int destroy;
930 
931 	VXLAN_LIST_LOCK();
932 	destroy = VXLAN_SO_RELEASE(vso);
933 	if (destroy != 0)
934 		LIST_REMOVE(vso, vxlso_entry);
935 	VXLAN_LIST_UNLOCK();
936 
937 	if (destroy != 0)
938 		vxlan_socket_destroy(vso);
939 }
940 
941 static struct vxlan_socket *
942 vxlan_socket_lookup(union vxlan_sockaddr *vxlsa)
943 {
944 	struct vxlan_socket *vso;
945 
946 	VXLAN_LIST_LOCK();
947 	LIST_FOREACH(vso, &vxlan_socket_list, vxlso_entry) {
948 		if (vxlan_sockaddr_cmp(&vso->vxlso_laddr, &vxlsa->sa) == 0) {
949 			VXLAN_SO_ACQUIRE(vso);
950 			break;
951 		}
952 	}
953 	VXLAN_LIST_UNLOCK();
954 
955 	return (vso);
956 }
957 
958 static void
959 vxlan_socket_insert(struct vxlan_socket *vso)
960 {
961 
962 	VXLAN_LIST_LOCK();
963 	VXLAN_SO_ACQUIRE(vso);
964 	LIST_INSERT_HEAD(&vxlan_socket_list, vso, vxlso_entry);
965 	VXLAN_LIST_UNLOCK();
966 }
967 
968 static int
969 vxlan_socket_init(struct vxlan_socket *vso, struct ifnet *ifp)
970 {
971 	struct thread *td;
972 	int error;
973 
974 	td = curthread;
975 
976 	error = socreate(vso->vxlso_laddr.sa.sa_family, &vso->vxlso_sock,
977 	    SOCK_DGRAM, IPPROTO_UDP, td->td_ucred, td);
978 	if (error) {
979 		if_printf(ifp, "cannot create socket: %d\n", error);
980 		return (error);
981 	}
982 
983 	error = udp_set_kernel_tunneling(vso->vxlso_sock,
984 	    vxlan_rcv_udp_packet, NULL, vso);
985 	if (error) {
986 		if_printf(ifp, "cannot set tunneling function: %d\n", error);
987 		return (error);
988 	}
989 
990 	if (vxlan_reuse_port != 0) {
991 		struct sockopt sopt;
992 		int val = 1;
993 
994 		bzero(&sopt, sizeof(sopt));
995 		sopt.sopt_dir = SOPT_SET;
996 		sopt.sopt_level = IPPROTO_IP;
997 		sopt.sopt_name = SO_REUSEPORT;
998 		sopt.sopt_val = &val;
999 		sopt.sopt_valsize = sizeof(val);
1000 		error = sosetopt(vso->vxlso_sock, &sopt);
1001 		if (error) {
1002 			if_printf(ifp,
1003 			    "cannot set REUSEPORT socket opt: %d\n", error);
1004 			return (error);
1005 		}
1006 	}
1007 
1008 	return (0);
1009 }
1010 
1011 static int
1012 vxlan_socket_bind(struct vxlan_socket *vso, struct ifnet *ifp)
1013 {
1014 	union vxlan_sockaddr laddr;
1015 	struct thread *td;
1016 	int error;
1017 
1018 	td = curthread;
1019 	laddr = vso->vxlso_laddr;
1020 
1021 	error = sobind(vso->vxlso_sock, &laddr.sa, td);
1022 	if (error) {
1023 		if (error != EADDRINUSE)
1024 			if_printf(ifp, "cannot bind socket: %d\n", error);
1025 		return (error);
1026 	}
1027 
1028 	return (0);
1029 }
1030 
1031 static int
1032 vxlan_socket_create(struct ifnet *ifp, int multicast,
1033     const union vxlan_sockaddr *saddr, struct vxlan_socket **vsop)
1034 {
1035 	union vxlan_sockaddr laddr;
1036 	struct vxlan_socket *vso;
1037 	int error;
1038 
1039 	laddr = *saddr;
1040 
1041 	/*
1042 	 * If this socket will receive multicast, bind only the local port;
1043 	 * the local address is forced to the wildcard.
1044 	 */
1045 	if (multicast != 0) {
1046 		if (VXLAN_SOCKADDR_IS_IPV4(&laddr))
1047 			laddr.in4.sin_addr.s_addr = INADDR_ANY;
1048 #ifdef INET6
1049 		else
1050 			laddr.in6.sin6_addr = in6addr_any;
1051 #endif
1052 	}
1053 
1054 	vso = vxlan_socket_alloc(&laddr);
1055 	if (vso == NULL)
1056 		return (ENOMEM);
1057 
1058 	error = vxlan_socket_init(vso, ifp);
1059 	if (error)
1060 		goto fail;
1061 
1062 	error = vxlan_socket_bind(vso, ifp);
1063 	if (error)
1064 		goto fail;
1065 
1066 	/*
1067 	 * There is a small window between the bind completing and the
1068 	 * socket being inserted into the global list, during which a
1069 	 * concurrent create may fail.  Let's not worry about that for now.
1070 	 */
1071 	vxlan_socket_insert(vso);
1072 	*vsop = vso;
1073 
1074 	return (0);
1075 
1076 fail:
1077 	vxlan_socket_destroy(vso);
1078 
1079 	return (error);
1080 }
1081 
1082 static void
1083 vxlan_socket_ifdetach(struct vxlan_socket *vso, struct ifnet *ifp,
1084     struct vxlan_softc_head *list)
1085 {
1086 	struct rm_priotracker tracker;
1087 	struct vxlan_softc *sc;
1088 	int i;
1089 
1090 	VXLAN_SO_RLOCK(vso, &tracker);
1091 	for (i = 0; i < VXLAN_SO_VNI_HASH_SIZE; i++) {
1092 		LIST_FOREACH(sc, &vso->vxlso_vni_hash[i], vxl_entry)
1093 			vxlan_ifdetach(sc, ifp, list);
1094 	}
1095 	VXLAN_SO_RUNLOCK(vso, &tracker);
1096 }
1097 
1098 static struct vxlan_socket *
1099 vxlan_socket_mc_lookup(const union vxlan_sockaddr *vxlsa)
1100 {
1101 	union vxlan_sockaddr laddr;
1102 	struct vxlan_socket *vso;
1103 
1104 	laddr = *vxlsa;
1105 
1106 	if (VXLAN_SOCKADDR_IS_IPV4(&laddr))
1107 		laddr.in4.sin_addr.s_addr = INADDR_ANY;
1108 #ifdef INET6
1109 	else
1110 		laddr.in6.sin6_addr = in6addr_any;
1111 #endif
1112 
1113 	vso = vxlan_socket_lookup(&laddr);
1114 
1115 	return (vso);
1116 }
1117 
1118 static int
1119 vxlan_sockaddr_mc_info_match(const struct vxlan_socket_mc_info *mc,
1120     const union vxlan_sockaddr *group, const union vxlan_sockaddr *local,
1121     int ifidx)
1122 {
1123 
1124 	if (!vxlan_sockaddr_in_any(local) &&
1125 	    !vxlan_sockaddr_in_equal(&mc->vxlsomc_saddr, &local->sa))
1126 		return (0);
1127 	if (!vxlan_sockaddr_in_equal(&mc->vxlsomc_gaddr, &group->sa))
1128 		return (0);
1129 	if (ifidx != 0 && ifidx != mc->vxlsomc_ifidx)
1130 		return (0);
1131 
1132 	return (1);
1133 }
1134 
1135 static int
1136 vxlan_socket_mc_join_group(struct vxlan_socket *vso,
1137     const union vxlan_sockaddr *group, const union vxlan_sockaddr *local,
1138     int *ifidx, union vxlan_sockaddr *source)
1139 {
1140 	struct sockopt sopt;
1141 	int error;
1142 
1143 	*source = *local;
1144 
1145 	if (VXLAN_SOCKADDR_IS_IPV4(group)) {
1146 		struct ip_mreq mreq;
1147 
1148 		mreq.imr_multiaddr = group->in4.sin_addr;
1149 		mreq.imr_interface = local->in4.sin_addr;
1150 
1151 		bzero(&sopt, sizeof(sopt));
1152 		sopt.sopt_dir = SOPT_SET;
1153 		sopt.sopt_level = IPPROTO_IP;
1154 		sopt.sopt_name = IP_ADD_MEMBERSHIP;
1155 		sopt.sopt_val = &mreq;
1156 		sopt.sopt_valsize = sizeof(mreq);
1157 		error = sosetopt(vso->vxlso_sock, &sopt);
1158 		if (error)
1159 			return (error);
1160 
1161 		/*
1162 		 * BMV: Ideally, there would be a formal way for us to get
1163 		 * the local interface that was selected based on the
1164 		 * imr_interface address. We could then update *ifidx so
1165 		 * vxlan_sockaddr_mc_info_match() would return a match for
1166 		 * later creates that explicitly set the multicast interface.
1167 		 *
1168 		 * If we really need to, we can of course look in the INP's
1169 		 * membership list:
1170 		 *     sotoinpcb(vso->vxlso_sock)->inp_moptions->
1171 		 *         imo_head[]->imf_inm->inm_ifp
1172 		 * similarly to imo_match_group().
1173 		 */
1174 		source->in4.sin_addr = local->in4.sin_addr;
1175 
1176 	} else if (VXLAN_SOCKADDR_IS_IPV6(group)) {
1177 		struct ipv6_mreq mreq;
1178 
1179 		mreq.ipv6mr_multiaddr = group->in6.sin6_addr;
1180 		mreq.ipv6mr_interface = *ifidx;
1181 
1182 		bzero(&sopt, sizeof(sopt));
1183 		sopt.sopt_dir = SOPT_SET;
1184 		sopt.sopt_level = IPPROTO_IPV6;
1185 		sopt.sopt_name = IPV6_JOIN_GROUP;
1186 		sopt.sopt_val = &mreq;
1187 		sopt.sopt_valsize = sizeof(mreq);
1188 		error = sosetopt(vso->vxlso_sock, &sopt);
1189 		if (error)
1190 			return (error);
1191 
1192 		/*
1193 		 * BMV: As with IPv4, we would really like to know what
1194 		 * interface in6p_lookup_mcast_ifp() selected.
1195 		 */
1196 	} else
1197 		error = EAFNOSUPPORT;
1198 
1199 	return (error);
1200 }
1201 
1202 static int
1203 vxlan_socket_mc_leave_group(struct vxlan_socket *vso,
1204     const union vxlan_sockaddr *group, const union vxlan_sockaddr *source,
1205     int ifidx)
1206 {
1207 	struct sockopt sopt;
1208 	int error;
1209 
1210 	bzero(&sopt, sizeof(sopt));
1211 	sopt.sopt_dir = SOPT_SET;
1212 
1213 	if (VXLAN_SOCKADDR_IS_IPV4(group)) {
1214 		struct ip_mreq mreq;
1215 
1216 		mreq.imr_multiaddr = group->in4.sin_addr;
1217 		mreq.imr_interface = source->in4.sin_addr;
1218 
1219 		sopt.sopt_level = IPPROTO_IP;
1220 		sopt.sopt_name = IP_DROP_MEMBERSHIP;
1221 		sopt.sopt_val = &mreq;
1222 		sopt.sopt_valsize = sizeof(mreq);
1223 		error = sosetopt(vso->vxlso_sock, &sopt);
1224 
1225 	} else if (VXLAN_SOCKADDR_IS_IPV6(group)) {
1226 		struct ipv6_mreq mreq;
1227 
1228 		mreq.ipv6mr_multiaddr = group->in6.sin6_addr;
1229 		mreq.ipv6mr_interface = ifidx;
1230 
1231 		sopt.sopt_level = IPPROTO_IPV6;
1232 		sopt.sopt_name = IPV6_LEAVE_GROUP;
1233 		sopt.sopt_val = &mreq;
1234 		sopt.sopt_valsize = sizeof(mreq);
1235 		error = sosetopt(vso->vxlso_sock, &sopt);
1236 
1237 	} else
1238 		error = EAFNOSUPPORT;
1239 
1240 	return (error);
1241 }
1242 
1243 static int
1244 vxlan_socket_mc_add_group(struct vxlan_socket *vso,
1245     const union vxlan_sockaddr *group, const union vxlan_sockaddr *local,
1246     int ifidx, int *idx)
1247 {
1248 	union vxlan_sockaddr source;
1249 	struct vxlan_socket_mc_info *mc;
1250 	int i, empty, error;
1251 
1252 	/*
1253 	 * Within a socket, the same multicast group may be used by multiple
1254 	 * interfaces, each with a different network identifier. But a socket
1255 	 * may only join a multicast group once, so keep track of the users
1256 	 * here.
1257 	 */
1258 
1259 	VXLAN_SO_WLOCK(vso);
1260 	for (empty = 0, i = 0; i < VXLAN_SO_MC_MAX_GROUPS; i++) {
1261 		mc = &vso->vxlso_mc[i];
1262 
1263 		if (mc->vxlsomc_gaddr.sa.sa_family == AF_UNSPEC) {
1264 			empty++;
1265 			continue;
1266 		}
1267 
1268 		if (vxlan_sockaddr_mc_info_match(mc, group, local, ifidx))
1269 			goto out;
1270 	}
1271 	VXLAN_SO_WUNLOCK(vso);
1272 
1273 	if (empty == 0)
1274 		return (ENOSPC);
1275 
1276 	error = vxlan_socket_mc_join_group(vso, group, local, &ifidx, &source);
1277 	if (error)
1278 		return (error);
1279 
1280 	VXLAN_SO_WLOCK(vso);
1281 	for (i = 0; i < VXLAN_SO_MC_MAX_GROUPS; i++) {
1282 		mc = &vso->vxlso_mc[i];
1283 
1284 		if (mc->vxlsomc_gaddr.sa.sa_family == AF_UNSPEC) {
1285 			vxlan_sockaddr_copy(&mc->vxlsomc_gaddr, &group->sa);
1286 			vxlan_sockaddr_copy(&mc->vxlsomc_saddr, &source.sa);
1287 			mc->vxlsomc_ifidx = ifidx;
1288 			goto out;
1289 		}
1290 	}
1291 	VXLAN_SO_WUNLOCK(vso);
1292 
1293 	error = vxlan_socket_mc_leave_group(vso, group, &source, ifidx);
1294 	MPASS(error == 0);
1295 
1296 	return (ENOSPC);
1297 
1298 out:
1299 	mc->vxlsomc_users++;
1300 	VXLAN_SO_WUNLOCK(vso);
1301 
1302 	*idx = i;
1303 
1304 	return (0);
1305 }
1306 
1307 static void
1308 vxlan_socket_mc_release_group_by_idx(struct vxlan_socket *vso, int idx)
1309 {
1310 	union vxlan_sockaddr group, source;
1311 	struct vxlan_socket_mc_info *mc;
1312 	int ifidx, leave;
1313 
1314 	KASSERT(idx >= 0 && idx < VXLAN_SO_MC_MAX_GROUPS,
1315 	    ("%s: vso %p idx %d out of bounds", __func__, vso, idx));
1316 
1317 	leave = 0;
1318 	mc = &vso->vxlso_mc[idx];
1319 
1320 	VXLAN_SO_WLOCK(vso);
1321 	mc->vxlsomc_users--;
1322 	if (mc->vxlsomc_users == 0) {
1323 		group = mc->vxlsomc_gaddr;
1324 		source = mc->vxlsomc_saddr;
1325 		ifidx = mc->vxlsomc_ifidx;
1326 		bzero(mc, sizeof(*mc));
1327 		leave = 1;
1328 	}
1329 	VXLAN_SO_WUNLOCK(vso);
1330 
1331 	if (leave != 0) {
1332 		/*
1333 		 * Our socket's membership in this group may have already
1334 		 * been removed if we joined through an interface that's
1335 		 * been detached.
1336 		 */
1337 		vxlan_socket_mc_leave_group(vso, &group, &source, ifidx);
1338 	}
1339 }
1340 
1341 static struct vxlan_softc *
1342 vxlan_socket_lookup_softc_locked(struct vxlan_socket *vso, uint32_t vni)
1343 {
1344 	struct vxlan_softc *sc;
1345 	uint32_t hash;
1346 
1347 	VXLAN_SO_LOCK_ASSERT(vso);
1348 	hash = VXLAN_SO_VNI_HASH(vni);
1349 
1350 	LIST_FOREACH(sc, &vso->vxlso_vni_hash[hash], vxl_entry) {
1351 		if (sc->vxl_vni == vni) {
1352 			VXLAN_ACQUIRE(sc);
1353 			break;
1354 		}
1355 	}
1356 
1357 	return (sc);
1358 }
1359 
1360 static struct vxlan_softc *
1361 vxlan_socket_lookup_softc(struct vxlan_socket *vso, uint32_t vni)
1362 {
1363 	struct rm_priotracker tracker;
1364 	struct vxlan_softc *sc;
1365 
1366 	VXLAN_SO_RLOCK(vso, &tracker);
1367 	sc = vxlan_socket_lookup_softc_locked(vso, vni);
1368 	VXLAN_SO_RUNLOCK(vso, &tracker);
1369 
1370 	return (sc);
1371 }
1372 
1373 static int
1374 vxlan_socket_insert_softc(struct vxlan_socket *vso, struct vxlan_softc *sc)
1375 {
1376 	struct vxlan_softc *tsc;
1377 	uint32_t vni, hash;
1378 
1379 	vni = sc->vxl_vni;
1380 	hash = VXLAN_SO_VNI_HASH(vni);
1381 
1382 	VXLAN_SO_WLOCK(vso);
1383 	tsc = vxlan_socket_lookup_softc_locked(vso, vni);
1384 	if (tsc != NULL) {
1385 		VXLAN_SO_WUNLOCK(vso);
1386 		vxlan_release(tsc);
1387 		return (EEXIST);
1388 	}
1389 
1390 	VXLAN_ACQUIRE(sc);
1391 	LIST_INSERT_HEAD(&vso->vxlso_vni_hash[hash], sc, vxl_entry);
1392 	VXLAN_SO_WUNLOCK(vso);
1393 
1394 	return (0);
1395 }
1396 
1397 static void
1398 vxlan_socket_remove_softc(struct vxlan_socket *vso, struct vxlan_softc *sc)
1399 {
1400 
1401 	VXLAN_SO_WLOCK(vso);
1402 	LIST_REMOVE(sc, vxl_entry);
1403 	VXLAN_SO_WUNLOCK(vso);
1404 
1405 	vxlan_release(sc);
1406 }
1407 
1408 static struct ifnet *
1409 vxlan_multicast_if_ref(struct vxlan_softc *sc, int ipv4)
1410 {
1411 	struct ifnet *ifp;
1412 
1413 	VXLAN_LOCK_ASSERT(sc);
1414 
1415 	if (ipv4 && sc->vxl_im4o != NULL)
1416 		ifp = sc->vxl_im4o->imo_multicast_ifp;
1417 	else if (!ipv4 && sc->vxl_im6o != NULL)
1418 		ifp = sc->vxl_im6o->im6o_multicast_ifp;
1419 	else
1420 		ifp = NULL;
1421 
1422 	if (ifp != NULL)
1423 		if_ref(ifp);
1424 
1425 	return (ifp);
1426 }
1427 
1428 static void
1429 vxlan_free_multicast(struct vxlan_softc *sc)
1430 {
1431 
1432 	if (sc->vxl_mc_ifp != NULL) {
1433 		if_rele(sc->vxl_mc_ifp);
1434 		sc->vxl_mc_ifp = NULL;
1435 		sc->vxl_mc_ifindex = 0;
1436 	}
1437 
1438 	if (sc->vxl_im4o != NULL) {
1439 		free(sc->vxl_im4o, M_VXLAN);
1440 		sc->vxl_im4o = NULL;
1441 	}
1442 
1443 	if (sc->vxl_im6o != NULL) {
1444 		free(sc->vxl_im6o, M_VXLAN);
1445 		sc->vxl_im6o = NULL;
1446 	}
1447 }
1448 
1449 static int
1450 vxlan_setup_multicast_interface(struct vxlan_softc *sc)
1451 {
1452 	struct ifnet *ifp;
1453 
1454 	ifp = ifunit_ref(sc->vxl_mc_ifname);
1455 	if (ifp == NULL) {
1456 		if_printf(sc->vxl_ifp, "multicast interface %s does "
1457 		    "not exist\n", sc->vxl_mc_ifname);
1458 		return (ENOENT);
1459 	}
1460 
1461 	if ((ifp->if_flags & IFF_MULTICAST) == 0) {
1462 		if_printf(sc->vxl_ifp, "interface %s does not support "
1463 		     "multicast\n", sc->vxl_mc_ifname);
1464 		if_rele(ifp);
1465 		return (ENOTSUP);
1466 	}
1467 
1468 	sc->vxl_mc_ifp = ifp;
1469 	sc->vxl_mc_ifindex = ifp->if_index;
1470 
1471 	return (0);
1472 }
1473 
1474 static int
1475 vxlan_setup_multicast(struct vxlan_softc *sc)
1476 {
1477 	const union vxlan_sockaddr *group;
1478 	int error;
1479 
1480 	group = &sc->vxl_dst_addr;
1481 	error = 0;
1482 
1483 	if (sc->vxl_mc_ifname[0] != '\0') {
1484 		error = vxlan_setup_multicast_interface(sc);
1485 		if (error)
1486 			return (error);
1487 	}
1488 
1489 	/*
1490 	 * Initialize a multicast options structure that is sufficiently
1491 	 * populated for use in the respective IP output routine. This
1492 	 * structure is typically stored in the socket, but our sockets
1493 	 * may be shared among multiple interfaces.
1494 	 */
1495 	if (VXLAN_SOCKADDR_IS_IPV4(group)) {
1496 		sc->vxl_im4o = malloc(sizeof(struct ip_moptions), M_VXLAN,
1497 		    M_ZERO | M_WAITOK);
1498 		sc->vxl_im4o->imo_multicast_ifp = sc->vxl_mc_ifp;
1499 		sc->vxl_im4o->imo_multicast_ttl = sc->vxl_ttl;
1500 		sc->vxl_im4o->imo_multicast_vif = -1;
1501 	} else if (VXLAN_SOCKADDR_IS_IPV6(group)) {
1502 		sc->vxl_im6o = malloc(sizeof(struct ip6_moptions), M_VXLAN,
1503 		    M_ZERO | M_WAITOK);
1504 		sc->vxl_im6o->im6o_multicast_ifp = sc->vxl_mc_ifp;
1505 		sc->vxl_im6o->im6o_multicast_hlim = sc->vxl_ttl;
1506 	}
1507 
1508 	return (error);
1509 }
1510 
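/*
 * Attach the interface to a vxlan_socket: create a UDP socket bound to the
 * local address and port, or fall back to an existing socket with the same
 * binding.  For a multicast destination, join the group and remember the
 * membership slot.  Finally, register this softc in the socket's VNI hash.
 */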
1511 static int
1512 vxlan_setup_socket(struct vxlan_softc *sc)
1513 {
1514 	struct vxlan_socket *vso;
1515 	struct ifnet *ifp;
1516 	union vxlan_sockaddr *saddr, *daddr;
1517 	int multicast, error;
1518 
1519 	vso = NULL;
1520 	ifp = sc->vxl_ifp;
1521 	saddr = &sc->vxl_src_addr;
1522 	daddr = &sc->vxl_dst_addr;
1523 
1524 	multicast = vxlan_sockaddr_in_multicast(daddr);
1525 	MPASS(multicast != -1);
1526 	sc->vxl_vso_mc_index = -1;
1527 
1528 	/*
1529 	 * Try to create the socket. If that fails, attempt to use an
1530 	 * existing socket.
1531 	 */
1532 	error = vxlan_socket_create(ifp, multicast, saddr, &vso);
1533 	if (error) {
1534 		if (multicast != 0)
1535 			vso = vxlan_socket_mc_lookup(saddr);
1536 		else
1537 			vso = vxlan_socket_lookup(saddr);
1538 
1539 		if (vso == NULL) {
1540 			if_printf(ifp, "cannot create socket (error: %d), "
1541 			    "and no existing socket found\n", error);
1542 			goto out;
1543 		}
1544 	}
1545 
1546 	if (multicast != 0) {
1547 		error = vxlan_setup_multicast(sc);
1548 		if (error)
1549 			goto out;
1550 
1551 		error = vxlan_socket_mc_add_group(vso, daddr, saddr,
1552 		    sc->vxl_mc_ifindex, &sc->vxl_vso_mc_index);
1553 		if (error)
1554 			goto out;
1555 	}
1556 
1557 	sc->vxl_sock = vso;
1558 	error = vxlan_socket_insert_softc(vso, sc);
1559 	if (error) {
1560 		sc->vxl_sock = NULL;
1561 		if_printf(ifp, "network identifier %d already exists in "
1562 		    "this socket\n", sc->vxl_vni);
1563 		goto out;
1564 	}
1565 
1566 	return (0);
1567 
1568 out:
1569 	if (vso != NULL) {
1570 		if (sc->vxl_vso_mc_index != -1) {
1571 			vxlan_socket_mc_release_group_by_idx(vso,
1572 			    sc->vxl_vso_mc_index);
1573 			sc->vxl_vso_mc_index = -1;
1574 		}
1575 		if (multicast != 0)
1576 			vxlan_free_multicast(sc);
1577 		vxlan_socket_release(vso);
1578 	}
1579 
1580 	return (error);
1581 }
1582 
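/*
 * RFC 6935 permits a zero UDP checksum over IPv6 for tunnel protocols.
 * When the local and remote VXLAN ports agree, record that port in
 * V_zero_checksum_port so the UDPv6 input path can accept zero-checksum
 * datagrams destined to it.
 */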
1583 #ifdef INET6
1584 static void
1585 vxlan_setup_zero_checksum_port(struct vxlan_softc *sc)
1586 {
1587 
1588 	if (!VXLAN_SOCKADDR_IS_IPV6(&sc->vxl_src_addr))
1589 		return;
1590 
1591 	MPASS(sc->vxl_src_addr.in6.sin6_port != 0);
1592 	MPASS(sc->vxl_dst_addr.in6.sin6_port != 0);
1593 
1594 	if (sc->vxl_src_addr.in6.sin6_port != sc->vxl_dst_addr.in6.sin6_port) {
1595 		if_printf(sc->vxl_ifp, "port %d in src address does not match "
1596 		    "port %d in dst address, rfc6935_port (%d) not updated.\n",
1597 		    ntohs(sc->vxl_src_addr.in6.sin6_port),
1598 		    ntohs(sc->vxl_dst_addr.in6.sin6_port),
1599 		    V_zero_checksum_port);
1600 		return;
1601 	}
1602 
1603 	if (V_zero_checksum_port != 0) {
1604 		if (V_zero_checksum_port !=
1605 		    ntohs(sc->vxl_src_addr.in6.sin6_port)) {
1606 			if_printf(sc->vxl_ifp, "rfc6935_port is already set to "
1607 			    "%d, cannot set it to %d.\n", V_zero_checksum_port,
1608 			    ntohs(sc->vxl_src_addr.in6.sin6_port));
1609 		}
1610 		return;
1611 	}
1612 
1613 	V_zero_checksum_port = ntohs(sc->vxl_src_addr.in6.sin6_port);
1614 	if_printf(sc->vxl_ifp, "rfc6935_port set to %d\n",
1615 	    V_zero_checksum_port);
1616 }
1617 #endif
1618 
1619 static void
1620 vxlan_setup_interface_hdrlen(struct vxlan_softc *sc)
1621 {
1622 	struct ifnet *ifp;
1623 
1624 	VXLAN_LOCK_WASSERT(sc);
1625 
1626 	ifp = sc->vxl_ifp;
1627 	ifp->if_hdrlen = ETHER_HDR_LEN + sizeof(struct vxlanudphdr);
1628 
1629 	if (VXLAN_SOCKADDR_IS_IPV4(&sc->vxl_dst_addr) != 0)
1630 		ifp->if_hdrlen += sizeof(struct ip);
1631 	else if (VXLAN_SOCKADDR_IS_IPV6(&sc->vxl_dst_addr) != 0)
1632 		ifp->if_hdrlen += sizeof(struct ip6_hdr);
1633 
1634 	if ((sc->vxl_flags & VXLAN_FLAG_USER_MTU) == 0)
1635 		ifp->if_mtu = ETHERMTU - ifp->if_hdrlen;
1636 }
1637 
1638 static int
1639 vxlan_valid_init_config(struct vxlan_softc *sc)
1640 {
1641 	const char *reason;
1642 
1643 	if (vxlan_check_vni(sc->vxl_vni) != 0) {
1644 		reason = "invalid virtual network identifier specified";
1645 		goto fail;
1646 	}
1647 
1648 	if (vxlan_sockaddr_supported(&sc->vxl_src_addr, 1) == 0) {
1649 		reason = "source address type is not supported";
1650 		goto fail;
1651 	}
1652 
1653 	if (vxlan_sockaddr_supported(&sc->vxl_dst_addr, 0) == 0) {
1654 		reason = "destination address type is not supported";
1655 		goto fail;
1656 	}
1657 
1658 	if (vxlan_sockaddr_in_any(&sc->vxl_dst_addr) != 0) {
1659 		reason = "no valid destination address specified";
1660 		goto fail;
1661 	}
1662 
1663 	if (vxlan_sockaddr_in_multicast(&sc->vxl_dst_addr) == 0 &&
1664 	    sc->vxl_mc_ifname[0] != '\0') {
1665 		reason = "can only specify interface with a group address";
1666 		goto fail;
1667 	}
1668 
1669 	if (vxlan_sockaddr_in_any(&sc->vxl_src_addr) == 0) {
1670 		if (VXLAN_SOCKADDR_IS_IPV4(&sc->vxl_src_addr) ^
1671 		    VXLAN_SOCKADDR_IS_IPV4(&sc->vxl_dst_addr)) {
1672 			reason = "source and destination address must both "
1673 			    "be either IPv4 or IPv6";
1674 			goto fail;
1675 		}
1676 	}
1677 
1678 	if (sc->vxl_src_addr.in4.sin_port == 0) {
1679 		reason = "local port not specified";
1680 		goto fail;
1681 	}
1682 
1683 	if (sc->vxl_dst_addr.in4.sin_port == 0) {
1684 		reason = "remote port not specified";
1685 		goto fail;
1686 	}
1687 
1688 	return (0);
1689 
1690 fail:
1691 	if_printf(sc->vxl_ifp, "cannot initialize interface: %s\n", reason);
1692 	return (EINVAL);
1693 }
1694 
1695 static void
1696 vxlan_init_wait(struct vxlan_softc *sc)
1697 {
1698 
1699 	VXLAN_LOCK_WASSERT(sc);
1700 	while (sc->vxl_flags & VXLAN_FLAG_INIT)
1701 		rm_sleep(sc, &sc->vxl_lock, 0, "vxlint", hz);
1702 }
1703 
1704 static void
1705 vxlan_init_complete(struct vxlan_softc *sc)
1706 {
1707 
1708 	VXLAN_WLOCK(sc);
1709 	sc->vxl_flags &= ~VXLAN_FLAG_INIT;
1710 	wakeup(sc);
1711 	VXLAN_WUNLOCK(sc);
1712 }
1713 
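/*
 * Bring the interface up: validate the configuration, create or share the
 * UDP socket, set up any multicast membership, install the static default
 * forwarding entry derived from the destination address, mark the
 * interface running and start the forwarding table prune timer.
 */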
1714 static void
1715 vxlan_init(void *xsc)
1716 {
1717 	static const uint8_t empty_mac[ETHER_ADDR_LEN];
1718 	struct vxlan_softc *sc;
1719 	struct ifnet *ifp;
1720 
1721 	sc = xsc;
1722 	ifp = sc->vxl_ifp;
1723 
1724 	sx_xlock(&vxlan_sx);
1725 	VXLAN_WLOCK(sc);
1726 	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1727 		VXLAN_WUNLOCK(sc);
1728 		sx_xunlock(&vxlan_sx);
1729 		return;
1730 	}
1731 	sc->vxl_flags |= VXLAN_FLAG_INIT;
1732 	VXLAN_WUNLOCK(sc);
1733 
1734 	if (vxlan_valid_init_config(sc) != 0)
1735 		goto out;
1736 
1737 	if (vxlan_setup_socket(sc) != 0)
1738 		goto out;
1739 
1740 #ifdef INET6
1741 	vxlan_setup_zero_checksum_port(sc);
1742 #endif
1743 
1744 	/* Initialize the default forwarding entry. */
1745 	vxlan_ftable_entry_init(sc, &sc->vxl_default_fe, empty_mac,
1746 	    &sc->vxl_dst_addr.sa, VXLAN_FE_FLAG_STATIC);
1747 
1748 	VXLAN_WLOCK(sc);
1749 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1750 	callout_reset(&sc->vxl_callout, vxlan_ftable_prune_period * hz,
1751 	    vxlan_timer, sc);
1752 	VXLAN_WUNLOCK(sc);
1753 
1754 	if_link_state_change(ifp, LINK_STATE_UP);
1755 
1756 	EVENTHANDLER_INVOKE(vxlan_start, ifp, sc->vxl_src_addr.in4.sin_family,
1757 	    ntohs(sc->vxl_src_addr.in4.sin_port));
1758 out:
1759 	vxlan_init_complete(sc);
1760 	sx_xunlock(&vxlan_sx);
1761 }
1762 
1763 static void
1764 vxlan_release(struct vxlan_softc *sc)
1765 {
1766 
1767 	/*
1768 	 * The softc may be destroyed as soon as we release our reference,
1769 	 * so we cannot serialize the wakeup with the softc lock. We use a
1770 	 * timeout in our sleeps so a missed wakeup is unfortunate but not
1771 	 * fatal.
1772 	 */
1773 	if (VXLAN_RELEASE(sc) != 0)
1774 		wakeup(sc);
1775 }
1776 
1777 static void
1778 vxlan_teardown_wait(struct vxlan_softc *sc)
1779 {
1780 
1781 	VXLAN_LOCK_WASSERT(sc);
1782 	while (sc->vxl_flags & VXLAN_FLAG_TEARDOWN)
1783 		rm_sleep(sc, &sc->vxl_lock, 0, "vxltrn", hz);
1784 }
1785 
1786 static void
1787 vxlan_teardown_complete(struct vxlan_softc *sc)
1788 {
1789 
1790 	VXLAN_WLOCK(sc);
1791 	sc->vxl_flags &= ~VXLAN_FLAG_TEARDOWN;
1792 	wakeup(sc);
1793 	VXLAN_WUNLOCK(sc);
1794 }
1795 
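/*
 * Tear the interface down with the teardown flag already set: clear the
 * running state, stop the prune timer, detach from the socket (dropping
 * any multicast membership), wait for outstanding references to drain,
 * then release the multicast state and socket and wake up anyone in
 * vxlan_teardown_wait().
 */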
1796 static void
1797 vxlan_teardown_locked(struct vxlan_softc *sc)
1798 {
1799 	struct ifnet *ifp;
1800 	struct vxlan_socket *vso;
1801 
1802 	sx_assert(&vxlan_sx, SA_XLOCKED);
1803 	VXLAN_LOCK_WASSERT(sc);
1804 	MPASS(sc->vxl_flags & VXLAN_FLAG_TEARDOWN);
1805 
1806 	ifp = sc->vxl_ifp;
1807 	ifp->if_flags &= ~IFF_UP;
1808 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1809 	callout_stop(&sc->vxl_callout);
1810 	vso = sc->vxl_sock;
1811 	sc->vxl_sock = NULL;
1812 
1813 	VXLAN_WUNLOCK(sc);
1814 	if_link_state_change(ifp, LINK_STATE_DOWN);
1815 	EVENTHANDLER_INVOKE(vxlan_stop, ifp, sc->vxl_src_addr.in4.sin_family,
1816 	    ntohs(sc->vxl_src_addr.in4.sin_port));
1817 
1818 	if (vso != NULL) {
1819 		vxlan_socket_remove_softc(vso, sc);
1820 
1821 		if (sc->vxl_vso_mc_index != -1) {
1822 			vxlan_socket_mc_release_group_by_idx(vso,
1823 			    sc->vxl_vso_mc_index);
1824 			sc->vxl_vso_mc_index = -1;
1825 		}
1826 	}
1827 
1828 	VXLAN_WLOCK(sc);
1829 	while (sc->vxl_refcnt != 0)
1830 		rm_sleep(sc, &sc->vxl_lock, 0, "vxldrn", hz);
1831 	VXLAN_WUNLOCK(sc);
1832 
1833 	callout_drain(&sc->vxl_callout);
1834 
1835 	vxlan_free_multicast(sc);
1836 	if (vso != NULL)
1837 		vxlan_socket_release(vso);
1838 
1839 	vxlan_teardown_complete(sc);
1840 }
1841 
1842 static void
1843 vxlan_teardown(struct vxlan_softc *sc)
1844 {
1845 
1846 	sx_xlock(&vxlan_sx);
1847 	VXLAN_WLOCK(sc);
1848 	if (sc->vxl_flags & VXLAN_FLAG_TEARDOWN) {
1849 		vxlan_teardown_wait(sc);
1850 		VXLAN_WUNLOCK(sc);
1851 		sx_xunlock(&vxlan_sx);
1852 		return;
1853 	}
1854 
1855 	sc->vxl_flags |= VXLAN_FLAG_TEARDOWN;
1856 	vxlan_teardown_locked(sc);
1857 	sx_xunlock(&vxlan_sx);
1858 }
1859 
1860 static void
1861 vxlan_ifdetach(struct vxlan_softc *sc, struct ifnet *ifp,
1862     struct vxlan_softc_head *list)
1863 {
1864 
1865 	VXLAN_WLOCK(sc);
1866 
1867 	if (sc->vxl_mc_ifp != ifp)
1868 		goto out;
1869 	if (sc->vxl_flags & VXLAN_FLAG_TEARDOWN)
1870 		goto out;
1871 
1872 	sc->vxl_flags |= VXLAN_FLAG_TEARDOWN;
1873 	LIST_INSERT_HEAD(list, sc, vxl_ifdetach_list);
1874 
1875 out:
1876 	VXLAN_WUNLOCK(sc);
1877 }
1878 
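/*
 * Callout handler: expire stale forwarding table entries and
 * reschedule the next prune.
 */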
1879 static void
1880 vxlan_timer(void *xsc)
1881 {
1882 	struct vxlan_softc *sc;
1883 
1884 	sc = xsc;
1885 	VXLAN_LOCK_WASSERT(sc);
1886 
1887 	vxlan_ftable_expire(sc);
1888 	callout_schedule(&sc->vxl_callout, vxlan_ftable_prune_period * hz);
1889 }
1890 
1891 static int
1892 vxlan_ioctl_ifflags(struct vxlan_softc *sc)
1893 {
1894 	struct ifnet *ifp;
1895 
1896 	ifp = sc->vxl_ifp;
1897 
1898 	if (ifp->if_flags & IFF_UP) {
1899 		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1900 			vxlan_init(sc);
1901 	} else {
1902 		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1903 			vxlan_teardown(sc);
1904 	}
1905 
1906 	return (0);
1907 }
1908 
1909 static int
1910 vxlan_ctrl_get_config(struct vxlan_softc *sc, void *arg)
1911 {
1912 	struct rm_priotracker tracker;
1913 	struct ifvxlancfg *cfg;
1914 
1915 	cfg = arg;
1916 	bzero(cfg, sizeof(*cfg));
1917 
1918 	VXLAN_RLOCK(sc, &tracker);
1919 	cfg->vxlc_vni = sc->vxl_vni;
1920 	memcpy(&cfg->vxlc_local_sa, &sc->vxl_src_addr,
1921 	    sizeof(union vxlan_sockaddr));
1922 	memcpy(&cfg->vxlc_remote_sa, &sc->vxl_dst_addr,
1923 	    sizeof(union vxlan_sockaddr));
1924 	cfg->vxlc_mc_ifindex = sc->vxl_mc_ifindex;
1925 	cfg->vxlc_ftable_cnt = sc->vxl_ftable_cnt;
1926 	cfg->vxlc_ftable_max = sc->vxl_ftable_max;
1927 	cfg->vxlc_ftable_timeout = sc->vxl_ftable_timeout;
1928 	cfg->vxlc_port_min = sc->vxl_min_port;
1929 	cfg->vxlc_port_max = sc->vxl_max_port;
1930 	cfg->vxlc_learn = (sc->vxl_flags & VXLAN_FLAG_LEARN) != 0;
1931 	cfg->vxlc_ttl = sc->vxl_ttl;
1932 	VXLAN_RUNLOCK(sc, &tracker);
1933 
1934 #ifdef INET6
1935 	if (VXLAN_SOCKADDR_IS_IPV6(&cfg->vxlc_local_sa))
1936 		sa6_recoverscope(&cfg->vxlc_local_sa.in6);
1937 	if (VXLAN_SOCKADDR_IS_IPV6(&cfg->vxlc_remote_sa))
1938 		sa6_recoverscope(&cfg->vxlc_remote_sa.in6);
1939 #endif
1940 
1941 	return (0);
1942 }
1943 
1944 static int
1945 vxlan_ctrl_set_vni(struct vxlan_softc *sc, void *arg)
1946 {
1947 	struct ifvxlancmd *cmd;
1948 	int error;
1949 
1950 	cmd = arg;
1951 
1952 	if (vxlan_check_vni(cmd->vxlcmd_vni) != 0)
1953 		return (EINVAL);
1954 
1955 	VXLAN_WLOCK(sc);
1956 	if (vxlan_can_change_config(sc)) {
1957 		sc->vxl_vni = cmd->vxlcmd_vni;
1958 		error = 0;
1959 	} else
1960 		error = EBUSY;
1961 	VXLAN_WUNLOCK(sc);
1962 
1963 	return (error);
1964 }
1965 
1966 static int
1967 vxlan_ctrl_set_local_addr(struct vxlan_softc *sc, void *arg)
1968 {
1969 	struct ifvxlancmd *cmd;
1970 	union vxlan_sockaddr *vxlsa;
1971 	int error;
1972 
1973 	cmd = arg;
1974 	vxlsa = &cmd->vxlcmd_sa;
1975 
1976 	if (!VXLAN_SOCKADDR_IS_IPV46(vxlsa))
1977 		return (EINVAL);
1978 	if (vxlan_sockaddr_in_multicast(vxlsa) != 0)
1979 		return (EINVAL);
1980 	if (VXLAN_SOCKADDR_IS_IPV6(vxlsa)) {
1981 		error = vxlan_sockaddr_in6_embedscope(vxlsa);
1982 		if (error)
1983 			return (error);
1984 	}
1985 
1986 	VXLAN_WLOCK(sc);
1987 	if (vxlan_can_change_config(sc)) {
1988 		vxlan_sockaddr_in_copy(&sc->vxl_src_addr, &vxlsa->sa);
1989 		vxlan_set_hwcaps(sc);
1990 		error = 0;
1991 	} else
1992 		error = EBUSY;
1993 	VXLAN_WUNLOCK(sc);
1994 
1995 	return (error);
1996 }
1997 
1998 static int
1999 vxlan_ctrl_set_remote_addr(struct vxlan_softc *sc, void *arg)
2000 {
2001 	struct ifvxlancmd *cmd;
2002 	union vxlan_sockaddr *vxlsa;
2003 	int error;
2004 
2005 	cmd = arg;
2006 	vxlsa = &cmd->vxlcmd_sa;
2007 
2008 	if (!VXLAN_SOCKADDR_IS_IPV46(vxlsa))
2009 		return (EINVAL);
2010 	if (VXLAN_SOCKADDR_IS_IPV6(vxlsa)) {
2011 		error = vxlan_sockaddr_in6_embedscope(vxlsa);
2012 		if (error)
2013 			return (error);
2014 	}
2015 
2016 	VXLAN_WLOCK(sc);
2017 	if (vxlan_can_change_config(sc)) {
2018 		vxlan_sockaddr_in_copy(&sc->vxl_dst_addr, &vxlsa->sa);
2019 		vxlan_setup_interface_hdrlen(sc);
2020 		error = 0;
2021 	} else
2022 		error = EBUSY;
2023 	VXLAN_WUNLOCK(sc);
2024 
2025 	return (error);
2026 }
2027 
2028 static int
2029 vxlan_ctrl_set_local_port(struct vxlan_softc *sc, void *arg)
2030 {
2031 	struct ifvxlancmd *cmd;
2032 	int error;
2033 
2034 	cmd = arg;
2035 
2036 	if (cmd->vxlcmd_port == 0)
2037 		return (EINVAL);
2038 
2039 	VXLAN_WLOCK(sc);
2040 	if (vxlan_can_change_config(sc)) {
2041 		sc->vxl_src_addr.in4.sin_port = htons(cmd->vxlcmd_port);
2042 		error = 0;
2043 	} else
2044 		error = EBUSY;
2045 	VXLAN_WUNLOCK(sc);
2046 
2047 	return (error);
2048 }
2049 
2050 static int
2051 vxlan_ctrl_set_remote_port(struct vxlan_softc *sc, void *arg)
2052 {
2053 	struct ifvxlancmd *cmd;
2054 	int error;
2055 
2056 	cmd = arg;
2057 
2058 	if (cmd->vxlcmd_port == 0)
2059 		return (EINVAL);
2060 
2061 	VXLAN_WLOCK(sc);
2062 	if (vxlan_can_change_config(sc)) {
2063 		sc->vxl_dst_addr.in4.sin_port = htons(cmd->vxlcmd_port);
2064 		error = 0;
2065 	} else
2066 		error = EBUSY;
2067 	VXLAN_WUNLOCK(sc);
2068 
2069 	return (error);
2070 }
2071 
2072 static int
2073 vxlan_ctrl_set_port_range(struct vxlan_softc *sc, void *arg)
2074 {
2075 	struct ifvxlancmd *cmd;
2076 	uint16_t min, max;
2077 	int error;
2078 
2079 	cmd = arg;
2080 	min = cmd->vxlcmd_port_min;
2081 	max = cmd->vxlcmd_port_max;
2082 
2083 	if (max < min)
2084 		return (EINVAL);
2085 
2086 	VXLAN_WLOCK(sc);
2087 	if (vxlan_can_change_config(sc)) {
2088 		sc->vxl_min_port = min;
2089 		sc->vxl_max_port = max;
2090 		error = 0;
2091 	} else
2092 		error = EBUSY;
2093 	VXLAN_WUNLOCK(sc);
2094 
2095 	return (error);
2096 }
2097 
2098 static int
2099 vxlan_ctrl_set_ftable_timeout(struct vxlan_softc *sc, void *arg)
2100 {
2101 	struct ifvxlancmd *cmd;
2102 	int error;
2103 
2104 	cmd = arg;
2105 
2106 	VXLAN_WLOCK(sc);
2107 	if (vxlan_check_ftable_timeout(cmd->vxlcmd_ftable_timeout) == 0) {
2108 		sc->vxl_ftable_timeout = cmd->vxlcmd_ftable_timeout;
2109 		error = 0;
2110 	} else
2111 		error = EINVAL;
2112 	VXLAN_WUNLOCK(sc);
2113 
2114 	return (error);
2115 }
2116 
2117 static int
2118 vxlan_ctrl_set_ftable_max(struct vxlan_softc *sc, void *arg)
2119 {
2120 	struct ifvxlancmd *cmd;
2121 	int error;
2122 
2123 	cmd = arg;
2124 
2125 	VXLAN_WLOCK(sc);
2126 	if (vxlan_check_ftable_max(cmd->vxlcmd_ftable_max) == 0) {
2127 		sc->vxl_ftable_max = cmd->vxlcmd_ftable_max;
2128 		error = 0;
2129 	} else
2130 		error = EINVAL;
2131 	VXLAN_WUNLOCK(sc);
2132 
2133 	return (error);
2134 }
2135 
2136 static int
2137 vxlan_ctrl_set_multicast_if(struct vxlan_softc * sc, void *arg)
2138 {
2139 	struct ifvxlancmd *cmd;
2140 	int error;
2141 
2142 	cmd = arg;
2143 
2144 	VXLAN_WLOCK(sc);
2145 	if (vxlan_can_change_config(sc)) {
2146 		strlcpy(sc->vxl_mc_ifname, cmd->vxlcmd_ifname, IFNAMSIZ);
2147 		vxlan_set_hwcaps(sc);
2148 		error = 0;
2149 	} else
2150 		error = EBUSY;
2151 	VXLAN_WUNLOCK(sc);
2152 
2153 	return (error);
2154 }
2155 
2156 static int
2157 vxlan_ctrl_set_ttl(struct vxlan_softc *sc, void *arg)
2158 {
2159 	struct ifvxlancmd *cmd;
2160 	int error;
2161 
2162 	cmd = arg;
2163 
2164 	VXLAN_WLOCK(sc);
2165 	if (vxlan_check_ttl(cmd->vxlcmd_ttl) == 0) {
2166 		sc->vxl_ttl = cmd->vxlcmd_ttl;
2167 		if (sc->vxl_im4o != NULL)
2168 			sc->vxl_im4o->imo_multicast_ttl = sc->vxl_ttl;
2169 		if (sc->vxl_im6o != NULL)
2170 			sc->vxl_im6o->im6o_multicast_hlim = sc->vxl_ttl;
2171 		error = 0;
2172 	} else
2173 		error = EINVAL;
2174 	VXLAN_WUNLOCK(sc);
2175 
2176 	return (error);
2177 }
2178 
2179 static int
2180 vxlan_ctrl_set_learn(struct vxlan_softc *sc, void *arg)
2181 {
2182 	struct ifvxlancmd *cmd;
2183 
2184 	cmd = arg;
2185 
2186 	VXLAN_WLOCK(sc);
2187 	if (cmd->vxlcmd_flags & VXLAN_CMD_FLAG_LEARN)
2188 		sc->vxl_flags |= VXLAN_FLAG_LEARN;
2189 	else
2190 		sc->vxl_flags &= ~VXLAN_FLAG_LEARN;
2191 	VXLAN_WUNLOCK(sc);
2192 
2193 	return (0);
2194 }
2195 
2196 static int
2197 vxlan_ctrl_ftable_entry_add(struct vxlan_softc *sc, void *arg)
2198 {
2199 	union vxlan_sockaddr vxlsa;
2200 	struct ifvxlancmd *cmd;
2201 	struct vxlan_ftable_entry *fe;
2202 	int error;
2203 
2204 	cmd = arg;
2205 	vxlsa = cmd->vxlcmd_sa;
2206 
2207 	if (!VXLAN_SOCKADDR_IS_IPV46(&vxlsa))
2208 		return (EINVAL);
2209 	if (vxlan_sockaddr_in_any(&vxlsa) != 0)
2210 		return (EINVAL);
2211 	if (vxlan_sockaddr_in_multicast(&vxlsa) != 0)
2212 		return (EINVAL);
2213 	/* BMV: We could support both IPv4 and IPv6 later. */
2214 	if (vxlsa.sa.sa_family != sc->vxl_dst_addr.sa.sa_family)
2215 		return (EAFNOSUPPORT);
2216 
2217 	if (VXLAN_SOCKADDR_IS_IPV6(&vxlsa)) {
2218 		error = vxlan_sockaddr_in6_embedscope(&vxlsa);
2219 		if (error)
2220 			return (error);
2221 	}
2222 
2223 	fe = vxlan_ftable_entry_alloc();
2224 	if (fe == NULL)
2225 		return (ENOMEM);
2226 
2227 	if (vxlsa.in4.sin_port == 0)
2228 		vxlsa.in4.sin_port = sc->vxl_dst_addr.in4.sin_port;
2229 
2230 	vxlan_ftable_entry_init(sc, fe, cmd->vxlcmd_mac, &vxlsa.sa,
2231 	    VXLAN_FE_FLAG_STATIC);
2232 
2233 	VXLAN_WLOCK(sc);
2234 	error = vxlan_ftable_entry_insert(sc, fe);
2235 	VXLAN_WUNLOCK(sc);
2236 
2237 	if (error)
2238 		vxlan_ftable_entry_free(fe);
2239 
2240 	return (error);
2241 }
2242 
2243 static int
2244 vxlan_ctrl_ftable_entry_rem(struct vxlan_softc *sc, void *arg)
2245 {
2246 	struct ifvxlancmd *cmd;
2247 	struct vxlan_ftable_entry *fe;
2248 	int error;
2249 
2250 	cmd = arg;
2251 
2252 	VXLAN_WLOCK(sc);
2253 	fe = vxlan_ftable_entry_lookup(sc, cmd->vxlcmd_mac);
2254 	if (fe != NULL) {
2255 		vxlan_ftable_entry_destroy(sc, fe);
2256 		error = 0;
2257 	} else
2258 		error = ENOENT;
2259 	VXLAN_WUNLOCK(sc);
2260 
2261 	return (error);
2262 }
2263 
2264 static int
2265 vxlan_ctrl_flush(struct vxlan_softc *sc, void *arg)
2266 {
2267 	struct ifvxlancmd *cmd;
2268 	int all;
2269 
2270 	cmd = arg;
2271 	all = cmd->vxlcmd_flags & VXLAN_CMD_FLAG_FLUSH_ALL;
2272 
2273 	VXLAN_WLOCK(sc);
2274 	vxlan_ftable_flush(sc, all);
2275 	VXLAN_WUNLOCK(sc);
2276 
2277 	return (0);
2278 }
2279 
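/*
 * Dispatch a SIOCGDRVSPEC/SIOCSDRVSPEC request through the control
 * table: check the transfer direction, privilege and argument size,
 * copy the argument in, run the handler, and copy the result back
 * out when the command requests it.
 */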
2280 static int
2281 vxlan_ioctl_drvspec(struct vxlan_softc *sc, struct ifdrv *ifd, int get)
2282 {
2283 	const struct vxlan_control *vc;
2284 	union {
2285 		struct ifvxlancfg	cfg;
2286 		struct ifvxlancmd	cmd;
2287 	} args;
2288 	int out, error;
2289 
2290 	if (ifd->ifd_cmd >= vxlan_control_table_size)
2291 		return (EINVAL);
2292 
2293 	bzero(&args, sizeof(args));
2294 	vc = &vxlan_control_table[ifd->ifd_cmd];
2295 	out = (vc->vxlc_flags & VXLAN_CTRL_FLAG_COPYOUT) != 0;
2296 
2297 	if ((get != 0 && out == 0) || (get == 0 && out != 0))
2298 		return (EINVAL);
2299 
2300 	if (vc->vxlc_flags & VXLAN_CTRL_FLAG_SUSER) {
2301 		error = priv_check(curthread, PRIV_NET_VXLAN);
2302 		if (error)
2303 			return (error);
2304 	}
2305 
2306 	if (ifd->ifd_len != vc->vxlc_argsize ||
2307 	    ifd->ifd_len > sizeof(args))
2308 		return (EINVAL);
2309 
2310 	if (vc->vxlc_flags & VXLAN_CTRL_FLAG_COPYIN) {
2311 		error = copyin(ifd->ifd_data, &args, ifd->ifd_len);
2312 		if (error)
2313 			return (error);
2314 	}
2315 
2316 	error = vc->vxlc_func(sc, &args);
2317 	if (error)
2318 		return (error);
2319 
2320 	if (vc->vxlc_flags & VXLAN_CTRL_FLAG_COPYOUT) {
2321 		error = copyout(&args, ifd->ifd_data, ifd->ifd_len);
2322 		if (error)
2323 			return (error);
2324 	}
2325 
2326 	return (0);
2327 }
2328 
2329 static int
2330 vxlan_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
2331 {
2332 	struct rm_priotracker tracker;
2333 	struct vxlan_softc *sc;
2334 	struct ifreq *ifr;
2335 	struct ifdrv *ifd;
2336 	int error;
2337 
2338 	sc = ifp->if_softc;
2339 	ifr = (struct ifreq *) data;
2340 	ifd = (struct ifdrv *) data;
2341 
2342 	error = 0;
2343 
2344 	switch (cmd) {
2345 	case SIOCADDMULTI:
2346 	case SIOCDELMULTI:
2347 		break;
2348 
2349 	case SIOCGDRVSPEC:
2350 	case SIOCSDRVSPEC:
2351 		error = vxlan_ioctl_drvspec(sc, ifd, cmd == SIOCGDRVSPEC);
2352 		break;
2353 
2354 	case SIOCSIFFLAGS:
2355 		error = vxlan_ioctl_ifflags(sc);
2356 		break;
2357 
2358 	case SIOCSIFMEDIA:
2359 	case SIOCGIFMEDIA:
2360 		error = ifmedia_ioctl(ifp, ifr, &sc->vxl_media, cmd);
2361 		break;
2362 
2363 	case SIOCSIFMTU:
2364 		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > VXLAN_MAX_MTU) {
2365 			error = EINVAL;
2366 		} else {
2367 			VXLAN_WLOCK(sc);
2368 			ifp->if_mtu = ifr->ifr_mtu;
2369 			sc->vxl_flags |= VXLAN_FLAG_USER_MTU;
2370 			VXLAN_WUNLOCK(sc);
2371 		}
2372 		break;
2373 
2374 	case SIOCSIFCAP:
2375 		VXLAN_WLOCK(sc);
2376 		error = vxlan_set_reqcap(sc, ifp, ifr->ifr_reqcap);
2377 		if (error == 0)
2378 			vxlan_set_hwcaps(sc);
2379 		VXLAN_WUNLOCK(sc);
2380 		break;
2381 
2382 	case SIOCGTUNFIB:
2383 		VXLAN_RLOCK(sc, &tracker);
2384 		ifr->ifr_fib = sc->vxl_fibnum;
2385 		VXLAN_RUNLOCK(sc, &tracker);
2386 		break;
2387 
2388 	case SIOCSTUNFIB:
2389 		if ((error = priv_check(curthread, PRIV_NET_VXLAN)) != 0)
2390 			break;
2391 
2392 		if (ifr->ifr_fib >= rt_numfibs)
2393 			error = EINVAL;
2394 		else {
2395 			VXLAN_WLOCK(sc);
2396 			sc->vxl_fibnum = ifr->ifr_fib;
2397 			VXLAN_WUNLOCK(sc);
2398 		}
2399 		break;
2400 
2401 	default:
2402 		error = ether_ioctl(ifp, cmd, data);
2403 		break;
2404 	}
2405 
2406 	return (error);
2407 }
2408 
2409 #if defined(INET) || defined(INET6)
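/*
 * Choose the outer UDP source port: use the mbuf flow id when one is
 * available, otherwise hash the inner Ethernet header, and map the
 * result into the configured port range.
 */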
2410 static uint16_t
2411 vxlan_pick_source_port(struct vxlan_softc *sc, struct mbuf *m)
2412 {
2413 	int range;
2414 	uint32_t hash;
2415 
2416 	range = sc->vxl_max_port - sc->vxl_min_port + 1;
2417 
2418 	if (M_HASHTYPE_ISHASH(m))
2419 		hash = m->m_pkthdr.flowid;
2420 	else
2421 		hash = jenkins_hash(m->m_data, ETHER_HDR_LEN,
2422 		    sc->vxl_port_hash_key);
2423 
2424 	return (sc->vxl_min_port + (hash % range));
2425 }
2426 
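/*
 * Fill in the outer UDP and VXLAN headers at offset 'ipoff'.  The UDP
 * checksum is left at zero here; the IPv6 output path may compute one
 * later.
 */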
2427 static void
2428 vxlan_encap_header(struct vxlan_softc *sc, struct mbuf *m, int ipoff,
2429     uint16_t srcport, uint16_t dstport)
2430 {
2431 	struct vxlanudphdr *hdr;
2432 	struct udphdr *udph;
2433 	struct vxlan_header *vxh;
2434 	int len;
2435 
2436 	len = m->m_pkthdr.len - ipoff;
2437 	MPASS(len >= sizeof(struct vxlanudphdr));
2438 	hdr = mtodo(m, ipoff);
2439 
2440 	udph = &hdr->vxlh_udp;
2441 	udph->uh_sport = srcport;
2442 	udph->uh_dport = dstport;
2443 	udph->uh_ulen = htons(len);
2444 	udph->uh_sum = 0;
2445 
2446 	vxh = &hdr->vxlh_hdr;
2447 	vxh->vxlh_flags = htonl(VXLAN_HDR_FLAGS_VALID_VNI);
2448 	vxh->vxlh_vni = htonl(sc->vxl_vni << VXLAN_HDR_VNI_SHIFT);
2449 }
2450 #endif
2451 
2452 #if defined(INET6) || defined(INET)
2453 /*
2454  * Return the CSUM_INNER_* equivalent of CSUM_* caps.
2455  */
2456 static uint32_t
2457 csum_flags_to_inner_flags(uint32_t csum_flags_in, const uint32_t encap)
2458 {
2459 	uint32_t csum_flags = encap;
2460 	const uint32_t v4 = CSUM_IP | CSUM_IP_UDP | CSUM_IP_TCP;
2461 
2462 	/*
2463 	 * csum_flags can request either v4 or v6 offload but not both.
2464 	 * tcp_output always sets CSUM_TSO (both CSUM_IP_TSO and CSUM_IP6_TSO)
2465 	 * so those bits are no good to detect the IP version.  Other bits are
2466 	 * always set with CSUM_TSO and we use those to figure out the IP
2467 	 * version.
2468 	 */
2469 	if (csum_flags_in & v4) {
2470 		if (csum_flags_in & CSUM_IP)
2471 			csum_flags |= CSUM_INNER_IP;
2472 		if (csum_flags_in & CSUM_IP_UDP)
2473 			csum_flags |= CSUM_INNER_IP_UDP;
2474 		if (csum_flags_in & CSUM_IP_TCP)
2475 			csum_flags |= CSUM_INNER_IP_TCP;
2476 		if (csum_flags_in & CSUM_IP_TSO)
2477 			csum_flags |= CSUM_INNER_IP_TSO;
2478 	} else {
2479 #ifdef INVARIANTS
2480 		const uint32_t v6 = CSUM_IP6_UDP | CSUM_IP6_TCP;
2481 
2482 		MPASS((csum_flags_in & v6) != 0);
2483 #endif
2484 		if (csum_flags_in & CSUM_IP6_UDP)
2485 			csum_flags |= CSUM_INNER_IP6_UDP;
2486 		if (csum_flags_in & CSUM_IP6_TCP)
2487 			csum_flags |= CSUM_INNER_IP6_TCP;
2488 		if (csum_flags_in & CSUM_IP6_TSO)
2489 			csum_flags |= CSUM_INNER_IP6_TSO;
2490 	}
2491 
2492 	return (csum_flags);
2493 }
2494 #endif
2495 
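/*
 * Encapsulate the frame in outer IPv4/UDP/VXLAN headers and hand it to
 * ip_output().  If inner checksum or TSO offload was requested, verify
 * that the interface of the outbound route advertises the matching
 * CSUM_INNER_* capabilities before converting the flags.
 */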
2496 static int
2497 vxlan_encap4(struct vxlan_softc *sc, const union vxlan_sockaddr *fvxlsa,
2498     struct mbuf *m)
2499 {
2500 #ifdef INET
2501 	struct ifnet *ifp;
2502 	struct ip *ip;
2503 	struct in_addr srcaddr, dstaddr;
2504 	uint16_t srcport, dstport;
2505 	int plen, mcast, error;
2506 	struct route route, *ro;
2507 	struct sockaddr_in *sin;
2508 	uint32_t csum_flags;
2509 
2510 	NET_EPOCH_ASSERT();
2511 
2512 	ifp = sc->vxl_ifp;
2513 	srcaddr = sc->vxl_src_addr.in4.sin_addr;
2514 	srcport = vxlan_pick_source_port(sc, m);
2515 	dstaddr = fvxlsa->in4.sin_addr;
2516 	dstport = fvxlsa->in4.sin_port;
2517 
2518 	plen = m->m_pkthdr.len;
2519 	M_PREPEND(m, sizeof(struct ip) + sizeof(struct vxlanudphdr),
2520 	    M_NOWAIT);
2521 	if (m == NULL) {
2522 		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2523 		return (ENOBUFS);
2524 	}
2525 
2526 	ip = mtod(m, struct ip *);
2527 	ip->ip_tos = 0;
2528 	ip->ip_len = htons(m->m_pkthdr.len);
2529 	ip->ip_off = 0;
2530 	ip->ip_ttl = sc->vxl_ttl;
2531 	ip->ip_p = IPPROTO_UDP;
2532 	ip->ip_sum = 0;
2533 	ip->ip_src = srcaddr;
2534 	ip->ip_dst = dstaddr;
2535 
2536 	vxlan_encap_header(sc, m, sizeof(struct ip), srcport, dstport);
2537 
2538 	mcast = (m->m_flags & (M_MCAST | M_BCAST)) ? 1 : 0;
2539 	m->m_flags &= ~(M_MCAST | M_BCAST);
2540 
2541 	m->m_pkthdr.csum_flags &= CSUM_FLAGS_TX;
2542 	if (m->m_pkthdr.csum_flags != 0) {
2543 		/*
2544 		 * HW checksum (L3 and/or L4) or TSO has been requested.  Look
2545 		 * up the ifnet for the outbound route and verify that the
2546 		 * outbound ifnet can perform the requested operation on the
2547 		 * inner frame.
2548 		 */
2549 		bzero(&route, sizeof(route));
2550 		ro = &route;
2551 		sin = (struct sockaddr_in *)&ro->ro_dst;
2552 		sin->sin_family = AF_INET;
2553 		sin->sin_len = sizeof(*sin);
2554 		sin->sin_addr = ip->ip_dst;
2555 		ro->ro_nh = fib4_lookup(M_GETFIB(m), ip->ip_dst, 0, NHR_NONE,
2556 		    0);
2557 		if (ro->ro_nh == NULL) {
2558 			m_freem(m);
2559 			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2560 			return (EHOSTUNREACH);
2561 		}
2562 
2563 		csum_flags = csum_flags_to_inner_flags(m->m_pkthdr.csum_flags,
2564 		    CSUM_ENCAP_VXLAN);
2565 		if ((csum_flags & ro->ro_nh->nh_ifp->if_hwassist) !=
2566 		    csum_flags) {
2567 			if (ppsratecheck(&sc->err_time, &sc->err_pps, 1)) {
2568 				const struct ifnet *nh_ifp = ro->ro_nh->nh_ifp;
2569 
2570 				if_printf(ifp, "interface %s is missing hwcaps "
2571 				    "0x%08x, csum_flags 0x%08x -> 0x%08x, "
2572 				    "hwassist 0x%08x\n", nh_ifp->if_xname,
2573 				    csum_flags & ~(uint32_t)nh_ifp->if_hwassist,
2574 				    m->m_pkthdr.csum_flags, csum_flags,
2575 				    (uint32_t)nh_ifp->if_hwassist);
2576 			}
2577 			m_freem(m);
2578 			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2579 			return (ENXIO);
2580 		}
2581 		m->m_pkthdr.csum_flags = csum_flags;
2582 		if (csum_flags &
2583 		    (CSUM_INNER_IP | CSUM_INNER_IP_UDP | CSUM_INNER_IP6_UDP |
2584 		    CSUM_INNER_IP_TCP | CSUM_INNER_IP6_TCP)) {
2585 			counter_u64_add(sc->vxl_stats.txcsum, 1);
2586 			if (csum_flags & CSUM_INNER_TSO)
2587 				counter_u64_add(sc->vxl_stats.tso, 1);
2588 		}
2589 	} else
2590 		ro = NULL;
2591 	error = ip_output(m, NULL, ro, 0, sc->vxl_im4o, NULL);
2592 	if (error == 0) {
2593 		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
2594 		if_inc_counter(ifp, IFCOUNTER_OBYTES, plen);
2595 		if (mcast != 0)
2596 			if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
2597 	} else
2598 		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2599 
2600 	return (error);
2601 #else
2602 	m_freem(m);
2603 	return (ENOTSUP);
2604 #endif
2605 }
2606 
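/*
 * IPv6 counterpart of vxlan_encap4(): encapsulate in outer
 * IPv6/UDP/VXLAN headers and hand the packet to ip6_output().  When no
 * offload is requested, a UDP checksum is computed unless the
 * destination port is the configured zero-checksum port.
 */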
2607 static int
2608 vxlan_encap6(struct vxlan_softc *sc, const union vxlan_sockaddr *fvxlsa,
2609     struct mbuf *m)
2610 {
2611 #ifdef INET6
2612 	struct ifnet *ifp;
2613 	struct ip6_hdr *ip6;
2614 	const struct in6_addr *srcaddr, *dstaddr;
2615 	uint16_t srcport, dstport;
2616 	int plen, mcast, error;
2617 	struct route_in6 route, *ro;
2618 	struct sockaddr_in6 *sin6;
2619 	uint32_t csum_flags;
2620 
2621 	NET_EPOCH_ASSERT();
2622 
2623 	ifp = sc->vxl_ifp;
2624 	srcaddr = &sc->vxl_src_addr.in6.sin6_addr;
2625 	srcport = vxlan_pick_source_port(sc, m);
2626 	dstaddr = &fvxlsa->in6.sin6_addr;
2627 	dstport = fvxlsa->in6.sin6_port;
2628 
2629 	plen = m->m_pkthdr.len;
2630 	M_PREPEND(m, sizeof(struct ip6_hdr) + sizeof(struct vxlanudphdr),
2631 	    M_NOWAIT);
2632 	if (m == NULL) {
2633 		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2634 		return (ENOBUFS);
2635 	}
2636 
2637 	ip6 = mtod(m, struct ip6_hdr *);
2638 	ip6->ip6_flow = 0;		/* BMV: Keep in forwarding entry? */
2639 	ip6->ip6_vfc = IPV6_VERSION;
2640 	ip6->ip6_plen = 0;
2641 	ip6->ip6_nxt = IPPROTO_UDP;
2642 	ip6->ip6_hlim = sc->vxl_ttl;
2643 	ip6->ip6_src = *srcaddr;
2644 	ip6->ip6_dst = *dstaddr;
2645 
2646 	vxlan_encap_header(sc, m, sizeof(struct ip6_hdr), srcport, dstport);
2647 
2648 	mcast = (m->m_flags & (M_MCAST | M_BCAST)) ? 1 : 0;
2649 	m->m_flags &= ~(M_MCAST | M_BCAST);
2650 
2651 	ro = NULL;
2652 	m->m_pkthdr.csum_flags &= CSUM_FLAGS_TX;
2653 	if (m->m_pkthdr.csum_flags != 0) {
2654 		/*
2655 		 * HW checksum (L3 and/or L4) or TSO has been requested.  Look
2656 		 * up the ifnet for the outbound route and verify that the
2657 		 * outbound ifnet can perform the requested operation on the
2658 		 * inner frame.
2659 		 */
2660 		bzero(&route, sizeof(route));
2661 		ro = &route;
2662 		sin6 = (struct sockaddr_in6 *)&ro->ro_dst;
2663 		sin6->sin6_family = AF_INET6;
2664 		sin6->sin6_len = sizeof(*sin6);
2665 		sin6->sin6_addr = ip6->ip6_dst;
2666 		ro->ro_nh = fib6_lookup(M_GETFIB(m), &ip6->ip6_dst, 0,
2667 		    NHR_NONE, 0);
2668 		if (ro->ro_nh == NULL) {
2669 			m_freem(m);
2670 			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2671 			return (EHOSTUNREACH);
2672 		}
2673 
2674 		csum_flags = csum_flags_to_inner_flags(m->m_pkthdr.csum_flags,
2675 		    CSUM_ENCAP_VXLAN);
2676 		if ((csum_flags & ro->ro_nh->nh_ifp->if_hwassist) !=
2677 		    csum_flags) {
2678 			if (ppsratecheck(&sc->err_time, &sc->err_pps, 1)) {
2679 				const struct ifnet *nh_ifp = ro->ro_nh->nh_ifp;
2680 
2681 				if_printf(ifp, "interface %s is missing hwcaps "
2682 				    "0x%08x, csum_flags 0x%08x -> 0x%08x, "
2683 				    "hwassist 0x%08x\n", nh_ifp->if_xname,
2684 				    csum_flags & ~(uint32_t)nh_ifp->if_hwassist,
2685 				    m->m_pkthdr.csum_flags, csum_flags,
2686 				    (uint32_t)nh_ifp->if_hwassist);
2687 			}
2688 			m_freem(m);
2689 			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2690 			return (ENXIO);
2691 		}
2692 		m->m_pkthdr.csum_flags = csum_flags;
2693 		if (csum_flags &
2694 		    (CSUM_INNER_IP | CSUM_INNER_IP_UDP | CSUM_INNER_IP6_UDP |
2695 		    CSUM_INNER_IP_TCP | CSUM_INNER_IP6_TCP)) {
2696 			counter_u64_add(sc->vxl_stats.txcsum, 1);
2697 			if (csum_flags & CSUM_INNER_TSO)
2698 				counter_u64_add(sc->vxl_stats.tso, 1);
2699 		}
2700 	} else if (ntohs(dstport) != V_zero_checksum_port) {
2701 		struct udphdr *hdr = mtodo(m, sizeof(struct ip6_hdr));
2702 
2703 		hdr->uh_sum = in6_cksum_pseudo(ip6,
2704 		    m->m_pkthdr.len - sizeof(struct ip6_hdr), IPPROTO_UDP, 0);
2705 		m->m_pkthdr.csum_flags = CSUM_UDP_IPV6;
2706 		m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
2707 	}
2708 	error = ip6_output(m, NULL, ro, 0, sc->vxl_im6o, NULL, NULL);
2709 	if (error == 0) {
2710 		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
2711 		if_inc_counter(ifp, IFCOUNTER_OBYTES, plen);
2712 		if (mcast != 0)
2713 			if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
2714 	} else
2715 		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2716 
2717 	return (error);
2718 #else
2719 	m_freem(m);
2720 	return (ENOTSUP);
2721 #endif
2722 }
2723 
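/*
 * if_transmit method: look up the forwarding entry for the inner
 * destination MAC (falling back to the default entry), then
 * encapsulate over IPv4 or IPv6 depending on the remote address.
 */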
2724 static int
2725 vxlan_transmit(struct ifnet *ifp, struct mbuf *m)
2726 {
2727 	struct rm_priotracker tracker;
2728 	union vxlan_sockaddr vxlsa;
2729 	struct vxlan_softc *sc;
2730 	struct vxlan_ftable_entry *fe;
2731 	struct ifnet *mcifp;
2732 	struct ether_header *eh;
2733 	int ipv4, error;
2734 
2735 	sc = ifp->if_softc;
2736 	eh = mtod(m, struct ether_header *);
2737 	fe = NULL;
2738 	mcifp = NULL;
2739 
2740 	ETHER_BPF_MTAP(ifp, m);
2741 
2742 	VXLAN_RLOCK(sc, &tracker);
2743 	M_SETFIB(m, sc->vxl_fibnum);
2744 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2745 		VXLAN_RUNLOCK(sc, &tracker);
2746 		m_freem(m);
2747 		return (ENETDOWN);
2748 	}
2749 
2750 	if ((m->m_flags & (M_BCAST | M_MCAST)) == 0)
2751 		fe = vxlan_ftable_entry_lookup(sc, eh->ether_dhost);
2752 	if (fe == NULL)
2753 		fe = &sc->vxl_default_fe;
2754 	vxlan_sockaddr_copy(&vxlsa, &fe->vxlfe_raddr.sa);
2755 
2756 	ipv4 = VXLAN_SOCKADDR_IS_IPV4(&vxlsa) != 0;
2757 	if (vxlan_sockaddr_in_multicast(&vxlsa) != 0)
2758 		mcifp = vxlan_multicast_if_ref(sc, ipv4);
2759 
2760 	VXLAN_ACQUIRE(sc);
2761 	VXLAN_RUNLOCK(sc, &tracker);
2762 
2763 	if (ipv4 != 0)
2764 		error = vxlan_encap4(sc, &vxlsa, m);
2765 	else
2766 		error = vxlan_encap6(sc, &vxlsa, m);
2767 
2768 	vxlan_release(sc);
2769 	if (mcifp != NULL)
2770 		if_rele(mcifp);
2771 
2772 	return (error);
2773 }
2774 
2775 static void
2776 vxlan_qflush(struct ifnet *ifp __unused)
2777 {
2778 }
2779 
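/*
 * Receive callback for the tunneling UDP socket: validate the VXLAN
 * header, strip the encapsulation, and pass the inner frame to
 * vxlan_input().  The mbuf is always consumed here.
 */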
2780 static bool
2781 vxlan_rcv_udp_packet(struct mbuf *m, int offset, struct inpcb *inpcb,
2782     const struct sockaddr *srcsa, void *xvso)
2783 {
2784 	struct vxlan_socket *vso;
2785 	struct vxlan_header *vxh, vxlanhdr;
2786 	uint32_t vni;
2787 	int error __unused;
2788 
2789 	M_ASSERTPKTHDR(m);
2790 	vso = xvso;
2791 	offset += sizeof(struct udphdr);
2792 
2793 	if (m->m_pkthdr.len < offset + sizeof(struct vxlan_header))
2794 		goto out;
2795 
2796 	if (__predict_false(m->m_len < offset + sizeof(struct vxlan_header))) {
2797 		m_copydata(m, offset, sizeof(struct vxlan_header),
2798 		    (caddr_t) &vxlanhdr);
2799 		vxh = &vxlanhdr;
2800 	} else
2801 		vxh = mtodo(m, offset);
2802 
2803 	/*
2804 	 * Drop if there is a reserved bit set in either the flags or VNI
2805 	 * fields of the header. This goes against the specification, but
2806 	 * a bit set may indicate an unsupported new feature. This matches
2807 	 * the behavior of the Linux implementation.
2808 	 */
2809 	if (vxh->vxlh_flags != htonl(VXLAN_HDR_FLAGS_VALID_VNI) ||
2810 	    vxh->vxlh_vni & ~VXLAN_VNI_MASK)
2811 		goto out;
2812 
2813 	vni = ntohl(vxh->vxlh_vni) >> VXLAN_HDR_VNI_SHIFT;
2814 
2815 	/* Adjust to the start of the inner Ethernet frame. */
2816 	m_adj_decap(m, offset + sizeof(struct vxlan_header));
2817 
2818 	error = vxlan_input(vso, vni, &m, srcsa);
2819 	MPASS(error != 0 || m == NULL);
2820 
2821 out:
2822 	if (m != NULL)
2823 		m_freem(m);
2824 
2825 	return (true);
2826 }
2827 
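/*
 * Deliver a decapsulated frame: find the interface registered for this
 * VNI on the socket, optionally learn the sender, map any inner
 * checksum results to the ordinary CSUM_L3/L4 flags, and reinject the
 * frame through if_input().
 */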
2828 static int
2829 vxlan_input(struct vxlan_socket *vso, uint32_t vni, struct mbuf **m0,
2830     const struct sockaddr *sa)
2831 {
2832 	struct vxlan_softc *sc;
2833 	struct ifnet *ifp;
2834 	struct mbuf *m;
2835 	struct ether_header *eh;
2836 	int error;
2837 
2838 	m = *m0;
2839 
2840 	if (m->m_pkthdr.len < ETHER_HDR_LEN)
2841 		return (EINVAL);
2842 
2843 	sc = vxlan_socket_lookup_softc(vso, vni);
2844 	if (sc == NULL)
2845 		return (ENOENT);
2846 
2847 	ifp = sc->vxl_ifp;
2848 	if (m->m_len < ETHER_HDR_LEN &&
2849 	    (m = m_pullup(m, ETHER_HDR_LEN)) == NULL) {
2850 		*m0 = NULL;
2851 		error = ENOBUFS;
2852 		goto out;
2853 	}
2854 	eh = mtod(m, struct ether_header *);
2855 
2856 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2857 		error = ENETDOWN;
2858 		goto out;
2859 	} else if (ifp == m->m_pkthdr.rcvif) {
2860 		/* XXX Does not catch more complex loops. */
2861 		error = EDEADLK;
2862 		goto out;
2863 	}
2864 
2865 	if (sc->vxl_flags & VXLAN_FLAG_LEARN)
2866 		vxlan_ftable_learn(sc, sa, eh->ether_shost);
2867 
2868 	m_clrprotoflags(m);
2869 	m->m_pkthdr.rcvif = ifp;
2870 	M_SETFIB(m, ifp->if_fib);
2871 	if (((ifp->if_capenable & IFCAP_RXCSUM &&
2872 	    m->m_pkthdr.csum_flags & CSUM_INNER_L3_CALC) ||
2873 	    (ifp->if_capenable & IFCAP_RXCSUM_IPV6 &&
2874 	    !(m->m_pkthdr.csum_flags & CSUM_INNER_L3_CALC)))) {
2875 		uint32_t csum_flags = 0;
2876 
2877 		if (m->m_pkthdr.csum_flags & CSUM_INNER_L3_CALC)
2878 			csum_flags |= CSUM_L3_CALC;
2879 		if (m->m_pkthdr.csum_flags & CSUM_INNER_L3_VALID)
2880 			csum_flags |= CSUM_L3_VALID;
2881 		if (m->m_pkthdr.csum_flags & CSUM_INNER_L4_CALC)
2882 			csum_flags |= CSUM_L4_CALC;
2883 		if (m->m_pkthdr.csum_flags & CSUM_INNER_L4_VALID)
2884 			csum_flags |= CSUM_L4_VALID;
2885 		m->m_pkthdr.csum_flags = csum_flags;
2886 		counter_u64_add(sc->vxl_stats.rxcsum, 1);
2887 	} else {
2888 		/* clear everything */
2889 		m->m_pkthdr.csum_flags = 0;
2890 		m->m_pkthdr.csum_data = 0;
2891 	}
2892 
2893 	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
2894 	(*ifp->if_input)(ifp, m);
2895 	*m0 = NULL;
2896 	error = 0;
2897 
2898 out:
2899 	vxlan_release(sc);
2900 	return (error);
2901 }
2902 
2903 static int
2904 vxlan_stats_alloc(struct vxlan_softc *sc)
2905 {
2906 	struct vxlan_statistics *stats = &sc->vxl_stats;
2907 
2908 	stats->txcsum = counter_u64_alloc(M_WAITOK);
2909 	if (stats->txcsum == NULL)
2910 		goto failed;
2911 
2912 	stats->tso = counter_u64_alloc(M_WAITOK);
2913 	if (stats->tso == NULL)
2914 		goto failed;
2915 
2916 	stats->rxcsum = counter_u64_alloc(M_WAITOK);
2917 	if (stats->rxcsum == NULL)
2918 		goto failed;
2919 
2920 	return (0);
2921 failed:
2922 	vxlan_stats_free(sc);
2923 	return (ENOMEM);
2924 }
2925 
2926 static void
2927 vxlan_stats_free(struct vxlan_softc *sc)
2928 {
2929 	struct vxlan_statistics *stats = &sc->vxl_stats;
2930 
2931 	if (stats->txcsum != NULL) {
2932 		counter_u64_free(stats->txcsum);
2933 		stats->txcsum = NULL;
2934 	}
2935 	if (stats->tso != NULL) {
2936 		counter_u64_free(stats->tso);
2937 		stats->tso = NULL;
2938 	}
2939 	if (stats->rxcsum != NULL) {
2940 		counter_u64_free(stats->rxcsum);
2941 		stats->rxcsum = NULL;
2942 	}
2943 }
2944 
2945 static void
2946 vxlan_set_default_config(struct vxlan_softc *sc)
2947 {
2948 
2949 	sc->vxl_flags |= VXLAN_FLAG_LEARN;
2950 
2951 	sc->vxl_vni = VXLAN_VNI_MAX;
2952 	sc->vxl_ttl = IPDEFTTL;
2953 
2954 	if (!vxlan_tunable_int(sc, "legacy_port", vxlan_legacy_port)) {
2955 		sc->vxl_src_addr.in4.sin_port = htons(VXLAN_PORT);
2956 		sc->vxl_dst_addr.in4.sin_port = htons(VXLAN_PORT);
2957 	} else {
2958 		sc->vxl_src_addr.in4.sin_port = htons(VXLAN_LEGACY_PORT);
2959 		sc->vxl_dst_addr.in4.sin_port = htons(VXLAN_LEGACY_PORT);
2960 	}
2961 
2962 	sc->vxl_min_port = V_ipport_firstauto;
2963 	sc->vxl_max_port = V_ipport_lastauto;
2964 
2965 	sc->vxl_ftable_max = VXLAN_FTABLE_MAX;
2966 	sc->vxl_ftable_timeout = VXLAN_FTABLE_TIMEOUT;
2967 }
2968 
2969 static int
2970 vxlan_set_user_config(struct vxlan_softc *sc, struct ifvxlanparam *vxlp)
2971 {
2972 
2973 #ifndef INET
2974 	if (vxlp->vxlp_with & (VXLAN_PARAM_WITH_LOCAL_ADDR4 |
2975 	    VXLAN_PARAM_WITH_REMOTE_ADDR4))
2976 		return (EAFNOSUPPORT);
2977 #endif
2978 
2979 #ifndef INET6
2980 	if (vxlp->vxlp_with & (VXLAN_PARAM_WITH_LOCAL_ADDR6 |
2981 	    VXLAN_PARAM_WITH_REMOTE_ADDR6))
2982 		return (EAFNOSUPPORT);
2983 #else
2984 	if (vxlp->vxlp_with & VXLAN_PARAM_WITH_LOCAL_ADDR6) {
2985 		int error = vxlan_sockaddr_in6_embedscope(&vxlp->vxlp_local_sa);
2986 		if (error)
2987 			return (error);
2988 	}
2989 	if (vxlp->vxlp_with & VXLAN_PARAM_WITH_REMOTE_ADDR6) {
2990 		int error = vxlan_sockaddr_in6_embedscope(
2991 		   &vxlp->vxlp_remote_sa);
2992 		if (error)
2993 			return (error);
2994 	}
2995 #endif
2996 
2997 	if (vxlp->vxlp_with & VXLAN_PARAM_WITH_VNI) {
2998 		if (vxlan_check_vni(vxlp->vxlp_vni) == 0)
2999 			sc->vxl_vni = vxlp->vxlp_vni;
3000 	}
3001 
3002 	if (vxlp->vxlp_with & VXLAN_PARAM_WITH_LOCAL_ADDR4) {
3003 		sc->vxl_src_addr.in4.sin_len = sizeof(struct sockaddr_in);
3004 		sc->vxl_src_addr.in4.sin_family = AF_INET;
3005 		sc->vxl_src_addr.in4.sin_addr =
3006 		    vxlp->vxlp_local_sa.in4.sin_addr;
3007 	} else if (vxlp->vxlp_with & VXLAN_PARAM_WITH_LOCAL_ADDR6) {
3008 		sc->vxl_src_addr.in6.sin6_len = sizeof(struct sockaddr_in6);
3009 		sc->vxl_src_addr.in6.sin6_family = AF_INET6;
3010 		sc->vxl_src_addr.in6.sin6_addr =
3011 		    vxlp->vxlp_local_sa.in6.sin6_addr;
3012 	}
3013 
3014 	if (vxlp->vxlp_with & VXLAN_PARAM_WITH_REMOTE_ADDR4) {
3015 		sc->vxl_dst_addr.in4.sin_len = sizeof(struct sockaddr_in);
3016 		sc->vxl_dst_addr.in4.sin_family = AF_INET;
3017 		sc->vxl_dst_addr.in4.sin_addr =
3018 		    vxlp->vxlp_remote_sa.in4.sin_addr;
3019 	} else if (vxlp->vxlp_with & VXLAN_PARAM_WITH_REMOTE_ADDR6) {
3020 		sc->vxl_dst_addr.in6.sin6_len = sizeof(struct sockaddr_in6);
3021 		sc->vxl_dst_addr.in6.sin6_family = AF_INET6;
3022 		sc->vxl_dst_addr.in6.sin6_addr =
3023 		    vxlp->vxlp_remote_sa.in6.sin6_addr;
3024 	}
3025 
3026 	if (vxlp->vxlp_with & VXLAN_PARAM_WITH_LOCAL_PORT)
3027 		sc->vxl_src_addr.in4.sin_port = htons(vxlp->vxlp_local_port);
3028 	if (vxlp->vxlp_with & VXLAN_PARAM_WITH_REMOTE_PORT)
3029 		sc->vxl_dst_addr.in4.sin_port = htons(vxlp->vxlp_remote_port);
3030 
3031 	if (vxlp->vxlp_with & VXLAN_PARAM_WITH_PORT_RANGE) {
3032 		if (vxlp->vxlp_min_port <= vxlp->vxlp_max_port) {
3033 			sc->vxl_min_port = vxlp->vxlp_min_port;
3034 			sc->vxl_max_port = vxlp->vxlp_max_port;
3035 		}
3036 	}
3037 
3038 	if (vxlp->vxlp_with & VXLAN_PARAM_WITH_MULTICAST_IF)
3039 		strlcpy(sc->vxl_mc_ifname, vxlp->vxlp_mc_ifname, IFNAMSIZ);
3040 
3041 	if (vxlp->vxlp_with & VXLAN_PARAM_WITH_FTABLE_TIMEOUT) {
3042 		if (vxlan_check_ftable_timeout(vxlp->vxlp_ftable_timeout) == 0)
3043 			sc->vxl_ftable_timeout = vxlp->vxlp_ftable_timeout;
3044 	}
3045 
3046 	if (vxlp->vxlp_with & VXLAN_PARAM_WITH_FTABLE_MAX) {
3047 		if (vxlan_check_ftable_max(vxlp->vxlp_ftable_max) == 0)
3048 			sc->vxl_ftable_max = vxlp->vxlp_ftable_max;
3049 	}
3050 
3051 	if (vxlp->vxlp_with & VXLAN_PARAM_WITH_TTL) {
3052 		if (vxlan_check_ttl(vxlp->vxlp_ttl) == 0)
3053 			sc->vxl_ttl = vxlp->vxlp_ttl;
3054 	}
3055 
3056 	if (vxlp->vxlp_with & VXLAN_PARAM_WITH_LEARN) {
3057 		if (vxlp->vxlp_learn == 0)
3058 			sc->vxl_flags &= ~VXLAN_FLAG_LEARN;
3059 	}
3060 
3061 	return (0);
3062 }
3063 
3064 static int
3065 vxlan_set_reqcap(struct vxlan_softc *sc, struct ifnet *ifp, int reqcap)
3066 {
3067 	int mask = reqcap ^ ifp->if_capenable;
3068 
3069 	/* Disable TSO if tx checksums are disabled. */
3070 	if (mask & IFCAP_TXCSUM && !(reqcap & IFCAP_TXCSUM) &&
3071 	    reqcap & IFCAP_TSO4) {
3072 		reqcap &= ~IFCAP_TSO4;
3073 		if_printf(ifp, "tso4 disabled due to -txcsum.\n");
3074 	}
3075 	if (mask & IFCAP_TXCSUM_IPV6 && !(reqcap & IFCAP_TXCSUM_IPV6) &&
3076 	    reqcap & IFCAP_TSO6) {
3077 		reqcap &= ~IFCAP_TSO6;
3078 		if_printf(ifp, "tso6 disabled due to -txcsum6.\n");
3079 	}
3080 
3081 	/* Do not enable TSO if tx checksums are disabled. */
3082 	if (mask & IFCAP_TSO4 && reqcap & IFCAP_TSO4 &&
3083 	    !(reqcap & IFCAP_TXCSUM)) {
3084 		if_printf(ifp, "enable txcsum first.\n");
3085 		return (EAGAIN);
3086 	}
3087 	if (mask & IFCAP_TSO6 && reqcap & IFCAP_TSO6 &&
3088 	    !(reqcap & IFCAP_TXCSUM_IPV6)) {
3089 		if_printf(ifp, "enable txcsum6 first.\n");
3090 		return (EAGAIN);
3091 	}
3092 
3093 	sc->vxl_reqcap = reqcap;
3094 	return (0);
3095 }
3096 
3097 /*
3098  * A VXLAN interface inherits the capabilities of the vxlandev or the interface
3099  * hosting the vxlanlocal address.
3100  */
3101 static void
3102 vxlan_set_hwcaps(struct vxlan_softc *sc)
3103 {
3104 	struct epoch_tracker et;
3105 	struct ifnet *p;
3106 	struct ifaddr *ifa;
3107 	u_long hwa;
3108 	int cap, ena;
3109 	bool rel;
3110 	struct ifnet *ifp = sc->vxl_ifp;
3111 
3112 	/* reset caps */
3113 	ifp->if_capabilities &= VXLAN_BASIC_IFCAPS;
3114 	ifp->if_capenable &= VXLAN_BASIC_IFCAPS;
3115 	ifp->if_hwassist = 0;
3116 
3117 	NET_EPOCH_ENTER(et);
3118 	CURVNET_SET(ifp->if_vnet);
3119 
3120 	rel = false;
3121 	p = NULL;
3122 	if (sc->vxl_mc_ifname[0] != '\0') {
3123 		rel = true;
3124 		p = ifunit_ref(sc->vxl_mc_ifname);
3125 	} else if (vxlan_sockaddr_in_any(&sc->vxl_src_addr) == 0) {
3126 		if (sc->vxl_src_addr.sa.sa_family == AF_INET) {
3127 			struct sockaddr_in in4 = sc->vxl_src_addr.in4;
3128 
3129 			in4.sin_port = 0;
3130 			ifa = ifa_ifwithaddr((struct sockaddr *)&in4);
3131 			if (ifa != NULL)
3132 				p = ifa->ifa_ifp;
3133 		} else if (sc->vxl_src_addr.sa.sa_family == AF_INET6) {
3134 			struct sockaddr_in6 in6 = sc->vxl_src_addr.in6;
3135 
3136 			in6.sin6_port = 0;
3137 			ifa = ifa_ifwithaddr((struct sockaddr *)&in6);
3138 			if (ifa != NULL)
3139 				p = ifa->ifa_ifp;
3140 		}
3141 	}
3142 	if (p == NULL)
3143 		goto done;
3144 
3145 	cap = ena = hwa = 0;
3146 
3147 	/* checksum offload */
3148 	if (p->if_capabilities & IFCAP_VXLAN_HWCSUM)
3149 		cap |= p->if_capabilities & (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6);
3150 	if (p->if_capenable & IFCAP_VXLAN_HWCSUM) {
3151 		ena |= sc->vxl_reqcap & p->if_capenable &
3152 		    (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6);
3153 		if (ena & IFCAP_TXCSUM) {
3154 			if (p->if_hwassist & CSUM_INNER_IP)
3155 				hwa |= CSUM_IP;
3156 			if (p->if_hwassist & CSUM_INNER_IP_UDP)
3157 				hwa |= CSUM_IP_UDP;
3158 			if (p->if_hwassist & CSUM_INNER_IP_TCP)
3159 				hwa |= CSUM_IP_TCP;
3160 		}
3161 		if (ena & IFCAP_TXCSUM_IPV6) {
3162 			if (p->if_hwassist & CSUM_INNER_IP6_UDP)
3163 				hwa |= CSUM_IP6_UDP;
3164 			if (p->if_hwassist & CSUM_INNER_IP6_TCP)
3165 				hwa |= CSUM_IP6_TCP;
3166 		}
3167 	}
3168 
3169 	/* hardware TSO */
3170 	if (p->if_capabilities & IFCAP_VXLAN_HWTSO) {
3171 		cap |= p->if_capabilities & IFCAP_TSO;
3172 		if (p->if_hw_tsomax > IP_MAXPACKET - ifp->if_hdrlen)
3173 			ifp->if_hw_tsomax = IP_MAXPACKET - ifp->if_hdrlen;
3174 		else
3175 			ifp->if_hw_tsomax = p->if_hw_tsomax;
3176 		/* XXX: tsomaxsegcount decrement is cxgbe specific  */
3177 		/* XXX: tsomaxsegcount decrement is cxgbe specific */
3178 		ifp->if_hw_tsomaxsegsize = p->if_hw_tsomaxsegsize;
3179 	}
3180 	if (p->if_capenable & IFCAP_VXLAN_HWTSO) {
3181 		ena |= sc->vxl_reqcap & p->if_capenable & IFCAP_TSO;
3182 		if (ena & IFCAP_TSO) {
3183 			if (p->if_hwassist & CSUM_INNER_IP_TSO)
3184 				hwa |= CSUM_IP_TSO;
3185 			if (p->if_hwassist & CSUM_INNER_IP6_TSO)
3186 				hwa |= CSUM_IP6_TSO;
3187 		}
3188 	}
3189 
3190 	ifp->if_capabilities |= cap;
3191 	ifp->if_capenable |= ena;
3192 	ifp->if_hwassist |= hwa;
3193 	if (rel)
3194 		if_rele(p);
3195 done:
3196 	CURVNET_RESTORE();
3197 	NET_EPOCH_EXIT(et);
3198 }
3199 
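/*
 * Clone handler: allocate and initialize a new vxlan softc and ifnet,
 * apply any creation-time parameters, and attach it as an Ethernet
 * interface.
 */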
3200 static int
3201 vxlan_clone_create(struct if_clone *ifc, char *name, size_t len,
3202     struct ifc_data *ifd, struct ifnet **ifpp)
3203 {
3204 	struct vxlan_softc *sc;
3205 	struct ifnet *ifp;
3206 	struct ifvxlanparam vxlp;
3207 	int error;
3208 
3209 	sc = malloc(sizeof(struct vxlan_softc), M_VXLAN, M_WAITOK | M_ZERO);
3210 	sc->vxl_unit = ifd->unit;
3211 	sc->vxl_fibnum = curthread->td_proc->p_fibnum;
3212 	vxlan_set_default_config(sc);
3213 	error = vxlan_stats_alloc(sc);
3214 	if (error != 0)
3215 		goto fail;
3216 
3217 	if (ifd->params != NULL) {
3218 		error = ifc_copyin(ifd, &vxlp, sizeof(vxlp));
3219 		if (error)
3220 			goto fail;
3221 
3222 		error = vxlan_set_user_config(sc, &vxlp);
3223 		if (error)
3224 			goto fail;
3225 	}
3226 
3227 	ifp = if_alloc(IFT_ETHER);
3228 	if (ifp == NULL) {
3229 		error = ENOSPC;
3230 		goto fail;
3231 	}
3232 
3233 	sc->vxl_ifp = ifp;
3234 	rm_init(&sc->vxl_lock, "vxlanrm");
3235 	callout_init_rw(&sc->vxl_callout, &sc->vxl_lock, 0);
3236 	sc->vxl_port_hash_key = arc4random();
3237 	vxlan_ftable_init(sc);
3238 
3239 	vxlan_sysctl_setup(sc);
3240 
3241 	ifp->if_softc = sc;
3242 	if_initname(ifp, vxlan_name, ifd->unit);
3243 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
3244 	ifp->if_init = vxlan_init;
3245 	ifp->if_ioctl = vxlan_ioctl;
3246 	ifp->if_transmit = vxlan_transmit;
3247 	ifp->if_qflush = vxlan_qflush;
3248 	ifp->if_capabilities = VXLAN_BASIC_IFCAPS;
3249 	ifp->if_capenable = VXLAN_BASIC_IFCAPS;
3250 	sc->vxl_reqcap = -1;
3251 	vxlan_set_hwcaps(sc);
3252 
3253 	ifmedia_init(&sc->vxl_media, 0, vxlan_media_change, vxlan_media_status);
3254 	ifmedia_add(&sc->vxl_media, IFM_ETHER | IFM_AUTO, 0, NULL);
3255 	ifmedia_set(&sc->vxl_media, IFM_ETHER | IFM_AUTO);
3256 
3257 	ether_gen_addr(ifp, &sc->vxl_hwaddr);
3258 	ether_ifattach(ifp, sc->vxl_hwaddr.octet);
3259 
3260 	ifp->if_baudrate = 0;
3261 
3262 	VXLAN_WLOCK(sc);
3263 	vxlan_setup_interface_hdrlen(sc);
3264 	VXLAN_WUNLOCK(sc);
3265 	*ifpp = ifp;
3266 
3267 	return (0);
3268 
3269 fail:
3270 	free(sc, M_VXLAN);
3271 	return (error);
3272 }
3273 
3274 static int
3275 vxlan_clone_destroy(struct if_clone *ifc, struct ifnet *ifp, uint32_t flags)
3276 {
3277 	struct vxlan_softc *sc;
3278 
3279 	sc = ifp->if_softc;
3280 
3281 	vxlan_teardown(sc);
3282 
3283 	vxlan_ftable_flush(sc, 1);
3284 
3285 	ether_ifdetach(ifp);
3286 	if_free(ifp);
3287 	ifmedia_removeall(&sc->vxl_media);
3288 
3289 	vxlan_ftable_fini(sc);
3290 
3291 	vxlan_sysctl_destroy(sc);
3292 	rm_destroy(&sc->vxl_lock);
3293 	vxlan_stats_free(sc);
3294 	free(sc, M_VXLAN);
3295 
3296 	return (0);
3297 }
3298 
3299 /* BMV: Taken from if_bridge. */
3300 static uint32_t
3301 vxlan_mac_hash(struct vxlan_softc *sc, const uint8_t *addr)
3302 {
3303 	uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->vxl_ftable_hash_key;
3304 
3305 	b += addr[5] << 8;
3306 	b += addr[4];
3307 	a += addr[3] << 24;
3308 	a += addr[2] << 16;
3309 	a += addr[1] << 8;
3310 	a += addr[0];
3311 
3312 /*
3313  * The following hash function is adapted from "Hash Functions" by Bob Jenkins
3314  * ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
3315  */
3316 #define	mix(a, b, c)							\
3317 do {									\
3318 	a -= b; a -= c; a ^= (c >> 13);					\
3319 	b -= c; b -= a; b ^= (a << 8);					\
3320 	c -= a; c -= b; c ^= (b >> 13);					\
3321 	a -= b; a -= c; a ^= (c >> 12);					\
3322 	b -= c; b -= a; b ^= (a << 16);					\
3323 	c -= a; c -= b; c ^= (b >> 5);					\
3324 	a -= b; a -= c; a ^= (c >> 3);					\
3325 	b -= c; b -= a; b ^= (a << 10);					\
3326 	c -= a; c -= b; c ^= (b >> 15);					\
3327 } while (0)
3328 
3329 	mix(a, b, c);
3330 
3331 #undef mix
3332 
3333 	return (c);
3334 }
3335 
3336 static int
3337 vxlan_media_change(struct ifnet *ifp)
3338 {
3339 
3340 	/* Ignore. */
3341 	return (0);
3342 }
3343 
3344 static void
3345 vxlan_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
3346 {
3347 
3348 	ifmr->ifm_status = IFM_ACTIVE | IFM_AVALID;
3349 	ifmr->ifm_active = IFM_ETHER | IFM_FDX;
3350 }
3351 
3352 static int
3353 vxlan_sockaddr_cmp(const union vxlan_sockaddr *vxladdr,
3354     const struct sockaddr *sa)
3355 {
3356 
3357 	return (bcmp(&vxladdr->sa, sa, vxladdr->sa.sa_len));
3358 }
3359 
3360 static void
3361 vxlan_sockaddr_copy(union vxlan_sockaddr *vxladdr,
3362     const struct sockaddr *sa)
3363 {
3364 
3365 	MPASS(sa->sa_family == AF_INET || sa->sa_family == AF_INET6);
3366 	bzero(vxladdr, sizeof(*vxladdr));
3367 
3368 	if (sa->sa_family == AF_INET) {
3369 		vxladdr->in4 = *satoconstsin(sa);
3370 		vxladdr->in4.sin_len = sizeof(struct sockaddr_in);
3371 	} else if (sa->sa_family == AF_INET6) {
3372 		vxladdr->in6 = *satoconstsin6(sa);
3373 		vxladdr->in6.sin6_len = sizeof(struct sockaddr_in6);
3374 	}
3375 }
3376 
3377 static int
3378 vxlan_sockaddr_in_equal(const union vxlan_sockaddr *vxladdr,
3379     const struct sockaddr *sa)
3380 {
3381 	int equal;
3382 
3383 	if (sa->sa_family == AF_INET) {
3384 		const struct in_addr *in4 = &satoconstsin(sa)->sin_addr;
3385 		equal = in4->s_addr == vxladdr->in4.sin_addr.s_addr;
3386 	} else if (sa->sa_family == AF_INET6) {
3387 		const struct in6_addr *in6 = &satoconstsin6(sa)->sin6_addr;
3388 		equal = IN6_ARE_ADDR_EQUAL(in6, &vxladdr->in6.sin6_addr);
3389 	} else
3390 		equal = 0;
3391 
3392 	return (equal);
3393 }
3394 
3395 static void
3396 vxlan_sockaddr_in_copy(union vxlan_sockaddr *vxladdr,
3397     const struct sockaddr *sa)
3398 {
3399 
3400 	MPASS(sa->sa_family == AF_INET || sa->sa_family == AF_INET6);
3401 
3402 	if (sa->sa_family == AF_INET) {
3403 		const struct in_addr *in4 = &satoconstsin(sa)->sin_addr;
3404 		vxladdr->in4.sin_family = AF_INET;
3405 		vxladdr->in4.sin_len = sizeof(struct sockaddr_in);
3406 		vxladdr->in4.sin_addr = *in4;
3407 	} else if (sa->sa_family == AF_INET6) {
3408 		const struct in6_addr *in6 = &satoconstsin6(sa)->sin6_addr;
3409 		vxladdr->in6.sin6_family = AF_INET6;
3410 		vxladdr->in6.sin6_len = sizeof(struct sockaddr_in6);
3411 		vxladdr->in6.sin6_addr = *in6;
3412 	}
3413 }
3414 
3415 static int
3416 vxlan_sockaddr_supported(const union vxlan_sockaddr *vxladdr, int unspec)
3417 {
3418 	const struct sockaddr *sa;
3419 	int supported;
3420 
3421 	sa = &vxladdr->sa;
3422 	supported = 0;
3423 
3424 	if (sa->sa_family == AF_UNSPEC && unspec != 0) {
3425 		supported = 1;
3426 	} else if (sa->sa_family == AF_INET) {
3427 #ifdef INET
3428 		supported = 1;
3429 #endif
3430 	} else if (sa->sa_family == AF_INET6) {
3431 #ifdef INET6
3432 		supported = 1;
3433 #endif
3434 	}
3435 
3436 	return (supported);
3437 }
3438 
3439 static int
3440 vxlan_sockaddr_in_any(const union vxlan_sockaddr *vxladdr)
3441 {
3442 	const struct sockaddr *sa;
3443 	int any;
3444 
3445 	sa = &vxladdr->sa;
3446 
3447 	if (sa->sa_family == AF_INET) {
3448 		const struct in_addr *in4 = &satoconstsin(sa)->sin_addr;
3449 		any = in4->s_addr == INADDR_ANY;
3450 	} else if (sa->sa_family == AF_INET6) {
3451 		const struct in6_addr *in6 = &satoconstsin6(sa)->sin6_addr;
3452 		any = IN6_IS_ADDR_UNSPECIFIED(in6);
3453 	} else
3454 		any = -1;
3455 
3456 	return (any);
3457 }
3458 
3459 static int
3460 vxlan_sockaddr_in_multicast(const union vxlan_sockaddr *vxladdr)
3461 {
3462 	const struct sockaddr *sa;
3463 	int mc;
3464 
3465 	sa = &vxladdr->sa;
3466 
3467 	if (sa->sa_family == AF_INET) {
3468 		const struct in_addr *in4 = &satoconstsin(sa)->sin_addr;
3469 		mc = IN_MULTICAST(ntohl(in4->s_addr));
3470 	} else if (sa->sa_family == AF_INET6) {
3471 		const struct in6_addr *in6 = &satoconstsin6(sa)->sin6_addr;
3472 		mc = IN6_IS_ADDR_MULTICAST(in6);
3473 	} else
3474 		mc = -1;
3475 
3476 	return (mc);
3477 }
3478 
3479 static int
3480 vxlan_sockaddr_in6_embedscope(union vxlan_sockaddr *vxladdr)
3481 {
3482 	int error;
3483 
3484 	MPASS(VXLAN_SOCKADDR_IS_IPV6(vxladdr));
3485 #ifdef INET6
3486 	error = sa6_embedscope(&vxladdr->in6, V_ip6_use_defzone);
3487 #else
3488 	error = EAFNOSUPPORT;
3489 #endif
3490 
3491 	return (error);
3492 }
3493 
3494 static int
3495 vxlan_can_change_config(struct vxlan_softc *sc)
3496 {
3497 	struct ifnet *ifp;
3498 
3499 	ifp = sc->vxl_ifp;
3500 	VXLAN_LOCK_ASSERT(sc);
3501 
3502 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3503 		return (0);
3504 	if (sc->vxl_flags & (VXLAN_FLAG_INIT | VXLAN_FLAG_TEARDOWN))
3505 		return (0);
3506 
3507 	return (1);
3508 }
3509 
3510 static int
3511 vxlan_check_vni(uint32_t vni)
3512 {
3513 
3514 	return (vni >= VXLAN_VNI_MAX);
3515 }
3516 
3517 static int
3518 vxlan_check_ttl(int ttl)
3519 {
3520 
3521 	return (ttl > MAXTTL);
3522 }
3523 
3524 static int
3525 vxlan_check_ftable_timeout(uint32_t timeout)
3526 {
3527 
3528 	return (timeout > VXLAN_FTABLE_MAX_TIMEOUT);
3529 }
3530 
3531 static int
3532 vxlan_check_ftable_max(uint32_t max)
3533 {
3534 
3535 	return (max > VXLAN_FTABLE_MAX);
3536 }
3537 
3538 static void
3539 vxlan_sysctl_setup(struct vxlan_softc *sc)
3540 {
3541 	struct sysctl_ctx_list *ctx;
3542 	struct sysctl_oid *node;
3543 	struct vxlan_statistics *stats;
3544 	char namebuf[8];
3545 
3546 	ctx = &sc->vxl_sysctl_ctx;
3547 	stats = &sc->vxl_stats;
3548 	snprintf(namebuf, sizeof(namebuf), "%d", sc->vxl_unit);
3549 
3550 	sysctl_ctx_init(ctx);
3551 	sc->vxl_sysctl_node = SYSCTL_ADD_NODE(ctx,
3552 	    SYSCTL_STATIC_CHILDREN(_net_link_vxlan), OID_AUTO, namebuf,
3553 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");
3554 
3555 	node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(sc->vxl_sysctl_node),
3556 	    OID_AUTO, "ftable", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");
3557 	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(node), OID_AUTO, "count",
3558 	    CTLFLAG_RD, &sc->vxl_ftable_cnt, 0,
3559 	    "Number of entries in forwarding table");
3560 	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(node), OID_AUTO, "max",
3561 	     CTLFLAG_RD, &sc->vxl_ftable_max, 0,
3562 	    "Maximum number of entries allowed in forwarding table");
3563 	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(node), OID_AUTO, "timeout",
3564 	    CTLFLAG_RD, &sc->vxl_ftable_timeout, 0,
3565 	    "Number of seconds between prunes of the forwarding table");
3566 	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(node), OID_AUTO, "dump",
3567 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE | CTLFLAG_SKIP,
3568 	    sc, 0, vxlan_ftable_sysctl_dump, "A",
3569 	    "Dump the forwarding table entries");
3570 
3571 	node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(sc->vxl_sysctl_node),
3572 	    OID_AUTO, "stats", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");
3573 	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(node), OID_AUTO,
3574 	    "ftable_nospace", CTLFLAG_RD, &stats->ftable_nospace, 0,
3575 	    "Fowarding table reached maximum entries");
3576 	    "Forwarding table reached maximum entries");
3577 	    "ftable_lock_upgrade_failed", CTLFLAG_RD,
3578 	    &stats->ftable_lock_upgrade_failed, 0,
3579 	    "Forwarding table update required lock upgrade");
3580 
3581 	SYSCTL_ADD_COUNTER_U64(ctx, SYSCTL_CHILDREN(node), OID_AUTO, "txcsum",
3582 	    CTLFLAG_RD, &stats->txcsum,
3583 	    "# of times hardware assisted with tx checksum");
3584 	SYSCTL_ADD_COUNTER_U64(ctx, SYSCTL_CHILDREN(node), OID_AUTO, "tso",
3585 	    CTLFLAG_RD, &stats->tso, "# of times hardware assisted with TSO");
3586 	SYSCTL_ADD_COUNTER_U64(ctx, SYSCTL_CHILDREN(node), OID_AUTO, "rxcsum",
3587 	    CTLFLAG_RD, &stats->rxcsum,
3588 	    "# of times hardware assisted with rx checksum");
3589 }
3590 
3591 static void
3592 vxlan_sysctl_destroy(struct vxlan_softc *sc)
3593 {
3594 
3595 	sysctl_ctx_free(&sc->vxl_sysctl_ctx);
3596 	sc->vxl_sysctl_node = NULL;
3597 }
3598 
3599 static int
3600 vxlan_tunable_int(struct vxlan_softc *sc, const char *knob, int def)
3601 {
3602 	char path[64];
3603 
3604 	snprintf(path, sizeof(path), "net.link.vxlan.%d.%s",
3605 	    sc->vxl_unit, knob);
3606 	TUNABLE_INT_FETCH(path, &def);
3607 
3608 	return (def);
3609 }
3610 
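/*
 * ifnet departure handler: collect every vxlan interface whose
 * multicast interface is going away and tear it down.
 */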
3611 static void
3612 vxlan_ifdetach_event(void *arg __unused, struct ifnet *ifp)
3613 {
3614 	struct vxlan_softc_head list;
3615 	struct vxlan_socket *vso;
3616 	struct vxlan_softc *sc, *tsc;
3617 
3618 	LIST_INIT(&list);
3619 
3620 	if (ifp->if_flags & IFF_RENAMING)
3621 		return;
3622 	if ((ifp->if_flags & IFF_MULTICAST) == 0)
3623 		return;
3624 
3625 	VXLAN_LIST_LOCK();
3626 	LIST_FOREACH(vso, &vxlan_socket_list, vxlso_entry)
3627 		vxlan_socket_ifdetach(vso, ifp, &list);
3628 	VXLAN_LIST_UNLOCK();
3629 
3630 	LIST_FOREACH_SAFE(sc, &list, vxl_ifdetach_list, tsc) {
3631 		LIST_REMOVE(sc, vxl_ifdetach_list);
3632 
3633 		sx_xlock(&vxlan_sx);
3634 		VXLAN_WLOCK(sc);
3635 		if (sc->vxl_flags & VXLAN_FLAG_INIT)
3636 			vxlan_init_wait(sc);
3637 		vxlan_teardown_locked(sc);
3638 		sx_xunlock(&vxlan_sx);
3639 	}
3640 }
3641 
3642 static void
3643 vxlan_load(void)
3644 {
3645 
3646 	mtx_init(&vxlan_list_mtx, "vxlan list", NULL, MTX_DEF);
3647 	LIST_INIT(&vxlan_socket_list);
3648 	vxlan_ifdetach_event_tag = EVENTHANDLER_REGISTER(ifnet_departure_event,
3649 	    vxlan_ifdetach_event, NULL, EVENTHANDLER_PRI_ANY);
3650 
3651 	struct if_clone_addreq req = {
3652 		.create_f = vxlan_clone_create,
3653 		.destroy_f = vxlan_clone_destroy,
3654 		.flags = IFC_F_AUTOUNIT,
3655 	};
3656 	vxlan_cloner = ifc_attach_cloner(vxlan_name, &req);
3657 }
3658 
3659 static void
3660 vxlan_unload(void)
3661 {
3662 
3663 	EVENTHANDLER_DEREGISTER(ifnet_departure_event,
3664 	    vxlan_ifdetach_event_tag);
3665 	ifc_detach_cloner(vxlan_cloner);
3666 	mtx_destroy(&vxlan_list_mtx);
3667 	MPASS(LIST_EMPTY(&vxlan_socket_list));
3668 }
3669 
3670 static int
3671 vxlan_modevent(module_t mod, int type, void *unused)
3672 {
3673 	int error;
3674 
3675 	error = 0;
3676 
3677 	switch (type) {
3678 	case MOD_LOAD:
3679 		vxlan_load();
3680 		break;
3681 	case MOD_UNLOAD:
3682 		vxlan_unload();
3683 		break;
3684 	default:
3685 		error = ENOTSUP;
3686 		break;
3687 	}
3688 
3689 	return (error);
3690 }
3691 
3692 static moduledata_t vxlan_mod = {
3693 	"if_vxlan",
3694 	vxlan_modevent,
3695 	0
3696 };
3697 
3698 DECLARE_MODULE(if_vxlan, vxlan_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
3699 MODULE_VERSION(if_vxlan, 1);
3700