xref: /freebsd/sys/net/if_lagg.c (revision 64c2a712d661db9be31f02fe97c3b59710290ae3)
1 /*	$OpenBSD: if_trunk.c,v 1.30 2007/01/31 06:20:19 reyk Exp $	*/
2 
3 /*
4  * Copyright (c) 2005, 2006 Reyk Floeter <reyk@openbsd.org>
5  * Copyright (c) 2007 Andrew Thompson <thompsa@FreeBSD.org>
6  * Copyright (c) 2014, 2016 Marcelo Araujo <araujo@FreeBSD.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 #include <sys/cdefs.h>
22 __FBSDID("$FreeBSD$");
23 
24 #include "opt_inet.h"
25 #include "opt_inet6.h"
26 #include "opt_kern_tls.h"
27 #include "opt_ratelimit.h"
28 
29 #include <sys/param.h>
30 #include <sys/kernel.h>
31 #include <sys/malloc.h>
32 #include <sys/mbuf.h>
33 #include <sys/queue.h>
34 #include <sys/socket.h>
35 #include <sys/sockio.h>
36 #include <sys/sysctl.h>
37 #include <sys/module.h>
38 #include <sys/priv.h>
39 #include <sys/systm.h>
40 #include <sys/proc.h>
41 #include <sys/lock.h>
42 #include <sys/rmlock.h>
43 #include <sys/sx.h>
44 #include <sys/taskqueue.h>
45 #include <sys/eventhandler.h>
46 
47 #include <net/ethernet.h>
48 #include <net/if.h>
49 #include <net/if_clone.h>
50 #include <net/if_arp.h>
51 #include <net/if_dl.h>
52 #include <net/if_media.h>
53 #include <net/if_types.h>
54 #include <net/if_var.h>
55 #include <net/if_private.h>
56 #include <net/bpf.h>
57 #include <net/route.h>
58 #include <net/vnet.h>
59 #include <net/infiniband.h>
60 
61 #if defined(INET) || defined(INET6)
62 #include <netinet/in.h>
63 #include <netinet/ip.h>
64 #endif
65 #ifdef INET
66 #include <netinet/in_systm.h>
67 #include <netinet/if_ether.h>
68 #endif
69 
70 #ifdef INET6
71 #include <netinet/ip6.h>
72 #include <netinet6/in6_var.h>
73 #include <netinet6/in6_ifattach.h>
74 #endif
75 
76 #include <net/if_vlan_var.h>
77 #include <net/if_lagg.h>
78 #include <net/ieee8023ad_lacp.h>
79 
80 #ifdef DEV_NETMAP
81 MODULE_DEPEND(if_lagg, netmap, 1, 1, 1);
82 #endif
83 
84 #define	LAGG_SX_INIT(_sc)	sx_init(&(_sc)->sc_sx, "if_lagg sx")
85 #define	LAGG_SX_DESTROY(_sc)	sx_destroy(&(_sc)->sc_sx)
86 #define	LAGG_XLOCK(_sc)		sx_xlock(&(_sc)->sc_sx)
87 #define	LAGG_XUNLOCK(_sc)	sx_xunlock(&(_sc)->sc_sx)
88 #define	LAGG_SXLOCK_ASSERT(_sc)	sx_assert(&(_sc)->sc_sx, SA_LOCKED)
89 #define	LAGG_XLOCK_ASSERT(_sc)	sx_assert(&(_sc)->sc_sx, SA_XLOCKED)
90 
91 /* Special flags we should propagate to the lagg ports. */
92 static struct {
93 	int flag;
94 	int (*func)(struct ifnet *, int);
95 } lagg_pflags[] = {
96 	{IFF_PROMISC, ifpromisc},
97 	{IFF_ALLMULTI, if_allmulti},
98 	{0, NULL}
99 };
100 
101 struct lagg_snd_tag {
102 	struct m_snd_tag com;
103 	struct m_snd_tag *tag;
104 };
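
/*
 * Illustrative sketch (not part of the driver logic): the wrapper above
 * embeds the generic tag as its "com" member, so the containing
 * structure can be recovered with __containerof(), e.g.:
 *
 *	struct lagg_snd_tag *lst = __containerof(mst, struct lagg_snd_tag, com);
 *
 * mst_to_lst() below does exactly this.
 */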
105 
106 VNET_DEFINE(SLIST_HEAD(__trhead, lagg_softc), lagg_list); /* list of laggs */
107 #define	V_lagg_list	VNET(lagg_list)
108 VNET_DEFINE_STATIC(struct mtx, lagg_list_mtx);
109 #define	V_lagg_list_mtx	VNET(lagg_list_mtx)
110 #define	LAGG_LIST_LOCK_INIT(x)		mtx_init(&V_lagg_list_mtx, \
111 					"if_lagg list", NULL, MTX_DEF)
112 #define	LAGG_LIST_LOCK_DESTROY(x)	mtx_destroy(&V_lagg_list_mtx)
113 #define	LAGG_LIST_LOCK(x)		mtx_lock(&V_lagg_list_mtx)
114 #define	LAGG_LIST_UNLOCK(x)		mtx_unlock(&V_lagg_list_mtx)
115 eventhandler_tag	lagg_detach_cookie = NULL;
116 
117 static int	lagg_clone_create(struct if_clone *, char *, size_t,
118 		    struct ifc_data *, struct ifnet **);
119 static int	lagg_clone_destroy(struct if_clone *, struct ifnet *, uint32_t);
120 VNET_DEFINE_STATIC(struct if_clone *, lagg_cloner);
121 #define	V_lagg_cloner	VNET(lagg_cloner)
122 static const char laggname[] = "lagg";
123 static MALLOC_DEFINE(M_LAGG, laggname, "802.3AD Link Aggregation Interface");
124 
125 static void	lagg_capabilities(struct lagg_softc *);
126 static int	lagg_port_create(struct lagg_softc *, struct ifnet *);
127 static int	lagg_port_destroy(struct lagg_port *, int);
128 static struct mbuf *lagg_input_ethernet(struct ifnet *, struct mbuf *);
129 static struct mbuf *lagg_input_infiniband(struct ifnet *, struct mbuf *);
130 static void	lagg_linkstate(struct lagg_softc *);
131 static void	lagg_port_state(struct ifnet *, int);
132 static int	lagg_port_ioctl(struct ifnet *, u_long, caddr_t);
133 static int	lagg_port_output(struct ifnet *, struct mbuf *,
134 		    const struct sockaddr *, struct route *);
135 static void	lagg_port_ifdetach(void *arg __unused, struct ifnet *);
136 #ifdef LAGG_PORT_STACKING
137 static int	lagg_port_checkstacking(struct lagg_softc *);
138 #endif
139 static void	lagg_port2req(struct lagg_port *, struct lagg_reqport *);
140 static void	lagg_init(void *);
141 static void	lagg_stop(struct lagg_softc *);
142 static int	lagg_ioctl(struct ifnet *, u_long, caddr_t);
143 #if defined(KERN_TLS) || defined(RATELIMIT)
144 static int	lagg_snd_tag_alloc(struct ifnet *,
145 		    union if_snd_tag_alloc_params *,
146 		    struct m_snd_tag **);
147 static int	lagg_snd_tag_modify(struct m_snd_tag *,
148 		    union if_snd_tag_modify_params *);
149 static int	lagg_snd_tag_query(struct m_snd_tag *,
150 		    union if_snd_tag_query_params *);
151 static void	lagg_snd_tag_free(struct m_snd_tag *);
152 static struct m_snd_tag *lagg_next_snd_tag(struct m_snd_tag *);
153 static void     lagg_ratelimit_query(struct ifnet *,
154 		    struct if_ratelimit_query_results *);
155 #endif
156 static int	lagg_setmulti(struct lagg_port *);
157 static int	lagg_clrmulti(struct lagg_port *);
158 static	void	lagg_setcaps(struct lagg_port *, int cap, int cap2);
159 static	int	lagg_setflag(struct lagg_port *, int, int,
160 		    int (*func)(struct ifnet *, int));
161 static	int	lagg_setflags(struct lagg_port *, int status);
162 static uint64_t lagg_get_counter(struct ifnet *ifp, ift_counter cnt);
163 static int	lagg_transmit_ethernet(struct ifnet *, struct mbuf *);
164 static int	lagg_transmit_infiniband(struct ifnet *, struct mbuf *);
165 static void	lagg_qflush(struct ifnet *);
166 static int	lagg_media_change(struct ifnet *);
167 static void	lagg_media_status(struct ifnet *, struct ifmediareq *);
168 static struct lagg_port *lagg_link_active(struct lagg_softc *,
169 	    struct lagg_port *);
170 
171 /* Simple round robin */
172 static void	lagg_rr_attach(struct lagg_softc *);
173 static int	lagg_rr_start(struct lagg_softc *, struct mbuf *);
174 static struct mbuf *lagg_rr_input(struct lagg_softc *, struct lagg_port *,
175 		    struct mbuf *);
176 
177 /* Active failover */
178 static int	lagg_fail_start(struct lagg_softc *, struct mbuf *);
179 static struct mbuf *lagg_fail_input(struct lagg_softc *, struct lagg_port *,
180 		    struct mbuf *);
181 
182 /* Loadbalancing */
183 static void	lagg_lb_attach(struct lagg_softc *);
184 static void	lagg_lb_detach(struct lagg_softc *);
185 static int	lagg_lb_port_create(struct lagg_port *);
186 static void	lagg_lb_port_destroy(struct lagg_port *);
187 static int	lagg_lb_start(struct lagg_softc *, struct mbuf *);
188 static struct mbuf *lagg_lb_input(struct lagg_softc *, struct lagg_port *,
189 		    struct mbuf *);
190 static int	lagg_lb_porttable(struct lagg_softc *, struct lagg_port *);
191 
192 /* Broadcast */
193 static int    lagg_bcast_start(struct lagg_softc *, struct mbuf *);
194 static struct mbuf *lagg_bcast_input(struct lagg_softc *, struct lagg_port *,
195 		    struct mbuf *);
196 
197 /* 802.3ad LACP */
198 static void	lagg_lacp_attach(struct lagg_softc *);
199 static void	lagg_lacp_detach(struct lagg_softc *);
200 static int	lagg_lacp_start(struct lagg_softc *, struct mbuf *);
201 static struct mbuf *lagg_lacp_input(struct lagg_softc *, struct lagg_port *,
202 		    struct mbuf *);
203 static void	lagg_lacp_lladdr(struct lagg_softc *);
204 
205 /* lagg protocol table */
206 static const struct lagg_proto {
207 	lagg_proto	pr_num;
208 	void		(*pr_attach)(struct lagg_softc *);
209 	void		(*pr_detach)(struct lagg_softc *);
210 	int		(*pr_start)(struct lagg_softc *, struct mbuf *);
211 	struct mbuf *	(*pr_input)(struct lagg_softc *, struct lagg_port *,
212 			    struct mbuf *);
213 	int		(*pr_addport)(struct lagg_port *);
214 	void		(*pr_delport)(struct lagg_port *);
215 	void		(*pr_linkstate)(struct lagg_port *);
216 	void 		(*pr_init)(struct lagg_softc *);
217 	void 		(*pr_stop)(struct lagg_softc *);
218 	void 		(*pr_lladdr)(struct lagg_softc *);
219 	void		(*pr_request)(struct lagg_softc *, void *);
220 	void		(*pr_portreq)(struct lagg_port *, void *);
221 } lagg_protos[] = {
222     {
223 	.pr_num = LAGG_PROTO_NONE
224     },
225     {
226 	.pr_num = LAGG_PROTO_ROUNDROBIN,
227 	.pr_attach = lagg_rr_attach,
228 	.pr_start = lagg_rr_start,
229 	.pr_input = lagg_rr_input,
230     },
231     {
232 	.pr_num = LAGG_PROTO_FAILOVER,
233 	.pr_start = lagg_fail_start,
234 	.pr_input = lagg_fail_input,
235     },
236     {
237 	.pr_num = LAGG_PROTO_LOADBALANCE,
238 	.pr_attach = lagg_lb_attach,
239 	.pr_detach = lagg_lb_detach,
240 	.pr_start = lagg_lb_start,
241 	.pr_input = lagg_lb_input,
242 	.pr_addport = lagg_lb_port_create,
243 	.pr_delport = lagg_lb_port_destroy,
244     },
245     {
246 	.pr_num = LAGG_PROTO_LACP,
247 	.pr_attach = lagg_lacp_attach,
248 	.pr_detach = lagg_lacp_detach,
249 	.pr_start = lagg_lacp_start,
250 	.pr_input = lagg_lacp_input,
251 	.pr_addport = lacp_port_create,
252 	.pr_delport = lacp_port_destroy,
253 	.pr_linkstate = lacp_linkstate,
254 	.pr_init = lacp_init,
255 	.pr_stop = lacp_stop,
256 	.pr_lladdr = lagg_lacp_lladdr,
257 	.pr_request = lacp_req,
258 	.pr_portreq = lacp_portreq,
259     },
260     {
261 	.pr_num = LAGG_PROTO_BROADCAST,
262 	.pr_start = lagg_bcast_start,
263 	.pr_input = lagg_bcast_input,
264     },
265 };
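
/*
 * The lagg_proto_*() wrappers below dispatch through this table by
 * indexing it with sc->sc_proto, so entries must stay in lagg_proto
 * order (each pr_num equals its index).  A minimal sketch of the
 * dispatch pattern, with optional handlers checked against NULL:
 *
 *	if (lagg_protos[sc->sc_proto].pr_linkstate != NULL)
 *		lagg_protos[sc->sc_proto].pr_linkstate(lp);
 */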
266 
267 SYSCTL_DECL(_net_link);
268 SYSCTL_NODE(_net_link, OID_AUTO, lagg, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
269     "Link Aggregation");
270 
271 /* Allow input on any failover links */
272 VNET_DEFINE_STATIC(int, lagg_failover_rx_all);
273 #define	V_lagg_failover_rx_all	VNET(lagg_failover_rx_all)
274 SYSCTL_INT(_net_link_lagg, OID_AUTO, failover_rx_all, CTLFLAG_RW | CTLFLAG_VNET,
275     &VNET_NAME(lagg_failover_rx_all), 0,
276     "Accept input from any interface in a failover lagg");
277 
278 /* Default value for using flowid */
279 VNET_DEFINE_STATIC(int, def_use_flowid) = 0;
280 #define	V_def_use_flowid	VNET(def_use_flowid)
281 SYSCTL_INT(_net_link_lagg, OID_AUTO, default_use_flowid, CTLFLAG_RWTUN,
282     &VNET_NAME(def_use_flowid), 0,
283     "Default setting for using flow id for load sharing");
284 
285 /* Default value for using numa */
286 VNET_DEFINE_STATIC(int, def_use_numa) = 1;
287 #define	V_def_use_numa	VNET(def_use_numa)
288 SYSCTL_INT(_net_link_lagg, OID_AUTO, default_use_numa, CTLFLAG_RWTUN,
289     &VNET_NAME(def_use_numa), 0,
290     "Use numa to steer flows");
291 
292 /* Default value for flowid shift */
293 VNET_DEFINE_STATIC(int, def_flowid_shift) = 16;
294 #define	V_def_flowid_shift	VNET(def_flowid_shift)
295 SYSCTL_INT(_net_link_lagg, OID_AUTO, default_flowid_shift, CTLFLAG_RWTUN,
296     &VNET_NAME(def_flowid_shift), 0,
297     "Default setting for flowid shift for load sharing");
298 
299 static void
300 vnet_lagg_init(const void *unused __unused)
301 {
302 
303 	LAGG_LIST_LOCK_INIT();
304 	SLIST_INIT(&V_lagg_list);
305 	struct if_clone_addreq req = {
306 		.create_f = lagg_clone_create,
307 		.destroy_f = lagg_clone_destroy,
308 		.flags = IFC_F_AUTOUNIT,
309 	};
310 	V_lagg_cloner = ifc_attach_cloner(laggname, &req);
311 }
312 VNET_SYSINIT(vnet_lagg_init, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY,
313     vnet_lagg_init, NULL);
314 
315 static void
316 vnet_lagg_uninit(const void *unused __unused)
317 {
318 
319 	ifc_detach_cloner(V_lagg_cloner);
320 	LAGG_LIST_LOCK_DESTROY();
321 }
322 VNET_SYSUNINIT(vnet_lagg_uninit, SI_SUB_INIT_IF, SI_ORDER_ANY,
323     vnet_lagg_uninit, NULL);
324 
325 static int
326 lagg_modevent(module_t mod, int type, void *data)
327 {
328 
329 	switch (type) {
330 	case MOD_LOAD:
331 		lagg_input_ethernet_p = lagg_input_ethernet;
332 		lagg_input_infiniband_p = lagg_input_infiniband;
333 		lagg_linkstate_p = lagg_port_state;
334 		lagg_detach_cookie = EVENTHANDLER_REGISTER(
335 		    ifnet_departure_event, lagg_port_ifdetach, NULL,
336 		    EVENTHANDLER_PRI_ANY);
337 		break;
338 	case MOD_UNLOAD:
339 		EVENTHANDLER_DEREGISTER(ifnet_departure_event,
340 		    lagg_detach_cookie);
341 		lagg_input_ethernet_p = NULL;
342 		lagg_input_infiniband_p = NULL;
343 		lagg_linkstate_p = NULL;
344 		break;
345 	default:
346 		return (EOPNOTSUPP);
347 	}
348 	return (0);
349 }
350 
351 static moduledata_t lagg_mod = {
352 	"if_lagg",
353 	lagg_modevent,
354 	0
355 };
356 
357 DECLARE_MODULE(if_lagg, lagg_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
358 MODULE_VERSION(if_lagg, 1);
359 MODULE_DEPEND(if_lagg, if_infiniband, 1, 1, 1);
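
/*
 * Example (illustrative): the module declared above can be loaded at
 * runtime or at boot, e.g.:
 *
 *	# kldload if_lagg
 *	if_lagg_load="YES"		# in /boot/loader.conf
 */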
360 
361 static void
362 lagg_proto_attach(struct lagg_softc *sc, lagg_proto pr)
363 {
364 
365 	LAGG_XLOCK_ASSERT(sc);
366 	KASSERT(sc->sc_proto == LAGG_PROTO_NONE, ("%s: sc %p has proto",
367 	    __func__, sc));
368 
369 	if (sc->sc_ifflags & IFF_DEBUG)
370 		if_printf(sc->sc_ifp, "using proto %u\n", pr);
371 
372 	if (lagg_protos[pr].pr_attach != NULL)
373 		lagg_protos[pr].pr_attach(sc);
374 	sc->sc_proto = pr;
375 }
376 
377 static void
378 lagg_proto_detach(struct lagg_softc *sc)
379 {
380 	lagg_proto pr;
381 
382 	LAGG_XLOCK_ASSERT(sc);
383 	pr = sc->sc_proto;
384 	sc->sc_proto = LAGG_PROTO_NONE;
385 
386 	if (lagg_protos[pr].pr_detach != NULL)
387 		lagg_protos[pr].pr_detach(sc);
388 }
389 
390 static int
391 lagg_proto_start(struct lagg_softc *sc, struct mbuf *m)
392 {
393 
394 	return (lagg_protos[sc->sc_proto].pr_start(sc, m));
395 }
396 
397 static struct mbuf *
398 lagg_proto_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
399 {
400 
401 	return (lagg_protos[sc->sc_proto].pr_input(sc, lp, m));
402 }
403 
404 static int
405 lagg_proto_addport(struct lagg_softc *sc, struct lagg_port *lp)
406 {
407 
408 	if (lagg_protos[sc->sc_proto].pr_addport == NULL)
409 		return (0);
410 	else
411 		return (lagg_protos[sc->sc_proto].pr_addport(lp));
412 }
413 
414 static void
415 lagg_proto_delport(struct lagg_softc *sc, struct lagg_port *lp)
416 {
417 
418 	if (lagg_protos[sc->sc_proto].pr_delport != NULL)
419 		lagg_protos[sc->sc_proto].pr_delport(lp);
420 }
421 
422 static void
423 lagg_proto_linkstate(struct lagg_softc *sc, struct lagg_port *lp)
424 {
425 
426 	if (lagg_protos[sc->sc_proto].pr_linkstate != NULL)
427 		lagg_protos[sc->sc_proto].pr_linkstate(lp);
428 }
429 
430 static void
431 lagg_proto_init(struct lagg_softc *sc)
432 {
433 
434 	if (lagg_protos[sc->sc_proto].pr_init != NULL)
435 		lagg_protos[sc->sc_proto].pr_init(sc);
436 }
437 
438 static void
439 lagg_proto_stop(struct lagg_softc *sc)
440 {
441 
442 	if (lagg_protos[sc->sc_proto].pr_stop != NULL)
443 		lagg_protos[sc->sc_proto].pr_stop(sc);
444 }
445 
446 static void
447 lagg_proto_lladdr(struct lagg_softc *sc)
448 {
449 
450 	if (lagg_protos[sc->sc_proto].pr_lladdr != NULL)
451 		lagg_protos[sc->sc_proto].pr_lladdr(sc);
452 }
453 
454 static void
455 lagg_proto_request(struct lagg_softc *sc, void *v)
456 {
457 
458 	if (lagg_protos[sc->sc_proto].pr_request != NULL)
459 		lagg_protos[sc->sc_proto].pr_request(sc, v);
460 }
461 
462 static void
463 lagg_proto_portreq(struct lagg_softc *sc, struct lagg_port *lp, void *v)
464 {
465 
466 	if (lagg_protos[sc->sc_proto].pr_portreq != NULL)
467 		lagg_protos[sc->sc_proto].pr_portreq(lp, v);
468 }
469 
470 /*
471  * This routine is run via a vlan
472  * config EVENT
473  */
474 static void
475 lagg_register_vlan(void *arg, struct ifnet *ifp, u_int16_t vtag)
476 {
477 	struct lagg_softc *sc = ifp->if_softc;
478 	struct lagg_port *lp;
479 
480 	if (ifp->if_softc != arg)	/* Not our event */
481 		return;
482 
483 	LAGG_XLOCK(sc);
484 	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
485 		EVENTHANDLER_INVOKE(vlan_config, lp->lp_ifp, vtag);
486 	LAGG_XUNLOCK(sc);
487 }
488 
489 /*
490  * This routine is run via a vlan
491  * unconfig EVENT
492  */
493 static void
494 lagg_unregister_vlan(void *arg, struct ifnet *ifp, u_int16_t vtag)
495 {
496 	struct lagg_softc *sc = ifp->if_softc;
497 	struct lagg_port *lp;
498 
499 	if (ifp->if_softc != arg)	/* Not our event */
500 		return;
501 
502 	LAGG_XLOCK(sc);
503 	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
504 		EVENTHANDLER_INVOKE(vlan_unconfig, lp->lp_ifp, vtag);
505 	LAGG_XUNLOCK(sc);
506 }
507 
508 static int
509 lagg_clone_create(struct if_clone *ifc, char *name, size_t len,
510     struct ifc_data *ifd, struct ifnet **ifpp)
511 {
512 	struct iflaggparam iflp;
513 	struct lagg_softc *sc;
514 	struct ifnet *ifp;
515 	int if_type;
516 	int error;
517 	static const uint8_t eaddr[LAGG_ADDR_LEN];
518 
519 	if (ifd->params != NULL) {
520 		error = ifc_copyin(ifd, &iflp, sizeof(iflp));
521 		if (error)
522 			return (error);
523 
524 		switch (iflp.lagg_type) {
525 		case LAGG_TYPE_ETHERNET:
526 			if_type = IFT_ETHER;
527 			break;
528 		case LAGG_TYPE_INFINIBAND:
529 			if_type = IFT_INFINIBAND;
530 			break;
531 		default:
532 			return (EINVAL);
533 		}
534 	} else {
535 		if_type = IFT_ETHER;
536 	}
537 
538 	sc = malloc(sizeof(*sc), M_LAGG, M_WAITOK|M_ZERO);
539 	ifp = sc->sc_ifp = if_alloc(if_type);
540 	if (ifp == NULL) {
541 		free(sc, M_LAGG);
542 		return (ENOSPC);
543 	}
544 	LAGG_SX_INIT(sc);
545 
546 	mtx_init(&sc->sc_mtx, "lagg-mtx", NULL, MTX_DEF);
547 	callout_init_mtx(&sc->sc_watchdog, &sc->sc_mtx, 0);
548 
549 	LAGG_XLOCK(sc);
550 	if (V_def_use_flowid)
551 		sc->sc_opts |= LAGG_OPT_USE_FLOWID;
552 	if (V_def_use_numa)
553 		sc->sc_opts |= LAGG_OPT_USE_NUMA;
554 	sc->flowid_shift = V_def_flowid_shift;
555 
556 	/* Hash all layers by default */
557 	sc->sc_flags = MBUF_HASHFLAG_L2|MBUF_HASHFLAG_L3|MBUF_HASHFLAG_L4;
558 
559 	lagg_proto_attach(sc, LAGG_PROTO_DEFAULT);
560 
561 	CK_SLIST_INIT(&sc->sc_ports);
562 
563 	switch (if_type) {
564 	case IFT_ETHER:
565 		/* Initialise pseudo media types */
566 		ifmedia_init(&sc->sc_media, 0, lagg_media_change,
567 		    lagg_media_status);
568 		ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
569 		ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
570 
571 		if_initname(ifp, laggname, ifd->unit);
572 		ifp->if_transmit = lagg_transmit_ethernet;
573 		break;
574 	case IFT_INFINIBAND:
575 		if_initname(ifp, laggname, ifd->unit);
576 		ifp->if_transmit = lagg_transmit_infiniband;
577 		break;
578 	default:
579 		break;
580 	}
581 	ifp->if_softc = sc;
582 	ifp->if_qflush = lagg_qflush;
583 	ifp->if_init = lagg_init;
584 	ifp->if_ioctl = lagg_ioctl;
585 	ifp->if_get_counter = lagg_get_counter;
586 	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
587 #if defined(KERN_TLS) || defined(RATELIMIT)
588 	ifp->if_snd_tag_alloc = lagg_snd_tag_alloc;
589 	ifp->if_ratelimit_query = lagg_ratelimit_query;
590 #endif
591 	ifp->if_capenable = ifp->if_capabilities = IFCAP_HWSTATS;
592 
593 	/*
594 	 * Attach as an ordinary ethernet device; children will be attached
595 	 * as special devices of type IFT_IEEE8023ADLAG or IFT_INFINIBANDLAG.
596 	 */
597 	switch (if_type) {
598 	case IFT_ETHER:
599 		ether_ifattach(ifp, eaddr);
600 		break;
601 	case IFT_INFINIBAND:
602 		infiniband_ifattach(ifp, eaddr, sc->sc_bcast_addr);
603 		break;
604 	default:
605 		break;
606 	}
607 
608 	sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
609 		lagg_register_vlan, sc, EVENTHANDLER_PRI_FIRST);
610 	sc->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
611 		lagg_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);
612 
613 	/* Insert into the global list of laggs */
614 	LAGG_LIST_LOCK();
615 	SLIST_INSERT_HEAD(&V_lagg_list, sc, sc_entries);
616 	LAGG_LIST_UNLOCK();
617 	LAGG_XUNLOCK(sc);
618 	*ifpp = ifp;
619 
620 	return (0);
621 }
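
/*
 * Example (illustrative): from userland the cloner above is driven by
 * ifconfig(8); the device names here are placeholders, e.g.:
 *
 *	# ifconfig lagg0 create
 *	# ifconfig lagg0 laggproto lacp laggport ix0 laggport ix1 up
 */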
622 
623 static int
624 lagg_clone_destroy(struct if_clone *ifc, struct ifnet *ifp, uint32_t flags)
625 {
626 	struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
627 	struct lagg_port *lp;
628 
629 	LAGG_XLOCK(sc);
630 	sc->sc_destroying = 1;
631 	lagg_stop(sc);
632 	ifp->if_flags &= ~IFF_UP;
633 
634 	EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
635 	EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
636 
637 	/* Shutdown and remove lagg ports */
638 	while ((lp = CK_SLIST_FIRST(&sc->sc_ports)) != NULL)
639 		lagg_port_destroy(lp, 1);
640 
641 	/* Unhook the aggregation protocol */
642 	lagg_proto_detach(sc);
643 	LAGG_XUNLOCK(sc);
644 
645 	switch (ifp->if_type) {
646 	case IFT_ETHER:
647 		ifmedia_removeall(&sc->sc_media);
648 		ether_ifdetach(ifp);
649 		break;
650 	case IFT_INFINIBAND:
651 		infiniband_ifdetach(ifp);
652 		break;
653 	default:
654 		break;
655 	}
656 	if_free(ifp);
657 
658 	LAGG_LIST_LOCK();
659 	SLIST_REMOVE(&V_lagg_list, sc, lagg_softc, sc_entries);
660 	LAGG_LIST_UNLOCK();
661 
662 	mtx_destroy(&sc->sc_mtx);
663 	LAGG_SX_DESTROY(sc);
664 	free(sc, M_LAGG);
665 
666 	return (0);
667 }
668 
669 static void
670 lagg_capabilities(struct lagg_softc *sc)
671 {
672 	struct lagg_port *lp;
673 	int cap, cap2, ena, ena2, pena, pena2;
674 	uint64_t hwa;
675 	struct ifnet_hw_tsomax hw_tsomax;
676 
677 	LAGG_XLOCK_ASSERT(sc);
678 
679 	/* Get common enabled capabilities for the lagg ports */
680 	ena = ena2 = ~0;
681 	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
682 		ena &= lp->lp_ifp->if_capenable;
683 		ena2 &= lp->lp_ifp->if_capenable2;
684 	}
685 	if (CK_SLIST_FIRST(&sc->sc_ports) == NULL)
686 		ena = ena2 = 0;
687 
688 	/*
689 	 * Apply common enabled capabilities back to the lagg ports.
690 	 * May require several iterations if they are dependent.
691 	 */
692 	do {
693 		pena = ena;
694 		pena2 = ena2;
695 		CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
696 			lagg_setcaps(lp, ena, ena2);
697 			ena &= lp->lp_ifp->if_capenable;
698 			ena2 &= lp->lp_ifp->if_capenable2;
699 		}
700 	} while (pena != ena || pena2 != ena2);
701 
702 	/* Get other capabilities from the lagg ports */
703 	cap = cap2 = ~0;
704 	hwa = ~(uint64_t)0;
705 	memset(&hw_tsomax, 0, sizeof(hw_tsomax));
706 	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
707 		cap &= lp->lp_ifp->if_capabilities;
708 		cap2 &= lp->lp_ifp->if_capabilities2;
709 		hwa &= lp->lp_ifp->if_hwassist;
710 		if_hw_tsomax_common(lp->lp_ifp, &hw_tsomax);
711 	}
712 	if (CK_SLIST_FIRST(&sc->sc_ports) == NULL)
713 		cap = cap2 = hwa = 0;
714 
715 	if (sc->sc_ifp->if_capabilities != cap ||
716 	    sc->sc_ifp->if_capenable != ena ||
717 	    sc->sc_ifp->if_capenable2 != ena2 ||
718 	    sc->sc_ifp->if_hwassist != hwa ||
719 	    if_hw_tsomax_update(sc->sc_ifp, &hw_tsomax) != 0) {
720 		sc->sc_ifp->if_capabilities = cap;
721 		sc->sc_ifp->if_capabilities2 = cap2;
722 		sc->sc_ifp->if_capenable = ena;
723 		sc->sc_ifp->if_capenable2 = ena2;
724 		sc->sc_ifp->if_hwassist = hwa;
725 		getmicrotime(&sc->sc_ifp->if_lastchange);
726 
727 		if (sc->sc_ifflags & IFF_DEBUG)
728 			if_printf(sc->sc_ifp,
729 			    "capabilities 0x%08x enabled 0x%08x\n", cap, ena);
730 	}
731 }
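
/*
 * Example of the fixed-point iteration above (hypothetical driver
 * behaviour): clearing IFCAP_TXCSUM on one port may force its driver
 * to clear IFCAP_TSO4 too, shrinking the common enabled set, so the
 * intersection is recomputed until two passes agree.
 */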
732 
733 static int
734 lagg_port_create(struct lagg_softc *sc, struct ifnet *ifp)
735 {
736 	struct lagg_softc *sc_ptr;
737 	struct lagg_port *lp, *tlp;
738 	struct ifreq ifr;
739 	int error, i, oldmtu;
740 	int if_type;
741 	uint64_t *pval;
742 
743 	LAGG_XLOCK_ASSERT(sc);
744 
745 	if (sc->sc_ifp == ifp) {
746 		if_printf(sc->sc_ifp,
747 		    "cannot add a lagg to itself as a port\n");
748 		return (EINVAL);
749 	}
750 
751 	if (sc->sc_destroying == 1)
752 		return (ENXIO);
753 
754 	/* Limit the maximal number of lagg ports */
755 	if (sc->sc_count >= LAGG_MAX_PORTS)
756 		return (ENOSPC);
757 
758 	/* Check if the port has already been associated with a lagg */
759 	if (ifp->if_lagg != NULL) {
760 		/* Port is already in the current lagg? */
761 		lp = (struct lagg_port *)ifp->if_lagg;
762 		if (lp->lp_softc == sc)
763 			return (EEXIST);
764 		return (EBUSY);
765 	}
766 
767 	switch (sc->sc_ifp->if_type) {
768 	case IFT_ETHER:
769 		/* XXX Disallow non-Ethernet interfaces (this should accept any IEEE 802 type) */
770 		if (ifp->if_type != IFT_ETHER && ifp->if_type != IFT_L2VLAN)
771 			return (EPROTONOSUPPORT);
772 		if_type = IFT_IEEE8023ADLAG;
773 		break;
774 	case IFT_INFINIBAND:
775 		/* XXX Disallow non-infiniband interfaces */
776 		if (ifp->if_type != IFT_INFINIBAND)
777 			return (EPROTONOSUPPORT);
778 		if_type = IFT_INFINIBANDLAG;
779 		break;
780 	default:
781 		break;
782 	}
783 
784 	/* Allow the first Ethernet member to define the MTU */
785 	oldmtu = -1;
786 	if (CK_SLIST_EMPTY(&sc->sc_ports)) {
787 		sc->sc_ifp->if_mtu = ifp->if_mtu;
788 	} else if (sc->sc_ifp->if_mtu != ifp->if_mtu) {
789 		if (ifp->if_ioctl == NULL) {
790 			if_printf(sc->sc_ifp, "cannot change MTU for %s\n",
791 			    ifp->if_xname);
792 			return (EINVAL);
793 		}
794 		oldmtu = ifp->if_mtu;
795 		strlcpy(ifr.ifr_name, ifp->if_xname, sizeof(ifr.ifr_name));
796 		ifr.ifr_mtu = sc->sc_ifp->if_mtu;
797 		error = (*ifp->if_ioctl)(ifp, SIOCSIFMTU, (caddr_t)&ifr);
798 		if (error != 0) {
799 			if_printf(sc->sc_ifp, "invalid MTU for %s\n",
800 			    ifp->if_xname);
801 			return (error);
802 		}
803 		ifr.ifr_mtu = oldmtu;
804 	}
805 
806 	lp = malloc(sizeof(struct lagg_port), M_LAGG, M_WAITOK|M_ZERO);
807 	lp->lp_softc = sc;
808 
809 	/* Check if port is a stacked lagg */
810 	LAGG_LIST_LOCK();
811 	SLIST_FOREACH(sc_ptr, &V_lagg_list, sc_entries) {
812 		if (ifp == sc_ptr->sc_ifp) {
813 			LAGG_LIST_UNLOCK();
814 			free(lp, M_LAGG);
815 			if (oldmtu != -1)
816 				(*ifp->if_ioctl)(ifp, SIOCSIFMTU,
817 				    (caddr_t)&ifr);
818 			return (EINVAL);
819 			/* XXX disable stacking for the moment, it's untested */
820 #ifdef LAGG_PORT_STACKING
821 			lp->lp_flags |= LAGG_PORT_STACK;
822 			if (lagg_port_checkstacking(sc_ptr) >=
823 			    LAGG_MAX_STACKING) {
824 				LAGG_LIST_UNLOCK();
825 				free(lp, M_LAGG);
826 				if (oldmtu != -1)
827 					(*ifp->if_ioctl)(ifp, SIOCSIFMTU,
828 					    (caddr_t)&ifr);
829 				return (E2BIG);
830 			}
831 #endif
832 		}
833 	}
834 	LAGG_LIST_UNLOCK();
835 
836 	if_ref(ifp);
837 	lp->lp_ifp = ifp;
838 
839 	bcopy(IF_LLADDR(ifp), lp->lp_lladdr, ifp->if_addrlen);
840 	lp->lp_ifcapenable = ifp->if_capenable;
841 	if (CK_SLIST_EMPTY(&sc->sc_ports)) {
842 		bcopy(IF_LLADDR(ifp), IF_LLADDR(sc->sc_ifp), ifp->if_addrlen);
843 		lagg_proto_lladdr(sc);
844 		EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp);
845 	} else {
846 		if_setlladdr(ifp, IF_LLADDR(sc->sc_ifp), ifp->if_addrlen);
847 	}
848 	lagg_setflags(lp, 1);
849 
850 	if (CK_SLIST_EMPTY(&sc->sc_ports))
851 		sc->sc_primary = lp;
852 
853 	/* Change the interface type */
854 	lp->lp_iftype = ifp->if_type;
855 	ifp->if_type = if_type;
856 	ifp->if_lagg = lp;
857 	lp->lp_ioctl = ifp->if_ioctl;
858 	ifp->if_ioctl = lagg_port_ioctl;
859 	lp->lp_output = ifp->if_output;
860 	ifp->if_output = lagg_port_output;
861 
862 	/* Read port counters */
863 	pval = lp->port_counters.val;
864 	for (i = 0; i < IFCOUNTERS; i++, pval++)
865 		*pval = ifp->if_get_counter(ifp, i);
866 
867 	/*
868 	 * Insert into the list of ports.
869 	 * Keep ports sorted by if_index, so that configuration
870 	 * is predictable and the same `ifconfig laggN create ...`
871 	 * command leads to the same result each time.
872 	 */
873 	CK_SLIST_FOREACH(tlp, &sc->sc_ports, lp_entries) {
874 		if (tlp->lp_ifp->if_index < ifp->if_index && (
875 		    CK_SLIST_NEXT(tlp, lp_entries) == NULL ||
876 	    ((struct lagg_port *)CK_SLIST_NEXT(tlp, lp_entries))->lp_ifp->if_index >
877 		    ifp->if_index))
878 			break;
879 	}
880 	if (tlp != NULL)
881 		CK_SLIST_INSERT_AFTER(tlp, lp, lp_entries);
882 	else
883 		CK_SLIST_INSERT_HEAD(&sc->sc_ports, lp, lp_entries);
884 	sc->sc_count++;
885 
886 	lagg_setmulti(lp);
887 
888 	if ((error = lagg_proto_addport(sc, lp)) != 0) {
889 		/* Remove the port, without calling pr_delport. */
890 		lagg_port_destroy(lp, 0);
891 		if (oldmtu != -1)
892 			(*ifp->if_ioctl)(ifp, SIOCSIFMTU, (caddr_t)&ifr);
893 		return (error);
894 	}
895 
896 	/* Update lagg capabilities */
897 	lagg_capabilities(sc);
898 	lagg_linkstate(sc);
899 
900 	return (0);
901 }
902 
903 #ifdef LAGG_PORT_STACKING
904 static int
905 lagg_port_checkstacking(struct lagg_softc *sc)
906 {
907 	struct lagg_softc *sc_ptr;
908 	struct lagg_port *lp;
909 	int m = 0;
910 
911 	LAGG_SXLOCK_ASSERT(sc);
912 	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
913 		if (lp->lp_flags & LAGG_PORT_STACK) {
914 			sc_ptr = (struct lagg_softc *)lp->lp_ifp->if_softc;
915 			m = MAX(m, lagg_port_checkstacking(sc_ptr));
916 		}
917 	}
918 
919 	return (m + 1);
920 }
921 #endif
922 
923 static void
924 lagg_port_destroy_cb(epoch_context_t ec)
925 {
926 	struct lagg_port *lp;
927 	struct ifnet *ifp;
928 
929 	lp = __containerof(ec, struct lagg_port, lp_epoch_ctx);
930 	ifp = lp->lp_ifp;
931 
932 	if_rele(ifp);
933 	free(lp, M_LAGG);
934 }
935 
936 static int
937 lagg_port_destroy(struct lagg_port *lp, int rundelport)
938 {
939 	struct lagg_softc *sc = lp->lp_softc;
940 	struct lagg_port *lp_ptr, *lp0;
941 	struct ifnet *ifp = lp->lp_ifp;
942 	uint64_t *pval, vdiff;
943 	int i;
944 
945 	LAGG_XLOCK_ASSERT(sc);
946 
947 	if (rundelport)
948 		lagg_proto_delport(sc, lp);
949 
950 	if (lp->lp_detaching == 0)
951 		lagg_clrmulti(lp);
952 
953 	/* Restore interface */
954 	ifp->if_type = lp->lp_iftype;
955 	ifp->if_ioctl = lp->lp_ioctl;
956 	ifp->if_output = lp->lp_output;
957 	ifp->if_lagg = NULL;
958 
959 	/* Update detached port counters */
960 	pval = lp->port_counters.val;
961 	for (i = 0; i < IFCOUNTERS; i++, pval++) {
962 		vdiff = ifp->if_get_counter(ifp, i) - *pval;
963 		sc->detached_counters.val[i] += vdiff;
964 	}
965 
966 	/* Finally, remove the port from the lagg */
967 	CK_SLIST_REMOVE(&sc->sc_ports, lp, lagg_port, lp_entries);
968 	sc->sc_count--;
969 
970 	/* Update the primary interface */
971 	if (lp == sc->sc_primary) {
972 		uint8_t lladdr[LAGG_ADDR_LEN];
973 
974 		if ((lp0 = CK_SLIST_FIRST(&sc->sc_ports)) == NULL)
975 			bzero(&lladdr, LAGG_ADDR_LEN);
976 		else
977 			bcopy(lp0->lp_lladdr, lladdr, LAGG_ADDR_LEN);
978 		sc->sc_primary = lp0;
979 		if (sc->sc_destroying == 0) {
980 			bcopy(lladdr, IF_LLADDR(sc->sc_ifp), sc->sc_ifp->if_addrlen);
981 			lagg_proto_lladdr(sc);
982 			EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp);
983 
984 			/*
985 			 * Update lladdr for each port (new primary needs update
986 			 * as well, to switch from old lladdr to its 'real' one).
987 			 * We can skip this if the lagg is being destroyed.
988 			 */
989 			CK_SLIST_FOREACH(lp_ptr, &sc->sc_ports, lp_entries)
990 				if_setlladdr(lp_ptr->lp_ifp, lladdr,
991 				    lp_ptr->lp_ifp->if_addrlen);
992 		}
993 	}
994 
995 	if (lp->lp_ifflags)
996 		if_printf(ifp, "%s: lp_ifflags unclean\n", __func__);
997 
998 	if (lp->lp_detaching == 0) {
999 		lagg_setflags(lp, 0);
1000 		lagg_setcaps(lp, lp->lp_ifcapenable, lp->lp_ifcapenable2);
1001 		if_setlladdr(ifp, lp->lp_lladdr, ifp->if_addrlen);
1002 	}
1003 
1004 	/*
1005 	 * Free the port and release its ifnet reference after a grace
1006 	 * period has elapsed.
1007 	 */
1008 	NET_EPOCH_CALL(lagg_port_destroy_cb, &lp->lp_epoch_ctx);
1009 	/* Update lagg capabilities */
1010 	lagg_capabilities(sc);
1011 	lagg_linkstate(sc);
1012 
1013 	return (0);
1014 }
1015 
1016 static int
1017 lagg_port_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1018 {
1019 	struct epoch_tracker et;
1020 	struct lagg_reqport *rp = (struct lagg_reqport *)data;
1021 	struct lagg_softc *sc;
1022 	struct lagg_port *lp = NULL;
1023 	int error = 0;
1024 
1025 	/* Should be checked by the caller */
1026 	switch (ifp->if_type) {
1027 	case IFT_IEEE8023ADLAG:
1028 	case IFT_INFINIBANDLAG:
1029 		if ((lp = ifp->if_lagg) == NULL || (sc = lp->lp_softc) == NULL)
1030 			goto fallback;
1031 		break;
1032 	default:
1033 		goto fallback;
1034 	}
1035 
1036 	switch (cmd) {
1037 	case SIOCGLAGGPORT:
1038 		if (rp->rp_portname[0] == '\0' ||
1039 		    ifunit(rp->rp_portname) != ifp) {
1040 			error = EINVAL;
1041 			break;
1042 		}
1043 
1044 		NET_EPOCH_ENTER(et);
1045 		if ((lp = ifp->if_lagg) == NULL || lp->lp_softc != sc) {
1046 			error = ENOENT;
1047 			NET_EPOCH_EXIT(et);
1048 			break;
1049 		}
1050 
1051 		lagg_port2req(lp, rp);
1052 		NET_EPOCH_EXIT(et);
1053 		break;
1054 
1055 	case SIOCSIFCAP:
1056 	case SIOCSIFCAPNV:
1057 		if (lp->lp_ioctl == NULL) {
1058 			error = EINVAL;
1059 			break;
1060 		}
1061 		error = (*lp->lp_ioctl)(ifp, cmd, data);
1062 		if (error)
1063 			break;
1064 
1065 		/* Update lagg interface capabilities */
1066 		LAGG_XLOCK(sc);
1067 		lagg_capabilities(sc);
1068 		LAGG_XUNLOCK(sc);
1069 		VLAN_CAPABILITIES(sc->sc_ifp);
1070 		break;
1071 
1072 	case SIOCSIFMTU:
1073 		/* Do not allow the MTU to be changed once joined */
1074 		error = EINVAL;
1075 		break;
1076 
1077 	default:
1078 		goto fallback;
1079 	}
1080 
1081 	return (error);
1082 
1083 fallback:
1084 	if (lp != NULL && lp->lp_ioctl != NULL)
1085 		return ((*lp->lp_ioctl)(ifp, cmd, data));
1086 
1087 	return (EINVAL);
1088 }
1089 
1090 /*
1091  * Requests counter @cnt data.
1092  *
1093  * Counter value is calculated the following way:
1094  * 1) for each port, sum the difference between current and "initial" measurements.
1095  * 2) add lagg logical interface counters.
1096  * 3) add data from the detached_counters array.
1097  *
1098  * We also do the following things on port attach/detach:
1099  * 1) On port attach we store all counters it has into the port_counters array.
1100  * 2) On port detach we add the difference between the "initial" and
1101  *    current counter data to the detached_counters array.
1102  */
1103 static uint64_t
1104 lagg_get_counter(struct ifnet *ifp, ift_counter cnt)
1105 {
1106 	struct epoch_tracker et;
1107 	struct lagg_softc *sc;
1108 	struct lagg_port *lp;
1109 	struct ifnet *lpifp;
1110 	uint64_t newval, oldval, vsum;
1111 
1112 	/* Revise this when we've got non-generic counters. */
1113 	KASSERT(cnt < IFCOUNTERS, ("%s: invalid cnt %d", __func__, cnt));
1114 
1115 	sc = (struct lagg_softc *)ifp->if_softc;
1116 
1117 	vsum = 0;
1118 	NET_EPOCH_ENTER(et);
1119 	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
1120 		/* Value saved when the port was attached */
1121 		oldval = lp->port_counters.val[cnt];
1122 		/* current value */
1123 		lpifp = lp->lp_ifp;
1124 		newval = lpifp->if_get_counter(lpifp, cnt);
1125 		/* Accumulate the difference */
1126 		vsum += newval - oldval;
1127 	}
1128 	NET_EPOCH_EXIT(et);
1129 
1130 	/*
1131 	 * Add counter data which might be added by upper
1132 	 * layer protocols operating on the logical interface.
1133 	 */
1134 	vsum += if_get_counter_default(ifp, cnt);
1135 
1136 	/*
1137 	 * Add counter data from detached ports counters
1138 	 */
1139 	vsum += sc->detached_counters.val[cnt];
1140 
1141 	return (vsum);
1142 }
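
/*
 * Worked example (hypothetical numbers): a port that reported 1000
 * input packets at attach time and reports 1500 now contributes 500;
 * with 200 packets accumulated in detached_counters from ports removed
 * earlier, lagg_get_counter() returns 500 + 200 plus whatever the
 * logical interface counted itself.
 */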
1143 
1144 /*
1145  * For direct output to child ports.
1146  */
1147 static int
1148 lagg_port_output(struct ifnet *ifp, struct mbuf *m,
1149 	const struct sockaddr *dst, struct route *ro)
1150 {
1151 	struct lagg_port *lp = ifp->if_lagg;
1152 
1153 	switch (dst->sa_family) {
1154 		case pseudo_AF_HDRCMPLT:
1155 		case AF_UNSPEC:
1156 			if (lp != NULL)
1157 				return ((*lp->lp_output)(ifp, m, dst, ro));
1158 	}
1159 
1160 	/* drop any other frames */
1161 	m_freem(m);
1162 	return (ENETDOWN);
1163 }
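
/*
 * Context note (assumed caller): bpf(4) writes reach child ports
 * through this path, since bpf builds its destination sockaddr with
 * sa_family set to AF_UNSPEC, or pseudo_AF_HDRCMPLT when the caller
 * supplies a complete link-level header.
 */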
1164 
1165 static void
1166 lagg_port_ifdetach(void *arg __unused, struct ifnet *ifp)
1167 {
1168 	struct lagg_port *lp;
1169 	struct lagg_softc *sc;
1170 
1171 	if ((lp = ifp->if_lagg) == NULL)
1172 		return;
1173 	/* If the ifnet is just being renamed, don't do anything. */
1174 	if (ifp->if_flags & IFF_RENAMING)
1175 		return;
1176 
1177 	sc = lp->lp_softc;
1178 
1179 	LAGG_XLOCK(sc);
1180 	lp->lp_detaching = 1;
1181 	lagg_port_destroy(lp, 1);
1182 	LAGG_XUNLOCK(sc);
1183 	VLAN_CAPABILITIES(sc->sc_ifp);
1184 }
1185 
1186 static void
1187 lagg_port2req(struct lagg_port *lp, struct lagg_reqport *rp)
1188 {
1189 	struct lagg_softc *sc = lp->lp_softc;
1190 
1191 	strlcpy(rp->rp_ifname, sc->sc_ifname, sizeof(rp->rp_ifname));
1192 	strlcpy(rp->rp_portname, lp->lp_ifp->if_xname, sizeof(rp->rp_portname));
1193 	rp->rp_prio = lp->lp_prio;
1194 	rp->rp_flags = lp->lp_flags;
1195 	lagg_proto_portreq(sc, lp, &rp->rp_psc);
1196 
1197 	/* Add protocol specific flags */
1198 	switch (sc->sc_proto) {
1199 		case LAGG_PROTO_FAILOVER:
1200 			if (lp == sc->sc_primary)
1201 				rp->rp_flags |= LAGG_PORT_MASTER;
1202 			if (lp == lagg_link_active(sc, sc->sc_primary))
1203 				rp->rp_flags |= LAGG_PORT_ACTIVE;
1204 			break;
1205 
1206 		case LAGG_PROTO_ROUNDROBIN:
1207 		case LAGG_PROTO_LOADBALANCE:
1208 		case LAGG_PROTO_BROADCAST:
1209 			if (LAGG_PORTACTIVE(lp))
1210 				rp->rp_flags |= LAGG_PORT_ACTIVE;
1211 			break;
1212 
1213 		case LAGG_PROTO_LACP:
1214 			/* LACP has a different definition of active */
1215 			if (lacp_isactive(lp))
1216 				rp->rp_flags |= LAGG_PORT_ACTIVE;
1217 			if (lacp_iscollecting(lp))
1218 				rp->rp_flags |= LAGG_PORT_COLLECTING;
1219 			if (lacp_isdistributing(lp))
1220 				rp->rp_flags |= LAGG_PORT_DISTRIBUTING;
1221 			break;
1222 	}
1223 
1224 }
1225 
1226 static void
1227 lagg_watchdog_infiniband(void *arg)
1228 {
1229 	struct epoch_tracker et;
1230 	struct lagg_softc *sc;
1231 	struct lagg_port *lp;
1232 	struct ifnet *ifp;
1233 	struct ifnet *lp_ifp;
1234 
1235 	sc = arg;
1236 
1237 	/*
1238 	 * Because infiniband nodes have a fixed MAC address, which is
1239 	 * generated by the so-called GID, we need to regularly update
1240 	 * the link level address of the parent lagg<N> device when
1241 	 * the active port changes. Possibly we could piggy-back on
1242 	 * link up/down events as well, but using a timer also provides
1243 	 * a guarantee against too frequent events. This operation
1244 	 * does not have to be atomic.
1245 	 */
1246 	NET_EPOCH_ENTER(et);
1247 	lp = lagg_link_active(sc, sc->sc_primary);
1248 	if (lp != NULL) {
1249 		ifp = sc->sc_ifp;
1250 		lp_ifp = lp->lp_ifp;
1251 
1252 		if (ifp != NULL && lp_ifp != NULL &&
1253 		    (memcmp(IF_LLADDR(ifp), IF_LLADDR(lp_ifp), ifp->if_addrlen) != 0 ||
1254 		     memcmp(sc->sc_bcast_addr, lp_ifp->if_broadcastaddr, ifp->if_addrlen) != 0)) {
1255 			memcpy(IF_LLADDR(ifp), IF_LLADDR(lp_ifp), ifp->if_addrlen);
1256 			memcpy(sc->sc_bcast_addr, lp_ifp->if_broadcastaddr, ifp->if_addrlen);
1257 
1258 			CURVNET_SET(ifp->if_vnet);
1259 			EVENTHANDLER_INVOKE(iflladdr_event, ifp);
1260 			CURVNET_RESTORE();
1261 		}
1262 	}
1263 	NET_EPOCH_EXIT(et);
1264 
1265 	callout_reset(&sc->sc_watchdog, hz, &lagg_watchdog_infiniband, arg);
1266 }
1267 
1268 static void
1269 lagg_init(void *xsc)
1270 {
1271 	struct lagg_softc *sc = (struct lagg_softc *)xsc;
1272 	struct ifnet *ifp = sc->sc_ifp;
1273 	struct lagg_port *lp;
1274 
1275 	LAGG_XLOCK(sc);
1276 	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1277 		LAGG_XUNLOCK(sc);
1278 		return;
1279 	}
1280 
1281 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1282 
1283 	/*
1284 	 * Update the port lladdrs if needed.
1285 	 * This might be an if_setlladdr() notification
1286 	 * that the lladdr has been changed.
1287 	 */
1288 	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
1289 		if (memcmp(IF_LLADDR(ifp), IF_LLADDR(lp->lp_ifp),
1290 		    ifp->if_addrlen) != 0)
1291 			if_setlladdr(lp->lp_ifp, IF_LLADDR(ifp), ifp->if_addrlen);
1292 	}
1293 
1294 	lagg_proto_init(sc);
1295 
1296 	if (ifp->if_type == IFT_INFINIBAND) {
1297 		mtx_lock(&sc->sc_mtx);
1298 		lagg_watchdog_infiniband(sc);
1299 		mtx_unlock(&sc->sc_mtx);
1300 	}
1301 
1302 	LAGG_XUNLOCK(sc);
1303 }
1304 
1305 static void
1306 lagg_stop(struct lagg_softc *sc)
1307 {
1308 	struct ifnet *ifp = sc->sc_ifp;
1309 
1310 	LAGG_XLOCK_ASSERT(sc);
1311 
1312 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1313 		return;
1314 
1315 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1316 
1317 	lagg_proto_stop(sc);
1318 
1319 	mtx_lock(&sc->sc_mtx);
1320 	callout_stop(&sc->sc_watchdog);
1321 	mtx_unlock(&sc->sc_mtx);
1322 
1323 	callout_drain(&sc->sc_watchdog);
1324 }
1325 
1326 static int
1327 lagg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1328 {
1329 	struct epoch_tracker et;
1330 	struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
1331 	struct lagg_reqall *ra = (struct lagg_reqall *)data;
1332 	struct lagg_reqopts *ro = (struct lagg_reqopts *)data;
1333 	struct lagg_reqport *rp = (struct lagg_reqport *)data, rpbuf;
1334 	struct lagg_reqflags *rf = (struct lagg_reqflags *)data;
1335 	struct ifreq *ifr = (struct ifreq *)data;
1336 	struct lagg_port *lp;
1337 	struct ifnet *tpif;
1338 	struct thread *td = curthread;
1339 	char *buf, *outbuf;
1340 	int count, buflen, len, error = 0, oldmtu;
1341 
1342 	bzero(&rpbuf, sizeof(rpbuf));
1343 
1344 	/* XXX: This can race with lagg_clone_destroy. */
1345 
1346 	switch (cmd) {
1347 	case SIOCGLAGG:
1348 		LAGG_XLOCK(sc);
1349 		buflen = sc->sc_count * sizeof(struct lagg_reqport);
1350 		outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO);
1351 		ra->ra_proto = sc->sc_proto;
1352 		lagg_proto_request(sc, &ra->ra_psc);
1353 		count = 0;
1354 		buf = outbuf;
1355 		len = min(ra->ra_size, buflen);
1356 		CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
1357 			if (len < sizeof(rpbuf))
1358 				break;
1359 
1360 			lagg_port2req(lp, &rpbuf);
1361 			memcpy(buf, &rpbuf, sizeof(rpbuf));
1362 			count++;
1363 			buf += sizeof(rpbuf);
1364 			len -= sizeof(rpbuf);
1365 		}
1366 		LAGG_XUNLOCK(sc);
1367 		ra->ra_ports = count;
1368 		ra->ra_size = count * sizeof(rpbuf);
1369 		error = copyout(outbuf, ra->ra_port, ra->ra_size);
1370 		free(outbuf, M_TEMP);
1371 		break;
1372 	case SIOCSLAGG:
1373 		error = priv_check(td, PRIV_NET_LAGG);
1374 		if (error)
1375 			break;
1376 		if (ra->ra_proto >= LAGG_PROTO_MAX) {
1377 			error = EPROTONOSUPPORT;
1378 			break;
1379 		}
1380 		/* Infiniband only supports the failover protocol. */
1381 		if (ra->ra_proto != LAGG_PROTO_FAILOVER &&
1382 		    ifp->if_type == IFT_INFINIBAND) {
1383 			error = EPROTONOSUPPORT;
1384 			break;
1385 		}
1386 		LAGG_XLOCK(sc);
1387 		lagg_proto_detach(sc);
1388 		lagg_proto_attach(sc, ra->ra_proto);
1389 		LAGG_XUNLOCK(sc);
1390 		break;
1391 	case SIOCGLAGGOPTS:
1392 		LAGG_XLOCK(sc);
1393 		ro->ro_opts = sc->sc_opts;
1394 		if (sc->sc_proto == LAGG_PROTO_LACP) {
1395 			struct lacp_softc *lsc;
1396 
1397 			lsc = (struct lacp_softc *)sc->sc_psc;
1398 			if (lsc->lsc_debug.lsc_tx_test != 0)
1399 				ro->ro_opts |= LAGG_OPT_LACP_TXTEST;
1400 			if (lsc->lsc_debug.lsc_rx_test != 0)
1401 				ro->ro_opts |= LAGG_OPT_LACP_RXTEST;
1402 			if (lsc->lsc_strict_mode != 0)
1403 				ro->ro_opts |= LAGG_OPT_LACP_STRICT;
1404 			if (lsc->lsc_fast_timeout != 0)
1405 				ro->ro_opts |= LAGG_OPT_LACP_FAST_TIMO;
1406 
1407 			ro->ro_active = sc->sc_active;
1408 		} else {
1409 			ro->ro_active = 0;
1410 			CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
1411 				ro->ro_active += LAGG_PORTACTIVE(lp);
1412 		}
1413 		ro->ro_bkt = sc->sc_stride;
1414 		ro->ro_flapping = sc->sc_flapping;
1415 		ro->ro_flowid_shift = sc->flowid_shift;
1416 		LAGG_XUNLOCK(sc);
1417 		break;
1418 	case SIOCSLAGGOPTS:
1419 		error = priv_check(td, PRIV_NET_LAGG);
1420 		if (error)
1421 			break;
1422 
1423 		/*
1424 		 * The stride option was added without defining a corresponding
1425 		 * LAGG_OPT flag, so handle a non-zero value before checking
1426 		 * anything else to preserve compatibility.
1427 		 */
1428 		LAGG_XLOCK(sc);
1429 		if (ro->ro_opts == 0 && ro->ro_bkt != 0) {
1430 			if (sc->sc_proto != LAGG_PROTO_ROUNDROBIN) {
1431 				LAGG_XUNLOCK(sc);
1432 				error = EINVAL;
1433 				break;
1434 			}
1435 			sc->sc_stride = ro->ro_bkt;
1436 		}
1437 		if (ro->ro_opts == 0) {
1438 			LAGG_XUNLOCK(sc);
1439 			break;
1440 		}
1441 
1442 		/*
1443 		 * Set options.  LACP options are stored in sc->sc_psc,
1444 		 * not in sc_opts.
1445 		 */
1446 		int valid, lacp;
1447 
1448 		switch (ro->ro_opts) {
1449 		case LAGG_OPT_USE_FLOWID:
1450 		case -LAGG_OPT_USE_FLOWID:
1451 		case LAGG_OPT_USE_NUMA:
1452 		case -LAGG_OPT_USE_NUMA:
1453 		case LAGG_OPT_FLOWIDSHIFT:
1454 		case LAGG_OPT_RR_LIMIT:
1455 			valid = 1;
1456 			lacp = 0;
1457 			break;
1458 		case LAGG_OPT_LACP_TXTEST:
1459 		case -LAGG_OPT_LACP_TXTEST:
1460 		case LAGG_OPT_LACP_RXTEST:
1461 		case -LAGG_OPT_LACP_RXTEST:
1462 		case LAGG_OPT_LACP_STRICT:
1463 		case -LAGG_OPT_LACP_STRICT:
1464 		case LAGG_OPT_LACP_FAST_TIMO:
1465 		case -LAGG_OPT_LACP_FAST_TIMO:
1466 			valid = lacp = 1;
1467 			break;
1468 		default:
1469 			valid = lacp = 0;
1470 			break;
1471 		}
1472 
1473 		if (valid == 0 ||
1474 		    (lacp == 1 && sc->sc_proto != LAGG_PROTO_LACP)) {
1475 			/* Invalid combination of options specified. */
1476 			error = EINVAL;
1477 			LAGG_XUNLOCK(sc);
1478 			break;	/* Return from SIOCSLAGGOPTS. */
1479 		}
1480 
1481 		/*
1482 		 * Store new options into sc->sc_opts except for
1483 		 * FLOWIDSHIFT, RR and LACP options.
1484 		 */
1485 		if (lacp == 0) {
1486 			if (ro->ro_opts == LAGG_OPT_FLOWIDSHIFT)
1487 				sc->flowid_shift = ro->ro_flowid_shift;
1488 			else if (ro->ro_opts == LAGG_OPT_RR_LIMIT) {
1489 				if (sc->sc_proto != LAGG_PROTO_ROUNDROBIN ||
1490 				    ro->ro_bkt == 0) {
1491 					error = EINVAL;
1492 					LAGG_XUNLOCK(sc);
1493 					break;
1494 				}
1495 				sc->sc_stride = ro->ro_bkt;
1496 			} else if (ro->ro_opts > 0)
1497 				sc->sc_opts |= ro->ro_opts;
1498 			else
1499 				sc->sc_opts &= ~ro->ro_opts;
1500 		} else {
1501 			struct lacp_softc *lsc;
1502 			struct lacp_port *lp;
1503 
1504 			lsc = (struct lacp_softc *)sc->sc_psc;
1505 
1506 			switch (ro->ro_opts) {
1507 			case LAGG_OPT_LACP_TXTEST:
1508 				lsc->lsc_debug.lsc_tx_test = 1;
1509 				break;
1510 			case -LAGG_OPT_LACP_TXTEST:
1511 				lsc->lsc_debug.lsc_tx_test = 0;
1512 				break;
1513 			case LAGG_OPT_LACP_RXTEST:
1514 				lsc->lsc_debug.lsc_rx_test = 1;
1515 				break;
1516 			case -LAGG_OPT_LACP_RXTEST:
1517 				lsc->lsc_debug.lsc_rx_test = 0;
1518 				break;
1519 			case LAGG_OPT_LACP_STRICT:
1520 				lsc->lsc_strict_mode = 1;
1521 				break;
1522 			case -LAGG_OPT_LACP_STRICT:
1523 				lsc->lsc_strict_mode = 0;
1524 				break;
1525 			case LAGG_OPT_LACP_FAST_TIMO:
1526 				LACP_LOCK(lsc);
1527 				LIST_FOREACH(lp, &lsc->lsc_ports, lp_next)
1528 					lp->lp_state |= LACP_STATE_TIMEOUT;
1529 				LACP_UNLOCK(lsc);
1530 				lsc->lsc_fast_timeout = 1;
1531 				break;
1532 			case -LAGG_OPT_LACP_FAST_TIMO:
1533 				LACP_LOCK(lsc);
1534 				LIST_FOREACH(lp, &lsc->lsc_ports, lp_next)
1535 					lp->lp_state &= ~LACP_STATE_TIMEOUT;
1536 				LACP_UNLOCK(lsc);
1537 				lsc->lsc_fast_timeout = 0;
1538 				break;
1539 			}
1540 		}
1541 		LAGG_XUNLOCK(sc);
1542 		break;
1543 	case SIOCGLAGGFLAGS:
1544 		rf->rf_flags = 0;
1545 		LAGG_XLOCK(sc);
1546 		if (sc->sc_flags & MBUF_HASHFLAG_L2)
1547 			rf->rf_flags |= LAGG_F_HASHL2;
1548 		if (sc->sc_flags & MBUF_HASHFLAG_L3)
1549 			rf->rf_flags |= LAGG_F_HASHL3;
1550 		if (sc->sc_flags & MBUF_HASHFLAG_L4)
1551 			rf->rf_flags |= LAGG_F_HASHL4;
1552 		LAGG_XUNLOCK(sc);
1553 		break;
1554 	case SIOCSLAGGHASH:
1555 		error = priv_check(td, PRIV_NET_LAGG);
1556 		if (error)
1557 			break;
1558 		if ((rf->rf_flags & LAGG_F_HASHMASK) == 0) {
1559 			error = EINVAL;
1560 			break;
1561 		}
1562 		LAGG_XLOCK(sc);
1563 		sc->sc_flags = 0;
1564 		if (rf->rf_flags & LAGG_F_HASHL2)
1565 			sc->sc_flags |= MBUF_HASHFLAG_L2;
1566 		if (rf->rf_flags & LAGG_F_HASHL3)
1567 			sc->sc_flags |= MBUF_HASHFLAG_L3;
1568 		if (rf->rf_flags & LAGG_F_HASHL4)
1569 			sc->sc_flags |= MBUF_HASHFLAG_L4;
1570 		LAGG_XUNLOCK(sc);
1571 		break;
1572 	case SIOCGLAGGPORT:
1573 		if (rp->rp_portname[0] == '\0' ||
1574 		    (tpif = ifunit_ref(rp->rp_portname)) == NULL) {
1575 			error = EINVAL;
1576 			break;
1577 		}
1578 
1579 		NET_EPOCH_ENTER(et);
1580 		if ((lp = (struct lagg_port *)tpif->if_lagg) == NULL ||
1581 		    lp->lp_softc != sc) {
1582 			error = ENOENT;
1583 			NET_EPOCH_EXIT(et);
1584 			if_rele(tpif);
1585 			break;
1586 		}
1587 
1588 		lagg_port2req(lp, rp);
1589 		NET_EPOCH_EXIT(et);
1590 		if_rele(tpif);
1591 		break;
1592 	case SIOCSLAGGPORT:
1593 		error = priv_check(td, PRIV_NET_LAGG);
1594 		if (error)
1595 			break;
1596 		if (rp->rp_portname[0] == '\0' ||
1597 		    (tpif = ifunit_ref(rp->rp_portname)) == NULL) {
1598 			error = EINVAL;
1599 			break;
1600 		}
1601 #ifdef INET6
1602 		/*
1603 		 * A laggport interface should not have inet6 address
1604 		 * because two interfaces with a valid link-local
1605 		 * scope zone must not be merged in any form.  This
1606 		 * restriction is needed to prevent violation of
1607  * the link-local scope zone.  An attempt to add a laggport
1608  * interface which has inet6 addresses triggers
1609 		 * removal of all inet6 addresses on the member
1610 		 * interface.
1611 		 */
1612 		if (in6ifa_llaonifp(tpif)) {
1613 			in6_ifdetach(tpif);
1614 			if_printf(sc->sc_ifp,
1615 			    "IPv6 addresses on %s have been removed "
1616 			    "before adding it as a member to prevent "
1617 			    "IPv6 address scope violation.\n",
1618 			    tpif->if_xname);
1619 		}
1620 #endif
1621 		oldmtu = ifp->if_mtu;
1622 		LAGG_XLOCK(sc);
1623 		error = lagg_port_create(sc, tpif);
1624 		LAGG_XUNLOCK(sc);
1625 		if_rele(tpif);
1626 
1627 		/*
1628 		 * LAGG MTU may change during addition of the first port.
1629 		 * If it did, do network layer specific procedure.
1630 		 */
1631 		if (ifp->if_mtu != oldmtu)
1632 			if_notifymtu(ifp);
1633 
1634 		VLAN_CAPABILITIES(ifp);
1635 		break;
1636 	case SIOCSLAGGDELPORT:
1637 		error = priv_check(td, PRIV_NET_LAGG);
1638 		if (error)
1639 			break;
1640 		if (rp->rp_portname[0] == '\0' ||
1641 		    (tpif = ifunit_ref(rp->rp_portname)) == NULL) {
1642 			error = EINVAL;
1643 			break;
1644 		}
1645 
1646 		LAGG_XLOCK(sc);
1647 		if ((lp = (struct lagg_port *)tpif->if_lagg) == NULL ||
1648 		    lp->lp_softc != sc) {
1649 			error = ENOENT;
1650 			LAGG_XUNLOCK(sc);
1651 			if_rele(tpif);
1652 			break;
1653 		}
1654 
1655 		error = lagg_port_destroy(lp, 1);
1656 		LAGG_XUNLOCK(sc);
1657 		if_rele(tpif);
1658 		VLAN_CAPABILITIES(ifp);
1659 		break;
1660 	case SIOCSIFFLAGS:
1661 		/* Set flags on ports too */
1662 		LAGG_XLOCK(sc);
1663 		CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
1664 			lagg_setflags(lp, 1);
1665 		}
1666 
1667 		if (!(ifp->if_flags & IFF_UP) &&
1668 		    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1669 			/*
1670 			 * If interface is marked down and it is running,
1671 			 * then stop and disable it.
1672 			 */
1673 			lagg_stop(sc);
1674 			LAGG_XUNLOCK(sc);
1675 		} else if ((ifp->if_flags & IFF_UP) &&
1676 		    !(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1677 			/*
1678 			 * If interface is marked up and it is stopped, then
1679 			 * start it.
1680 			 */
1681 			LAGG_XUNLOCK(sc);
1682 			(*ifp->if_init)(sc);
1683 		} else
1684 			LAGG_XUNLOCK(sc);
1685 		break;
1686 	case SIOCADDMULTI:
1687 	case SIOCDELMULTI:
1688 		LAGG_XLOCK(sc);
1689 		CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
1690 			lagg_clrmulti(lp);
1691 			lagg_setmulti(lp);
1692 		}
1693 		LAGG_XUNLOCK(sc);
1694 		error = 0;
1695 		break;
1696 	case SIOCSIFMEDIA:
1697 	case SIOCGIFMEDIA:
1698 		if (ifp->if_type == IFT_INFINIBAND)
1699 			error = EINVAL;
1700 		else
1701 			error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
1702 		break;
1703 
1704 	case SIOCSIFCAP:
1705 	case SIOCSIFCAPNV:
1706 		LAGG_XLOCK(sc);
1707 		CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
1708 			if (lp->lp_ioctl != NULL)
1709 				(*lp->lp_ioctl)(lp->lp_ifp, cmd, data);
1710 		}
1711 		lagg_capabilities(sc);
1712 		LAGG_XUNLOCK(sc);
1713 		VLAN_CAPABILITIES(ifp);
1714 		error = 0;
1715 		break;
1716 
1717 	case SIOCGIFCAPNV:
1718 		error = 0;
1719 		break;
1720 
1721 	case SIOCSIFMTU:
1722 		LAGG_XLOCK(sc);
1723 		CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
1724 			if (lp->lp_ioctl != NULL)
1725 				error = (*lp->lp_ioctl)(lp->lp_ifp, cmd, data);
1726 			else
1727 				error = EINVAL;
1728 			if (error != 0) {
1729 				if_printf(ifp,
1730 				    "failed to change MTU to %d on port %s, "
1731 				    "reverting all ports to original MTU (%d)\n",
1732 				    ifr->ifr_mtu, lp->lp_ifp->if_xname, ifp->if_mtu);
1733 				break;
1734 			}
1735 		}
1736 		if (error == 0) {
1737 			ifp->if_mtu = ifr->ifr_mtu;
1738 		} else {
1739 			/* set every port back to the original MTU */
1740 			ifr->ifr_mtu = ifp->if_mtu;
1741 			CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
1742 				if (lp->lp_ioctl != NULL)
1743 					(*lp->lp_ioctl)(lp->lp_ifp, cmd, data);
1744 			}
1745 		}
1746 		lagg_capabilities(sc);
1747 		LAGG_XUNLOCK(sc);
1748 		VLAN_CAPABILITIES(ifp);
1749 		break;
1750 
1751 	default:
1752 		error = ether_ioctl(ifp, cmd, data);
1753 		break;
1754 	}
1755 	return (error);
1756 }
1757 
1758 #if defined(KERN_TLS) || defined(RATELIMIT)
1759 #ifdef RATELIMIT
1760 static const struct if_snd_tag_sw lagg_snd_tag_ul_sw = {
1761 	.snd_tag_modify = lagg_snd_tag_modify,
1762 	.snd_tag_query = lagg_snd_tag_query,
1763 	.snd_tag_free = lagg_snd_tag_free,
1764 	.next_snd_tag = lagg_next_snd_tag,
1765 	.type = IF_SND_TAG_TYPE_UNLIMITED
1766 };
1767 
1768 static const struct if_snd_tag_sw lagg_snd_tag_rl_sw = {
1769 	.snd_tag_modify = lagg_snd_tag_modify,
1770 	.snd_tag_query = lagg_snd_tag_query,
1771 	.snd_tag_free = lagg_snd_tag_free,
1772 	.next_snd_tag = lagg_next_snd_tag,
1773 	.type = IF_SND_TAG_TYPE_RATE_LIMIT
1774 };
1775 #endif
1776 
1777 #ifdef KERN_TLS
1778 static const struct if_snd_tag_sw lagg_snd_tag_tls_sw = {
1779 	.snd_tag_modify = lagg_snd_tag_modify,
1780 	.snd_tag_query = lagg_snd_tag_query,
1781 	.snd_tag_free = lagg_snd_tag_free,
1782 	.next_snd_tag = lagg_next_snd_tag,
1783 	.type = IF_SND_TAG_TYPE_TLS
1784 };
1785 
1786 #ifdef RATELIMIT
1787 static const struct if_snd_tag_sw lagg_snd_tag_tls_rl_sw = {
1788 	.snd_tag_modify = lagg_snd_tag_modify,
1789 	.snd_tag_query = lagg_snd_tag_query,
1790 	.snd_tag_free = lagg_snd_tag_free,
1791 	.next_snd_tag = lagg_next_snd_tag,
1792 	.type = IF_SND_TAG_TYPE_TLS_RATE_LIMIT
1793 };
1794 #endif
1795 #endif
1796 
1797 static inline struct lagg_snd_tag *
1798 mst_to_lst(struct m_snd_tag *mst)
1799 {
1800 
1801 	return (__containerof(mst, struct lagg_snd_tag, com));
1802 }
1803 
1804 /*
1805  * Look up the port used by a specific flow.  This only works for lagg
1806  * protocols with deterministic port mappings (e.g. not roundrobin).
1807  * In addition, protocols which use a hash to map flows to ports must
1808  * be configured to use the mbuf flowid rather than hashing packet
1809  * contents.
1810  */
1811 static struct lagg_port *
1812 lookup_snd_tag_port(struct ifnet *ifp, uint32_t flowid, uint32_t flowtype,
1813     uint8_t numa_domain)
1814 {
1815 	struct lagg_softc *sc;
1816 	struct lagg_port *lp;
1817 	struct lagg_lb *lb;
1818 	uint32_t hash, p;
1819 	int err;
1820 
1821 	sc = ifp->if_softc;
1822 
1823 	switch (sc->sc_proto) {
1824 	case LAGG_PROTO_FAILOVER:
1825 		return (lagg_link_active(sc, sc->sc_primary));
1826 	case LAGG_PROTO_LOADBALANCE:
1827 		if ((sc->sc_opts & LAGG_OPT_USE_FLOWID) == 0 ||
1828 		    flowtype == M_HASHTYPE_NONE)
1829 			return (NULL);
1830 		p = flowid >> sc->flowid_shift;
1831 		p %= sc->sc_count;
1832 		lb = (struct lagg_lb *)sc->sc_psc;
1833 		lp = lb->lb_ports[p];
1834 		return (lagg_link_active(sc, lp));
1835 	case LAGG_PROTO_LACP:
1836 		if ((sc->sc_opts & LAGG_OPT_USE_FLOWID) == 0 ||
1837 		    flowtype == M_HASHTYPE_NONE)
1838 			return (NULL);
1839 		hash = flowid >> sc->flowid_shift;
1840 		return (lacp_select_tx_port_by_hash(sc, hash, numa_domain, &err));
1841 	default:
1842 		return (NULL);
1843 	}
1844 }
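
/*
 * Sketch (assumed values): with flowid_shift == 16 and a 3-port
 * LOADBALANCE lagg, a flowid of 0x12345678 selects port
 * (0x12345678 >> 16) % 3 == 0x1234 % 3 == 1, matching the computation
 * above.
 */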
1845 
1846 static int
1847 lagg_snd_tag_alloc(struct ifnet *ifp,
1848     union if_snd_tag_alloc_params *params,
1849     struct m_snd_tag **ppmt)
1850 {
1851 	struct epoch_tracker et;
1852 	const struct if_snd_tag_sw *sw;
1853 	struct lagg_snd_tag *lst;
1854 	struct lagg_port *lp;
1855 	struct ifnet *lp_ifp;
1856 	struct m_snd_tag *mst;
1857 	int error;
1858 
1859 	switch (params->hdr.type) {
1860 #ifdef RATELIMIT
1861 	case IF_SND_TAG_TYPE_UNLIMITED:
1862 		sw = &lagg_snd_tag_ul_sw;
1863 		break;
1864 	case IF_SND_TAG_TYPE_RATE_LIMIT:
1865 		sw = &lagg_snd_tag_rl_sw;
1866 		break;
1867 #endif
1868 #ifdef KERN_TLS
1869 	case IF_SND_TAG_TYPE_TLS:
1870 		sw = &lagg_snd_tag_tls_sw;
1871 		break;
1872 	case IF_SND_TAG_TYPE_TLS_RX:
1873 		/* Return tag from port interface directly. */
1874 		sw = NULL;
1875 		break;
1876 #ifdef RATELIMIT
1877 	case IF_SND_TAG_TYPE_TLS_RATE_LIMIT:
1878 		sw = &lagg_snd_tag_tls_rl_sw;
1879 		break;
1880 #endif
1881 #endif
1882 	default:
1883 		return (EOPNOTSUPP);
1884 	}
1885 
1886 	NET_EPOCH_ENTER(et);
1887 	lp = lookup_snd_tag_port(ifp, params->hdr.flowid,
1888 	    params->hdr.flowtype, params->hdr.numa_domain);
1889 	if (lp == NULL) {
1890 		NET_EPOCH_EXIT(et);
1891 		return (EOPNOTSUPP);
1892 	}
1893 	if (lp->lp_ifp == NULL) {
1894 		NET_EPOCH_EXIT(et);
1895 		return (EOPNOTSUPP);
1896 	}
1897 	lp_ifp = lp->lp_ifp;
1898 	if_ref(lp_ifp);
1899 	NET_EPOCH_EXIT(et);
1900 
1901 	if (sw != NULL) {
1902 		lst = malloc(sizeof(*lst), M_LAGG, M_NOWAIT);
1903 		if (lst == NULL) {
1904 			if_rele(lp_ifp);
1905 			return (ENOMEM);
1906 		}
1907 	} else
1908 		lst = NULL;
1909 
1910 	error = m_snd_tag_alloc(lp_ifp, params, &mst);
1911 	if_rele(lp_ifp);
1912 	if (error) {
1913 		free(lst, M_LAGG);
1914 		return (error);
1915 	}
1916 
1917 	if (sw != NULL) {
1918 		m_snd_tag_init(&lst->com, ifp, sw);
1919 		lst->tag = mst;
1920 
1921 		*ppmt = &lst->com;
1922 	} else
1923 		*ppmt = mst;
1924 
1925 	return (0);
1926 }
1927 
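/*
 * Expose the underlying port's tag so callers can drill down to the
 * interface that actually carries the flow.
 */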
1928 static struct m_snd_tag *
1929 lagg_next_snd_tag(struct m_snd_tag *mst)
1930 {
1931 	struct lagg_snd_tag *lst;
1932 
1933 	lst = mst_to_lst(mst);
1934 	return (lst->tag);
1935 }
1936 
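/*
 * The modify, query and free operations are forwarded to the
 * underlying port's tag.
 */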
1937 static int
1938 lagg_snd_tag_modify(struct m_snd_tag *mst,
1939     union if_snd_tag_modify_params *params)
1940 {
1941 	struct lagg_snd_tag *lst;
1942 
1943 	lst = mst_to_lst(mst);
1944 	return (lst->tag->sw->snd_tag_modify(lst->tag, params));
1945 }
1946 
1947 static int
1948 lagg_snd_tag_query(struct m_snd_tag *mst,
1949     union if_snd_tag_query_params *params)
1950 {
1951 	struct lagg_snd_tag *lst;
1952 
1953 	lst = mst_to_lst(mst);
1954 	return (lst->tag->sw->snd_tag_query(lst->tag, params));
1955 }
1956 
1957 static void
1958 lagg_snd_tag_free(struct m_snd_tag *mst)
1959 {
1960 	struct lagg_snd_tag *lst;
1961 
1962 	lst = mst_to_lst(mst);
1963 	m_snd_tag_rele(lst->tag);
1964 	free(lst, M_LAGG);
1965 }
1966 
1967 static void
1968 lagg_ratelimit_query(struct ifnet *ifp __unused, struct if_ratelimit_query_results *q)
1969 {
1970 	/*
1971 	 * For lagg, we have an indirect
1972 	 * interface. The caller needs to
1973 	 * get a ratelimit tag on the actual
1974 	 * interface the flow will go on.
1975 	 */
1976 	q->rate_table = NULL;
1977 	q->flags = RT_IS_INDIRECT;
1978 	q->max_flows = 0;
1979 	q->number_of_rates = 0;
1980 }
1981 #endif
1982 
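/*
 * Program the lagg interface's link-layer multicast memberships onto
 * a port, recording each address so it can be released later.
 */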
1983 static int
1984 lagg_setmulti(struct lagg_port *lp)
1985 {
1986 	struct lagg_softc *sc = lp->lp_softc;
1987 	struct ifnet *ifp = lp->lp_ifp;
1988 	struct ifnet *scifp = sc->sc_ifp;
1989 	struct lagg_mc *mc;
1990 	struct ifmultiaddr *ifma;
1991 	int error;
1992 
1993 	IF_ADDR_WLOCK(scifp);
1994 	CK_STAILQ_FOREACH(ifma, &scifp->if_multiaddrs, ifma_link) {
1995 		if (ifma->ifma_addr->sa_family != AF_LINK)
1996 			continue;
1997 		mc = malloc(sizeof(struct lagg_mc), M_LAGG, M_NOWAIT);
1998 		if (mc == NULL) {
1999 			IF_ADDR_WUNLOCK(scifp);
2000 			return (ENOMEM);
2001 		}
2002 		bcopy(ifma->ifma_addr, &mc->mc_addr,
2003 		    ifma->ifma_addr->sa_len);
2004 		mc->mc_addr.sdl_index = ifp->if_index;
2005 		mc->mc_ifma = NULL;
2006 		SLIST_INSERT_HEAD(&lp->lp_mc_head, mc, mc_entries);
2007 	}
2008 	IF_ADDR_WUNLOCK(scifp);
2009 	SLIST_FOREACH(mc, &lp->lp_mc_head, mc_entries) {
2010 		error = if_addmulti(ifp,
2011 		    (struct sockaddr *)&mc->mc_addr, &mc->mc_ifma);
2012 		if (error)
2013 			return (error);
2014 	}
2015 	return (0);
2016 }
2017 
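/*
 * Release the multicast memberships previously programmed onto a
 * port by lagg_setmulti().
 */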
2018 static int
2019 lagg_clrmulti(struct lagg_port *lp)
2020 {
2021 	struct lagg_mc *mc;
2022 
2023 	LAGG_XLOCK_ASSERT(lp->lp_softc);
2024 	while ((mc = SLIST_FIRST(&lp->lp_mc_head)) != NULL) {
2025 		SLIST_REMOVE(&lp->lp_mc_head, mc, lagg_mc, mc_entries);
2026 		if (mc->mc_ifma && lp->lp_detaching == 0)
2027 			if_delmulti_ifma(mc->mc_ifma);
2028 		free(mc, M_LAGG);
2029 	}
2030 	return (0);
2031 }
2032 
2033 static void
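/*
 * Program a port's capability bits, using the nvlist based
 * SIOCSIFCAPNV ioctl when the driver supports it and plain
 * SIOCSIFCAP otherwise.
 */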
2034 lagg_setcaps(struct lagg_port *lp, int cap, int cap2)
2035 {
2036 	struct ifreq ifr;
2037 	struct siocsifcapnv_driver_data drv_ioctl_data;
2038 
2039 	if (lp->lp_ifp->if_capenable == cap &&
2040 	    lp->lp_ifp->if_capenable2 == cap2)
2041 		return;
2042 	if (lp->lp_ioctl == NULL)
2043 		return;
2044 	/* XXX */
2045 	if ((lp->lp_ifp->if_capabilities & IFCAP_NV) != 0) {
2046 		drv_ioctl_data.reqcap = cap;
2047 		drv_ioctl_data.reqcap2 = cap2;
2048 		drv_ioctl_data.nvcap = NULL;
2049 		(*lp->lp_ioctl)(lp->lp_ifp, SIOCSIFCAPNV,
2050 		    (caddr_t)&drv_ioctl_data);
2051 	} else {
2052 		ifr.ifr_reqcap = cap;
2053 		(*lp->lp_ioctl)(lp->lp_ifp, SIOCSIFCAP, (caddr_t)&ifr);
2054 	}
2055 }
2056 
2057 /* Handle a ref counted flag that should be set on the lagg port as well */
2058 static int
2059 lagg_setflag(struct lagg_port *lp, int flag, int status,
2060     int (*func)(struct ifnet *, int))
2061 {
2062 	struct lagg_softc *sc = lp->lp_softc;
2063 	struct ifnet *scifp = sc->sc_ifp;
2064 	struct ifnet *ifp = lp->lp_ifp;
2065 	int error;
2066 
2067 	LAGG_XLOCK_ASSERT(sc);
2068 
2069 	status = status ? (scifp->if_flags & flag) : 0;
2070 	/* Now "status" contains the flag value or 0 */
2071 
2072 	/*
2073 	 * See if the recorded port status differs from what we want
2074 	 * it to be.  If it does, flip it.  We record the port status
2075 	 * in lp_ifflags so that we won't clear a port flag we haven't
2076 	 * set.  In fact, we don't clear or set port flags directly,
2077 	 * but acquire or release references to them.  That's why we
2078 	 * can be sure that the recorded flags still agree with the
2079 	 * actual port flags.
2080 	 */
2081 	if (status != (lp->lp_ifflags & flag)) {
2082 		error = (*func)(ifp, status);
2083 		if (error)
2084 			return (error);
2085 		lp->lp_ifflags &= ~flag;
2086 		lp->lp_ifflags |= status;
2087 	}
2088 	return (0);
2089 }
2090 
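/*
 * For example (illustrative), enabling IFF_PROMISC on the lagg
 * acquires a promiscuous reference on each port via ifpromisc();
 * disabling it releases those references again.
 */
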
2091 /*
2092  * Handle IFF_* flags that require certain changes on the lagg port:
2093  * if "status" is true, update the port's flags to match the lagg;
2094  * if "status" is false, forcibly clear the flags set on the port.
2095  */
2096 static int
2097 lagg_setflags(struct lagg_port *lp, int status)
2098 {
2099 	int error, i;
2100 
2101 	for (i = 0; lagg_pflags[i].flag; i++) {
2102 		error = lagg_setflag(lp, lagg_pflags[i].flag,
2103 		    status, lagg_pflags[i].func);
2104 		if (error)
2105 			return (error);
2106 	}
2107 	return (0);
2108 }
2109 
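/*
 * The if_transmit entry point for Ethernet laggs: tap the packet off
 * to BPF and hand it to the active protocol's start routine inside
 * the network epoch.
 */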
2110 static int
2111 lagg_transmit_ethernet(struct ifnet *ifp, struct mbuf *m)
2112 {
2113 	struct epoch_tracker et;
2114 	struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
2115 	int error;
2116 
2117 #if defined(KERN_TLS) || defined(RATELIMIT)
2118 	if (m->m_pkthdr.csum_flags & CSUM_SND_TAG)
2119 		MPASS(m->m_pkthdr.snd_tag->ifp == ifp);
2120 #endif
2121 	NET_EPOCH_ENTER(et);
2122 	/* We need a Tx algorithm and at least one port */
2123 	if (sc->sc_proto == LAGG_PROTO_NONE || sc->sc_count == 0) {
2124 		NET_EPOCH_EXIT(et);
2125 		m_freem(m);
2126 		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2127 		return (ENXIO);
2128 	}
2129 
2130 	ETHER_BPF_MTAP(ifp, m);
2131 
2132 	error = lagg_proto_start(sc, m);
2133 	NET_EPOCH_EXIT(et);
2134 	return (error);
2135 }
2136 
2137 static int
2138 lagg_transmit_infiniband(struct ifnet *ifp, struct mbuf *m)
2139 {
2140 	struct epoch_tracker et;
2141 	struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
2142 	int error;
2143 
2144 #if defined(KERN_TLS) || defined(RATELIMIT)
2145 	if (m->m_pkthdr.csum_flags & CSUM_SND_TAG)
2146 		MPASS(m->m_pkthdr.snd_tag->ifp == ifp);
2147 #endif
2148 	NET_EPOCH_ENTER(et);
2149 	/* We need a Tx algorithm and at least one port */
2150 	if (sc->sc_proto == LAGG_PROTO_NONE || sc->sc_count == 0) {
2151 		NET_EPOCH_EXIT(et);
2152 		m_freem(m);
2153 		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2154 		return (ENXIO);
2155 	}
2156 
2157 	INFINIBAND_BPF_MTAP(ifp, m);
2158 
2159 	error = lagg_proto_start(sc, m);
2160 	NET_EPOCH_EXIT(et);
2161 	return (error);
2162 }
2163 
2164 /*
2165  * The ifp->if_qflush entry point for lagg(4) is a no-op.
2166  */
2167 static void
2168 lagg_qflush(struct ifnet *ifp __unused)
2169 {
2170 }
2171 
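/*
 * Receive path for Ethernet laggs: packets arriving on a port are
 * run through the protocol input routine and, unless consumed there
 * or the lagg is in monitor mode, returned for input processing on
 * the lagg interface.
 */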
2172 static struct mbuf *
2173 lagg_input_ethernet(struct ifnet *ifp, struct mbuf *m)
2174 {
2175 	struct epoch_tracker et;
2176 	struct lagg_port *lp = ifp->if_lagg;
2177 	struct lagg_softc *sc = lp->lp_softc;
2178 	struct ifnet *scifp = sc->sc_ifp;
2179 
2180 	NET_EPOCH_ENTER(et);
2181 	if ((scifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
2182 	    lp->lp_detaching != 0 ||
2183 	    sc->sc_proto == LAGG_PROTO_NONE) {
2184 		NET_EPOCH_EXIT(et);
2185 		m_freem(m);
2186 		return (NULL);
2187 	}
2188 
2189 	ETHER_BPF_MTAP(scifp, m);
2190 
2191 	m = lagg_proto_input(sc, lp, m);
2192 	if (m != NULL && (scifp->if_flags & IFF_MONITOR) != 0) {
2193 		m_freem(m);
2194 		m = NULL;
2195 	}
2196 
2197 #ifdef DEV_NETMAP
2198 	if (m != NULL && scifp->if_capenable & IFCAP_NETMAP) {
2199 		scifp->if_input(scifp, m);
2200 		m = NULL;
2201 	}
2202 #endif	/* DEV_NETMAP */
2203 
2204 	NET_EPOCH_EXIT(et);
2205 	return (m);
2206 }
2207 
2208 static struct mbuf *
2209 lagg_input_infiniband(struct ifnet *ifp, struct mbuf *m)
2210 {
2211 	struct epoch_tracker et;
2212 	struct lagg_port *lp = ifp->if_lagg;
2213 	struct lagg_softc *sc = lp->lp_softc;
2214 	struct ifnet *scifp = sc->sc_ifp;
2215 
2216 	NET_EPOCH_ENTER(et);
2217 	if ((scifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
2218 	    lp->lp_detaching != 0 ||
2219 	    sc->sc_proto == LAGG_PROTO_NONE) {
2220 		NET_EPOCH_EXIT(et);
2221 		m_freem(m);
2222 		return (NULL);
2223 	}
2224 
2225 	INFINIBAND_BPF_MTAP(scifp, m);
2226 
2227 	m = lagg_proto_input(sc, lp, m);
2228 	if (m != NULL && (scifp->if_flags & IFF_MONITOR) != 0) {
2229 		m_freem(m);
2230 		m = NULL;
2231 	}
2232 
2233 	NET_EPOCH_EXIT(et);
2234 	return (m);
2235 }
2236 
2237 static int
2238 lagg_media_change(struct ifnet *ifp)
2239 {
2240 	struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
2241 
2242 	if (sc->sc_ifflags & IFF_DEBUG)
2243 		printf("%s\n", __func__);
2244 
2245 	/* Ignore */
2246 	return (0);
2247 }
2248 
2249 static void
2250 lagg_media_status(struct ifnet *ifp, struct ifmediareq *imr)
2251 {
2252 	struct epoch_tracker et;
2253 	struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
2254 	struct lagg_port *lp;
2255 
2256 	imr->ifm_status = IFM_AVALID;
2257 	imr->ifm_active = IFM_ETHER | IFM_AUTO;
2258 
2259 	NET_EPOCH_ENTER(et);
2260 	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
2261 		if (LAGG_PORTACTIVE(lp))
2262 			imr->ifm_status |= IFM_ACTIVE;
2263 	}
2264 	NET_EPOCH_EXIT(et);
2265 }
2266 
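/*
 * Recompute the lagg's link state and if_baudrate from its member
 * ports; LACP is skipped because it manages both itself.
 */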
2267 static void
2268 lagg_linkstate(struct lagg_softc *sc)
2269 {
2270 	struct epoch_tracker et;
2271 	struct lagg_port *lp;
2272 	int new_link = LINK_STATE_DOWN;
2273 	uint64_t speed;
2274 
2275 	LAGG_XLOCK_ASSERT(sc);
2276 
2277 	/* LACP handles link state itself */
2278 	if (sc->sc_proto == LAGG_PROTO_LACP)
2279 		return;
2280 
2281 	/* Our link is considered up if at least one of our ports is active */
2282 	NET_EPOCH_ENTER(et);
2283 	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
2284 		if (lp->lp_ifp->if_link_state == LINK_STATE_UP) {
2285 			new_link = LINK_STATE_UP;
2286 			break;
2287 		}
2288 	}
2289 	NET_EPOCH_EXIT(et);
2290 	if_link_state_change(sc->sc_ifp, new_link);
2291 
2292 	/* Update if_baudrate to reflect the max possible speed */
2293 	switch (sc->sc_proto) {
2294 	case LAGG_PROTO_FAILOVER:
2295 		sc->sc_ifp->if_baudrate = sc->sc_primary != NULL ?
2296 		    sc->sc_primary->lp_ifp->if_baudrate : 0;
2297 		break;
2298 	case LAGG_PROTO_ROUNDROBIN:
2299 	case LAGG_PROTO_LOADBALANCE:
2300 	case LAGG_PROTO_BROADCAST:
2301 		speed = 0;
2302 		NET_EPOCH_ENTER(et);
2303 		CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
2304 			speed += lp->lp_ifp->if_baudrate;
2305 		NET_EPOCH_EXIT(et);
2306 		sc->sc_ifp->if_baudrate = speed;
2307 		break;
2308 	case LAGG_PROTO_LACP:
2309 		/* LACP updates if_baudrate itself */
2310 		break;
2311 	}
2312 }
2313 
2314 static void
2315 lagg_port_state(struct ifnet *ifp, int state)
2316 {
2317 	struct lagg_port *lp = (struct lagg_port *)ifp->if_lagg;
2318 	struct lagg_softc *sc = NULL;
2319 
2320 	if (lp != NULL)
2321 		sc = lp->lp_softc;
2322 	if (sc == NULL)
2323 		return;
2324 
2325 	LAGG_XLOCK(sc);
2326 	lagg_linkstate(sc);
2327 	lagg_proto_linkstate(sc, lp);
2328 	LAGG_XUNLOCK(sc);
2329 }
2330 
2331 struct lagg_port *
2332 lagg_link_active(struct lagg_softc *sc, struct lagg_port *lp)
2333 {
2334 	struct lagg_port *lp_next, *rval = NULL;
2335 
2336 	/*
2337 	 * Search for a port which reports an active link state.
2338 	 */
2339 
2340 #ifdef INVARIANTS
2341 	/*
2342 	 * This is called either in the network epoch or
2343 	 * with LAGG_XLOCK(sc) held.
2344 	 */
2345 	if (!in_epoch(net_epoch_preempt))
2346 		LAGG_XLOCK_ASSERT(sc);
2347 #endif
2348 
2349 	if (lp == NULL)
2350 		goto search;
2351 	if (LAGG_PORTACTIVE(lp)) {
2352 		rval = lp;
2353 		goto found;
2354 	}
2355 	if ((lp_next = CK_SLIST_NEXT(lp, lp_entries)) != NULL &&
2356 	    LAGG_PORTACTIVE(lp_next)) {
2357 		rval = lp_next;
2358 		goto found;
2359 	}
2360 
2361 search:
2362 	CK_SLIST_FOREACH(lp_next, &sc->sc_ports, lp_entries) {
2363 		if (LAGG_PORTACTIVE(lp_next)) {
2364 			return (lp_next);
2365 		}
2366 	}
2367 found:
2368 	return (rval);
2369 }
2370 
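/*
 * Hand an mbuf to a port for transmission.  If the mbuf carries a
 * lagg send tag, swap in a reference to the underlying port's tag;
 * if the flow has since moved to a different port, drop the packet
 * and return EAGAIN.
 */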
2371 int
2372 lagg_enqueue(struct ifnet *ifp, struct mbuf *m)
2373 {
2374 
2375 #if defined(KERN_TLS) || defined(RATELIMIT)
2376 	if (m->m_pkthdr.csum_flags & CSUM_SND_TAG) {
2377 		struct lagg_snd_tag *lst;
2378 		struct m_snd_tag *mst;
2379 
2380 		mst = m->m_pkthdr.snd_tag;
2381 		lst = mst_to_lst(mst);
2382 		if (lst->tag->ifp != ifp) {
2383 			m_freem(m);
2384 			return (EAGAIN);
2385 		}
2386 		m->m_pkthdr.snd_tag = m_snd_tag_ref(lst->tag);
2387 		m_snd_tag_rele(mst);
2388 	}
2389 #endif
2390 	return (ifp->if_transmit)(ifp, m);
2391 }
2392 
2393 /*
2394  * Simple round robin aggregation
2395  */
2396 static void
2397 lagg_rr_attach(struct lagg_softc *sc)
2398 {
2399 	sc->sc_seq = 0;
2400 	sc->sc_stride = 1;
2401 }
2402 
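/*
 * Worked example (illustrative numbers): with sc_stride = 2 and
 * sc_count = 3, successive packets select port indexes
 * 0,0,1,1,2,2,0,... because the index is (seq / stride) % count.
 */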
2403 static int
2404 lagg_rr_start(struct lagg_softc *sc, struct mbuf *m)
2405 {
2406 	struct lagg_port *lp;
2407 	uint32_t p;
2408 
2409 	p = atomic_fetchadd_32(&sc->sc_seq, 1);
2410 	p /= sc->sc_stride;
2411 	p %= sc->sc_count;
2412 	lp = CK_SLIST_FIRST(&sc->sc_ports);
2413 
2414 	while (p--)
2415 		lp = CK_SLIST_NEXT(lp, lp_entries);
2416 
2417 	/*
2418 	 * Check the port's link state.  lagg_link_active() will return
2419 	 * the next active port if the link is down or the port is NULL.
2420 	 */
2421 	if ((lp = lagg_link_active(sc, lp)) == NULL) {
2422 		if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
2423 		m_freem(m);
2424 		return (ENETDOWN);
2425 	}
2426 
2427 	/* Send mbuf */
2428 	return (lagg_enqueue(lp->lp_ifp, m));
2429 }
2430 
2431 static struct mbuf *
2432 lagg_rr_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
2433 {
2434 	struct ifnet *ifp = sc->sc_ifp;
2435 
2436 	/* Just pass the packet to our lagg device */
2437 	m->m_pkthdr.rcvif = ifp;
2438 
2439 	return (m);
2440 }
2441 
2442 /*
2443  * Broadcast mode
2444  */
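/*
 * Every active port but the last gets a copy of the mbuf chain; the
 * last active port transmits the original.  Copy failures are
 * counted and reported as output errors.
 */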
2445 static int
2446 lagg_bcast_start(struct lagg_softc *sc, struct mbuf *m)
2447 {
2448 	int errors = 0;
2449 	int ret;
2450 	struct lagg_port *lp, *last = NULL;
2451 	struct mbuf *m0;
2452 
2453 	NET_EPOCH_ASSERT();
2454 	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
2455 		if (!LAGG_PORTACTIVE(lp))
2456 			continue;
2457 
2458 		if (last != NULL) {
2459 			m0 = m_copym(m, 0, M_COPYALL, M_NOWAIT);
2460 			if (m0 == NULL) {
2461 				ret = ENOBUFS;
2462 				errors++;
2463 				break;
2464 			}
2465 			lagg_enqueue(last->lp_ifp, m0);
2466 		}
2467 		last = lp;
2468 	}
2469 
2470 	if (last == NULL) {
2471 		if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
2472 		m_freem(m);
2473 		return (ENOENT);
2474 	}
2475 	if ((last = lagg_link_active(sc, last)) == NULL) {
2476 		errors++;
2477 		if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, errors);
2478 		m_freem(m);
2479 		return (ENETDOWN);
2480 	}
2481 
2482 	ret = lagg_enqueue(last->lp_ifp, m);
2483 	if (errors != 0)
2484 		if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, errors);
2485 
2486 	return (ret);
2487 }
2488 
2489 static struct mbuf *
2490 lagg_bcast_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
2491 {
2492 	struct ifnet *ifp = sc->sc_ifp;
2493 
2494 	/* Just pass the packet to our lagg device */
2495 	m->m_pkthdr.rcvif = ifp;
2496 	return (m);
2497 }
2498 
2499 /*
2500  * Active failover
2501  */
2502 static int
2503 lagg_fail_start(struct lagg_softc *sc, struct mbuf *m)
2504 {
2505 	struct lagg_port *lp;
2506 
2507 	/* Use the master port if it is active, otherwise the next available port */
2508 	if ((lp = lagg_link_active(sc, sc->sc_primary)) == NULL) {
2509 		if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
2510 		m_freem(m);
2511 		return (ENETDOWN);
2512 	}
2513 
2514 	/* Send mbuf */
2515 	return (lagg_enqueue(lp->lp_ifp, m));
2516 }
2517 
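/*
 * Receive side of failover: accept traffic from the primary port or,
 * while the primary is down, from the first active fallback port;
 * anything else is dropped unless the failover_rx_all sysctl is
 * enabled.
 */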
2518 static struct mbuf *
2519 lagg_fail_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
2520 {
2521 	struct ifnet *ifp = sc->sc_ifp;
2522 	struct lagg_port *tmp_tp;
2523 
2524 	if (lp == sc->sc_primary || V_lagg_failover_rx_all) {
2525 		m->m_pkthdr.rcvif = ifp;
2526 		return (m);
2527 	}
2528 
2529 	if (!LAGG_PORTACTIVE(sc->sc_primary)) {
2530 		tmp_tp = lagg_link_active(sc, sc->sc_primary);
2531 		/*
2532 		 * If tmp_tp is NULL, we've received a packet when all
2533 		 * our links are down.  Weird, but process it anyway.
2534 		 */
2535 		if (tmp_tp == NULL || tmp_tp == lp) {
2536 			m->m_pkthdr.rcvif = ifp;
2537 			return (m);
2538 		}
2539 	}
2540 
2541 	m_freem(m);
2542 	return (NULL);
2543 }
2544 
2545 /*
2546  * Loadbalancing
2547  */
2548 static void
2549 lagg_lb_attach(struct lagg_softc *sc)
2550 {
2551 	struct lagg_port *lp;
2552 	struct lagg_lb *lb;
2553 
2554 	LAGG_XLOCK_ASSERT(sc);
2555 	lb = malloc(sizeof(struct lagg_lb), M_LAGG, M_WAITOK | M_ZERO);
2556 	lb->lb_key = m_ether_tcpip_hash_init();
2557 	sc->sc_psc = lb;
2558 
2559 	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
2560 		lagg_lb_port_create(lp);
2561 }
2562 
2563 static void
2564 lagg_lb_detach(struct lagg_softc *sc)
2565 {
2566 	struct lagg_lb *lb;
2567 
2568 	lb = (struct lagg_lb *)sc->sc_psc;
2569 	if (lb != NULL)
2570 		free(lb, M_LAGG);
2571 }
2572 
2573 static int
2574 lagg_lb_porttable(struct lagg_softc *sc, struct lagg_port *lp)
2575 {
2576 	struct lagg_lb *lb = (struct lagg_lb *)sc->sc_psc;
2577 	struct lagg_port *lp_next;
2578 	int i = 0, rv;
2579 
2580 	rv = 0;
2581 	bzero(&lb->lb_ports, sizeof(lb->lb_ports));
2582 	LAGG_XLOCK_ASSERT(sc);
2583 	CK_SLIST_FOREACH(lp_next, &sc->sc_ports, lp_entries) {
2584 		if (lp_next == lp)
2585 			continue;
2586 		if (i >= LAGG_MAX_PORTS) {
2587 			rv = EINVAL;
2588 			break;
2589 		}
2590 		if (sc->sc_ifflags & IFF_DEBUG)
2591 			printf("%s: port %s at index %d\n",
2592 			    sc->sc_ifname, lp_next->lp_ifp->if_xname, i);
2593 		lb->lb_ports[i++] = lp_next;
2594 	}
2595 
2596 	return (rv);
2597 }
2598 
2599 static int
2600 lagg_lb_port_create(struct lagg_port *lp)
2601 {
2602 	struct lagg_softc *sc = lp->lp_softc;
2603 	return (lagg_lb_porttable(sc, NULL));
2604 }
2605 
2606 static void
2607 lagg_lb_port_destroy(struct lagg_port *lp)
2608 {
2609 	struct lagg_softc *sc = lp->lp_softc;
2610 	lagg_lb_porttable(sc, lp);
2611 }
2612 
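/*
 * Selection sketch (illustrative numbers): with use_flowid set, a
 * flowid of 0x2a and a flowid_shift of 3 yield index
 * (0x2a >> 3) % sc_count, i.e. 5 % sc_count; otherwise the packet
 * headers are hashed.
 */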
2613 static int
2614 lagg_lb_start(struct lagg_softc *sc, struct mbuf *m)
2615 {
2616 	struct lagg_lb *lb = (struct lagg_lb *)sc->sc_psc;
2617 	struct lagg_port *lp = NULL;
2618 	uint32_t p = 0;
2619 
2620 	if ((sc->sc_opts & LAGG_OPT_USE_FLOWID) &&
2621 	    M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
2622 		p = m->m_pkthdr.flowid >> sc->flowid_shift;
2623 	else
2624 		p = m_ether_tcpip_hash(sc->sc_flags, m, lb->lb_key);
2625 	p %= sc->sc_count;
2626 	lp = lb->lb_ports[p];
2627 
2628 	/*
2629 	 * Check the port's link state.  lagg_link_active() will return
2630 	 * the next active port if the link is down or the port is NULL.
2631 	 */
2632 	if ((lp = lagg_link_active(sc, lp)) == NULL) {
2633 		if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
2634 		m_freem(m);
2635 		return (ENETDOWN);
2636 	}
2637 
2638 	/* Send mbuf */
2639 	return (lagg_enqueue(lp->lp_ifp, m));
2640 }
2641 
2642 static struct mbuf *
2643 lagg_lb_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
2644 {
2645 	struct ifnet *ifp = sc->sc_ifp;
2646 
2647 	/* Just pass in the packet to our lagg device */
2648 	m->m_pkthdr.rcvif = ifp;
2649 
2650 	return (m);
2651 }
2652 
2653 /*
2654  * 802.3ad LACP
2655  */
2656 static void
2657 lagg_lacp_attach(struct lagg_softc *sc)
2658 {
2659 	struct lagg_port *lp;
2660 
2661 	lacp_attach(sc);
2662 	LAGG_XLOCK_ASSERT(sc);
2663 	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
2664 		lacp_port_create(lp);
2665 }
2666 
2667 static void
2668 lagg_lacp_detach(struct lagg_softc *sc)
2669 {
2670 	struct lagg_port *lp;
2671 	void *psc;
2672 
2673 	LAGG_XLOCK_ASSERT(sc);
2674 	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
2675 		lacp_port_destroy(lp);
2676 
2677 	psc = sc->sc_psc;
2678 	sc->sc_psc = NULL;
2679 	lacp_detach(psc);
2680 }
2681 
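/*
 * The lagg's link-layer address changed: destroy and recreate every
 * LACP port so its state is rebuilt with the new address.
 */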
2682 static void
2683 lagg_lacp_lladdr(struct lagg_softc *sc)
2684 {
2685 	struct lagg_port *lp;
2686 
2687 	LAGG_SXLOCK_ASSERT(sc);
2688 
2689 	/* purge all the lacp ports */
2690 	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
2691 		lacp_port_destroy(lp);
2692 
2693 	/* add them back in */
2694 	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
2695 		lacp_port_create(lp);
2696 }
2697 
2698 static int
2699 lagg_lacp_start(struct lagg_softc *sc, struct mbuf *m)
2700 {
2701 	struct lagg_port *lp;
2702 	int err;
2703 
2704 	lp = lacp_select_tx_port(sc, m, &err);
2705 	if (lp == NULL) {
2706 		if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
2707 		m_freem(m);
2708 		return (err);
2709 	}
2710 
2711 	/* Send mbuf */
2712 	return (lagg_enqueue(lp->lp_ifp, m));
2713 }
2714 
2715 static struct mbuf *
2716 lagg_lacp_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
2717 {
2718 	struct ifnet *ifp = sc->sc_ifp;
2719 	struct ether_header *eh;
2720 	u_short etype;
2721 
2722 	eh = mtod(m, struct ether_header *);
2723 	etype = ntohs(eh->ether_type);
2724 
2725 	/* Tap off LACP control messages */
2726 	if ((m->m_flags & M_VLANTAG) == 0 && etype == ETHERTYPE_SLOW) {
2727 		m = lacp_input(lp, m);
2728 		if (m == NULL)
2729 			return (NULL);
2730 	}
2731 
2732 	/*
2733 	 * If the port is not collecting or not in the active aggregator then
2734 	 * free and return.
2735 	 */
2736 	if (lacp_iscollecting(lp) == 0 || lacp_isactive(lp) == 0) {
2737 		m_freem(m);
2738 		return (NULL);
2739 	}
2740 
2741 	m->m_pkthdr.rcvif = ifp;
2742 	return (m);
2743 }
2744