xref: /freebsd/sys/net/ieee8023ad_lacp.c (revision bc7512cc58af2e8bbe5bbf5ca0059b1daa1da897)
1 /*	$NetBSD: ieee8023ad_lacp.c,v 1.3 2005/12/11 12:24:54 christos Exp $	*/
2 
3 /*-
4  * SPDX-License-Identifier: BSD-2-Clause-NetBSD
5  *
6  * Copyright (c)2005 YAMAMOTO Takashi,
7  * Copyright (c)2008 Andrew Thompson <thompsa@FreeBSD.org>
8  * All rights reserved.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 #include "opt_kern_tls.h"
36 #include "opt_ratelimit.h"
37 
38 #include <sys/param.h>
39 #include <sys/callout.h>
40 #include <sys/eventhandler.h>
41 #include <sys/mbuf.h>
42 #include <sys/systm.h>
43 #include <sys/malloc.h>
44 #include <sys/kernel.h> /* hz */
45 #include <sys/socket.h> /* for net/if.h */
46 #include <sys/sockio.h>
47 #include <sys/sysctl.h>
48 #include <machine/stdarg.h>
49 #include <sys/lock.h>
50 #include <sys/rwlock.h>
51 #include <sys/taskqueue.h>
52 #include <sys/time.h>
53 
54 #include <net/if.h>
55 #include <net/if_var.h>
56 #include <net/if_dl.h>
57 #include <net/ethernet.h>
58 #include <net/infiniband.h>
59 #include <net/if_media.h>
60 #include <net/if_types.h>
61 
62 #include <net/if_lagg.h>
63 #include <net/ieee8023ad_lacp.h>
64 
65 /*
66  * actor system priority and port priority.
67  * XXX should be configurable.
68  */
69 
70 #define	LACP_SYSTEM_PRIO	0x8000
71 #define	LACP_PORT_PRIO		0x8000
72 
73 const uint8_t ethermulticastaddr_slowprotocols[ETHER_ADDR_LEN] =
74     { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x02 };
75 
76 static const struct tlv_template lacp_info_tlv_template[] = {
77 	{ LACP_TYPE_ACTORINFO,
78 	    sizeof(struct tlvhdr) + sizeof(struct lacp_peerinfo) },
79 	{ LACP_TYPE_PARTNERINFO,
80 	    sizeof(struct tlvhdr) + sizeof(struct lacp_peerinfo) },
81 	{ LACP_TYPE_COLLECTORINFO,
82 	    sizeof(struct tlvhdr) + sizeof(struct lacp_collectorinfo) },
83 	{ 0, 0 },
84 };
85 
86 static const struct tlv_template marker_info_tlv_template[] = {
87 	{ MARKER_TYPE_INFO,
88 	    sizeof(struct tlvhdr) + sizeof(struct lacp_markerinfo) },
89 	{ 0, 0 },
90 };
91 
92 static const struct tlv_template marker_response_tlv_template[] = {
93 	{ MARKER_TYPE_RESPONSE,
94 	    sizeof(struct tlvhdr) + sizeof(struct lacp_markerinfo) },
95 	{ 0, 0 },
96 };
97 
98 typedef void (*lacp_timer_func_t)(struct lacp_port *);
99 
100 static void	lacp_fill_actorinfo(struct lacp_port *, struct lacp_peerinfo *);
101 static void	lacp_fill_markerinfo(struct lacp_port *,
102 		    struct lacp_markerinfo *);
103 
104 static uint64_t	lacp_aggregator_bandwidth(struct lacp_aggregator *);
105 static void	lacp_suppress_distributing(struct lacp_softc *,
106 		    struct lacp_aggregator *);
107 static void	lacp_transit_expire(void *);
108 static void	lacp_update_portmap(struct lacp_softc *);
109 static void	lacp_select_active_aggregator(struct lacp_softc *);
110 static uint16_t	lacp_compose_key(struct lacp_port *);
111 static int	tlv_check(const void *, size_t, const struct tlvhdr *,
112 		    const struct tlv_template *, boolean_t);
113 static void	lacp_tick(void *);
114 
115 static void	lacp_fill_aggregator_id(struct lacp_aggregator *,
116 		    const struct lacp_port *);
117 static void	lacp_fill_aggregator_id_peer(struct lacp_peerinfo *,
118 		    const struct lacp_peerinfo *);
119 static int	lacp_aggregator_is_compatible(const struct lacp_aggregator *,
120 		    const struct lacp_port *);
121 static int	lacp_peerinfo_is_compatible(const struct lacp_peerinfo *,
122 		    const struct lacp_peerinfo *);
123 
124 static struct lacp_aggregator *lacp_aggregator_get(struct lacp_softc *,
125 		    struct lacp_port *);
126 static void	lacp_aggregator_addref(struct lacp_softc *,
127 		    struct lacp_aggregator *);
128 static void	lacp_aggregator_delref(struct lacp_softc *,
129 		    struct lacp_aggregator *);
130 
131 /* receive machine */
132 
133 static int	lacp_pdu_input(struct lacp_port *, struct mbuf *);
134 static int	lacp_marker_input(struct lacp_port *, struct mbuf *);
135 static void	lacp_sm_rx(struct lacp_port *, const struct lacpdu *);
136 static void	lacp_sm_rx_timer(struct lacp_port *);
137 static void	lacp_sm_rx_set_expired(struct lacp_port *);
138 static void	lacp_sm_rx_update_ntt(struct lacp_port *,
139 		    const struct lacpdu *);
140 static void	lacp_sm_rx_record_pdu(struct lacp_port *,
141 		    const struct lacpdu *);
142 static void	lacp_sm_rx_update_selected(struct lacp_port *,
143 		    const struct lacpdu *);
144 static void	lacp_sm_rx_record_default(struct lacp_port *);
145 static void	lacp_sm_rx_update_default_selected(struct lacp_port *);
146 static void	lacp_sm_rx_update_selected_from_peerinfo(struct lacp_port *,
147 		    const struct lacp_peerinfo *);
148 
149 /* mux machine */
150 
151 static void	lacp_sm_mux(struct lacp_port *);
152 static void	lacp_set_mux(struct lacp_port *, enum lacp_mux_state);
153 static void	lacp_sm_mux_timer(struct lacp_port *);
154 
155 /* periodic transmit machine */
156 
157 static void	lacp_sm_ptx_update_timeout(struct lacp_port *, uint8_t);
158 static void	lacp_sm_ptx_tx_schedule(struct lacp_port *);
159 static void	lacp_sm_ptx_timer(struct lacp_port *);
160 
161 /* transmit machine */
162 
163 static void	lacp_sm_tx(struct lacp_port *);
164 static void	lacp_sm_assert_ntt(struct lacp_port *);
165 
166 static void	lacp_run_timers(struct lacp_port *);
167 static int	lacp_compare_peerinfo(const struct lacp_peerinfo *,
168 		    const struct lacp_peerinfo *);
169 static int	lacp_compare_systemid(const struct lacp_systemid *,
170 		    const struct lacp_systemid *);
171 static void	lacp_port_enable(struct lacp_port *);
172 static void	lacp_port_disable(struct lacp_port *);
173 static void	lacp_select(struct lacp_port *);
174 static void	lacp_unselect(struct lacp_port *);
175 static void	lacp_disable_collecting(struct lacp_port *);
176 static void	lacp_enable_collecting(struct lacp_port *);
177 static void	lacp_disable_distributing(struct lacp_port *);
178 static void	lacp_enable_distributing(struct lacp_port *);
179 static int	lacp_xmit_lacpdu(struct lacp_port *);
180 static int	lacp_xmit_marker(struct lacp_port *);
181 
182 /* Debugging */
183 
184 static void	lacp_dump_lacpdu(const struct lacpdu *);
185 static const char *lacp_format_partner(const struct lacp_peerinfo *, char *,
186 		    size_t);
187 static const char *lacp_format_lagid(const struct lacp_peerinfo *,
188 		    const struct lacp_peerinfo *, char *, size_t);
189 static const char *lacp_format_lagid_aggregator(const struct lacp_aggregator *,
190 		    char *, size_t);
191 static const char *lacp_format_state(uint8_t, char *, size_t);
192 static const char *lacp_format_mac(const uint8_t *, char *, size_t);
193 static const char *lacp_format_systemid(const struct lacp_systemid *, char *,
194 		    size_t);
195 static const char *lacp_format_portid(const struct lacp_portid *, char *,
196 		    size_t);
197 static void	lacp_dprintf(const struct lacp_port *, const char *, ...)
198 		    __attribute__((__format__(__printf__, 2, 3)));
199 
200 VNET_DEFINE_STATIC(int, lacp_debug);
201 #define	V_lacp_debug	VNET(lacp_debug)
202 SYSCTL_NODE(_net_link_lagg, OID_AUTO, lacp, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
203     "ieee802.3ad");
204 SYSCTL_INT(_net_link_lagg_lacp, OID_AUTO, debug, CTLFLAG_RWTUN | CTLFLAG_VNET,
205     &VNET_NAME(lacp_debug), 0, "Enable LACP debug logging (1=debug, 2=trace)");
206 
207 VNET_DEFINE_STATIC(int, lacp_default_strict_mode) = 1;
208 SYSCTL_INT(_net_link_lagg_lacp, OID_AUTO, default_strict_mode,
209     CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(lacp_default_strict_mode), 0,
210     "LACP strict protocol compliance default");
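/*
 * The debug macros below key off bits in the net.link.lagg.lacp.debug
 * sysctl: 0x01 enables general debug output, 0x02 traces state machine
 * entry points, and 0x04 enables the TX/RX test printfs used together
 * with lsc_debug.
 */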
211 #define LACP_DPRINTF(a) if (V_lacp_debug & 0x01) { lacp_dprintf a ; }
212 #define LACP_TRACE(a) if (V_lacp_debug & 0x02) { lacp_dprintf(a,"%s\n",__func__); }
213 #define LACP_TPRINTF(a) if (V_lacp_debug & 0x04) { lacp_dprintf a ; }
214 
215 /*
216  * partner administration variables.
217  * XXX should be configurable.
218  */
219 
220 static const struct lacp_peerinfo lacp_partner_admin_optimistic = {
221 	.lip_systemid = { .lsi_prio = 0xffff },
222 	.lip_portid = { .lpi_prio = 0xffff },
223 	.lip_state = LACP_STATE_SYNC | LACP_STATE_AGGREGATION |
224 	    LACP_STATE_COLLECTING | LACP_STATE_DISTRIBUTING,
225 };
226 
227 static const struct lacp_peerinfo lacp_partner_admin_strict = {
228 	.lip_systemid = { .lsi_prio = 0xffff },
229 	.lip_portid = { .lpi_prio = 0xffff },
230 	.lip_state = 0,
231 };
232 
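/*
 * Per-port timer dispatch table.  Timer values are in seconds; they are
 * decremented by lacp_run_timers(), which is driven once per second from
 * lacp_tick().
 */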
233 static const lacp_timer_func_t lacp_timer_funcs[LACP_NTIMER] = {
234 	[LACP_TIMER_CURRENT_WHILE] = lacp_sm_rx_timer,
235 	[LACP_TIMER_PERIODIC] = lacp_sm_ptx_timer,
236 	[LACP_TIMER_WAIT_WHILE] = lacp_sm_mux_timer,
237 };
238 
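/*
 * lacp_input: demultiplex an incoming slow protocols frame.  LACP and
 * marker PDUs are consumed here and NULL is returned; frames of any
 * other subtype are handed back to the caller untouched.
 */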
239 struct mbuf *
240 lacp_input(struct lagg_port *lgp, struct mbuf *m)
241 {
242 	struct lacp_port *lp = LACP_PORT(lgp);
243 	uint8_t subtype;
244 
245 	if (m->m_pkthdr.len < sizeof(struct ether_header) + sizeof(subtype)) {
246 		m_freem(m);
247 		return (NULL);
248 	}
249 
250 	m_copydata(m, sizeof(struct ether_header), sizeof(subtype), &subtype);
251 	switch (subtype) {
252 		case SLOWPROTOCOLS_SUBTYPE_LACP:
253 			lacp_pdu_input(lp, m);
254 			return (NULL);
255 
256 		case SLOWPROTOCOLS_SUBTYPE_MARKER:
257 			lacp_marker_input(lp, m);
258 			return (NULL);
259 	}
260 
261 	/* Not a subtype we are interested in */
262 	return (m);
263 }
264 
265 /*
266  * lacp_pdu_input: process lacpdu
267  */
268 static int
269 lacp_pdu_input(struct lacp_port *lp, struct mbuf *m)
270 {
271 	struct lacp_softc *lsc = lp->lp_lsc;
272 	struct lacpdu *du;
273 	int error = 0;
274 
275 	if (m->m_pkthdr.len != sizeof(*du)) {
276 		goto bad;
277 	}
278 
279 	if ((m->m_flags & M_MCAST) == 0) {
280 		goto bad;
281 	}
282 
283 	if (m->m_len < sizeof(*du)) {
284 		m = m_pullup(m, sizeof(*du));
285 		if (m == NULL) {
286 			return (ENOMEM);
287 		}
288 	}
289 
290 	du = mtod(m, struct lacpdu *);
291 
292 	if (memcmp(&du->ldu_eh.ether_dhost,
293 	    &ethermulticastaddr_slowprotocols, ETHER_ADDR_LEN)) {
294 		goto bad;
295 	}
296 
297 	/*
298 	 * Ignore the version field for compatibility with
299 	 * future protocol revisions.
300 	 */
301 #if 0
302 	if (du->ldu_sph.sph_version != 1) {
303 		goto bad;
304 	}
305 #endif
306 
307 	/*
308 	 * Ignore TLV types for compatibility with
309 	 * future protocol revisions.
310 	 */
311 	if (tlv_check(du, sizeof(*du), &du->ldu_tlv_actor,
312 	    lacp_info_tlv_template, FALSE)) {
313 		goto bad;
314 	}
315 
316 	if (V_lacp_debug > 0) {
317 		lacp_dprintf(lp, "lacpdu receive\n");
318 		lacp_dump_lacpdu(du);
319 	}
320 
321 	if ((1 << lp->lp_ifp->if_dunit) & lp->lp_lsc->lsc_debug.lsc_rx_test) {
322 		LACP_TPRINTF((lp, "Dropping RX PDU\n"));
323 		goto bad;
324 	}
325 
326 	LACP_LOCK(lsc);
327 	lacp_sm_rx(lp, du);
328 	LACP_UNLOCK(lsc);
329 
330 	m_freem(m);
331 	return (error);
332 
333 bad:
334 	m_freem(m);
335 	return (EINVAL);
336 }
337 
338 static void
339 lacp_fill_actorinfo(struct lacp_port *lp, struct lacp_peerinfo *info)
340 {
341 	struct lagg_port *lgp = lp->lp_lagg;
342 	struct lagg_softc *sc = lgp->lp_softc;
343 
344 	info->lip_systemid.lsi_prio = htons(LACP_SYSTEM_PRIO);
345 	memcpy(&info->lip_systemid.lsi_mac,
346 	    IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
347 	info->lip_portid.lpi_prio = htons(LACP_PORT_PRIO);
348 	info->lip_portid.lpi_portno = htons(lp->lp_ifp->if_index);
349 	info->lip_state = lp->lp_state;
350 }
351 
352 static void
353 lacp_fill_markerinfo(struct lacp_port *lp, struct lacp_markerinfo *info)
354 {
355 	struct ifnet *ifp = lp->lp_ifp;
356 
357 	/* Fill in the port index and system id (encoded as the MAC) */
358 	info->mi_rq_port = htons(ifp->if_index);
359 	memcpy(&info->mi_rq_system, lp->lp_systemid.lsi_mac, ETHER_ADDR_LEN);
360 	info->mi_rq_xid = htonl(0);
361 }
362 
363 static int
364 lacp_xmit_lacpdu(struct lacp_port *lp)
365 {
366 	struct lagg_port *lgp = lp->lp_lagg;
367 	struct mbuf *m;
368 	struct lacpdu *du;
369 	int error;
370 
371 	LACP_LOCK_ASSERT(lp->lp_lsc);
372 
373 	m = m_gethdr(M_NOWAIT, MT_DATA);
374 	if (m == NULL) {
375 		return (ENOMEM);
376 	}
377 	m->m_len = m->m_pkthdr.len = sizeof(*du);
378 
379 	du = mtod(m, struct lacpdu *);
380 	memset(du, 0, sizeof(*du));
381 
382 	memcpy(&du->ldu_eh.ether_dhost, ethermulticastaddr_slowprotocols,
383 	    ETHER_ADDR_LEN);
384 	memcpy(&du->ldu_eh.ether_shost, lgp->lp_lladdr, ETHER_ADDR_LEN);
385 	du->ldu_eh.ether_type = htons(ETHERTYPE_SLOW);
386 
387 	du->ldu_sph.sph_subtype = SLOWPROTOCOLS_SUBTYPE_LACP;
388 	du->ldu_sph.sph_version = 1;
389 
390 	TLV_SET(&du->ldu_tlv_actor, LACP_TYPE_ACTORINFO, sizeof(du->ldu_actor));
391 	du->ldu_actor = lp->lp_actor;
392 
393 	TLV_SET(&du->ldu_tlv_partner, LACP_TYPE_PARTNERINFO,
394 	    sizeof(du->ldu_partner));
395 	du->ldu_partner = lp->lp_partner;
396 
397 	TLV_SET(&du->ldu_tlv_collector, LACP_TYPE_COLLECTORINFO,
398 	    sizeof(du->ldu_collector));
399 	du->ldu_collector.lci_maxdelay = 0;
400 
401 	if (V_lacp_debug > 0) {
402 		lacp_dprintf(lp, "lacpdu transmit\n");
403 		lacp_dump_lacpdu(du);
404 	}
405 
406 	m->m_flags |= M_MCAST;
407 
408 	/*
409 	 * XXX should use a higher priority queue; otherwise network
410 	 * congestion can break aggregation.
411 	 */
412 
413 	error = lagg_enqueue(lp->lp_ifp, m);
414 	return (error);
415 }
416 
417 static int
418 lacp_xmit_marker(struct lacp_port *lp)
419 {
420 	struct lagg_port *lgp = lp->lp_lagg;
421 	struct mbuf *m;
422 	struct markerdu *mdu;
423 	int error;
424 
425 	LACP_LOCK_ASSERT(lp->lp_lsc);
426 
427 	m = m_gethdr(M_NOWAIT, MT_DATA);
428 	if (m == NULL) {
429 		return (ENOMEM);
430 	}
431 	m->m_len = m->m_pkthdr.len = sizeof(*mdu);
432 
433 	mdu = mtod(m, struct markerdu *);
434 	memset(mdu, 0, sizeof(*mdu));
435 
436 	memcpy(&mdu->mdu_eh.ether_dhost, ethermulticastaddr_slowprotocols,
437 	    ETHER_ADDR_LEN);
438 	memcpy(&mdu->mdu_eh.ether_shost, lgp->lp_lladdr, ETHER_ADDR_LEN);
439 	mdu->mdu_eh.ether_type = htons(ETHERTYPE_SLOW);
440 
441 	mdu->mdu_sph.sph_subtype = SLOWPROTOCOLS_SUBTYPE_MARKER;
442 	mdu->mdu_sph.sph_version = 1;
443 
444 	/* Bump the transaction id and copy over the marker info */
445 	lp->lp_marker.mi_rq_xid = htonl(ntohl(lp->lp_marker.mi_rq_xid) + 1);
446 	TLV_SET(&mdu->mdu_tlv, MARKER_TYPE_INFO, sizeof(mdu->mdu_info));
447 	mdu->mdu_info = lp->lp_marker;
448 
449 	LACP_DPRINTF((lp, "marker transmit, port=%u, sys=%6D, id=%u\n",
450 	    ntohs(mdu->mdu_info.mi_rq_port), mdu->mdu_info.mi_rq_system, ":",
451 	    ntohl(mdu->mdu_info.mi_rq_xid)));
452 
453 	m->m_flags |= M_MCAST;
454 	error = lagg_enqueue(lp->lp_ifp, m);
455 	return (error);
456 }
457 
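/*
 * lacp_linkstate: re-evaluate a port after a media or link state change.
 * The current media is fetched with SIOCGIFXMEDIA (falling back to
 * SIOCGIFMEDIA), the port is enabled only if it is an active full-duplex
 * Ethernet link, and any resulting state or key change drops the port
 * back to UNSELECTED so it is re-evaluated by the selection logic on the
 * next tick.
 */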
458 void
459 lacp_linkstate(struct lagg_port *lgp)
460 {
461 	struct lacp_port *lp = LACP_PORT(lgp);
462 	struct lacp_softc *lsc = lp->lp_lsc;
463 	struct ifnet *ifp = lgp->lp_ifp;
464 	struct ifmediareq ifmr;
465 	int error = 0;
466 	u_int media;
467 	uint8_t old_state;
468 	uint16_t old_key;
469 
470 	bzero((char *)&ifmr, sizeof(ifmr));
471 	error = (*ifp->if_ioctl)(ifp, SIOCGIFXMEDIA, (caddr_t)&ifmr);
472 	if (error != 0) {
473 		bzero((char *)&ifmr, sizeof(ifmr));
474 		error = (*ifp->if_ioctl)(ifp, SIOCGIFMEDIA, (caddr_t)&ifmr);
475 	}
476 	if (error != 0)
477 		return;
478 
479 	LACP_LOCK(lsc);
480 	media = ifmr.ifm_active;
481 	LACP_DPRINTF((lp, "media changed 0x%x -> 0x%x, ether = %d, fdx = %d, "
482 	    "link = %d\n", lp->lp_media, media, IFM_TYPE(media) == IFM_ETHER,
483 	    (media & IFM_FDX) != 0, ifp->if_link_state == LINK_STATE_UP));
484 	old_state = lp->lp_state;
485 	old_key = lp->lp_key;
486 
487 	lp->lp_media = media;
488 	/*
489 	 * If the port is not an active, full-duplex Ethernet link then it
490 	 * cannot be aggregated.
491 	 */
492 	if (IFM_TYPE(media) != IFM_ETHER || (media & IFM_FDX) == 0 ||
493 	    ifp->if_link_state != LINK_STATE_UP) {
494 		lacp_port_disable(lp);
495 	} else {
496 		lacp_port_enable(lp);
497 	}
498 	lp->lp_key = lacp_compose_key(lp);
499 
500 	if (old_state != lp->lp_state || old_key != lp->lp_key) {
501 		LACP_DPRINTF((lp, "-> UNSELECTED\n"));
502 		lp->lp_selected = LACP_UNSELECTED;
503 	}
504 	LACP_UNLOCK(lsc);
505 }
506 
507 static void
508 lacp_tick(void *arg)
509 {
510 	struct lacp_softc *lsc = arg;
511 	struct lacp_port *lp;
512 
513 	LIST_FOREACH(lp, &lsc->lsc_ports, lp_next) {
514 		if ((lp->lp_state & LACP_STATE_AGGREGATION) == 0)
515 			continue;
516 
517 		CURVNET_SET(lp->lp_ifp->if_vnet);
518 		lacp_run_timers(lp);
519 
520 		lacp_select(lp);
521 		lacp_sm_mux(lp);
522 		lacp_sm_tx(lp);
523 		lacp_sm_ptx_tx_schedule(lp);
524 		CURVNET_RESTORE();
525 	}
526 	callout_reset(&lsc->lsc_callout, hz, lacp_tick, lsc);
527 }
528 
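/*
 * lacp_port_create: attach LACP state to a new lagg(4) member port.  The
 * port joins the slow protocols multicast group, starts in active LACP
 * mode (LACP_STATE_ACTIVITY) and is marked EXPIRED until the first
 * partner PDU is received.
 */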
529 int
530 lacp_port_create(struct lagg_port *lgp)
531 {
532 	struct lagg_softc *sc = lgp->lp_softc;
533 	struct lacp_softc *lsc = LACP_SOFTC(sc);
534 	struct lacp_port *lp;
535 	struct ifnet *ifp = lgp->lp_ifp;
536 	struct sockaddr_dl sdl;
537 	struct ifmultiaddr *rifma = NULL;
538 	int error;
539 
540 	link_init_sdl(ifp, (struct sockaddr *)&sdl, IFT_ETHER);
541 	sdl.sdl_alen = ETHER_ADDR_LEN;
542 
543 	bcopy(&ethermulticastaddr_slowprotocols,
544 	    LLADDR(&sdl), ETHER_ADDR_LEN);
545 	error = if_addmulti(ifp, (struct sockaddr *)&sdl, &rifma);
546 	if (error) {
547 		printf("%s: ADDMULTI failed on %s\n", __func__,
548 		    lgp->lp_ifp->if_xname);
549 		return (error);
550 	}
551 
552 	lp = malloc(sizeof(struct lacp_port),
553 	    M_DEVBUF, M_NOWAIT|M_ZERO);
554 	if (lp == NULL) {
		/* Don't leak the slow protocols multicast membership. */
		if_delmulti_ifma(rifma);
555 		return (ENOMEM);
	}
556 
557 	LACP_LOCK(lsc);
558 	lgp->lp_psc = lp;
559 	lp->lp_ifp = ifp;
560 	lp->lp_lagg = lgp;
561 	lp->lp_lsc = lsc;
562 	lp->lp_ifma = rifma;
563 
564 	LIST_INSERT_HEAD(&lsc->lsc_ports, lp, lp_next);
565 
566 	lacp_fill_actorinfo(lp, &lp->lp_actor);
567 	lacp_fill_markerinfo(lp, &lp->lp_marker);
568 	lp->lp_state = LACP_STATE_ACTIVITY;
569 	lp->lp_aggregator = NULL;
570 	lacp_sm_rx_set_expired(lp);
571 	LACP_UNLOCK(lsc);
572 	lacp_linkstate(lgp);
573 
574 	return (0);
575 }
576 
577 void
578 lacp_port_destroy(struct lagg_port *lgp)
579 {
580 	struct lacp_port *lp = LACP_PORT(lgp);
581 	struct lacp_softc *lsc = lp->lp_lsc;
582 	int i;
583 
584 	LACP_LOCK(lsc);
585 	for (i = 0; i < LACP_NTIMER; i++) {
586 		LACP_TIMER_DISARM(lp, i);
587 	}
588 
589 	lacp_disable_collecting(lp);
590 	lacp_disable_distributing(lp);
591 	lacp_unselect(lp);
592 
593 	LIST_REMOVE(lp, lp_next);
594 	LACP_UNLOCK(lsc);
595 
596 	/* The address may have already been removed by if_purgemaddrs() */
597 	if (!lgp->lp_detaching)
598 		if_delmulti_ifma(lp->lp_ifma);
599 
600 	free(lp, M_DEVBUF);
601 }
602 
603 void
604 lacp_req(struct lagg_softc *sc, void *data)
605 {
606 	struct lacp_opreq *req = (struct lacp_opreq *)data;
607 	struct lacp_softc *lsc = LACP_SOFTC(sc);
608 	struct lacp_aggregator *la;
609 
610 	bzero(req, sizeof(struct lacp_opreq));
611 
612 	/*
613 	 * If the LACP softc is NULL, return with the opreq structure full of
614 	 * zeros.  It is normal for the softc to be NULL while the lagg is
615 	 * being destroyed.
616 	 */
617 	if (NULL == lsc)
618 		return;
619 
620 	LACP_LOCK(lsc);
621 	la = lsc->lsc_active_aggregator;
622 	if (la != NULL) {
623 		req->actor_prio = ntohs(la->la_actor.lip_systemid.lsi_prio);
624 		memcpy(&req->actor_mac, &la->la_actor.lip_systemid.lsi_mac,
625 		    ETHER_ADDR_LEN);
626 		req->actor_key = ntohs(la->la_actor.lip_key);
627 		req->actor_portprio = ntohs(la->la_actor.lip_portid.lpi_prio);
628 		req->actor_portno = ntohs(la->la_actor.lip_portid.lpi_portno);
629 		req->actor_state = la->la_actor.lip_state;
630 
631 		req->partner_prio = ntohs(la->la_partner.lip_systemid.lsi_prio);
632 		memcpy(&req->partner_mac, &la->la_partner.lip_systemid.lsi_mac,
633 		    ETHER_ADDR_LEN);
634 		req->partner_key = ntohs(la->la_partner.lip_key);
635 		req->partner_portprio = ntohs(la->la_partner.lip_portid.lpi_prio);
636 		req->partner_portno = ntohs(la->la_partner.lip_portid.lpi_portno);
637 		req->partner_state = la->la_partner.lip_state;
638 	}
639 	LACP_UNLOCK(lsc);
640 }
641 
642 void
643 lacp_portreq(struct lagg_port *lgp, void *data)
644 {
645 	struct lacp_opreq *req = (struct lacp_opreq *)data;
646 	struct lacp_port *lp = LACP_PORT(lgp);
647 	struct lacp_softc *lsc = lp->lp_lsc;
648 
649 	LACP_LOCK(lsc);
650 	req->actor_prio = ntohs(lp->lp_actor.lip_systemid.lsi_prio);
651 	memcpy(&req->actor_mac, &lp->lp_actor.lip_systemid.lsi_mac,
652 	    ETHER_ADDR_LEN);
653 	req->actor_key = ntohs(lp->lp_actor.lip_key);
654 	req->actor_portprio = ntohs(lp->lp_actor.lip_portid.lpi_prio);
655 	req->actor_portno = ntohs(lp->lp_actor.lip_portid.lpi_portno);
656 	req->actor_state = lp->lp_actor.lip_state;
657 
658 	req->partner_prio = ntohs(lp->lp_partner.lip_systemid.lsi_prio);
659 	memcpy(&req->partner_mac, &lp->lp_partner.lip_systemid.lsi_mac,
660 	    ETHER_ADDR_LEN);
661 	req->partner_key = ntohs(lp->lp_partner.lip_key);
662 	req->partner_portprio = ntohs(lp->lp_partner.lip_portid.lpi_prio);
663 	req->partner_portno = ntohs(lp->lp_partner.lip_portid.lpi_portno);
664 	req->partner_state = lp->lp_partner.lip_state;
665 	LACP_UNLOCK(lsc);
666 }
667 
668 static void
669 lacp_disable_collecting(struct lacp_port *lp)
670 {
671 	LACP_DPRINTF((lp, "collecting disabled\n"));
672 	lp->lp_state &= ~LACP_STATE_COLLECTING;
673 }
674 
675 static void
676 lacp_enable_collecting(struct lacp_port *lp)
677 {
678 	LACP_DPRINTF((lp, "collecting enabled\n"));
679 	lp->lp_state |= LACP_STATE_COLLECTING;
680 }
681 
682 static void
683 lacp_disable_distributing(struct lacp_port *lp)
684 {
685 	struct lacp_aggregator *la = lp->lp_aggregator;
686 	struct lacp_softc *lsc = lp->lp_lsc;
687 	struct lagg_softc *sc = lsc->lsc_softc;
688 	char buf[LACP_LAGIDSTR_MAX+1];
689 
690 	LACP_LOCK_ASSERT(lsc);
691 
692 	if (la == NULL || (lp->lp_state & LACP_STATE_DISTRIBUTING) == 0) {
693 		return;
694 	}
695 
696 	KASSERT(!TAILQ_EMPTY(&la->la_ports), ("no aggregator ports"));
697 	KASSERT(la->la_nports > 0, ("nports invalid (%d)", la->la_nports));
698 	KASSERT(la->la_refcnt >= la->la_nports, ("aggregator refcnt invalid"));
699 
700 	LACP_DPRINTF((lp, "disable distributing on aggregator %s, "
701 	    "nports %d -> %d\n",
702 	    lacp_format_lagid_aggregator(la, buf, sizeof(buf)),
703 	    la->la_nports, la->la_nports - 1));
704 
705 	TAILQ_REMOVE(&la->la_ports, lp, lp_dist_q);
706 	la->la_nports--;
707 	sc->sc_active = la->la_nports;
708 
709 	if (lsc->lsc_active_aggregator == la) {
710 		lacp_suppress_distributing(lsc, la);
711 		lacp_select_active_aggregator(lsc);
712 		/* Regenerate the port map; the active aggregator may have changed. */
713 		lacp_update_portmap(lsc);
714 	}
715 
716 	lp->lp_state &= ~LACP_STATE_DISTRIBUTING;
717 	if_link_state_change(sc->sc_ifp,
718 	    sc->sc_active ? LINK_STATE_UP : LINK_STATE_DOWN);
719 }
720 
721 static void
722 lacp_enable_distributing(struct lacp_port *lp)
723 {
724 	struct lacp_aggregator *la = lp->lp_aggregator;
725 	struct lacp_softc *lsc = lp->lp_lsc;
726 	struct lagg_softc *sc = lsc->lsc_softc;
727 	char buf[LACP_LAGIDSTR_MAX+1];
728 
729 	LACP_LOCK_ASSERT(lsc);
730 
731 	if ((lp->lp_state & LACP_STATE_DISTRIBUTING) != 0) {
732 		return;
733 	}
734 
735 	LACP_DPRINTF((lp, "enable distributing on aggregator %s, "
736 	    "nports %d -> %d\n",
737 	    lacp_format_lagid_aggregator(la, buf, sizeof(buf)),
738 	    la->la_nports, la->la_nports + 1));
739 
740 	KASSERT(la->la_refcnt > la->la_nports, ("aggregator refcnt invalid"));
741 	TAILQ_INSERT_HEAD(&la->la_ports, lp, lp_dist_q);
742 	la->la_nports++;
743 	sc->sc_active = la->la_nports;
744 
745 	lp->lp_state |= LACP_STATE_DISTRIBUTING;
746 
747 	if (lsc->lsc_active_aggregator == la) {
748 		lacp_suppress_distributing(lsc, la);
749 		lacp_update_portmap(lsc);
750 	} else
751 		/* try to become the active aggregator */
752 		lacp_select_active_aggregator(lsc);
753 
754 	if_link_state_change(sc->sc_ifp,
755 	    sc->sc_active ? LINK_STATE_UP : LINK_STATE_DOWN);
756 }
757 
758 static void
759 lacp_transit_expire(void *vp)
760 {
761 	struct lacp_softc *lsc = vp;
762 
763 	LACP_LOCK_ASSERT(lsc);
764 
765 	CURVNET_SET(lsc->lsc_softc->sc_ifp->if_vnet);
766 	LACP_TRACE(NULL);
767 	CURVNET_RESTORE();
768 
769 	lsc->lsc_suppress_distributing = FALSE;
770 }
771 
772 void
773 lacp_attach(struct lagg_softc *sc)
774 {
775 	struct lacp_softc *lsc;
776 
777 	lsc = malloc(sizeof(struct lacp_softc), M_DEVBUF, M_WAITOK | M_ZERO);
778 
779 	sc->sc_psc = lsc;
780 	lsc->lsc_softc = sc;
781 
782 	lsc->lsc_hashkey = m_ether_tcpip_hash_init();
783 	lsc->lsc_active_aggregator = NULL;
784 	lsc->lsc_strict_mode = VNET(lacp_default_strict_mode);
785 	LACP_LOCK_INIT(lsc);
786 	TAILQ_INIT(&lsc->lsc_aggregators);
787 	LIST_INIT(&lsc->lsc_ports);
788 
789 	callout_init_mtx(&lsc->lsc_transit_callout, &lsc->lsc_mtx, 0);
790 	callout_init_mtx(&lsc->lsc_callout, &lsc->lsc_mtx, 0);
791 
792 	/* if the lagg is already up then do the same */
793 	if (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING)
794 		lacp_init(sc);
795 }
796 
797 void
798 lacp_detach(void *psc)
799 {
800 	struct lacp_softc *lsc = (struct lacp_softc *)psc;
801 
802 	KASSERT(TAILQ_EMPTY(&lsc->lsc_aggregators),
803 	    ("aggregators still active"));
804 	KASSERT(lsc->lsc_active_aggregator == NULL,
805 	    ("aggregator still attached"));
806 
807 	callout_drain(&lsc->lsc_transit_callout);
808 	callout_drain(&lsc->lsc_callout);
809 
810 	LACP_LOCK_DESTROY(lsc);
811 	free(lsc, M_DEVBUF);
812 }
813 
814 void
815 lacp_init(struct lagg_softc *sc)
816 {
817 	struct lacp_softc *lsc = LACP_SOFTC(sc);
818 
819 	LACP_LOCK(lsc);
820 	callout_reset(&lsc->lsc_callout, hz, lacp_tick, lsc);
821 	LACP_UNLOCK(lsc);
822 }
823 
824 void
825 lacp_stop(struct lagg_softc *sc)
826 {
827 	struct lacp_softc *lsc = LACP_SOFTC(sc);
828 
829 	LACP_LOCK(lsc);
830 	callout_stop(&lsc->lsc_transit_callout);
831 	callout_stop(&lsc->lsc_callout);
832 	LACP_UNLOCK(lsc);
833 }
834 
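/*
 * lacp_select_tx_port_by_hash: map a flow hash to a distributing port in
 * the active portmap, preferring ports in the given NUMA domain when
 * LAGG_OPT_USE_NUMA is enabled.  Returns NULL with *err set while marker
 * transit suppresses distribution or when no aggregator is active.
 */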
835 struct lagg_port *
836 lacp_select_tx_port_by_hash(struct lagg_softc *sc, uint32_t hash,
837     uint8_t numa_domain, int *err)
838 {
839 	struct lacp_softc *lsc = LACP_SOFTC(sc);
840 	struct lacp_portmap *pm;
841 	struct lacp_port *lp;
842 	struct lacp_port **map;
843 	int count;
844 
845 	if (__predict_false(lsc->lsc_suppress_distributing)) {
846 		LACP_DPRINTF((NULL, "%s: waiting transit\n", __func__));
847 		*err = ENOBUFS;
848 		return (NULL);
849 	}
850 
851 	pm = &lsc->lsc_pmap[lsc->lsc_activemap];
852 	if (pm->pm_count == 0) {
853 		LACP_DPRINTF((NULL, "%s: no active aggregator\n", __func__));
854 		*err = ENETDOWN;
855 		return (NULL);
856 	}
857 
858 #ifdef NUMA
859 	if ((sc->sc_opts & LAGG_OPT_USE_NUMA) &&
860 	    pm->pm_num_dom > 1 && numa_domain < MAXMEMDOM) {
861 		count = pm->pm_numa[numa_domain].count;
862 		if (count > 0) {
863 			map = pm->pm_numa[numa_domain].map;
864 		} else {
865 			/* No ports on this domain; fall back to the global port map. */
866 			map = pm->pm_map;
867 			count = pm->pm_count;
868 		}
869 	} else
870 #endif
871 	{
872 		map = pm->pm_map;
873 		count = pm->pm_count;
874 	}
875 
876 	hash %= count;
877 	lp = map[hash];
878 
879 	KASSERT((lp->lp_state & LACP_STATE_DISTRIBUTING) != 0,
880 	    ("aggregated port is not distributing"));
881 
882 	return (lp->lp_lagg);
883 }
884 
885 struct lagg_port *
886 lacp_select_tx_port(struct lagg_softc *sc, struct mbuf *m, int *err)
887 {
888 	struct lacp_softc *lsc = LACP_SOFTC(sc);
889 	uint32_t hash;
890 	uint8_t numa_domain;
891 
892 	if ((sc->sc_opts & LAGG_OPT_USE_FLOWID) &&
893 	    M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
894 		hash = m->m_pkthdr.flowid >> sc->flowid_shift;
895 	else
896 		hash = m_ether_tcpip_hash(sc->sc_flags, m, lsc->lsc_hashkey);
897 
898 	numa_domain = m->m_pkthdr.numa_domain;
899 	return (lacp_select_tx_port_by_hash(sc, hash, numa_domain, err));
900 }
901 
902 /*
903  * lacp_suppress_distributing: drop transmit packets for a while
904  * to preserve packet ordering.
905  */
906 
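/*
 * Distribution resumes once the marker exchange completes (handled in
 * lacp_marker_input()) or, at the latest, when the LACP_TRANSIT_DELAY
 * timeout fires in lacp_transit_expire().
 */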
907 static void
908 lacp_suppress_distributing(struct lacp_softc *lsc, struct lacp_aggregator *la)
909 {
910 	struct lacp_port *lp;
911 
912 	if (lsc->lsc_active_aggregator != la) {
913 		return;
914 	}
915 
916 	LACP_TRACE(NULL);
917 
918 	lsc->lsc_suppress_distributing = TRUE;
919 
920 	/* send a marker frame down each port to verify the queues are empty */
921 	LIST_FOREACH(lp, &lsc->lsc_ports, lp_next) {
922 		lp->lp_flags |= LACP_PORT_MARK;
923 		if (lacp_xmit_marker(lp) != 0)
924 			lp->lp_flags &= ~LACP_PORT_MARK;
925 	}
926 
927 	/* set a timeout for the marker frames */
928 	callout_reset(&lsc->lsc_transit_callout,
929 	    LACP_TRANSIT_DELAY * hz / 1000, lacp_transit_expire, lsc);
930 }
931 
932 static int
933 lacp_compare_peerinfo(const struct lacp_peerinfo *a,
934     const struct lacp_peerinfo *b)
935 {
936 	return (memcmp(a, b, offsetof(struct lacp_peerinfo, lip_state)));
937 }
938 
939 static int
940 lacp_compare_systemid(const struct lacp_systemid *a,
941     const struct lacp_systemid *b)
942 {
943 	return (memcmp(a, b, sizeof(*a)));
944 }
945 
946 #if 0	/* unused */
947 static int
948 lacp_compare_portid(const struct lacp_portid *a,
949     const struct lacp_portid *b)
950 {
951 	return (memcmp(a, b, sizeof(*a)));
952 }
953 #endif
954 
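/*
 * lacp_aggregator_bandwidth: estimate the aggregate speed as the media
 * baudrate of the first member port multiplied by the number of members;
 * all members of an aggregator are assumed to run at the same speed.
 */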
955 static uint64_t
956 lacp_aggregator_bandwidth(struct lacp_aggregator *la)
957 {
958 	struct lacp_port *lp;
959 	uint64_t speed;
960 
961 	lp = TAILQ_FIRST(&la->la_ports);
962 	if (lp == NULL) {
963 		return (0);
964 	}
965 
966 	speed = ifmedia_baudrate(lp->lp_media);
967 	speed *= la->la_nports;
968 	if (speed == 0) {
969 		LACP_DPRINTF((lp, "speed 0? media=0x%x nports=%d\n",
970 		    lp->lp_media, la->la_nports));
971 	}
972 
973 	return (speed);
974 }
975 
976 /*
977  * lacp_select_active_aggregator: select an aggregator to be used to transmit
978  * packets from the lagg(4) interface.
979  */
980 
981 static void
982 lacp_select_active_aggregator(struct lacp_softc *lsc)
983 {
984 	struct lacp_aggregator *la;
985 	struct lacp_aggregator *best_la = NULL;
986 	uint64_t best_speed = 0;
987 	char buf[LACP_LAGIDSTR_MAX+1];
988 
989 	LACP_TRACE(NULL);
990 
991 	TAILQ_FOREACH(la, &lsc->lsc_aggregators, la_q) {
992 		uint64_t speed;
993 
994 		if (la->la_nports == 0) {
995 			continue;
996 		}
997 
998 		speed = lacp_aggregator_bandwidth(la);
999 		LACP_DPRINTF((NULL, "%s, speed=%jd, nports=%d\n",
1000 		    lacp_format_lagid_aggregator(la, buf, sizeof(buf)),
1001 		    speed, la->la_nports));
1002 
1003 		/*
1004 		 * This aggregator is chosen if the partner has a better
1005 		 * (numerically lower) system priority, the aggregated speed
1006 		 * is higher, or, at equal speed, it is already the active aggregator.
1007 		 */
1008 		if ((best_la != NULL && LACP_SYS_PRI(la->la_partner) <
1009 		    LACP_SYS_PRI(best_la->la_partner)) ||
1010 		    speed > best_speed ||
1011 		    (speed == best_speed &&
1012 		    la == lsc->lsc_active_aggregator)) {
1013 			best_la = la;
1014 			best_speed = speed;
1015 		}
1016 	}
1017 
1018 	KASSERT(best_la == NULL || best_la->la_nports > 0,
1019 	    ("invalid aggregator refcnt"));
1020 	KASSERT(best_la == NULL || !TAILQ_EMPTY(&best_la->la_ports),
1021 	    ("invalid aggregator list"));
1022 
1023 	if (lsc->lsc_active_aggregator != best_la) {
1024 		LACP_DPRINTF((NULL, "active aggregator changed\n"));
1025 		LACP_DPRINTF((NULL, "old %s\n",
1026 		    lacp_format_lagid_aggregator(lsc->lsc_active_aggregator,
1027 		    buf, sizeof(buf))));
1028 	} else {
1029 		LACP_DPRINTF((NULL, "active aggregator not changed\n"));
1030 	}
1031 	LACP_DPRINTF((NULL, "new %s\n",
1032 	    lacp_format_lagid_aggregator(best_la, buf, sizeof(buf))));
1033 
1034 	if (lsc->lsc_active_aggregator != best_la) {
1035 		lsc->lsc_active_aggregator = best_la;
1036 		lacp_update_portmap(lsc);
1037 		if (best_la) {
1038 			lacp_suppress_distributing(lsc, best_la);
1039 		}
1040 	}
1041 }
1042 
1043 /*
1044  * Update the inactive portmap array with the new list of ports and
1045  * make it live.
1046  */
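/*
 * Two portmaps are kept (lsc_pmap[0]/[1]): the inactive one is rebuilt
 * here and then published with a release store on lsc_activemap, so the
 * transmit path, which reads the map without the LACP lock, never sees
 * a partially built table.
 */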
1047 static void
1048 lacp_update_portmap(struct lacp_softc *lsc)
1049 {
1050 	struct lagg_softc *sc = lsc->lsc_softc;
1051 	struct lacp_aggregator *la;
1052 	struct lacp_portmap *p;
1053 	struct lacp_port *lp;
1054 	uint64_t speed;
1055 	u_int newmap;
1056 	int i;
1057 #ifdef NUMA
1058 	int count;
1059 	uint8_t domain;
1060 #endif
1061 
1062 	newmap = lsc->lsc_activemap == 0 ? 1 : 0;
1063 	p = &lsc->lsc_pmap[newmap];
1064 	la = lsc->lsc_active_aggregator;
1065 	speed = 0;
1066 	bzero(p, sizeof(struct lacp_portmap));
1067 
1068 	if (la != NULL && la->la_nports > 0) {
1069 		p->pm_count = la->la_nports;
1070 		i = 0;
1071 		TAILQ_FOREACH(lp, &la->la_ports, lp_dist_q) {
1072 			p->pm_map[i++] = lp;
1073 #ifdef NUMA
1074 			domain = lp->lp_ifp->if_numa_domain;
1075 			if (domain >= MAXMEMDOM)
1076 				continue;
1077 			count = p->pm_numa[domain].count;
1078 			p->pm_numa[domain].map[count] = lp;
1079 			p->pm_numa[domain].count++;
1080 #endif
1081 		}
1082 		KASSERT(i == p->pm_count, ("Invalid port count"));
1083 
1084 #ifdef NUMA
1085 		for (i = 0; i < MAXMEMDOM; i++) {
1086 			if (p->pm_numa[i].count != 0)
1087 				p->pm_num_dom++;
1088 		}
1089 #endif
1090 		speed = lacp_aggregator_bandwidth(la);
1091 	}
1092 	sc->sc_ifp->if_baudrate = speed;
1093 	EVENTHANDLER_INVOKE(ifnet_event, sc->sc_ifp,
1094 	    IFNET_EVENT_UPDATE_BAUDRATE);
1095 
1096 	/* switch the active portmap over */
1097 	atomic_store_rel_int(&lsc->lsc_activemap, newmap);
1098 	LACP_DPRINTF((NULL, "Set table %d with %d ports\n",
1099 		    lsc->lsc_activemap,
1100 		    lsc->lsc_pmap[lsc->lsc_activemap].pm_count));
1101 }
1102 
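/*
 * lacp_compose_key: derive the operational key for a port.  Media
 * subtypes of the same nominal speed are collapsed to a single
 * representative value so equal-speed links can share an aggregator;
 * non-aggregatable links get a (mostly) unique key with bit 15 set.
 */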
1103 static uint16_t
1104 lacp_compose_key(struct lacp_port *lp)
1105 {
1106 	struct lagg_port *lgp = lp->lp_lagg;
1107 	struct lagg_softc *sc = lgp->lp_softc;
1108 	u_int media = lp->lp_media;
1109 	uint16_t key;
1110 
1111 	if ((lp->lp_state & LACP_STATE_AGGREGATION) == 0) {
1112 		/*
1113 		 * non-aggregatable links should have unique keys.
1114 		 *
1115 		 * XXX this isn't really unique as if_index is 16 bit.
1116 		 */
1117 
1118 		/* bit 0..14:	(some bits of) if_index of this port */
1119 		key = lp->lp_ifp->if_index;
1120 		/* bit 15:	1 */
1121 		key |= 0x8000;
1122 	} else {
1123 		u_int subtype = IFM_SUBTYPE(media);
1124 
1125 		KASSERT(IFM_TYPE(media) == IFM_ETHER, ("invalid media type"));
1126 		KASSERT((media & IFM_FDX) != 0, ("aggregating HDX interface"));
1127 
1128 		/* bit 0..4:	IFM_SUBTYPE modulo speed */
1129 		switch (subtype) {
1130 		case IFM_10_T:
1131 		case IFM_10_2:
1132 		case IFM_10_5:
1133 		case IFM_10_STP:
1134 		case IFM_10_FL:
1135 			key = IFM_10_T;
1136 			break;
1137 		case IFM_100_TX:
1138 		case IFM_100_FX:
1139 		case IFM_100_T4:
1140 		case IFM_100_VG:
1141 		case IFM_100_T2:
1142 		case IFM_100_T:
1143 		case IFM_100_SGMII:
1144 			key = IFM_100_TX;
1145 			break;
1146 		case IFM_1000_SX:
1147 		case IFM_1000_LX:
1148 		case IFM_1000_CX:
1149 		case IFM_1000_T:
1150 		case IFM_1000_KX:
1151 		case IFM_1000_SGMII:
1152 		case IFM_1000_CX_SGMII:
1153 			key = IFM_1000_SX;
1154 			break;
1155 		case IFM_10G_LR:
1156 		case IFM_10G_SR:
1157 		case IFM_10G_CX4:
1158 		case IFM_10G_TWINAX:
1159 		case IFM_10G_TWINAX_LONG:
1160 		case IFM_10G_LRM:
1161 		case IFM_10G_T:
1162 		case IFM_10G_KX4:
1163 		case IFM_10G_KR:
1164 		case IFM_10G_CR1:
1165 		case IFM_10G_ER:
1166 		case IFM_10G_SFI:
1167 		case IFM_10G_AOC:
1168 			key = IFM_10G_LR;
1169 			break;
1170 		case IFM_20G_KR2:
1171 			key = IFM_20G_KR2;
1172 			break;
1173 		case IFM_2500_KX:
1174 		case IFM_2500_T:
1175 		case IFM_2500_X:
1176 			key = IFM_2500_KX;
1177 			break;
1178 		case IFM_5000_T:
1179 		case IFM_5000_KR:
1180 		case IFM_5000_KR_S:
1181 		case IFM_5000_KR1:
1182 			key = IFM_5000_T;
1183 			break;
1184 		case IFM_50G_PCIE:
1185 		case IFM_50G_CR2:
1186 		case IFM_50G_KR2:
1187 		case IFM_50G_KR4:
1188 		case IFM_50G_SR2:
1189 		case IFM_50G_LR2:
1190 		case IFM_50G_LAUI2_AC:
1191 		case IFM_50G_LAUI2:
1192 		case IFM_50G_AUI2_AC:
1193 		case IFM_50G_AUI2:
1194 		case IFM_50G_CP:
1195 		case IFM_50G_SR:
1196 		case IFM_50G_LR:
1197 		case IFM_50G_FR:
1198 		case IFM_50G_KR_PAM4:
1199 		case IFM_50G_AUI1_AC:
1200 		case IFM_50G_AUI1:
1201 			key = IFM_50G_PCIE;
1202 			break;
1203 		case IFM_56G_R4:
1204 			key = IFM_56G_R4;
1205 			break;
1206 		case IFM_25G_PCIE:
1207 		case IFM_25G_CR:
1208 		case IFM_25G_KR:
1209 		case IFM_25G_SR:
1210 		case IFM_25G_LR:
1211 		case IFM_25G_ACC:
1212 		case IFM_25G_AOC:
1213 		case IFM_25G_T:
1214 		case IFM_25G_CR_S:
1215 		case IFM_25G_CR1:
1216 		case IFM_25G_KR_S:
1217 		case IFM_25G_AUI:
1218 		case IFM_25G_KR1:
1219 			key = IFM_25G_PCIE;
1220 			break;
1221 		case IFM_40G_CR4:
1222 		case IFM_40G_SR4:
1223 		case IFM_40G_LR4:
1224 		case IFM_40G_LM4:
1225 		case IFM_40G_XLPPI:
1226 		case IFM_40G_KR4:
1227 		case IFM_40G_XLAUI:
1228 		case IFM_40G_XLAUI_AC:
1229 		case IFM_40G_ER4:
1230 			key = IFM_40G_CR4;
1231 			break;
1232 		case IFM_100G_CR4:
1233 		case IFM_100G_SR4:
1234 		case IFM_100G_KR4:
1235 		case IFM_100G_LR4:
1236 		case IFM_100G_CAUI4_AC:
1237 		case IFM_100G_CAUI4:
1238 		case IFM_100G_AUI4_AC:
1239 		case IFM_100G_AUI4:
1240 		case IFM_100G_CR_PAM4:
1241 		case IFM_100G_KR_PAM4:
1242 		case IFM_100G_CP2:
1243 		case IFM_100G_SR2:
1244 		case IFM_100G_DR:
1245 		case IFM_100G_KR2_PAM4:
1246 		case IFM_100G_CAUI2_AC:
1247 		case IFM_100G_CAUI2:
1248 		case IFM_100G_AUI2_AC:
1249 		case IFM_100G_AUI2:
1250 			key = IFM_100G_CR4;
1251 			break;
1252 		case IFM_200G_CR4_PAM4:
1253 		case IFM_200G_SR4:
1254 		case IFM_200G_FR4:
1255 		case IFM_200G_LR4:
1256 		case IFM_200G_DR4:
1257 		case IFM_200G_KR4_PAM4:
1258 		case IFM_200G_AUI4_AC:
1259 		case IFM_200G_AUI4:
1260 		case IFM_200G_AUI8_AC:
1261 		case IFM_200G_AUI8:
1262 			key = IFM_200G_CR4_PAM4;
1263 			break;
1264 		case IFM_400G_FR8:
1265 		case IFM_400G_LR8:
1266 		case IFM_400G_DR4:
1267 		case IFM_400G_AUI8_AC:
1268 		case IFM_400G_AUI8:
1269 			key = IFM_400G_FR8;
1270 			break;
1271 		default:
1272 			key = subtype;
1273 			break;
1274 		}
1275 		/* bit 5..14:	(some bits of) if_index of lagg device */
1276 		key |= 0x7fe0 & ((sc->sc_ifp->if_index) << 5);
1277 		/* bit 15:	0 */
1278 	}
1279 	return (htons(key));
1280 }
1281 
1282 static void
1283 lacp_aggregator_addref(struct lacp_softc *lsc, struct lacp_aggregator *la)
1284 {
1285 	char buf[LACP_LAGIDSTR_MAX+1];
1286 
1287 	LACP_DPRINTF((NULL, "%s: lagid=%s, refcnt %d -> %d\n",
1288 	    __func__,
1289 	    lacp_format_lagid(&la->la_actor, &la->la_partner,
1290 	    buf, sizeof(buf)),
1291 	    la->la_refcnt, la->la_refcnt + 1));
1292 
1293 	KASSERT(la->la_refcnt > 0, ("refcount <= 0"));
1294 	la->la_refcnt++;
1295 	KASSERT(la->la_refcnt > la->la_nports, ("invalid refcount"));
1296 }
1297 
1298 static void
1299 lacp_aggregator_delref(struct lacp_softc *lsc, struct lacp_aggregator *la)
1300 {
1301 	char buf[LACP_LAGIDSTR_MAX+1];
1302 
1303 	LACP_DPRINTF((NULL, "%s: lagid=%s, refcnt %d -> %d\n",
1304 	    __func__,
1305 	    lacp_format_lagid(&la->la_actor, &la->la_partner,
1306 	    buf, sizeof(buf)),
1307 	    la->la_refcnt, la->la_refcnt - 1));
1308 
1309 	KASSERT(la->la_refcnt > la->la_nports, ("invalid refcnt"));
1310 	la->la_refcnt--;
1311 	if (la->la_refcnt > 0) {
1312 		return;
1313 	}
1314 
1315 	KASSERT(la->la_refcnt == 0, ("refcount not zero"));
1316 	KASSERT(lsc->lsc_active_aggregator != la, ("aggregator active"));
1317 
1318 	TAILQ_REMOVE(&lsc->lsc_aggregators, la, la_q);
1319 
1320 	free(la, M_DEVBUF);
1321 }
1322 
1323 /*
1324  * lacp_aggregator_get: allocate an aggregator.
1325  */
1326 
1327 static struct lacp_aggregator *
1328 lacp_aggregator_get(struct lacp_softc *lsc, struct lacp_port *lp)
1329 {
1330 	struct lacp_aggregator *la;
1331 
1332 	la = malloc(sizeof(*la), M_DEVBUF, M_NOWAIT);
1333 	if (la) {
1334 		la->la_refcnt = 1;
1335 		la->la_nports = 0;
1336 		TAILQ_INIT(&la->la_ports);
1337 		la->la_pending = 0;
1338 		TAILQ_INSERT_TAIL(&lsc->lsc_aggregators, la, la_q);
1339 	}
1340 
1341 	return (la);
1342 }
1343 
1344 /*
1345  * lacp_fill_aggregator_id: setup a newly allocated aggregator from a port.
1346  * lacp_fill_aggregator_id: set up a newly allocated aggregator from a port.
1347 
1348 static void
1349 lacp_fill_aggregator_id(struct lacp_aggregator *la, const struct lacp_port *lp)
1350 {
1351 	lacp_fill_aggregator_id_peer(&la->la_partner, &lp->lp_partner);
1352 	lacp_fill_aggregator_id_peer(&la->la_actor, &lp->lp_actor);
1353 
1354 	la->la_actor.lip_state = lp->lp_state & LACP_STATE_AGGREGATION;
1355 }
1356 
1357 static void
1358 lacp_fill_aggregator_id_peer(struct lacp_peerinfo *lpi_aggr,
1359     const struct lacp_peerinfo *lpi_port)
1360 {
1361 	memset(lpi_aggr, 0, sizeof(*lpi_aggr));
1362 	lpi_aggr->lip_systemid = lpi_port->lip_systemid;
1363 	lpi_aggr->lip_key = lpi_port->lip_key;
1364 }
1365 
1366 /*
1367  * lacp_aggregator_is_compatible: check if a port can join an aggregator.
1368  */
1369 
1370 static int
1371 lacp_aggregator_is_compatible(const struct lacp_aggregator *la,
1372     const struct lacp_port *lp)
1373 {
1374 	if (!(lp->lp_state & LACP_STATE_AGGREGATION) ||
1375 	    !(lp->lp_partner.lip_state & LACP_STATE_AGGREGATION)) {
1376 		return (0);
1377 	}
1378 
1379 	if (!(la->la_actor.lip_state & LACP_STATE_AGGREGATION)) {
1380 		return (0);
1381 	}
1382 
1383 	if (!lacp_peerinfo_is_compatible(&la->la_partner, &lp->lp_partner)) {
1384 		return (0);
1385 	}
1386 
1387 	if (!lacp_peerinfo_is_compatible(&la->la_actor, &lp->lp_actor)) {
1388 		return (0);
1389 	}
1390 
1391 	return (1);
1392 }
1393 
1394 static int
1395 lacp_peerinfo_is_compatible(const struct lacp_peerinfo *a,
1396     const struct lacp_peerinfo *b)
1397 {
1398 	if (memcmp(&a->lip_systemid, &b->lip_systemid,
1399 	    sizeof(a->lip_systemid))) {
1400 		return (0);
1401 	}
1402 
1403 	if (memcmp(&a->lip_key, &b->lip_key, sizeof(a->lip_key))) {
1404 		return (0);
1405 	}
1406 
1407 	return (1);
1408 }
1409 
1410 static void
1411 lacp_port_enable(struct lacp_port *lp)
1412 {
1413 	lp->lp_state |= LACP_STATE_AGGREGATION;
1414 }
1415 
1416 static void
1417 lacp_port_disable(struct lacp_port *lp)
1418 {
1419 	lacp_set_mux(lp, LACP_MUX_DETACHED);
1420 
1421 	lp->lp_state &= ~LACP_STATE_AGGREGATION;
1422 	lp->lp_selected = LACP_UNSELECTED;
1423 	lacp_sm_rx_record_default(lp);
1424 	lp->lp_partner.lip_state &= ~LACP_STATE_AGGREGATION;
1425 	lp->lp_state &= ~LACP_STATE_EXPIRED;
1426 }
1427 
1428 /*
1429  * lacp_select: select an aggregator.  create one if necessary.
1430  */
1431 static void
1432 lacp_select(struct lacp_port *lp)
1433 {
1434 	struct lacp_softc *lsc = lp->lp_lsc;
1435 	struct lacp_aggregator *la;
1436 	char buf[LACP_LAGIDSTR_MAX+1];
1437 
1438 	if (lp->lp_aggregator) {
1439 		return;
1440 	}
1441 
1442 	/* If we haven't heard from our peer, skip this step. */
1443 	if (lp->lp_state & LACP_STATE_DEFAULTED)
1444 		return;
1445 
1446 	KASSERT(!LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE),
1447 	    ("timer_wait_while still active"));
1448 
1449 	LACP_DPRINTF((lp, "port lagid=%s\n",
1450 	    lacp_format_lagid(&lp->lp_actor, &lp->lp_partner,
1451 	    buf, sizeof(buf))));
1452 
1453 	TAILQ_FOREACH(la, &lsc->lsc_aggregators, la_q) {
1454 		if (lacp_aggregator_is_compatible(la, lp)) {
1455 			break;
1456 		}
1457 	}
1458 
1459 	if (la == NULL) {
1460 		la = lacp_aggregator_get(lsc, lp);
1461 		if (la == NULL) {
1462 			LACP_DPRINTF((lp, "aggregator creation failed\n"));
1463 
1464 			/*
1465 			 * will retry on the next tick.
1466 			 */
1467 
1468 			return;
1469 		}
1470 		lacp_fill_aggregator_id(la, lp);
1471 		LACP_DPRINTF((lp, "aggregator created\n"));
1472 	} else {
1473 		LACP_DPRINTF((lp, "compatible aggregator found\n"));
1474 		if (la->la_refcnt == LACP_MAX_PORTS)
1475 			return;
1476 		lacp_aggregator_addref(lsc, la);
1477 	}
1478 
1479 	LACP_DPRINTF((lp, "aggregator lagid=%s\n",
1480 	    lacp_format_lagid(&la->la_actor, &la->la_partner,
1481 	    buf, sizeof(buf))));
1482 
1483 	lp->lp_aggregator = la;
1484 	lp->lp_selected = LACP_SELECTED;
1485 }
1486 
1487 /*
1488  * lacp_unselect: finish unselect/detach process.
1489  */
1490 
1491 static void
1492 lacp_unselect(struct lacp_port *lp)
1493 {
1494 	struct lacp_softc *lsc = lp->lp_lsc;
1495 	struct lacp_aggregator *la = lp->lp_aggregator;
1496 
1497 	KASSERT(!LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE),
1498 	    ("timer_wait_while still active"));
1499 
1500 	if (la == NULL) {
1501 		return;
1502 	}
1503 
1504 	lp->lp_aggregator = NULL;
1505 	lacp_aggregator_delref(lsc, la);
1506 }
1507 
1508 /* mux machine */
1509 
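/*
 * lacp_sm_mux: mux state machine.  A port advances DETACHED -> WAITING ->
 * ATTACHED -> COLLECTING -> DISTRIBUTING as it becomes selected and the
 * partner asserts SYNC and COLLECTING, and falls back when selection is
 * lost or the partner's state regresses; the loop re-evaluates until the
 * state is stable.
 */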
1510 static void
1511 lacp_sm_mux(struct lacp_port *lp)
1512 {
1513 	struct lagg_port *lgp = lp->lp_lagg;
1514 	struct lagg_softc *sc = lgp->lp_softc;
1515 	enum lacp_mux_state new_state;
1516 	boolean_t p_sync =
1517 		    (lp->lp_partner.lip_state & LACP_STATE_SYNC) != 0;
1518 	boolean_t p_collecting =
1519 	    (lp->lp_partner.lip_state & LACP_STATE_COLLECTING) != 0;
1520 	enum lacp_selected selected = lp->lp_selected;
1521 	struct lacp_aggregator *la;
1522 
1523 	if (V_lacp_debug > 1)
1524 		lacp_dprintf(lp, "%s: state= 0x%x, selected= 0x%x, "
1525 		    "p_sync= 0x%x, p_collecting= 0x%x\n", __func__,
1526 		    lp->lp_mux_state, selected, p_sync, p_collecting);
1527 
1528 re_eval:
1529 	la = lp->lp_aggregator;
1530 	KASSERT(lp->lp_mux_state == LACP_MUX_DETACHED || la != NULL,
1531 	    ("MUX not detached"));
1532 	new_state = lp->lp_mux_state;
1533 	switch (lp->lp_mux_state) {
1534 	case LACP_MUX_DETACHED:
1535 		if (selected != LACP_UNSELECTED) {
1536 			new_state = LACP_MUX_WAITING;
1537 		}
1538 		break;
1539 	case LACP_MUX_WAITING:
1540 		KASSERT(la->la_pending > 0 ||
1541 		    !LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE),
1542 		    ("timer_wait_while still active"));
1543 		if (selected == LACP_SELECTED && la->la_pending == 0) {
1544 			new_state = LACP_MUX_ATTACHED;
1545 		} else if (selected == LACP_UNSELECTED) {
1546 			new_state = LACP_MUX_DETACHED;
1547 		}
1548 		break;
1549 	case LACP_MUX_ATTACHED:
1550 		if (selected == LACP_SELECTED && p_sync) {
1551 			new_state = LACP_MUX_COLLECTING;
1552 		} else if (selected != LACP_SELECTED) {
1553 			new_state = LACP_MUX_DETACHED;
1554 		}
1555 		break;
1556 	case LACP_MUX_COLLECTING:
1557 		if (selected == LACP_SELECTED && p_sync && p_collecting) {
1558 			new_state = LACP_MUX_DISTRIBUTING;
1559 		} else if (selected != LACP_SELECTED || !p_sync) {
1560 			new_state = LACP_MUX_ATTACHED;
1561 		}
1562 		break;
1563 	case LACP_MUX_DISTRIBUTING:
1564 		if (selected != LACP_SELECTED || !p_sync || !p_collecting) {
1565 			new_state = LACP_MUX_COLLECTING;
1566 			lacp_dprintf(lp, "Interface stopped DISTRIBUTING, possible flapping\n");
1567 			sc->sc_flapping++;
1568 		}
1569 		break;
1570 	default:
1571 		panic("%s: unknown state", __func__);
1572 	}
1573 
1574 	if (lp->lp_mux_state == new_state) {
1575 		return;
1576 	}
1577 
1578 	lacp_set_mux(lp, new_state);
1579 	goto re_eval;
1580 }
1581 
1582 static void
1583 lacp_set_mux(struct lacp_port *lp, enum lacp_mux_state new_state)
1584 {
1585 	struct lacp_aggregator *la = lp->lp_aggregator;
1586 
1587 	if (lp->lp_mux_state == new_state) {
1588 		return;
1589 	}
1590 
1591 	switch (new_state) {
1592 	case LACP_MUX_DETACHED:
1593 		lp->lp_state &= ~LACP_STATE_SYNC;
1594 		lacp_disable_distributing(lp);
1595 		lacp_disable_collecting(lp);
1596 		lacp_sm_assert_ntt(lp);
1597 		/* cancel timer */
1598 		if (LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE)) {
1599 			KASSERT(la->la_pending > 0,
1600 			    ("timer_wait_while not active"));
1601 			la->la_pending--;
1602 		}
1603 		LACP_TIMER_DISARM(lp, LACP_TIMER_WAIT_WHILE);
1604 		lacp_unselect(lp);
1605 		break;
1606 	case LACP_MUX_WAITING:
1607 		LACP_TIMER_ARM(lp, LACP_TIMER_WAIT_WHILE,
1608 		    LACP_AGGREGATE_WAIT_TIME);
1609 		la->la_pending++;
1610 		break;
1611 	case LACP_MUX_ATTACHED:
1612 		lp->lp_state |= LACP_STATE_SYNC;
1613 		lacp_disable_collecting(lp);
1614 		lacp_sm_assert_ntt(lp);
1615 		break;
1616 	case LACP_MUX_COLLECTING:
1617 		lacp_enable_collecting(lp);
1618 		lacp_disable_distributing(lp);
1619 		lacp_sm_assert_ntt(lp);
1620 		break;
1621 	case LACP_MUX_DISTRIBUTING:
1622 		lacp_enable_distributing(lp);
1623 		break;
1624 	default:
1625 		panic("%s: unknown state", __func__);
1626 	}
1627 
1628 	LACP_DPRINTF((lp, "mux_state %d -> %d\n", lp->lp_mux_state, new_state));
1629 
1630 	lp->lp_mux_state = new_state;
1631 }
1632 
1633 static void
1634 lacp_sm_mux_timer(struct lacp_port *lp)
1635 {
1636 	struct lacp_aggregator *la = lp->lp_aggregator;
1637 	char buf[LACP_LAGIDSTR_MAX+1];
1638 
1639 	KASSERT(la->la_pending > 0, ("no pending event"));
1640 
1641 	LACP_DPRINTF((lp, "%s: aggregator %s, pending %d -> %d\n", __func__,
1642 	    lacp_format_lagid(&la->la_actor, &la->la_partner,
1643 	    buf, sizeof(buf)),
1644 	    la->la_pending, la->la_pending - 1));
1645 
1646 	la->la_pending--;
1647 }
1648 
1649 /* periodic transmit machine */
1650 
1651 static void
1652 lacp_sm_ptx_update_timeout(struct lacp_port *lp, uint8_t oldpstate)
1653 {
1654 	if (LACP_STATE_EQ(oldpstate, lp->lp_partner.lip_state,
1655 	    LACP_STATE_TIMEOUT)) {
1656 		return;
1657 	}
1658 
1659 	LACP_DPRINTF((lp, "partner timeout changed\n"));
1660 
1661 	/*
1662 	 * FAST_PERIODIC -> SLOW_PERIODIC
1663 	 * or
1664 	 * SLOW_PERIODIC (-> PERIODIC_TX) -> FAST_PERIODIC
1665 	 *
1666 	 * Let lacp_sm_ptx_tx_schedule() update the timeout.
1667 	 */
1668 
1669 	LACP_TIMER_DISARM(lp, LACP_TIMER_PERIODIC);
1670 
1671 	/*
1672 	 * if timeout has been shortened, assert NTT.
1673 	 */
1674 
1675 	if ((lp->lp_partner.lip_state & LACP_STATE_TIMEOUT)) {
1676 		lacp_sm_assert_ntt(lp);
1677 	}
1678 }
1679 
1680 static void
1681 lacp_sm_ptx_tx_schedule(struct lacp_port *lp)
1682 {
1683 	int timeout;
1684 
1685 	if (!(lp->lp_state & LACP_STATE_ACTIVITY) &&
1686 	    !(lp->lp_partner.lip_state & LACP_STATE_ACTIVITY)) {
1687 		/*
1688 		 * NO_PERIODIC
1689 		 */
1690 
1691 		LACP_TIMER_DISARM(lp, LACP_TIMER_PERIODIC);
1692 		return;
1693 	}
1694 
1695 	if (LACP_TIMER_ISARMED(lp, LACP_TIMER_PERIODIC)) {
1696 		return;
1697 	}
1698 
1699 	timeout = (lp->lp_partner.lip_state & LACP_STATE_TIMEOUT) ?
1700 	    LACP_FAST_PERIODIC_TIME : LACP_SLOW_PERIODIC_TIME;
1701 
1702 	LACP_TIMER_ARM(lp, LACP_TIMER_PERIODIC, timeout);
1703 }
1704 
1705 static void
1706 lacp_sm_ptx_timer(struct lacp_port *lp)
1707 {
1708 	lacp_sm_assert_ntt(lp);
1709 }
1710 
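/*
 * lacp_sm_rx: receive state machine, run for every validated LACPDU.
 * PDUs looped back from our own system ID are ignored; otherwise the
 * partner information is recorded and the current_while timer is
 * re-armed with the short or long timeout depending on our TIMEOUT bit.
 */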
1711 static void
1712 lacp_sm_rx(struct lacp_port *lp, const struct lacpdu *du)
1713 {
1714 	int timeout;
1715 
1716 	/*
1717 	 * check LACP_DISABLED first
1718 	 */
1719 
1720 	if (!(lp->lp_state & LACP_STATE_AGGREGATION)) {
1721 		return;
1722 	}
1723 
1724 	/*
1725 	 * check loopback condition.
1726 	 */
1727 
1728 	if (!lacp_compare_systemid(&du->ldu_actor.lip_systemid,
1729 	    &lp->lp_actor.lip_systemid)) {
1730 		return;
1731 	}
1732 
1733 	/*
1734 	 * EXPIRED, DEFAULTED, CURRENT -> CURRENT
1735 	 */
1736 
1737 	microuptime(&lp->lp_last_lacpdu_rx);
1738 	lacp_sm_rx_update_selected(lp, du);
1739 	lacp_sm_rx_update_ntt(lp, du);
1740 	lacp_sm_rx_record_pdu(lp, du);
1741 
1742 	timeout = (lp->lp_state & LACP_STATE_TIMEOUT) ?
1743 	    LACP_SHORT_TIMEOUT_TIME : LACP_LONG_TIMEOUT_TIME;
1744 	LACP_TIMER_ARM(lp, LACP_TIMER_CURRENT_WHILE, timeout);
1745 
1746 	lp->lp_state &= ~LACP_STATE_EXPIRED;
1747 
1748 	/*
1749 	 * Kick the transmit machine without waiting for the next tick.
1750 	 */
1751 
1752 	lacp_sm_tx(lp);
1753 }
1754 
1755 static void
1756 lacp_sm_rx_set_expired(struct lacp_port *lp)
1757 {
1758 	lp->lp_partner.lip_state &= ~LACP_STATE_SYNC;
1759 	lp->lp_partner.lip_state |= LACP_STATE_TIMEOUT;
1760 	LACP_TIMER_ARM(lp, LACP_TIMER_CURRENT_WHILE, LACP_SHORT_TIMEOUT_TIME);
1761 	lp->lp_state |= LACP_STATE_EXPIRED;
1762 }
1763 
1764 static void
1765 lacp_sm_rx_timer(struct lacp_port *lp)
1766 {
1767 	if ((lp->lp_state & LACP_STATE_EXPIRED) == 0) {
1768 		/* CURRENT -> EXPIRED */
1769 		LACP_DPRINTF((lp, "%s: CURRENT -> EXPIRED\n", __func__));
1770 		lacp_sm_rx_set_expired(lp);
1771 	} else {
1772 		/* EXPIRED -> DEFAULTED */
1773 		LACP_DPRINTF((lp, "%s: EXPIRED -> DEFAULTED\n", __func__));
1774 		lacp_sm_rx_update_default_selected(lp);
1775 		lacp_sm_rx_record_default(lp);
1776 		lp->lp_state &= ~LACP_STATE_EXPIRED;
1777 	}
1778 }
1779 
1780 static void
1781 lacp_sm_rx_record_pdu(struct lacp_port *lp, const struct lacpdu *du)
1782 {
1783 	boolean_t active;
1784 	uint8_t oldpstate;
1785 	char buf[LACP_STATESTR_MAX+1];
1786 
1787 	LACP_TRACE(lp);
1788 
1789 	oldpstate = lp->lp_partner.lip_state;
1790 
1791 	active = (du->ldu_actor.lip_state & LACP_STATE_ACTIVITY)
1792 	    || ((lp->lp_state & LACP_STATE_ACTIVITY) &&
1793 	    (du->ldu_partner.lip_state & LACP_STATE_ACTIVITY));
1794 
1795 	lp->lp_partner = du->ldu_actor;
1796 	if (active &&
1797 	    ((LACP_STATE_EQ(lp->lp_state, du->ldu_partner.lip_state,
1798 	    LACP_STATE_AGGREGATION) &&
1799 	    !lacp_compare_peerinfo(&lp->lp_actor, &du->ldu_partner))
1800 	    || (du->ldu_partner.lip_state & LACP_STATE_AGGREGATION) == 0)) {
1801 		/*
1802 		 * XXX Maintain legacy behavior of leaving the
1803 		 * LACP_STATE_SYNC bit unchanged from the partner's
1804 		 * advertisement if lsc_strict_mode is false.
1805 		 * TODO: We should re-examine the concept of the "strict mode"
1806 		 * to ensure it makes sense to maintain a non-strict mode.
1807 		 */
1808 		if (lp->lp_lsc->lsc_strict_mode)
1809 			lp->lp_partner.lip_state |= LACP_STATE_SYNC;
1810 	} else {
1811 		lp->lp_partner.lip_state &= ~LACP_STATE_SYNC;
1812 	}
1813 
1814 	lp->lp_state &= ~LACP_STATE_DEFAULTED;
1815 
1816 	if (oldpstate != lp->lp_partner.lip_state) {
1817 		LACP_DPRINTF((lp, "old pstate %s\n",
1818 		    lacp_format_state(oldpstate, buf, sizeof(buf))));
1819 		LACP_DPRINTF((lp, "new pstate %s\n",
1820 		    lacp_format_state(lp->lp_partner.lip_state, buf,
1821 		    sizeof(buf))));
1822 	}
1823 
1824 	lacp_sm_ptx_update_timeout(lp, oldpstate);
1825 }
1826 
1827 static void
1828 lacp_sm_rx_update_ntt(struct lacp_port *lp, const struct lacpdu *du)
1829 {
1830 
1831 	LACP_TRACE(lp);
1832 
1833 	if (lacp_compare_peerinfo(&lp->lp_actor, &du->ldu_partner) ||
1834 	    !LACP_STATE_EQ(lp->lp_state, du->ldu_partner.lip_state,
1835 	    LACP_STATE_ACTIVITY | LACP_STATE_SYNC | LACP_STATE_AGGREGATION)) {
1836 		LACP_DPRINTF((lp, "%s: assert ntt\n", __func__));
1837 		lacp_sm_assert_ntt(lp);
1838 	}
1839 }
1840 
1841 static void
1842 lacp_sm_rx_record_default(struct lacp_port *lp)
1843 {
1844 	uint8_t oldpstate;
1845 
1846 	LACP_TRACE(lp);
1847 
1848 	oldpstate = lp->lp_partner.lip_state;
1849 	if (lp->lp_lsc->lsc_strict_mode)
1850 		lp->lp_partner = lacp_partner_admin_strict;
1851 	else
1852 		lp->lp_partner = lacp_partner_admin_optimistic;
1853 	lp->lp_state |= LACP_STATE_DEFAULTED;
1854 	lacp_sm_ptx_update_timeout(lp, oldpstate);
1855 }
1856 
1857 static void
1858 lacp_sm_rx_update_selected_from_peerinfo(struct lacp_port *lp,
1859     const struct lacp_peerinfo *info)
1860 {
1861 
1862 	LACP_TRACE(lp);
1863 
1864 	if (lacp_compare_peerinfo(&lp->lp_partner, info) ||
1865 	    !LACP_STATE_EQ(lp->lp_partner.lip_state, info->lip_state,
1866 	    LACP_STATE_AGGREGATION)) {
1867 		lp->lp_selected = LACP_UNSELECTED;
1868 		/* mux machine will clean up lp->lp_aggregator */
1869 	}
1870 }
1871 
1872 static void
1873 lacp_sm_rx_update_selected(struct lacp_port *lp, const struct lacpdu *du)
1874 {
1875 
1876 	LACP_TRACE(lp);
1877 
1878 	lacp_sm_rx_update_selected_from_peerinfo(lp, &du->ldu_actor);
1879 }
1880 
1881 static void
1882 lacp_sm_rx_update_default_selected(struct lacp_port *lp)
1883 {
1884 
1885 	LACP_TRACE(lp);
1886 
1887 	if (lp->lp_lsc->lsc_strict_mode)
1888 		lacp_sm_rx_update_selected_from_peerinfo(lp,
1889 		    &lacp_partner_admin_strict);
1890 	else
1891 		lacp_sm_rx_update_selected_from_peerinfo(lp,
1892 		    &lacp_partner_admin_optimistic);
1893 }
1894 
1895 /* transmit machine */
1896 
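/*
 * lacp_sm_tx: transmit a LACPDU if Need-To-Transmit is set.  NTT is
 * dropped outright when the port is not aggregatable, or when both ends
 * are passive and neither is required to speak first.  Transmission is
 * also rate limited via ppsratecheck() to at most three PDUs per fast
 * periodic interval (nominally one second); NTT stays set when the PDU is
 * rate limited or the transmit fails, so it will be retried.
 */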
1897 static void
1898 lacp_sm_tx(struct lacp_port *lp)
1899 {
1900 	int error = 0;
1901 
1902 	if (!(lp->lp_state & LACP_STATE_AGGREGATION)
1903 #if 1
1904 	    || (!(lp->lp_state & LACP_STATE_ACTIVITY)
1905 	    && !(lp->lp_partner.lip_state & LACP_STATE_ACTIVITY))
1906 #endif
1907 	    ) {
1908 		lp->lp_flags &= ~LACP_PORT_NTT;
1909 	}
1910 
1911 	if (!(lp->lp_flags & LACP_PORT_NTT)) {
1912 		return;
1913 	}
1914 
1915 	/* Rate limit to 3 PDUs per LACP_FAST_PERIODIC_TIME */
1916 	if (ppsratecheck(&lp->lp_last_lacpdu, &lp->lp_lacpdu_sent,
1917 		    (3 / LACP_FAST_PERIODIC_TIME)) == 0) {
1918 		LACP_DPRINTF((lp, "rate limited pdu\n"));
1919 		return;
1920 	}
1921 
1922 	if (((1 << lp->lp_ifp->if_dunit) & lp->lp_lsc->lsc_debug.lsc_tx_test) == 0) {
1923 		error = lacp_xmit_lacpdu(lp);
1924 	} else {
1925 		LACP_TPRINTF((lp, "Dropping TX PDU\n"));
1926 	}
1927 
1928 	if (error == 0) {
1929 		lp->lp_flags &= ~LACP_PORT_NTT;
1930 	} else {
1931 		LACP_DPRINTF((lp, "lacpdu transmit failure, error %d\n",
1932 		    error));
1933 	}
1934 }
1935 
1936 static void
1937 lacp_sm_assert_ntt(struct lacp_port *lp)
1938 {
1939 
1940 	lp->lp_flags |= LACP_PORT_NTT;
1941 }
1942 
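/*
 * lacp_run_timers: called from the periodic LACP tick to age the per-port
 * timers and invoke the matching expiry handler when one reaches zero.
 * The current_while timer is only decremented once at least a full second
 * has passed since the last received LACPDU, so steady PDU traffic keeps
 * the partner information from expiring.
 */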
1943 static void
1944 lacp_run_timers(struct lacp_port *lp)
1945 {
1946 	int i;
1947 	struct timeval time_diff;
1948 
1949 	for (i = 0; i < LACP_NTIMER; i++) {
1950 		KASSERT(lp->lp_timer[i] >= 0,
1951 		    ("invalid timer value %d", lp->lp_timer[i]));
1952 		if (lp->lp_timer[i] == 0) {
1953 			continue;
1954 		} else {
1955 			if (i == LACP_TIMER_CURRENT_WHILE) {
1956 				microuptime(&time_diff);
1957 				timevalsub(&time_diff, &lp->lp_last_lacpdu_rx);
1958 				if (time_diff.tv_sec) {
1959 					/* At least one sec has elapsed since last LACP packet. */
1960 					--lp->lp_timer[i];
1961 				}
1962 			} else {
1963 				--lp->lp_timer[i];
1964 			}
1965 
1966 			if ((lp->lp_timer[i] <= 0) && (lacp_timer_funcs[i])) {
1967 				(*lacp_timer_funcs[i])(lp);
1968 			}
1969 		}
1970 	}
1971 }
1972 
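/*
 * lacp_marker_input: handle a received marker frame.  A marker
 * information request from the partner is turned around in place (type
 * rewritten to MARKER_TYPE_RESPONSE, addresses refreshed) and echoed back
 * on the same port.  A marker response must match the request we last
 * sent on this port; once every port that was flushed has answered,
 * lsc_suppress_distributing is cleared and distribution resumes.
 */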
1973 int
1974 lacp_marker_input(struct lacp_port *lp, struct mbuf *m)
1975 {
1976 	struct lacp_softc *lsc = lp->lp_lsc;
1977 	struct lagg_port *lgp = lp->lp_lagg;
1978 	struct lacp_port *lp2;
1979 	struct markerdu *mdu;
1980 	int error = 0;
1981 	int pending = 0;
1982 
1983 	if (m->m_pkthdr.len != sizeof(*mdu)) {
1984 		goto bad;
1985 	}
1986 
1987 	if ((m->m_flags & M_MCAST) == 0) {
1988 		goto bad;
1989 	}
1990 
1991 	if (m->m_len < sizeof(*mdu)) {
1992 		m = m_pullup(m, sizeof(*mdu));
1993 		if (m == NULL) {
1994 			return (ENOMEM);
1995 		}
1996 	}
1997 
1998 	mdu = mtod(m, struct markerdu *);
1999 
2000 	if (memcmp(&mdu->mdu_eh.ether_dhost,
2001 	    &ethermulticastaddr_slowprotocols, ETHER_ADDR_LEN)) {
2002 		goto bad;
2003 	}
2004 
2005 	if (mdu->mdu_sph.sph_version != 1) {
2006 		goto bad;
2007 	}
2008 
2009 	switch (mdu->mdu_tlv.tlv_type) {
2010 	case MARKER_TYPE_INFO:
2011 		if (tlv_check(mdu, sizeof(*mdu), &mdu->mdu_tlv,
2012 		    marker_info_tlv_template, TRUE)) {
2013 			goto bad;
2014 		}
2015 		mdu->mdu_tlv.tlv_type = MARKER_TYPE_RESPONSE;
2016 		memcpy(&mdu->mdu_eh.ether_dhost,
2017 		    &ethermulticastaddr_slowprotocols, ETHER_ADDR_LEN);
2018 		memcpy(&mdu->mdu_eh.ether_shost,
2019 		    lgp->lp_lladdr, ETHER_ADDR_LEN);
2020 		error = lagg_enqueue(lp->lp_ifp, m);
2021 		break;
2022 
2023 	case MARKER_TYPE_RESPONSE:
2024 		if (tlv_check(mdu, sizeof(*mdu), &mdu->mdu_tlv,
2025 		    marker_response_tlv_template, TRUE)) {
2026 			goto bad;
2027 		}
2028 		LACP_DPRINTF((lp, "marker response, port=%u, sys=%6D, id=%u\n",
2029 		    ntohs(mdu->mdu_info.mi_rq_port), mdu->mdu_info.mi_rq_system,
2030 		    ":", ntohl(mdu->mdu_info.mi_rq_xid)));
2031 
2032 		/* Verify that it is the last marker we sent out */
2033 		if (memcmp(&mdu->mdu_info, &lp->lp_marker,
2034 		    sizeof(struct lacp_markerinfo)))
2035 			goto bad;
2036 
2037 		LACP_LOCK(lsc);
2038 		lp->lp_flags &= ~LACP_PORT_MARK;
2039 
2040 		if (lsc->lsc_suppress_distributing) {
2041 			/* Check if any ports are waiting for a response */
2042 			LIST_FOREACH(lp2, &lsc->lsc_ports, lp_next) {
2043 				if (lp2->lp_flags & LACP_PORT_MARK) {
2044 					pending = 1;
2045 					break;
2046 				}
2047 			}
2048 
2049 			if (pending == 0) {
2050 				/* All interface queues are clear */
2051 				LACP_DPRINTF((NULL, "queue flush complete\n"));
2052 				lsc->lsc_suppress_distributing = FALSE;
2053 			}
2054 		}
2055 		LACP_UNLOCK(lsc);
2056 		m_freem(m);
2057 		break;
2058 
2059 	default:
2060 		goto bad;
2061 	}
2062 
2063 	return (error);
2064 
2065 bad:
2066 	LACP_DPRINTF((lp, "bad marker frame\n"));
2067 	m_freem(m);
2068 	return (EINVAL);
2069 }
2070 
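/*
 * tlv_check: walk the TLV chain starting at 'tlv' inside a buffer of
 * 'size' bytes and verify that every entry, including the terminator,
 * fits in the buffer and matches the template's expected type (when
 * check_type is set) and length.  Returns 0 on success, EINVAL on any
 * truncation or mismatch.
 */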
2071 static int
2072 tlv_check(const void *p, size_t size, const struct tlvhdr *tlv,
2073     const struct tlv_template *tmpl, boolean_t check_type)
2074 {
2075 	while (/* CONSTCOND */ 1) {
2076 		if ((const char *)tlv - (const char *)p + sizeof(*tlv) > size) {
2077 			return (EINVAL);
2078 		}
2079 		if ((check_type && tlv->tlv_type != tmpl->tmpl_type) ||
2080 		    tlv->tlv_length != tmpl->tmpl_length) {
2081 			return (EINVAL);
2082 		}
2083 		if (tmpl->tmpl_type == 0) {
2084 			break;
2085 		}
2086 		tlv = (const struct tlvhdr *)
2087 		    ((const char *)tlv + tlv->tlv_length);
2088 		tmpl++;
2089 	}
2090 
2091 	return (0);
2092 }
2093 
2094 /* Debugging */
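/*
 * The formatting helpers below render LACP identifiers into caller
 * supplied buffers for the debug printfs, all in hexadecimal: a system ID
 * comes out as "<prio>,<mac>" (e.g. "8000,00-11-22-33-44-55"), a port ID
 * as "<prio>,<portno>", and a partner as "(<systemid>,<key>,<portid>)".
 */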
2095 const char *
2096 lacp_format_mac(const uint8_t *mac, char *buf, size_t buflen)
2097 {
2098 	snprintf(buf, buflen, "%02X-%02X-%02X-%02X-%02X-%02X",
2099 	    (int)mac[0],
2100 	    (int)mac[1],
2101 	    (int)mac[2],
2102 	    (int)mac[3],
2103 	    (int)mac[4],
2104 	    (int)mac[5]);
2105 
2106 	return (buf);
2107 }
2108 
2109 const char *
2110 lacp_format_systemid(const struct lacp_systemid *sysid,
2111     char *buf, size_t buflen)
2112 {
2113 	char macbuf[LACP_MACSTR_MAX+1];
2114 
2115 	snprintf(buf, buflen, "%04X,%s",
2116 	    ntohs(sysid->lsi_prio),
2117 	    lacp_format_mac(sysid->lsi_mac, macbuf, sizeof(macbuf)));
2118 
2119 	return (buf);
2120 }
2121 
2122 const char *
2123 lacp_format_portid(const struct lacp_portid *portid, char *buf, size_t buflen)
2124 {
2125 	snprintf(buf, buflen, "%04X,%04X",
2126 	    ntohs(portid->lpi_prio),
2127 	    ntohs(portid->lpi_portno));
2128 
2129 	return (buf);
2130 }
2131 
2132 const char *
2133 lacp_format_partner(const struct lacp_peerinfo *peer, char *buf, size_t buflen)
2134 {
2135 	char sysid[LACP_SYSTEMIDSTR_MAX+1];
2136 	char portid[LACP_PORTIDSTR_MAX+1];
2137 
2138 	snprintf(buf, buflen, "(%s,%04X,%s)",
2139 	    lacp_format_systemid(&peer->lip_systemid, sysid, sizeof(sysid)),
2140 	    ntohs(peer->lip_key),
2141 	    lacp_format_portid(&peer->lip_portid, portid, sizeof(portid)));
2142 
2143 	return (buf);
2144 }
2145 
2146 const char *
2147 lacp_format_lagid(const struct lacp_peerinfo *a,
2148     const struct lacp_peerinfo *b, char *buf, size_t buflen)
2149 {
2150 	char astr[LACP_PARTNERSTR_MAX+1];
2151 	char bstr[LACP_PARTNERSTR_MAX+1];
2152 
2153 #if 0
2154 	/*
2155 	 * there's a convention to display the lower-numbered peer
2156 	 * on the left.
2157 	 */
2158 
2159 	if (lacp_compare_peerinfo(a, b) > 0) {
2160 		const struct lacp_peerinfo *t;
2161 
2162 		t = a;
2163 		a = b;
2164 		b = t;
2165 	}
2166 #endif
2167 
2168 	snprintf(buf, buflen, "[%s,%s]",
2169 	    lacp_format_partner(a, astr, sizeof(astr)),
2170 	    lacp_format_partner(b, bstr, sizeof(bstr)));
2171 
2172 	return (buf);
2173 }
2174 
2175 const char *
2176 lacp_format_lagid_aggregator(const struct lacp_aggregator *la,
2177     char *buf, size_t buflen)
2178 {
2179 	if (la == NULL) {
2180 		return ("(none)");
2181 	}
2182 
2183 	return (lacp_format_lagid(&la->la_actor, &la->la_partner, buf, buflen));
2184 }
2185 
2186 const char *
2187 lacp_format_state(uint8_t state, char *buf, size_t buflen)
2188 {
2189 	snprintf(buf, buflen, "%b", state, LACP_STATE_BITS);
2190 	return (buf);
2191 }
2192 
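/*
 * lacp_dump_lacpdu: debug helper that prints the actor and partner
 * information, their state bits, and the collector max delay of a LACPDU
 * to the console.
 */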
2193 static void
2194 lacp_dump_lacpdu(const struct lacpdu *du)
2195 {
2196 	char buf[LACP_PARTNERSTR_MAX+1];
2197 	char buf2[LACP_STATESTR_MAX+1];
2198 
2199 	printf("actor=%s\n",
2200 	    lacp_format_partner(&du->ldu_actor, buf, sizeof(buf)));
2201 	printf("actor.state=%s\n",
2202 	    lacp_format_state(du->ldu_actor.lip_state, buf2, sizeof(buf2)));
2203 	printf("partner=%s\n",
2204 	    lacp_format_partner(&du->ldu_partner, buf, sizeof(buf)));
2205 	printf("partner.state=%s\n",
2206 	    lacp_format_state(du->ldu_partner.lip_state, buf2, sizeof(buf2)));
2207 
2208 	printf("maxdelay=%d\n", ntohs(du->ldu_collector.lci_maxdelay));
2209 }
2210 
2211 static void
2212 lacp_dprintf(const struct lacp_port *lp, const char *fmt, ...)
2213 {
2214 	va_list va;
2215 
2216 	if (lp) {
2217 		printf("%s: ", lp->lp_ifp->if_xname);
2218 	}
2219 
2220 	va_start(va, fmt);
2221 	vprintf(fmt, va);
2222 	va_end(va);
2223 }
2224