xref: /freebsd/sys/net/ieee8023ad_lacp.c (revision 640235e2c2ba32947f7c59d168437ffa1280f1e6)
1 /*	$NetBSD: ieee8023ad_lacp.c,v 1.3 2005/12/11 12:24:54 christos Exp $	*/
2 
3 /*-
4  * Copyright (c)2005 YAMAMOTO Takashi,
5  * Copyright (c)2008 Andrew Thompson <thompsa@FreeBSD.org>
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29 
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32 
33 #include <sys/param.h>
34 #include <sys/callout.h>
35 #include <sys/eventhandler.h>
36 #include <sys/mbuf.h>
37 #include <sys/systm.h>
38 #include <sys/malloc.h>
39 #include <sys/kernel.h> /* hz */
40 #include <sys/socket.h> /* for net/if.h */
41 #include <sys/sockio.h>
42 #include <sys/sysctl.h>
43 #include <machine/stdarg.h>
44 #include <sys/lock.h>
45 #include <sys/rwlock.h>
46 #include <sys/taskqueue.h>
47 
48 #include <net/if.h>
49 #include <net/if_var.h>
50 #include <net/if_dl.h>
51 #include <net/ethernet.h>
52 #include <net/if_media.h>
53 #include <net/if_types.h>
54 
55 #include <net/if_lagg.h>
56 #include <net/ieee8023ad_lacp.h>
57 
58 /*
59  * actor system priority and port priority.
60  * XXX should be configurable.
61  */
62 
63 #define	LACP_SYSTEM_PRIO	0x8000
64 #define	LACP_PORT_PRIO		0x8000
65 
66 const uint8_t ethermulticastaddr_slowprotocols[ETHER_ADDR_LEN] =
67     { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x02 };
68 
69 static const struct tlv_template lacp_info_tlv_template[] = {
70 	{ LACP_TYPE_ACTORINFO,
71 	    sizeof(struct tlvhdr) + sizeof(struct lacp_peerinfo) },
72 	{ LACP_TYPE_PARTNERINFO,
73 	    sizeof(struct tlvhdr) + sizeof(struct lacp_peerinfo) },
74 	{ LACP_TYPE_COLLECTORINFO,
75 	    sizeof(struct tlvhdr) + sizeof(struct lacp_collectorinfo) },
76 	{ 0, 0 },
77 };
78 
79 static const struct tlv_template marker_info_tlv_template[] = {
80 	{ MARKER_TYPE_INFO,
81 	    sizeof(struct tlvhdr) + sizeof(struct lacp_markerinfo) },
82 	{ 0, 0 },
83 };
84 
85 static const struct tlv_template marker_response_tlv_template[] = {
86 	{ MARKER_TYPE_RESPONSE,
87 	    sizeof(struct tlvhdr) + sizeof(struct lacp_markerinfo) },
88 	{ 0, 0 },
89 };
90 
91 typedef void (*lacp_timer_func_t)(struct lacp_port *);
92 
93 static void	lacp_fill_actorinfo(struct lacp_port *, struct lacp_peerinfo *);
94 static void	lacp_fill_markerinfo(struct lacp_port *,
95 		    struct lacp_markerinfo *);
96 
97 static uint64_t	lacp_aggregator_bandwidth(struct lacp_aggregator *);
98 static void	lacp_suppress_distributing(struct lacp_softc *,
99 		    struct lacp_aggregator *);
100 static void	lacp_transit_expire(void *);
101 static void	lacp_update_portmap(struct lacp_softc *);
102 static void	lacp_select_active_aggregator(struct lacp_softc *);
103 static uint16_t	lacp_compose_key(struct lacp_port *);
104 static int	tlv_check(const void *, size_t, const struct tlvhdr *,
105 		    const struct tlv_template *, boolean_t);
106 static void	lacp_tick(void *);
107 
108 static void	lacp_fill_aggregator_id(struct lacp_aggregator *,
109 		    const struct lacp_port *);
110 static void	lacp_fill_aggregator_id_peer(struct lacp_peerinfo *,
111 		    const struct lacp_peerinfo *);
112 static int	lacp_aggregator_is_compatible(const struct lacp_aggregator *,
113 		    const struct lacp_port *);
114 static int	lacp_peerinfo_is_compatible(const struct lacp_peerinfo *,
115 		    const struct lacp_peerinfo *);
116 
117 static struct lacp_aggregator *lacp_aggregator_get(struct lacp_softc *,
118 		    struct lacp_port *);
119 static void	lacp_aggregator_addref(struct lacp_softc *,
120 		    struct lacp_aggregator *);
121 static void	lacp_aggregator_delref(struct lacp_softc *,
122 		    struct lacp_aggregator *);
123 
124 /* receive machine */
125 
126 static int	lacp_pdu_input(struct lacp_port *, struct mbuf *);
127 static int	lacp_marker_input(struct lacp_port *, struct mbuf *);
128 static void	lacp_sm_rx(struct lacp_port *, const struct lacpdu *);
129 static void	lacp_sm_rx_timer(struct lacp_port *);
130 static void	lacp_sm_rx_set_expired(struct lacp_port *);
131 static void	lacp_sm_rx_update_ntt(struct lacp_port *,
132 		    const struct lacpdu *);
133 static void	lacp_sm_rx_record_pdu(struct lacp_port *,
134 		    const struct lacpdu *);
135 static void	lacp_sm_rx_update_selected(struct lacp_port *,
136 		    const struct lacpdu *);
137 static void	lacp_sm_rx_record_default(struct lacp_port *);
138 static void	lacp_sm_rx_update_default_selected(struct lacp_port *);
139 static void	lacp_sm_rx_update_selected_from_peerinfo(struct lacp_port *,
140 		    const struct lacp_peerinfo *);
141 
142 /* mux machine */
143 
144 static void	lacp_sm_mux(struct lacp_port *);
145 static void	lacp_set_mux(struct lacp_port *, enum lacp_mux_state);
146 static void	lacp_sm_mux_timer(struct lacp_port *);
147 
148 /* periodic transmit machine */
149 
150 static void	lacp_sm_ptx_update_timeout(struct lacp_port *, uint8_t);
151 static void	lacp_sm_ptx_tx_schedule(struct lacp_port *);
152 static void	lacp_sm_ptx_timer(struct lacp_port *);
153 
154 /* transmit machine */
155 
156 static void	lacp_sm_tx(struct lacp_port *);
157 static void	lacp_sm_assert_ntt(struct lacp_port *);
158 
159 static void	lacp_run_timers(struct lacp_port *);
160 static int	lacp_compare_peerinfo(const struct lacp_peerinfo *,
161 		    const struct lacp_peerinfo *);
162 static int	lacp_compare_systemid(const struct lacp_systemid *,
163 		    const struct lacp_systemid *);
164 static void	lacp_port_enable(struct lacp_port *);
165 static void	lacp_port_disable(struct lacp_port *);
166 static void	lacp_select(struct lacp_port *);
167 static void	lacp_unselect(struct lacp_port *);
168 static void	lacp_disable_collecting(struct lacp_port *);
169 static void	lacp_enable_collecting(struct lacp_port *);
170 static void	lacp_disable_distributing(struct lacp_port *);
171 static void	lacp_enable_distributing(struct lacp_port *);
172 static int	lacp_xmit_lacpdu(struct lacp_port *);
173 static int	lacp_xmit_marker(struct lacp_port *);
174 
175 /* Debugging */
176 
177 static void	lacp_dump_lacpdu(const struct lacpdu *);
178 static const char *lacp_format_partner(const struct lacp_peerinfo *, char *,
179 		    size_t);
180 static const char *lacp_format_lagid(const struct lacp_peerinfo *,
181 		    const struct lacp_peerinfo *, char *, size_t);
182 static const char *lacp_format_lagid_aggregator(const struct lacp_aggregator *,
183 		    char *, size_t);
184 static const char *lacp_format_state(uint8_t, char *, size_t);
185 static const char *lacp_format_mac(const uint8_t *, char *, size_t);
186 static const char *lacp_format_systemid(const struct lacp_systemid *, char *,
187 		    size_t);
188 static const char *lacp_format_portid(const struct lacp_portid *, char *,
189 		    size_t);
190 static void	lacp_dprintf(const struct lacp_port *, const char *, ...)
191 		    __attribute__((__format__(__printf__, 2, 3)));
192 
193 static VNET_DEFINE(int, lacp_debug);
194 #define	V_lacp_debug	VNET(lacp_debug)
195 SYSCTL_NODE(_net_link_lagg, OID_AUTO, lacp, CTLFLAG_RD, 0, "ieee802.3ad");
196 SYSCTL_INT(_net_link_lagg_lacp, OID_AUTO, debug, CTLFLAG_RWTUN | CTLFLAG_VNET,
197     &VNET_NAME(lacp_debug), 0, "Enable LACP debug logging (1=debug, 2=trace)");
198 
199 static VNET_DEFINE(int, lacp_default_strict_mode) = 1;
200 SYSCTL_INT(_net_link_lagg_lacp, OID_AUTO, default_strict_mode, CTLFLAG_RWTUN,
201     &VNET_NAME(lacp_default_strict_mode), 0,
202     "LACP strict protocol compliance default");
203 
204 #define LACP_DPRINTF(a) if (V_lacp_debug & 0x01) { lacp_dprintf a ; }
205 #define LACP_TRACE(a) if (V_lacp_debug & 0x02) { lacp_dprintf(a,"%s\n",__func__); }
206 #define LACP_TPRINTF(a) if (V_lacp_debug & 0x04) { lacp_dprintf a ; }
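
/*
 * The debug sysctl (net.link.lagg.lacp.debug) is a bit mask: 0x01 enables
 * LACP_DPRINTF, 0x02 enables the per-function LACP_TRACE output and 0x04
 * enables LACP_TPRINTF (used by the TX/RX drop-test hooks below).  For
 * example, setting net.link.lagg.lacp.debug=3 turns on both debug and
 * trace logging.
 */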
207 
208 /*
209  * partner administration variables.
210  * XXX should be configurable.
211  */
212 
213 static const struct lacp_peerinfo lacp_partner_admin_optimistic = {
214 	.lip_systemid = { .lsi_prio = 0xffff },
215 	.lip_portid = { .lpi_prio = 0xffff },
216 	.lip_state = LACP_STATE_SYNC | LACP_STATE_AGGREGATION |
217 	    LACP_STATE_COLLECTING | LACP_STATE_DISTRIBUTING,
218 };
219 
220 static const struct lacp_peerinfo lacp_partner_admin_strict = {
221 	.lip_systemid = { .lsi_prio = 0xffff },
222 	.lip_portid = { .lpi_prio = 0xffff },
223 	.lip_state = 0,
224 };
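
/*
 * In strict mode a defaulted partner advertises no state bits, so a port
 * without a real LACP peer never reports SYNC and never reaches the
 * COLLECTING/DISTRIBUTING mux states.  The optimistic variant instead
 * pretends the defaulted partner is in sync and collecting/distributing,
 * which lets a port carry traffic even when no partner is present.
 */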
225 
226 static const lacp_timer_func_t lacp_timer_funcs[LACP_NTIMER] = {
227 	[LACP_TIMER_CURRENT_WHILE] = lacp_sm_rx_timer,
228 	[LACP_TIMER_PERIODIC] = lacp_sm_ptx_timer,
229 	[LACP_TIMER_WAIT_WHILE] = lacp_sm_mux_timer,
230 };
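
/*
 * Timers are one-second-granularity down counters: lacp_tick() runs once
 * per second and lacp_run_timers() decrements every armed timer, calling
 * the matching handler from this table when a timer reaches zero.
 */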
231 
232 struct mbuf *
233 lacp_input(struct lagg_port *lgp, struct mbuf *m)
234 {
235 	struct lacp_port *lp = LACP_PORT(lgp);
236 	uint8_t subtype;
237 
238 	if (m->m_pkthdr.len < sizeof(struct ether_header) + sizeof(subtype)) {
239 		m_freem(m);
240 		return (NULL);
241 	}
242 
243 	m_copydata(m, sizeof(struct ether_header), sizeof(subtype), &subtype);
244 	switch (subtype) {
245 	case SLOWPROTOCOLS_SUBTYPE_LACP:
246 		lacp_pdu_input(lp, m);
247 		return (NULL);
248 
249 	case SLOWPROTOCOLS_SUBTYPE_MARKER:
250 		lacp_marker_input(lp, m);
251 		return (NULL);
252 	}
253 
254 	/* Not a subtype we are interested in */
255 	return (m);
256 }
257 
258 /*
259  * lacp_pdu_input: process lacpdu
260  */
261 static int
262 lacp_pdu_input(struct lacp_port *lp, struct mbuf *m)
263 {
264 	struct lacp_softc *lsc = lp->lp_lsc;
265 	struct lacpdu *du;
266 	int error = 0;
267 
268 	if (m->m_pkthdr.len != sizeof(*du)) {
269 		goto bad;
270 	}
271 
272 	if ((m->m_flags & M_MCAST) == 0) {
273 		goto bad;
274 	}
275 
276 	if (m->m_len < sizeof(*du)) {
277 		m = m_pullup(m, sizeof(*du));
278 		if (m == NULL) {
279 			return (ENOMEM);
280 		}
281 	}
282 
283 	du = mtod(m, struct lacpdu *);
284 
285 	if (memcmp(&du->ldu_eh.ether_dhost,
286 	    &ethermulticastaddr_slowprotocols, ETHER_ADDR_LEN)) {
287 		goto bad;
288 	}
289 
290 	/*
291 	 * ignore the version for compatibility with
292 	 * future protocol revisions.
293 	 */
294 #if 0
295 	if (du->ldu_sph.sph_version != 1) {
296 		goto bad;
297 	}
298 #endif
299 
300 	/*
301 	 * ignore tlv types for compatibility with
302 	 * future protocol revisions.
303 	 */
304 	if (tlv_check(du, sizeof(*du), &du->ldu_tlv_actor,
305 	    lacp_info_tlv_template, FALSE)) {
306 		goto bad;
307 	}
308 
309 	if (V_lacp_debug > 0) {
310 		lacp_dprintf(lp, "lacpdu receive\n");
311 		lacp_dump_lacpdu(du);
312 	}
313 
314 	if ((1 << lp->lp_ifp->if_dunit) & lp->lp_lsc->lsc_debug.lsc_rx_test) {
315 		LACP_TPRINTF((lp, "Dropping RX PDU\n"));
316 		goto bad;
317 	}
318 
319 	LACP_LOCK(lsc);
320 	lacp_sm_rx(lp, du);
321 	LACP_UNLOCK(lsc);
322 
323 	m_freem(m);
324 	return (error);
325 
326 bad:
327 	m_freem(m);
328 	return (EINVAL);
329 }
330 
331 static void
332 lacp_fill_actorinfo(struct lacp_port *lp, struct lacp_peerinfo *info)
333 {
334 	struct lagg_port *lgp = lp->lp_lagg;
335 	struct lagg_softc *sc = lgp->lp_softc;
336 
337 	info->lip_systemid.lsi_prio = htons(LACP_SYSTEM_PRIO);
338 	memcpy(&info->lip_systemid.lsi_mac,
339 	    IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
340 	info->lip_portid.lpi_prio = htons(LACP_PORT_PRIO);
341 	info->lip_portid.lpi_portno = htons(lp->lp_ifp->if_index);
342 	info->lip_state = lp->lp_state;
343 }
344 
345 static void
346 lacp_fill_markerinfo(struct lacp_port *lp, struct lacp_markerinfo *info)
347 {
348 	struct ifnet *ifp = lp->lp_ifp;
349 
350 	/* Fill in the port index and system id (encoded as the MAC) */
351 	info->mi_rq_port = htons(ifp->if_index);
352 	memcpy(&info->mi_rq_system, lp->lp_systemid.lsi_mac, ETHER_ADDR_LEN);
353 	info->mi_rq_xid = htonl(0);
354 }
355 
356 static int
357 lacp_xmit_lacpdu(struct lacp_port *lp)
358 {
359 	struct lagg_port *lgp = lp->lp_lagg;
360 	struct mbuf *m;
361 	struct lacpdu *du;
362 	int error;
363 
364 	LACP_LOCK_ASSERT(lp->lp_lsc);
365 
366 	m = m_gethdr(M_NOWAIT, MT_DATA);
367 	if (m == NULL) {
368 		return (ENOMEM);
369 	}
370 	m->m_len = m->m_pkthdr.len = sizeof(*du);
371 
372 	du = mtod(m, struct lacpdu *);
373 	memset(du, 0, sizeof(*du));
374 
375 	memcpy(&du->ldu_eh.ether_dhost, ethermulticastaddr_slowprotocols,
376 	    ETHER_ADDR_LEN);
377 	memcpy(&du->ldu_eh.ether_shost, lgp->lp_lladdr, ETHER_ADDR_LEN);
378 	du->ldu_eh.ether_type = htons(ETHERTYPE_SLOW);
379 
380 	du->ldu_sph.sph_subtype = SLOWPROTOCOLS_SUBTYPE_LACP;
381 	du->ldu_sph.sph_version = 1;
382 
383 	TLV_SET(&du->ldu_tlv_actor, LACP_TYPE_ACTORINFO, sizeof(du->ldu_actor));
384 	du->ldu_actor = lp->lp_actor;
385 
386 	TLV_SET(&du->ldu_tlv_partner, LACP_TYPE_PARTNERINFO,
387 	    sizeof(du->ldu_partner));
388 	du->ldu_partner = lp->lp_partner;
389 
390 	TLV_SET(&du->ldu_tlv_collector, LACP_TYPE_COLLECTORINFO,
391 	    sizeof(du->ldu_collector));
392 	du->ldu_collector.lci_maxdelay = 0;
393 
394 	if (V_lacp_debug > 0) {
395 		lacp_dprintf(lp, "lacpdu transmit\n");
396 		lacp_dump_lacpdu(du);
397 	}
398 
399 	m->m_flags |= M_MCAST;
400 
401 	/*
402 	 * XXX should use a higher priority queue,
403 	 * otherwise network congestion can break aggregation.
404 	 */
405 
406 	error = lagg_enqueue(lp->lp_ifp, m);
407 	return (error);
408 }
409 
410 static int
411 lacp_xmit_marker(struct lacp_port *lp)
412 {
413 	struct lagg_port *lgp = lp->lp_lagg;
414 	struct mbuf *m;
415 	struct markerdu *mdu;
416 	int error;
417 
418 	LACP_LOCK_ASSERT(lp->lp_lsc);
419 
420 	m = m_gethdr(M_NOWAIT, MT_DATA);
421 	if (m == NULL) {
422 		return (ENOMEM);
423 	}
424 	m->m_len = m->m_pkthdr.len = sizeof(*mdu);
425 
426 	mdu = mtod(m, struct markerdu *);
427 	memset(mdu, 0, sizeof(*mdu));
428 
429 	memcpy(&mdu->mdu_eh.ether_dhost, ethermulticastaddr_slowprotocols,
430 	    ETHER_ADDR_LEN);
431 	memcpy(&mdu->mdu_eh.ether_shost, lgp->lp_lladdr, ETHER_ADDR_LEN);
432 	mdu->mdu_eh.ether_type = htons(ETHERTYPE_SLOW);
433 
434 	mdu->mdu_sph.sph_subtype = SLOWPROTOCOLS_SUBTYPE_MARKER;
435 	mdu->mdu_sph.sph_version = 1;
436 
437 	/* Bump the transaction id and copy over the marker info */
438 	lp->lp_marker.mi_rq_xid = htonl(ntohl(lp->lp_marker.mi_rq_xid) + 1);
439 	TLV_SET(&mdu->mdu_tlv, MARKER_TYPE_INFO, sizeof(mdu->mdu_info));
440 	mdu->mdu_info = lp->lp_marker;
441 
442 	LACP_DPRINTF((lp, "marker transmit, port=%u, sys=%6D, id=%u\n",
443 	    ntohs(mdu->mdu_info.mi_rq_port), mdu->mdu_info.mi_rq_system, ":",
444 	    ntohl(mdu->mdu_info.mi_rq_xid)));
445 
446 	m->m_flags |= M_MCAST;
447 	error = lagg_enqueue(lp->lp_ifp, m);
448 	return (error);
449 }
450 
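/*
 * lacp_linkstate: re-evaluate a port when its media or link state changes.
 * Only an active, full duplex Ethernet link may take part in aggregation;
 * anything else detaches the port.  A change of state or key moves the
 * port back to UNSELECTED so it is re-selected on the next tick.
 */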
451 void
452 lacp_linkstate(struct lagg_port *lgp)
453 {
454 	struct lacp_port *lp = LACP_PORT(lgp);
455 	struct lacp_softc *lsc = lp->lp_lsc;
456 	struct ifnet *ifp = lgp->lp_ifp;
457 	struct ifmediareq ifmr;
458 	int error = 0;
459 	u_int media;
460 	uint8_t old_state;
461 	uint16_t old_key;
462 
463 	bzero((char *)&ifmr, sizeof(ifmr));
464 	error = (*ifp->if_ioctl)(ifp, SIOCGIFMEDIA, (caddr_t)&ifmr);
465 	if (error != 0)
466 		return;
467 
468 	LACP_LOCK(lsc);
469 	media = ifmr.ifm_active;
470 	LACP_DPRINTF((lp, "media changed 0x%x -> 0x%x, ether = %d, fdx = %d, "
471 	    "link = %d\n", lp->lp_media, media, IFM_TYPE(media) == IFM_ETHER,
472 	    (media & IFM_FDX) != 0, ifp->if_link_state == LINK_STATE_UP));
473 	old_state = lp->lp_state;
474 	old_key = lp->lp_key;
475 
476 	lp->lp_media = media;
477 	/*
478 	 * If the port is not an active, full duplex Ethernet link then it
479 	 * cannot be aggregated.
480 	 */
481 	if (IFM_TYPE(media) != IFM_ETHER || (media & IFM_FDX) == 0 ||
482 	    ifp->if_link_state != LINK_STATE_UP) {
483 		lacp_port_disable(lp);
484 	} else {
485 		lacp_port_enable(lp);
486 	}
487 	lp->lp_key = lacp_compose_key(lp);
488 
489 	if (old_state != lp->lp_state || old_key != lp->lp_key) {
490 		LACP_DPRINTF((lp, "-> UNSELECTED\n"));
491 		lp->lp_selected = LACP_UNSELECTED;
492 	}
493 	LACP_UNLOCK(lsc);
494 }
495 
496 static void
497 lacp_tick(void *arg)
498 {
499 	struct lacp_softc *lsc = arg;
500 	struct lacp_port *lp;
501 
502 	LIST_FOREACH(lp, &lsc->lsc_ports, lp_next) {
503 		if ((lp->lp_state & LACP_STATE_AGGREGATION) == 0)
504 			continue;
505 
506 		CURVNET_SET(lp->lp_ifp->if_vnet);
507 		lacp_run_timers(lp);
508 
509 		lacp_select(lp);
510 		lacp_sm_mux(lp);
511 		lacp_sm_tx(lp);
512 		lacp_sm_ptx_tx_schedule(lp);
513 		CURVNET_RESTORE();
514 	}
515 	callout_reset(&lsc->lsc_callout, hz, lacp_tick, lsc);
516 }
517 
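/*
 * lacp_port_create: attach a port to the LACP machinery: join the slow
 * protocols multicast group, initialize the actor and marker information
 * and start the receive machine in the EXPIRED state.
 */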
518 int
519 lacp_port_create(struct lagg_port *lgp)
520 {
521 	struct lagg_softc *sc = lgp->lp_softc;
522 	struct lacp_softc *lsc = LACP_SOFTC(sc);
523 	struct lacp_port *lp;
524 	struct ifnet *ifp = lgp->lp_ifp;
525 	struct sockaddr_dl sdl;
526 	struct ifmultiaddr *rifma = NULL;
527 	int error;
528 
529 	boolean_t active = TRUE; /* XXX should be configurable */
530 	boolean_t fast = FALSE; /* Configurable via ioctl */
531 
532 	link_init_sdl(ifp, (struct sockaddr *)&sdl, IFT_ETHER);
533 	sdl.sdl_alen = ETHER_ADDR_LEN;
534 
535 	bcopy(&ethermulticastaddr_slowprotocols,
536 	    LLADDR(&sdl), ETHER_ADDR_LEN);
537 	error = if_addmulti(ifp, (struct sockaddr *)&sdl, &rifma);
538 	if (error) {
539 		printf("%s: ADDMULTI failed on %s\n", __func__,
540 		    lgp->lp_ifp->if_xname);
541 		return (error);
542 	}
543 
544 	lp = malloc(sizeof(struct lacp_port),
545 	    M_DEVBUF, M_NOWAIT|M_ZERO);
546 	if (lp == NULL)
547 		return (ENOMEM);
548 
549 	LACP_LOCK(lsc);
550 	lgp->lp_psc = lp;
551 	lp->lp_ifp = ifp;
552 	lp->lp_lagg = lgp;
553 	lp->lp_lsc = lsc;
554 	lp->lp_ifma = rifma;
555 
556 	LIST_INSERT_HEAD(&lsc->lsc_ports, lp, lp_next);
557 
558 	lacp_fill_actorinfo(lp, &lp->lp_actor);
559 	lacp_fill_markerinfo(lp, &lp->lp_marker);
560 	lp->lp_state =
561 	    (active ? LACP_STATE_ACTIVITY : 0) |
562 	    (fast ? LACP_STATE_TIMEOUT : 0);
563 	lp->lp_aggregator = NULL;
564 	lacp_sm_rx_set_expired(lp);
565 	LACP_UNLOCK(lsc);
566 	lacp_linkstate(lgp);
567 
568 	return (0);
569 }
570 
571 void
572 lacp_port_destroy(struct lagg_port *lgp)
573 {
574 	struct lacp_port *lp = LACP_PORT(lgp);
575 	struct lacp_softc *lsc = lp->lp_lsc;
576 	int i;
577 
578 	LACP_LOCK(lsc);
579 	for (i = 0; i < LACP_NTIMER; i++) {
580 		LACP_TIMER_DISARM(lp, i);
581 	}
582 
583 	lacp_disable_collecting(lp);
584 	lacp_disable_distributing(lp);
585 	lacp_unselect(lp);
586 
587 	LIST_REMOVE(lp, lp_next);
588 	LACP_UNLOCK(lsc);
589 
590 	/* The address may have already been removed by if_purgemaddrs() */
591 	if (!lgp->lp_detaching)
592 		if_delmulti_ifma(lp->lp_ifma);
593 
594 	free(lp, M_DEVBUF);
595 }
596 
597 void
598 lacp_req(struct lagg_softc *sc, void *data)
599 {
600 	struct lacp_opreq *req = (struct lacp_opreq *)data;
601 	struct lacp_softc *lsc = LACP_SOFTC(sc);
602 	struct lacp_aggregator *la;
603 
604 	bzero(req, sizeof(struct lacp_opreq));
605 
606 	/*
607 	 * If the LACP softc is NULL, return with the opreq structure full of
608 	 * zeros.  It is normal for the softc to be NULL while the lagg is
609 	 * being destroyed.
610 	 */
611 	if (lsc == NULL)
612 		return;
613 
614 	LACP_LOCK(lsc);
615 	la = lsc->lsc_active_aggregator;
616 	if (la != NULL) {
617 		req->actor_prio = ntohs(la->la_actor.lip_systemid.lsi_prio);
618 		memcpy(&req->actor_mac, &la->la_actor.lip_systemid.lsi_mac,
619 		    ETHER_ADDR_LEN);
620 		req->actor_key = ntohs(la->la_actor.lip_key);
621 		req->actor_portprio = ntohs(la->la_actor.lip_portid.lpi_prio);
622 		req->actor_portno = ntohs(la->la_actor.lip_portid.lpi_portno);
623 		req->actor_state = la->la_actor.lip_state;
624 
625 		req->partner_prio = ntohs(la->la_partner.lip_systemid.lsi_prio);
626 		memcpy(&req->partner_mac, &la->la_partner.lip_systemid.lsi_mac,
627 		    ETHER_ADDR_LEN);
628 		req->partner_key = ntohs(la->la_partner.lip_key);
629 		req->partner_portprio = ntohs(la->la_partner.lip_portid.lpi_prio);
630 		req->partner_portno = ntohs(la->la_partner.lip_portid.lpi_portno);
631 		req->partner_state = la->la_partner.lip_state;
632 	}
633 	LACP_UNLOCK(lsc);
634 }
635 
636 void
637 lacp_portreq(struct lagg_port *lgp, void *data)
638 {
639 	struct lacp_opreq *req = (struct lacp_opreq *)data;
640 	struct lacp_port *lp = LACP_PORT(lgp);
641 	struct lacp_softc *lsc = lp->lp_lsc;
642 
643 	LACP_LOCK(lsc);
644 	req->actor_prio = ntohs(lp->lp_actor.lip_systemid.lsi_prio);
645 	memcpy(&req->actor_mac, &lp->lp_actor.lip_systemid.lsi_mac,
646 	    ETHER_ADDR_LEN);
647 	req->actor_key = ntohs(lp->lp_actor.lip_key);
648 	req->actor_portprio = ntohs(lp->lp_actor.lip_portid.lpi_prio);
649 	req->actor_portno = ntohs(lp->lp_actor.lip_portid.lpi_portno);
650 	req->actor_state = lp->lp_actor.lip_state;
651 
652 	req->partner_prio = ntohs(lp->lp_partner.lip_systemid.lsi_prio);
653 	memcpy(&req->partner_mac, &lp->lp_partner.lip_systemid.lsi_mac,
654 	    ETHER_ADDR_LEN);
655 	req->partner_key = ntohs(lp->lp_partner.lip_key);
656 	req->partner_portprio = ntohs(lp->lp_partner.lip_portid.lpi_prio);
657 	req->partner_portno = ntohs(lp->lp_partner.lip_portid.lpi_portno);
658 	req->partner_state = lp->lp_partner.lip_state;
659 	LACP_UNLOCK(lsc);
660 }
661 
662 static void
663 lacp_disable_collecting(struct lacp_port *lp)
664 {
665 	LACP_DPRINTF((lp, "collecting disabled\n"));
666 	lp->lp_state &= ~LACP_STATE_COLLECTING;
667 }
668 
669 static void
670 lacp_enable_collecting(struct lacp_port *lp)
671 {
672 	LACP_DPRINTF((lp, "collecting enabled\n"));
673 	lp->lp_state |= LACP_STATE_COLLECTING;
674 }
675 
676 static void
677 lacp_disable_distributing(struct lacp_port *lp)
678 {
679 	struct lacp_aggregator *la = lp->lp_aggregator;
680 	struct lacp_softc *lsc = lp->lp_lsc;
681 	struct lagg_softc *sc = lsc->lsc_softc;
682 	char buf[LACP_LAGIDSTR_MAX+1];
683 
684 	LACP_LOCK_ASSERT(lsc);
685 
686 	if (la == NULL || (lp->lp_state & LACP_STATE_DISTRIBUTING) == 0) {
687 		return;
688 	}
689 
690 	KASSERT(!TAILQ_EMPTY(&la->la_ports), ("no aggregator ports"));
691 	KASSERT(la->la_nports > 0, ("nports invalid (%d)", la->la_nports));
692 	KASSERT(la->la_refcnt >= la->la_nports, ("aggregator refcnt invalid"));
693 
694 	LACP_DPRINTF((lp, "disable distributing on aggregator %s, "
695 	    "nports %d -> %d\n",
696 	    lacp_format_lagid_aggregator(la, buf, sizeof(buf)),
697 	    la->la_nports, la->la_nports - 1));
698 
699 	TAILQ_REMOVE(&la->la_ports, lp, lp_dist_q);
700 	la->la_nports--;
701 	sc->sc_active = la->la_nports;
702 
703 	if (lsc->lsc_active_aggregator == la) {
704 		lacp_suppress_distributing(lsc, la);
705 		lacp_select_active_aggregator(lsc);
706 		/* regenerate the port map, the active aggregator has changed */
707 		lacp_update_portmap(lsc);
708 	}
709 
710 	lp->lp_state &= ~LACP_STATE_DISTRIBUTING;
711 }
712 
713 static void
714 lacp_enable_distributing(struct lacp_port *lp)
715 {
716 	struct lacp_aggregator *la = lp->lp_aggregator;
717 	struct lacp_softc *lsc = lp->lp_lsc;
718 	struct lagg_softc *sc = lsc->lsc_softc;
719 	char buf[LACP_LAGIDSTR_MAX+1];
720 
721 	LACP_LOCK_ASSERT(lsc);
722 
723 	if ((lp->lp_state & LACP_STATE_DISTRIBUTING) != 0) {
724 		return;
725 	}
726 
727 	LACP_DPRINTF((lp, "enable distributing on aggregator %s, "
728 	    "nports %d -> %d\n",
729 	    lacp_format_lagid_aggregator(la, buf, sizeof(buf)),
730 	    la->la_nports, la->la_nports + 1));
731 
732 	KASSERT(la->la_refcnt > la->la_nports, ("aggregator refcnt invalid"));
733 	TAILQ_INSERT_HEAD(&la->la_ports, lp, lp_dist_q);
734 	la->la_nports++;
735 	sc->sc_active = la->la_nports;
736 
737 	lp->lp_state |= LACP_STATE_DISTRIBUTING;
738 
739 	if (lsc->lsc_active_aggregator == la) {
740 		lacp_suppress_distributing(lsc, la);
741 		lacp_update_portmap(lsc);
742 	} else
743 		/* try to become the active aggregator */
744 		lacp_select_active_aggregator(lsc);
745 }
746 
747 static void
748 lacp_transit_expire(void *vp)
749 {
750 	struct lacp_softc *lsc = vp;
751 
752 	LACP_LOCK_ASSERT(lsc);
753 
754 	CURVNET_SET(lsc->lsc_softc->sc_ifp->if_vnet);
755 	LACP_TRACE(NULL);
756 	CURVNET_RESTORE();
757 
758 	lsc->lsc_suppress_distributing = FALSE;
759 }
760 
761 void
762 lacp_attach(struct lagg_softc *sc)
763 {
764 	struct lacp_softc *lsc;
765 
766 	lsc = malloc(sizeof(struct lacp_softc), M_DEVBUF, M_WAITOK | M_ZERO);
767 
768 	sc->sc_psc = lsc;
769 	lsc->lsc_softc = sc;
770 
771 	lsc->lsc_hashkey = m_ether_tcpip_hash_init();
772 	lsc->lsc_active_aggregator = NULL;
773 	lsc->lsc_strict_mode = VNET(lacp_default_strict_mode);
774 	LACP_LOCK_INIT(lsc);
775 	TAILQ_INIT(&lsc->lsc_aggregators);
776 	LIST_INIT(&lsc->lsc_ports);
777 
778 	callout_init_mtx(&lsc->lsc_transit_callout, &lsc->lsc_mtx, 0);
779 	callout_init_mtx(&lsc->lsc_callout, &lsc->lsc_mtx, 0);
780 
781 	/* if the lagg is already up then do the same */
782 	if (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING)
783 		lacp_init(sc);
784 }
785 
786 void
787 lacp_detach(void *psc)
788 {
789 	struct lacp_softc *lsc = (struct lacp_softc *)psc;
790 
791 	KASSERT(TAILQ_EMPTY(&lsc->lsc_aggregators),
792 	    ("aggregators still active"));
793 	KASSERT(lsc->lsc_active_aggregator == NULL,
794 	    ("aggregator still attached"));
795 
796 	callout_drain(&lsc->lsc_transit_callout);
797 	callout_drain(&lsc->lsc_callout);
798 
799 	LACP_LOCK_DESTROY(lsc);
800 	free(lsc, M_DEVBUF);
801 }
802 
803 void
804 lacp_init(struct lagg_softc *sc)
805 {
806 	struct lacp_softc *lsc = LACP_SOFTC(sc);
807 
808 	LACP_LOCK(lsc);
809 	callout_reset(&lsc->lsc_callout, hz, lacp_tick, lsc);
810 	LACP_UNLOCK(lsc);
811 }
812 
813 void
814 lacp_stop(struct lagg_softc *sc)
815 {
816 	struct lacp_softc *lsc = LACP_SOFTC(sc);
817 
818 	LACP_LOCK(lsc);
819 	callout_stop(&lsc->lsc_transit_callout);
820 	callout_stop(&lsc->lsc_callout);
821 	LACP_UNLOCK(lsc);
822 }
823 
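/*
 * lacp_select_tx_port: pick the member port for an outgoing frame.  The
 * flow is hashed (using the mbuf flowid when LAGG_OPT_USE_FLOWID is set,
 * otherwise an Ethernet/IP/TCP header hash) and the hash indexes the
 * active portmap modulo the number of distributing ports.  Returns NULL
 * while distribution is suppressed or no aggregator is active.
 */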
824 struct lagg_port *
825 lacp_select_tx_port(struct lagg_softc *sc, struct mbuf *m)
826 {
827 	struct lacp_softc *lsc = LACP_SOFTC(sc);
828 	struct lacp_portmap *pm;
829 	struct lacp_port *lp;
830 	uint32_t hash;
831 
832 	if (__predict_false(lsc->lsc_suppress_distributing)) {
833 		LACP_DPRINTF((NULL, "%s: waiting transit\n", __func__));
834 		return (NULL);
835 	}
836 
837 	pm = &lsc->lsc_pmap[lsc->lsc_activemap];
838 	if (pm->pm_count == 0) {
839 		LACP_DPRINTF((NULL, "%s: no active aggregator\n", __func__));
840 		return (NULL);
841 	}
842 
843 	if ((sc->sc_opts & LAGG_OPT_USE_FLOWID) &&
844 	    M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
845 		hash = m->m_pkthdr.flowid >> sc->flowid_shift;
846 	else
847 		hash = m_ether_tcpip_hash(sc->sc_flags, m, lsc->lsc_hashkey);
848 	hash %= pm->pm_count;
849 	lp = pm->pm_map[hash];
850 
851 	KASSERT((lp->lp_state & LACP_STATE_DISTRIBUTING) != 0,
852 	    ("aggregated port is not distributing"));
853 
854 	return (lp->lp_lagg);
855 }
856 /*
857  * lacp_suppress_distributing: drop transmit packets for a while
858  * to preserve packet ordering.
859  */
860 
861 static void
862 lacp_suppress_distributing(struct lacp_softc *lsc, struct lacp_aggregator *la)
863 {
864 	struct lacp_port *lp;
865 
866 	if (lsc->lsc_active_aggregator != la) {
867 		return;
868 	}
869 
870 	LACP_TRACE(NULL);
871 
872 	lsc->lsc_suppress_distributing = TRUE;
873 
874 	/* send a marker frame down each port to verify the queues are empty */
875 	LIST_FOREACH(lp, &lsc->lsc_ports, lp_next) {
876 		lp->lp_flags |= LACP_PORT_MARK;
877 		lacp_xmit_marker(lp);
878 	}
879 
880 	/* set a timeout for the marker frames */
881 	callout_reset(&lsc->lsc_transit_callout,
882 	    LACP_TRANSIT_DELAY * hz / 1000, lacp_transit_expire, lsc);
883 }
884 
885 static int
886 lacp_compare_peerinfo(const struct lacp_peerinfo *a,
887     const struct lacp_peerinfo *b)
888 {
889 	return (memcmp(a, b, offsetof(struct lacp_peerinfo, lip_state)));
890 }
891 
892 static int
893 lacp_compare_systemid(const struct lacp_systemid *a,
894     const struct lacp_systemid *b)
895 {
896 	return (memcmp(a, b, sizeof(*a)));
897 }
898 
899 #if 0	/* unused */
900 static int
901 lacp_compare_portid(const struct lacp_portid *a,
902     const struct lacp_portid *b)
903 {
904 	return (memcmp(a, b, sizeof(*a)));
905 }
906 #endif
907 
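/*
 * lacp_aggregator_bandwidth: estimate the aggregate bandwidth as the media
 * baudrate of the first member times the number of distributing ports.
 * All members of an aggregator share the same key, and therefore the same
 * speed class, so looking at the first port is sufficient.
 */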
908 static uint64_t
909 lacp_aggregator_bandwidth(struct lacp_aggregator *la)
910 {
911 	struct lacp_port *lp;
912 	uint64_t speed;
913 
914 	lp = TAILQ_FIRST(&la->la_ports);
915 	if (lp == NULL) {
916 		return (0);
917 	}
918 
919 	speed = ifmedia_baudrate(lp->lp_media);
920 	speed *= la->la_nports;
921 	if (speed == 0) {
922 		LACP_DPRINTF((lp, "speed 0? media=0x%x nports=%d\n",
923 		    lp->lp_media, la->la_nports));
924 	}
925 
926 	return (speed);
927 }
928 
929 /*
930  * lacp_select_active_aggregator: select an aggregator to be used to transmit
931  * packets from the lagg(4) interface.
932  */
933 
934 static void
935 lacp_select_active_aggregator(struct lacp_softc *lsc)
936 {
937 	struct lacp_aggregator *la;
938 	struct lacp_aggregator *best_la = NULL;
939 	uint64_t best_speed = 0;
940 	char buf[LACP_LAGIDSTR_MAX+1];
941 
942 	LACP_TRACE(NULL);
943 
944 	TAILQ_FOREACH(la, &lsc->lsc_aggregators, la_q) {
945 		uint64_t speed;
946 
947 		if (la->la_nports == 0) {
948 			continue;
949 		}
950 
951 		speed = lacp_aggregator_bandwidth(la);
952 		LACP_DPRINTF((NULL, "%s, speed=%ju, nports=%d\n",
953 		    lacp_format_lagid_aggregator(la, buf, sizeof(buf)),
954 		    speed, la->la_nports));
955 
956 		/*
957 		 * This aggregator is chosen if the partner has a better
958 		 * system priority, the total aggregated speed is higher,
959 		 * or it is already the chosen aggregator
960 		 */
961 		if ((best_la != NULL && LACP_SYS_PRI(la->la_partner) <
962 		    LACP_SYS_PRI(best_la->la_partner)) ||
963 		    speed > best_speed ||
964 		    (speed == best_speed &&
965 		    la == lsc->lsc_active_aggregator)) {
966 			best_la = la;
967 			best_speed = speed;
968 		}
969 	}
970 
971 	KASSERT(best_la == NULL || best_la->la_nports > 0,
972 	    ("invalid aggregator refcnt"));
973 	KASSERT(best_la == NULL || !TAILQ_EMPTY(&best_la->la_ports),
974 	    ("invalid aggregator list"));
975 
976 	if (lsc->lsc_active_aggregator != best_la) {
977 		LACP_DPRINTF((NULL, "active aggregator changed\n"));
978 		LACP_DPRINTF((NULL, "old %s\n",
979 		    lacp_format_lagid_aggregator(lsc->lsc_active_aggregator,
980 		    buf, sizeof(buf))));
981 	} else {
982 		LACP_DPRINTF((NULL, "active aggregator not changed\n"));
983 	}
984 	LACP_DPRINTF((NULL, "new %s\n",
985 	    lacp_format_lagid_aggregator(best_la, buf, sizeof(buf))));
986 
987 	if (lsc->lsc_active_aggregator != best_la) {
988 		lsc->lsc_active_aggregator = best_la;
989 		lacp_update_portmap(lsc);
990 		if (best_la) {
991 			lacp_suppress_distributing(lsc, best_la);
992 		}
993 	}
994 }
995 
996 /*
997  * Update the inactive portmap array with the new list of ports and
998  * make it live.
999  */
1000 static void
1001 lacp_update_portmap(struct lacp_softc *lsc)
1002 {
1003 	struct lagg_softc *sc = lsc->lsc_softc;
1004 	struct lacp_aggregator *la;
1005 	struct lacp_portmap *p;
1006 	struct lacp_port *lp;
1007 	uint64_t speed;
1008 	u_int newmap;
1009 	int i;
1010 
1011 	newmap = lsc->lsc_activemap == 0 ? 1 : 0;
1012 	p = &lsc->lsc_pmap[newmap];
1013 	la = lsc->lsc_active_aggregator;
1014 	speed = 0;
1015 	bzero(p, sizeof(struct lacp_portmap));
1016 
1017 	if (la != NULL && la->la_nports > 0) {
1018 		p->pm_count = la->la_nports;
1019 		i = 0;
1020 		TAILQ_FOREACH(lp, &la->la_ports, lp_dist_q)
1021 			p->pm_map[i++] = lp;
1022 		KASSERT(i == p->pm_count, ("Invalid port count"));
1023 		speed = lacp_aggregator_bandwidth(la);
1024 	}
1025 	sc->sc_ifp->if_baudrate = speed;
1026 
1027 	/* switch the active portmap over */
1028 	atomic_store_rel_int(&lsc->lsc_activemap, newmap);
1029 	LACP_DPRINTF((NULL, "Set table %d with %d ports\n",
1030 		    lsc->lsc_activemap,
1031 		    lsc->lsc_pmap[lsc->lsc_activemap].pm_count));
1032 }
1033 
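/*
 * lacp_compose_key: derive the operational key (in network byte order) for
 * a port.  Aggregatable ports encode their speed class in the low bits and
 * the lagg interface index above it, so same-speed members of one lagg get
 * identical keys; non-aggregatable ports derive the key from their own
 * if_index with bit 15 set.
 */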
1034 static uint16_t
1035 lacp_compose_key(struct lacp_port *lp)
1036 {
1037 	struct lagg_port *lgp = lp->lp_lagg;
1038 	struct lagg_softc *sc = lgp->lp_softc;
1039 	u_int media = lp->lp_media;
1040 	uint16_t key;
1041 
1042 	if ((lp->lp_state & LACP_STATE_AGGREGATION) == 0) {
1043 
1044 		/*
1045 		 * non-aggregatable links should have unique keys.
1046 		 *
1047 		 * XXX this isn't really unique as if_index is 16 bit.
1048 		 */
1049 
1050 		/* bit 0..14:	(some bits of) if_index of this port */
1051 		key = lp->lp_ifp->if_index;
1052 		/* bit 15:	1 */
1053 		key |= 0x8000;
1054 	} else {
1055 		u_int subtype = IFM_SUBTYPE(media);
1056 
1057 		KASSERT(IFM_TYPE(media) == IFM_ETHER, ("invalid media type"));
1058 		KASSERT((media & IFM_FDX) != 0, ("aggregating HDX interface"));
1059 
1060 		/* bit 0..4:	IFM_SUBTYPE, one representative value per link speed */
1061 		switch (subtype) {
1062 		case IFM_10_T:
1063 		case IFM_10_2:
1064 		case IFM_10_5:
1065 		case IFM_10_STP:
1066 		case IFM_10_FL:
1067 			key = IFM_10_T;
1068 			break;
1069 		case IFM_100_TX:
1070 		case IFM_100_FX:
1071 		case IFM_100_T4:
1072 		case IFM_100_VG:
1073 		case IFM_100_T2:
1074 		case IFM_100_T:
1075 			key = IFM_100_TX;
1076 			break;
1077 		case IFM_1000_SX:
1078 		case IFM_1000_LX:
1079 		case IFM_1000_CX:
1080 		case IFM_1000_T:
1081 		case IFM_1000_KX:
1082 		case IFM_1000_SGMII:
1083 		case IFM_1000_CX_SGMII:
1084 			key = IFM_1000_SX;
1085 			break;
1086 		case IFM_10G_LR:
1087 		case IFM_10G_SR:
1088 		case IFM_10G_CX4:
1089 		case IFM_10G_TWINAX:
1090 		case IFM_10G_TWINAX_LONG:
1091 		case IFM_10G_LRM:
1092 		case IFM_10G_T:
1093 		case IFM_10G_KX4:
1094 		case IFM_10G_KR:
1095 		case IFM_10G_CR1:
1096 		case IFM_10G_ER:
1097 		case IFM_10G_SFI:
1098 			key = IFM_10G_LR;
1099 			break;
1100 		case IFM_20G_KR2:
1101 			key = IFM_20G_KR2;
1102 			break;
1103 		case IFM_2500_KX:
1104 		case IFM_2500_T:
1105 			key = IFM_2500_KX;
1106 			break;
1107 		case IFM_5000_T:
1108 			key = IFM_5000_T;
1109 			break;
1110 		case IFM_50G_PCIE:
1111 		case IFM_50G_CR2:
1112 		case IFM_50G_KR2:
1113 			key = IFM_50G_PCIE;
1114 			break;
1115 		case IFM_56G_R4:
1116 			key = IFM_56G_R4;
1117 			break;
1118 		case IFM_25G_PCIE:
1119 		case IFM_25G_CR:
1120 		case IFM_25G_KR:
1121 		case IFM_25G_SR:
1122 			key = IFM_25G_PCIE;
1123 			break;
1124 		case IFM_40G_CR4:
1125 		case IFM_40G_SR4:
1126 		case IFM_40G_LR4:
1127 		case IFM_40G_XLPPI:
1128 		case IFM_40G_KR4:
1129 			key = IFM_40G_CR4;
1130 			break;
1131 		case IFM_100G_CR4:
1132 		case IFM_100G_SR4:
1133 		case IFM_100G_KR4:
1134 		case IFM_100G_LR4:
1135 			key = IFM_100G_CR4;
1136 			break;
1137 		default:
1138 			key = subtype;
1139 			break;
1140 		}
1141 		/* bit 5..14:	(some bits of) if_index of lagg device */
1142 		key |= 0x7fe0 & ((sc->sc_ifp->if_index) << 5);
1143 		/* bit 15:	0 */
1144 	}
1145 	return (htons(key));
1146 }
1147 
1148 static void
1149 lacp_aggregator_addref(struct lacp_softc *lsc, struct lacp_aggregator *la)
1150 {
1151 	char buf[LACP_LAGIDSTR_MAX+1];
1152 
1153 	LACP_DPRINTF((NULL, "%s: lagid=%s, refcnt %d -> %d\n",
1154 	    __func__,
1155 	    lacp_format_lagid(&la->la_actor, &la->la_partner,
1156 	    buf, sizeof(buf)),
1157 	    la->la_refcnt, la->la_refcnt + 1));
1158 
1159 	KASSERT(la->la_refcnt > 0, ("refcount <= 0"));
1160 	la->la_refcnt++;
1161 	KASSERT(la->la_refcnt > la->la_nports, ("invalid refcount"));
1162 }
1163 
1164 static void
1165 lacp_aggregator_delref(struct lacp_softc *lsc, struct lacp_aggregator *la)
1166 {
1167 	char buf[LACP_LAGIDSTR_MAX+1];
1168 
1169 	LACP_DPRINTF((NULL, "%s: lagid=%s, refcnt %d -> %d\n",
1170 	    __func__,
1171 	    lacp_format_lagid(&la->la_actor, &la->la_partner,
1172 	    buf, sizeof(buf)),
1173 	    la->la_refcnt, la->la_refcnt - 1));
1174 
1175 	KASSERT(la->la_refcnt > la->la_nports, ("invalid refcnt"));
1176 	la->la_refcnt--;
1177 	if (la->la_refcnt > 0) {
1178 		return;
1179 	}
1180 
1181 	KASSERT(la->la_refcnt == 0, ("refcount not zero"));
1182 	KASSERT(lsc->lsc_active_aggregator != la, ("aggregator active"));
1183 
1184 	TAILQ_REMOVE(&lsc->lsc_aggregators, la, la_q);
1185 
1186 	free(la, M_DEVBUF);
1187 }
1188 
1189 /*
1190  * lacp_aggregator_get: allocate an aggregator.
1191  */
1192 
1193 static struct lacp_aggregator *
1194 lacp_aggregator_get(struct lacp_softc *lsc, struct lacp_port *lp)
1195 {
1196 	struct lacp_aggregator *la;
1197 
1198 	la = malloc(sizeof(*la), M_DEVBUF, M_NOWAIT);
1199 	if (la) {
1200 		la->la_refcnt = 1;
1201 		la->la_nports = 0;
1202 		TAILQ_INIT(&la->la_ports);
1203 		la->la_pending = 0;
1204 		TAILQ_INSERT_TAIL(&lsc->lsc_aggregators, la, la_q);
1205 	}
1206 
1207 	return (la);
1208 }
1209 
1210 /*
1211  * lacp_fill_aggregator_id: setup a newly allocated aggregator from a port.
1212  */
1213 
1214 static void
1215 lacp_fill_aggregator_id(struct lacp_aggregator *la, const struct lacp_port *lp)
1216 {
1217 	lacp_fill_aggregator_id_peer(&la->la_partner, &lp->lp_partner);
1218 	lacp_fill_aggregator_id_peer(&la->la_actor, &lp->lp_actor);
1219 
1220 	la->la_actor.lip_state = lp->lp_state & LACP_STATE_AGGREGATION;
1221 }
1222 
1223 static void
1224 lacp_fill_aggregator_id_peer(struct lacp_peerinfo *lpi_aggr,
1225     const struct lacp_peerinfo *lpi_port)
1226 {
1227 	memset(lpi_aggr, 0, sizeof(*lpi_aggr));
1228 	lpi_aggr->lip_systemid = lpi_port->lip_systemid;
1229 	lpi_aggr->lip_key = lpi_port->lip_key;
1230 }
1231 
1232 /*
1233  * lacp_aggregator_is_compatible: check if a port can join to an aggregator.
1234  */
1235 
1236 static int
1237 lacp_aggregator_is_compatible(const struct lacp_aggregator *la,
1238     const struct lacp_port *lp)
1239 {
1240 	if (!(lp->lp_state & LACP_STATE_AGGREGATION) ||
1241 	    !(lp->lp_partner.lip_state & LACP_STATE_AGGREGATION)) {
1242 		return (0);
1243 	}
1244 
1245 	if (!(la->la_actor.lip_state & LACP_STATE_AGGREGATION)) {
1246 		return (0);
1247 	}
1248 
1249 	if (!lacp_peerinfo_is_compatible(&la->la_partner, &lp->lp_partner)) {
1250 		return (0);
1251 	}
1252 
1253 	if (!lacp_peerinfo_is_compatible(&la->la_actor, &lp->lp_actor)) {
1254 		return (0);
1255 	}
1256 
1257 	return (1);
1258 }
1259 
1260 static int
1261 lacp_peerinfo_is_compatible(const struct lacp_peerinfo *a,
1262     const struct lacp_peerinfo *b)
1263 {
1264 	if (memcmp(&a->lip_systemid, &b->lip_systemid,
1265 	    sizeof(a->lip_systemid))) {
1266 		return (0);
1267 	}
1268 
1269 	if (memcmp(&a->lip_key, &b->lip_key, sizeof(a->lip_key))) {
1270 		return (0);
1271 	}
1272 
1273 	return (1);
1274 }
1275 
1276 static void
1277 lacp_port_enable(struct lacp_port *lp)
1278 {
1279 	lp->lp_state |= LACP_STATE_AGGREGATION;
1280 }
1281 
1282 static void
1283 lacp_port_disable(struct lacp_port *lp)
1284 {
1285 	lacp_set_mux(lp, LACP_MUX_DETACHED);
1286 
1287 	lp->lp_state &= ~LACP_STATE_AGGREGATION;
1288 	lp->lp_selected = LACP_UNSELECTED;
1289 	lacp_sm_rx_record_default(lp);
1290 	lp->lp_partner.lip_state &= ~LACP_STATE_AGGREGATION;
1291 	lp->lp_state &= ~LACP_STATE_EXPIRED;
1292 }
1293 
1294 /*
1295  * lacp_select: select an aggregator.  create one if necessary.
1296  */
1297 static void
1298 lacp_select(struct lacp_port *lp)
1299 {
1300 	struct lacp_softc *lsc = lp->lp_lsc;
1301 	struct lacp_aggregator *la;
1302 	char buf[LACP_LAGIDSTR_MAX+1];
1303 
1304 	if (lp->lp_aggregator) {
1305 		return;
1306 	}
1307 
1308 	KASSERT(!LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE),
1309 	    ("timer_wait_while still active"));
1310 
1311 	LACP_DPRINTF((lp, "port lagid=%s\n",
1312 	    lacp_format_lagid(&lp->lp_actor, &lp->lp_partner,
1313 	    buf, sizeof(buf))));
1314 
1315 	TAILQ_FOREACH(la, &lsc->lsc_aggregators, la_q) {
1316 		if (lacp_aggregator_is_compatible(la, lp)) {
1317 			break;
1318 		}
1319 	}
1320 
1321 	if (la == NULL) {
1322 		la = lacp_aggregator_get(lsc, lp);
1323 		if (la == NULL) {
1324 			LACP_DPRINTF((lp, "aggregator creation failed\n"));
1325 
1326 			/*
1327 			 * will retry on the next tick.
1328 			 */
1329 
1330 			return;
1331 		}
1332 		lacp_fill_aggregator_id(la, lp);
1333 		LACP_DPRINTF((lp, "aggregator created\n"));
1334 	} else {
1335 		LACP_DPRINTF((lp, "compatible aggregator found\n"));
1336 		if (la->la_refcnt == LACP_MAX_PORTS)
1337 			return;
1338 		lacp_aggregator_addref(lsc, la);
1339 	}
1340 
1341 	LACP_DPRINTF((lp, "aggregator lagid=%s\n",
1342 	    lacp_format_lagid(&la->la_actor, &la->la_partner,
1343 	    buf, sizeof(buf))));
1344 
1345 	lp->lp_aggregator = la;
1346 	lp->lp_selected = LACP_SELECTED;
1347 }
1348 
1349 /*
1350  * lacp_unselect: finish unselect/detach process.
1351  */
1352 
1353 static void
1354 lacp_unselect(struct lacp_port *lp)
1355 {
1356 	struct lacp_softc *lsc = lp->lp_lsc;
1357 	struct lacp_aggregator *la = lp->lp_aggregator;
1358 
1359 	KASSERT(!LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE),
1360 	    ("timer_wait_while still active"));
1361 
1362 	if (la == NULL) {
1363 		return;
1364 	}
1365 
1366 	lp->lp_aggregator = NULL;
1367 	lacp_aggregator_delref(lsc, la);
1368 }
1369 
1370 /* mux machine */
1371 
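/*
 * lacp_sm_mux: the mux state machine.  A port walks
 * DETACHED -> WAITING -> ATTACHED -> COLLECTING -> DISTRIBUTING as it is
 * selected for an aggregator and the partner reports SYNC and then
 * COLLECTING, and falls back when selection is lost or the partner state
 * regresses.  The loop below re-evaluates until the state is stable.
 */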
1372 static void
1373 lacp_sm_mux(struct lacp_port *lp)
1374 {
1375 	struct lagg_port *lgp = lp->lp_lagg;
1376 	struct lagg_softc *sc = lgp->lp_softc;
1377 	enum lacp_mux_state new_state;
1378 	boolean_t p_sync =
1379 		    (lp->lp_partner.lip_state & LACP_STATE_SYNC) != 0;
1380 	boolean_t p_collecting =
1381 	    (lp->lp_partner.lip_state & LACP_STATE_COLLECTING) != 0;
1382 	enum lacp_selected selected = lp->lp_selected;
1383 	struct lacp_aggregator *la;
1384 
1385 	if (V_lacp_debug > 1)
1386 		lacp_dprintf(lp, "%s: state= 0x%x, selected= 0x%x, "
1387 		    "p_sync= 0x%x, p_collecting= 0x%x\n", __func__,
1388 		    lp->lp_mux_state, selected, p_sync, p_collecting);
1389 
1390 re_eval:
1391 	la = lp->lp_aggregator;
1392 	KASSERT(lp->lp_mux_state == LACP_MUX_DETACHED || la != NULL,
1393 	    ("MUX not detached"));
1394 	new_state = lp->lp_mux_state;
1395 	switch (lp->lp_mux_state) {
1396 	case LACP_MUX_DETACHED:
1397 		if (selected != LACP_UNSELECTED) {
1398 			new_state = LACP_MUX_WAITING;
1399 		}
1400 		break;
1401 	case LACP_MUX_WAITING:
1402 		KASSERT(la->la_pending > 0 ||
1403 		    !LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE),
1404 		    ("timer_wait_while still active"));
1405 		if (selected == LACP_SELECTED && la->la_pending == 0) {
1406 			new_state = LACP_MUX_ATTACHED;
1407 		} else if (selected == LACP_UNSELECTED) {
1408 			new_state = LACP_MUX_DETACHED;
1409 		}
1410 		break;
1411 	case LACP_MUX_ATTACHED:
1412 		if (selected == LACP_SELECTED && p_sync) {
1413 			new_state = LACP_MUX_COLLECTING;
1414 		} else if (selected != LACP_SELECTED) {
1415 			new_state = LACP_MUX_DETACHED;
1416 		}
1417 		break;
1418 	case LACP_MUX_COLLECTING:
1419 		if (selected == LACP_SELECTED && p_sync && p_collecting) {
1420 			new_state = LACP_MUX_DISTRIBUTING;
1421 		} else if (selected != LACP_SELECTED || !p_sync) {
1422 			new_state = LACP_MUX_ATTACHED;
1423 		}
1424 		break;
1425 	case LACP_MUX_DISTRIBUTING:
1426 		if (selected != LACP_SELECTED || !p_sync || !p_collecting) {
1427 			new_state = LACP_MUX_COLLECTING;
1428 			lacp_dprintf(lp, "Interface stopped DISTRIBUTING, possible flapping\n");
1429 			sc->sc_flapping++;
1430 		}
1431 		break;
1432 	default:
1433 		panic("%s: unknown state", __func__);
1434 	}
1435 
1436 	if (lp->lp_mux_state == new_state) {
1437 		return;
1438 	}
1439 
1440 	lacp_set_mux(lp, new_state);
1441 	goto re_eval;
1442 }
1443 
1444 static void
1445 lacp_set_mux(struct lacp_port *lp, enum lacp_mux_state new_state)
1446 {
1447 	struct lacp_aggregator *la = lp->lp_aggregator;
1448 
1449 	if (lp->lp_mux_state == new_state) {
1450 		return;
1451 	}
1452 
1453 	switch (new_state) {
1454 	case LACP_MUX_DETACHED:
1455 		lp->lp_state &= ~LACP_STATE_SYNC;
1456 		lacp_disable_distributing(lp);
1457 		lacp_disable_collecting(lp);
1458 		lacp_sm_assert_ntt(lp);
1459 		/* cancel timer */
1460 		if (LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE)) {
1461 			KASSERT(la->la_pending > 0,
1462 			    ("timer_wait_while not active"));
1463 			la->la_pending--;
1464 		}
1465 		LACP_TIMER_DISARM(lp, LACP_TIMER_WAIT_WHILE);
1466 		lacp_unselect(lp);
1467 		break;
1468 	case LACP_MUX_WAITING:
1469 		LACP_TIMER_ARM(lp, LACP_TIMER_WAIT_WHILE,
1470 		    LACP_AGGREGATE_WAIT_TIME);
1471 		la->la_pending++;
1472 		break;
1473 	case LACP_MUX_ATTACHED:
1474 		lp->lp_state |= LACP_STATE_SYNC;
1475 		lacp_disable_collecting(lp);
1476 		lacp_sm_assert_ntt(lp);
1477 		break;
1478 	case LACP_MUX_COLLECTING:
1479 		lacp_enable_collecting(lp);
1480 		lacp_disable_distributing(lp);
1481 		lacp_sm_assert_ntt(lp);
1482 		break;
1483 	case LACP_MUX_DISTRIBUTING:
1484 		lacp_enable_distributing(lp);
1485 		break;
1486 	default:
1487 		panic("%s: unknown state", __func__);
1488 	}
1489 
1490 	LACP_DPRINTF((lp, "mux_state %d -> %d\n", lp->lp_mux_state, new_state));
1491 
1492 	lp->lp_mux_state = new_state;
1493 }
1494 
1495 static void
1496 lacp_sm_mux_timer(struct lacp_port *lp)
1497 {
1498 	struct lacp_aggregator *la = lp->lp_aggregator;
1499 	char buf[LACP_LAGIDSTR_MAX+1];
1500 
1501 	KASSERT(la->la_pending > 0, ("no pending event"));
1502 
1503 	LACP_DPRINTF((lp, "%s: aggregator %s, pending %d -> %d\n", __func__,
1504 	    lacp_format_lagid(&la->la_actor, &la->la_partner,
1505 	    buf, sizeof(buf)),
1506 	    la->la_pending, la->la_pending - 1));
1507 
1508 	la->la_pending--;
1509 }
1510 
1511 /* periodic transmit machine */
1512 
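/*
 * The periodic machine arms LACP_TIMER_PERIODIC with either
 * LACP_FAST_PERIODIC_TIME or LACP_SLOW_PERIODIC_TIME, depending on the
 * partner's TIMEOUT flag; when the timer fires, NTT is asserted so that a
 * PDU is transmitted on the next tick.  No periodic transmission happens
 * when neither side is in active mode.
 */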
1513 static void
1514 lacp_sm_ptx_update_timeout(struct lacp_port *lp, uint8_t oldpstate)
1515 {
1516 	if (LACP_STATE_EQ(oldpstate, lp->lp_partner.lip_state,
1517 	    LACP_STATE_TIMEOUT)) {
1518 		return;
1519 	}
1520 
1521 	LACP_DPRINTF((lp, "partner timeout changed\n"));
1522 
1523 	/*
1524 	 * FAST_PERIODIC -> SLOW_PERIODIC
1525 	 * or
1526 	 * SLOW_PERIODIC (-> PERIODIC_TX) -> FAST_PERIODIC
1527 	 *
1528 	 * let lacp_sm_ptx_tx_schedule() update the timeout.
1529 	 */
1530 
1531 	LACP_TIMER_DISARM(lp, LACP_TIMER_PERIODIC);
1532 
1533 	/*
1534 	 * if the timeout has been shortened, assert NTT.
1535 	 */
1536 
1537 	if ((lp->lp_partner.lip_state & LACP_STATE_TIMEOUT)) {
1538 		lacp_sm_assert_ntt(lp);
1539 	}
1540 }
1541 
1542 static void
1543 lacp_sm_ptx_tx_schedule(struct lacp_port *lp)
1544 {
1545 	int timeout;
1546 
1547 	if (!(lp->lp_state & LACP_STATE_ACTIVITY) &&
1548 	    !(lp->lp_partner.lip_state & LACP_STATE_ACTIVITY)) {
1549 
1550 		/*
1551 		 * NO_PERIODIC
1552 		 */
1553 
1554 		LACP_TIMER_DISARM(lp, LACP_TIMER_PERIODIC);
1555 		return;
1556 	}
1557 
1558 	if (LACP_TIMER_ISARMED(lp, LACP_TIMER_PERIODIC)) {
1559 		return;
1560 	}
1561 
1562 	timeout = (lp->lp_partner.lip_state & LACP_STATE_TIMEOUT) ?
1563 	    LACP_FAST_PERIODIC_TIME : LACP_SLOW_PERIODIC_TIME;
1564 
1565 	LACP_TIMER_ARM(lp, LACP_TIMER_PERIODIC, timeout);
1566 }
1567 
1568 static void
1569 lacp_sm_ptx_timer(struct lacp_port *lp)
1570 {
1571 	lacp_sm_assert_ntt(lp);
1572 }
1573 
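/*
 * lacp_sm_rx: the receive machine.  A valid PDU from the partner refreshes
 * the recorded partner information, re-arms the current_while timer (short
 * or long depending on our own TIMEOUT setting) and clears the EXPIRED
 * state.
 */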
1574 static void
1575 lacp_sm_rx(struct lacp_port *lp, const struct lacpdu *du)
1576 {
1577 	int timeout;
1578 
1579 	/*
1580 	 * check LACP_DISABLED first
1581 	 */
1582 
1583 	if (!(lp->lp_state & LACP_STATE_AGGREGATION)) {
1584 		return;
1585 	}
1586 
1587 	/*
1588 	 * check loopback condition.
1589 	 */
1590 
1591 	if (!lacp_compare_systemid(&du->ldu_actor.lip_systemid,
1592 	    &lp->lp_actor.lip_systemid)) {
1593 		return;
1594 	}
1595 
1596 	/*
1597 	 * EXPIRED, DEFAULTED, CURRENT -> CURRENT
1598 	 */
1599 
1600 	lacp_sm_rx_update_selected(lp, du);
1601 	lacp_sm_rx_update_ntt(lp, du);
1602 	lacp_sm_rx_record_pdu(lp, du);
1603 
1604 	timeout = (lp->lp_state & LACP_STATE_TIMEOUT) ?
1605 	    LACP_SHORT_TIMEOUT_TIME : LACP_LONG_TIMEOUT_TIME;
1606 	LACP_TIMER_ARM(lp, LACP_TIMER_CURRENT_WHILE, timeout);
1607 
1608 	lp->lp_state &= ~LACP_STATE_EXPIRED;
1609 
1610 	/*
1611 	 * kick the transmit machine without waiting for the next tick.
1612 	 */
1613 
1614 	lacp_sm_tx(lp);
1615 }
1616 
1617 static void
1618 lacp_sm_rx_set_expired(struct lacp_port *lp)
1619 {
1620 	lp->lp_partner.lip_state &= ~LACP_STATE_SYNC;
1621 	lp->lp_partner.lip_state |= LACP_STATE_TIMEOUT;
1622 	LACP_TIMER_ARM(lp, LACP_TIMER_CURRENT_WHILE, LACP_SHORT_TIMEOUT_TIME);
1623 	lp->lp_state |= LACP_STATE_EXPIRED;
1624 }
1625 
1626 static void
1627 lacp_sm_rx_timer(struct lacp_port *lp)
1628 {
1629 	if ((lp->lp_state & LACP_STATE_EXPIRED) == 0) {
1630 		/* CURRENT -> EXPIRED */
1631 		LACP_DPRINTF((lp, "%s: CURRENT -> EXPIRED\n", __func__));
1632 		lacp_sm_rx_set_expired(lp);
1633 	} else {
1634 		/* EXPIRED -> DEFAULTED */
1635 		LACP_DPRINTF((lp, "%s: EXPIRED -> DEFAULTED\n", __func__));
1636 		lacp_sm_rx_update_default_selected(lp);
1637 		lacp_sm_rx_record_default(lp);
1638 		lp->lp_state &= ~LACP_STATE_EXPIRED;
1639 	}
1640 }
1641 
1642 static void
1643 lacp_sm_rx_record_pdu(struct lacp_port *lp, const struct lacpdu *du)
1644 {
1645 	boolean_t active;
1646 	uint8_t oldpstate;
1647 	char buf[LACP_STATESTR_MAX+1];
1648 
1649 	LACP_TRACE(lp);
1650 
1651 	oldpstate = lp->lp_partner.lip_state;
1652 
1653 	active = (du->ldu_actor.lip_state & LACP_STATE_ACTIVITY)
1654 	    || ((lp->lp_state & LACP_STATE_ACTIVITY) &&
1655 	    (du->ldu_partner.lip_state & LACP_STATE_ACTIVITY));
1656 
1657 	lp->lp_partner = du->ldu_actor;
1658 	if (active &&
1659 	    ((LACP_STATE_EQ(lp->lp_state, du->ldu_partner.lip_state,
1660 	    LACP_STATE_AGGREGATION) &&
1661 	    !lacp_compare_peerinfo(&lp->lp_actor, &du->ldu_partner))
1662 	    || (du->ldu_partner.lip_state & LACP_STATE_AGGREGATION) == 0)) {
1663 		/* XXX nothing? */
1664 	} else {
1665 		lp->lp_partner.lip_state &= ~LACP_STATE_SYNC;
1666 	}
1667 
1668 	lp->lp_state &= ~LACP_STATE_DEFAULTED;
1669 
1670 	if (oldpstate != lp->lp_partner.lip_state) {
1671 		LACP_DPRINTF((lp, "old pstate %s\n",
1672 		    lacp_format_state(oldpstate, buf, sizeof(buf))));
1673 		LACP_DPRINTF((lp, "new pstate %s\n",
1674 		    lacp_format_state(lp->lp_partner.lip_state, buf,
1675 		    sizeof(buf))));
1676 	}
1677 
1678 	/* XXX Hack, still need to implement 5.4.9 para 2,3,4 */
1679 	if (lp->lp_lsc->lsc_strict_mode)
1680 		lp->lp_partner.lip_state |= LACP_STATE_SYNC;
1681 
1682 	lacp_sm_ptx_update_timeout(lp, oldpstate);
1683 }
1684 
1685 static void
1686 lacp_sm_rx_update_ntt(struct lacp_port *lp, const struct lacpdu *du)
1687 {
1688 
1689 	LACP_TRACE(lp);
1690 
1691 	if (lacp_compare_peerinfo(&lp->lp_actor, &du->ldu_partner) ||
1692 	    !LACP_STATE_EQ(lp->lp_state, du->ldu_partner.lip_state,
1693 	    LACP_STATE_ACTIVITY | LACP_STATE_SYNC | LACP_STATE_AGGREGATION)) {
1694 		LACP_DPRINTF((lp, "%s: assert ntt\n", __func__));
1695 		lacp_sm_assert_ntt(lp);
1696 	}
1697 }
1698 
1699 static void
1700 lacp_sm_rx_record_default(struct lacp_port *lp)
1701 {
1702 	uint8_t oldpstate;
1703 
1704 	LACP_TRACE(lp);
1705 
1706 	oldpstate = lp->lp_partner.lip_state;
1707 	if (lp->lp_lsc->lsc_strict_mode)
1708 		lp->lp_partner = lacp_partner_admin_strict;
1709 	else
1710 		lp->lp_partner = lacp_partner_admin_optimistic;
1711 	lp->lp_state |= LACP_STATE_DEFAULTED;
1712 	lacp_sm_ptx_update_timeout(lp, oldpstate);
1713 }
1714 
1715 static void
1716 lacp_sm_rx_update_selected_from_peerinfo(struct lacp_port *lp,
1717     const struct lacp_peerinfo *info)
1718 {
1719 
1720 	LACP_TRACE(lp);
1721 
1722 	if (lacp_compare_peerinfo(&lp->lp_partner, info) ||
1723 	    !LACP_STATE_EQ(lp->lp_partner.lip_state, info->lip_state,
1724 	    LACP_STATE_AGGREGATION)) {
1725 		lp->lp_selected = LACP_UNSELECTED;
1726 		/* mux machine will clean up lp->lp_aggregator */
1727 	}
1728 }
1729 
1730 static void
1731 lacp_sm_rx_update_selected(struct lacp_port *lp, const struct lacpdu *du)
1732 {
1733 
1734 	LACP_TRACE(lp);
1735 
1736 	lacp_sm_rx_update_selected_from_peerinfo(lp, &du->ldu_actor);
1737 }
1738 
1739 static void
1740 lacp_sm_rx_update_default_selected(struct lacp_port *lp)
1741 {
1742 
1743 	LACP_TRACE(lp);
1744 
1745 	if (lp->lp_lsc->lsc_strict_mode)
1746 		lacp_sm_rx_update_selected_from_peerinfo(lp,
1747 		    &lacp_partner_admin_strict);
1748 	else
1749 		lacp_sm_rx_update_selected_from_peerinfo(lp,
1750 		    &lacp_partner_admin_optimistic);
1751 }
1752 
1753 /* transmit machine */
1754 
1755 static void
1756 lacp_sm_tx(struct lacp_port *lp)
1757 {
1758 	int error = 0;
1759 
1760 	if (!(lp->lp_state & LACP_STATE_AGGREGATION)
1761 #if 1
1762 	    || (!(lp->lp_state & LACP_STATE_ACTIVITY)
1763 	    && !(lp->lp_partner.lip_state & LACP_STATE_ACTIVITY))
1764 #endif
1765 	    ) {
1766 		lp->lp_flags &= ~LACP_PORT_NTT;
1767 	}
1768 
1769 	if (!(lp->lp_flags & LACP_PORT_NTT)) {
1770 		return;
1771 	}
1772 
1773 	/* Rate limit to 3 PDUs per LACP_FAST_PERIODIC_TIME */
1774 	if (ppsratecheck(&lp->lp_last_lacpdu, &lp->lp_lacpdu_sent,
1775 		    (3 / LACP_FAST_PERIODIC_TIME)) == 0) {
1776 		LACP_DPRINTF((lp, "rate limited pdu\n"));
1777 		return;
1778 	}
1779 
1780 	if (((1 << lp->lp_ifp->if_dunit) & lp->lp_lsc->lsc_debug.lsc_tx_test) == 0) {
1781 		error = lacp_xmit_lacpdu(lp);
1782 	} else {
1783 		LACP_TPRINTF((lp, "Dropping TX PDU\n"));
1784 	}
1785 
1786 	if (error == 0) {
1787 		lp->lp_flags &= ~LACP_PORT_NTT;
1788 	} else {
1789 		LACP_DPRINTF((lp, "lacpdu transmit failure, error %d\n",
1790 		    error));
1791 	}
1792 }
1793 
1794 static void
1795 lacp_sm_assert_ntt(struct lacp_port *lp)
1796 {
1797 
1798 	lp->lp_flags |= LACP_PORT_NTT;
1799 }
1800 
1801 static void
1802 lacp_run_timers(struct lacp_port *lp)
1803 {
1804 	int i;
1805 
1806 	for (i = 0; i < LACP_NTIMER; i++) {
1807 		KASSERT(lp->lp_timer[i] >= 0,
1808 		    ("invalid timer value %d", lp->lp_timer[i]));
1809 		if (lp->lp_timer[i] == 0) {
1810 			continue;
1811 		} else if (--lp->lp_timer[i] <= 0) {
1812 			if (lacp_timer_funcs[i]) {
1813 				(*lacp_timer_funcs[i])(lp);
1814 			}
1815 		}
1816 	}
1817 }
1818 
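/*
 * lacp_marker_input: process a marker PDU.  A marker information TLV is
 * answered with a marker response on the same port; a marker response that
 * matches the last marker we transmitted clears LACP_PORT_MARK, and once
 * no port is left marked, distribution is un-suppressed.
 */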
1819 int
1820 lacp_marker_input(struct lacp_port *lp, struct mbuf *m)
1821 {
1822 	struct lacp_softc *lsc = lp->lp_lsc;
1823 	struct lagg_port *lgp = lp->lp_lagg;
1824 	struct lacp_port *lp2;
1825 	struct markerdu *mdu;
1826 	int error = 0;
1827 	int pending = 0;
1828 
1829 	if (m->m_pkthdr.len != sizeof(*mdu)) {
1830 		goto bad;
1831 	}
1832 
1833 	if ((m->m_flags & M_MCAST) == 0) {
1834 		goto bad;
1835 	}
1836 
1837 	if (m->m_len < sizeof(*mdu)) {
1838 		m = m_pullup(m, sizeof(*mdu));
1839 		if (m == NULL) {
1840 			return (ENOMEM);
1841 		}
1842 	}
1843 
1844 	mdu = mtod(m, struct markerdu *);
1845 
1846 	if (memcmp(&mdu->mdu_eh.ether_dhost,
1847 	    &ethermulticastaddr_slowprotocols, ETHER_ADDR_LEN)) {
1848 		goto bad;
1849 	}
1850 
1851 	if (mdu->mdu_sph.sph_version != 1) {
1852 		goto bad;
1853 	}
1854 
1855 	switch (mdu->mdu_tlv.tlv_type) {
1856 	case MARKER_TYPE_INFO:
1857 		if (tlv_check(mdu, sizeof(*mdu), &mdu->mdu_tlv,
1858 		    marker_info_tlv_template, TRUE)) {
1859 			goto bad;
1860 		}
1861 		mdu->mdu_tlv.tlv_type = MARKER_TYPE_RESPONSE;
1862 		memcpy(&mdu->mdu_eh.ether_dhost,
1863 		    &ethermulticastaddr_slowprotocols, ETHER_ADDR_LEN);
1864 		memcpy(&mdu->mdu_eh.ether_shost,
1865 		    lgp->lp_lladdr, ETHER_ADDR_LEN);
1866 		error = lagg_enqueue(lp->lp_ifp, m);
1867 		break;
1868 
1869 	case MARKER_TYPE_RESPONSE:
1870 		if (tlv_check(mdu, sizeof(*mdu), &mdu->mdu_tlv,
1871 		    marker_response_tlv_template, TRUE)) {
1872 			goto bad;
1873 		}
1874 		LACP_DPRINTF((lp, "marker response, port=%u, sys=%6D, id=%u\n",
1875 		    ntohs(mdu->mdu_info.mi_rq_port), mdu->mdu_info.mi_rq_system,
1876 		    ":", ntohl(mdu->mdu_info.mi_rq_xid)));
1877 
1878 		/* Verify that it is the last marker we sent out */
1879 		if (memcmp(&mdu->mdu_info, &lp->lp_marker,
1880 		    sizeof(struct lacp_markerinfo)))
1881 			goto bad;
1882 
1883 		LACP_LOCK(lsc);
1884 		lp->lp_flags &= ~LACP_PORT_MARK;
1885 
1886 		if (lsc->lsc_suppress_distributing) {
1887 			/* Check if any ports are waiting for a response */
1888 			LIST_FOREACH(lp2, &lsc->lsc_ports, lp_next) {
1889 				if (lp2->lp_flags & LACP_PORT_MARK) {
1890 					pending = 1;
1891 					break;
1892 				}
1893 			}
1894 
1895 			if (pending == 0) {
1896 				/* All interface queues are clear */
1897 				LACP_DPRINTF((NULL, "queue flush complete\n"));
1898 				lsc->lsc_suppress_distributing = FALSE;
1899 			}
1900 		}
1901 		LACP_UNLOCK(lsc);
1902 		m_freem(m);
1903 		break;
1904 
1905 	default:
1906 		goto bad;
1907 	}
1908 
1909 	return (error);
1910 
1911 bad:
1912 	LACP_DPRINTF((lp, "bad marker frame\n"));
1913 	m_freem(m);
1914 	return (EINVAL);
1915 }
1916 
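/*
 * tlv_check: walk a chain of TLVs against a template, verifying that each
 * entry fits within the PDU and has the expected length (and type, when
 * check_type is TRUE).  The template is terminated by a zero type.
 */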
1917 static int
1918 tlv_check(const void *p, size_t size, const struct tlvhdr *tlv,
1919     const struct tlv_template *tmpl, boolean_t check_type)
1920 {
1921 	while (/* CONSTCOND */ 1) {
1922 		if ((const char *)tlv - (const char *)p + sizeof(*tlv) > size) {
1923 			return (EINVAL);
1924 		}
1925 		if ((check_type && tlv->tlv_type != tmpl->tmpl_type) ||
1926 		    tlv->tlv_length != tmpl->tmpl_length) {
1927 			return (EINVAL);
1928 		}
1929 		if (tmpl->tmpl_type == 0) {
1930 			break;
1931 		}
1932 		tlv = (const struct tlvhdr *)
1933 		    ((const char *)tlv + tlv->tlv_length);
1934 		tmpl++;
1935 	}
1936 
1937 	return (0);
1938 }
1939 
1940 /* Debugging */
1941 const char *
1942 lacp_format_mac(const uint8_t *mac, char *buf, size_t buflen)
1943 {
1944 	snprintf(buf, buflen, "%02X-%02X-%02X-%02X-%02X-%02X",
1945 	    (int)mac[0],
1946 	    (int)mac[1],
1947 	    (int)mac[2],
1948 	    (int)mac[3],
1949 	    (int)mac[4],
1950 	    (int)mac[5]);
1951 
1952 	return (buf);
1953 }
1954 
1955 const char *
1956 lacp_format_systemid(const struct lacp_systemid *sysid,
1957     char *buf, size_t buflen)
1958 {
1959 	char macbuf[LACP_MACSTR_MAX+1];
1960 
1961 	snprintf(buf, buflen, "%04X,%s",
1962 	    ntohs(sysid->lsi_prio),
1963 	    lacp_format_mac(sysid->lsi_mac, macbuf, sizeof(macbuf)));
1964 
1965 	return (buf);
1966 }
1967 
1968 const char *
1969 lacp_format_portid(const struct lacp_portid *portid, char *buf, size_t buflen)
1970 {
1971 	snprintf(buf, buflen, "%04X,%04X",
1972 	    ntohs(portid->lpi_prio),
1973 	    ntohs(portid->lpi_portno));
1974 
1975 	return (buf);
1976 }
1977 
1978 const char *
1979 lacp_format_partner(const struct lacp_peerinfo *peer, char *buf, size_t buflen)
1980 {
1981 	char sysid[LACP_SYSTEMIDSTR_MAX+1];
1982 	char portid[LACP_PORTIDSTR_MAX+1];
1983 
1984 	snprintf(buf, buflen, "(%s,%04X,%s)",
1985 	    lacp_format_systemid(&peer->lip_systemid, sysid, sizeof(sysid)),
1986 	    ntohs(peer->lip_key),
1987 	    lacp_format_portid(&peer->lip_portid, portid, sizeof(portid)));
1988 
1989 	return (buf);
1990 }
1991 
1992 const char *
1993 lacp_format_lagid(const struct lacp_peerinfo *a,
1994     const struct lacp_peerinfo *b, char *buf, size_t buflen)
1995 {
1996 	char astr[LACP_PARTNERSTR_MAX+1];
1997 	char bstr[LACP_PARTNERSTR_MAX+1];
1998 
1999 #if 0
2000 	/*
2001 	 * there's a convention to display the lower-numbered peer
2002 	 * on the left.
2003 	 */
2004 
2005 	if (lacp_compare_peerinfo(a, b) > 0) {
2006 		const struct lacp_peerinfo *t;
2007 
2008 		t = a;
2009 		a = b;
2010 		b = t;
2011 	}
2012 #endif
2013 
2014 	snprintf(buf, buflen, "[%s,%s]",
2015 	    lacp_format_partner(a, astr, sizeof(astr)),
2016 	    lacp_format_partner(b, bstr, sizeof(bstr)));
2017 
2018 	return (buf);
2019 }
2020 
2021 const char *
2022 lacp_format_lagid_aggregator(const struct lacp_aggregator *la,
2023     char *buf, size_t buflen)
2024 {
2025 	if (la == NULL) {
2026 		return ("(none)");
2027 	}
2028 
2029 	return (lacp_format_lagid(&la->la_actor, &la->la_partner, buf, buflen));
2030 }
2031 
2032 const char *
2033 lacp_format_state(uint8_t state, char *buf, size_t buflen)
2034 {
2035 	snprintf(buf, buflen, "%b", state, LACP_STATE_BITS);
2036 	return (buf);
2037 }
2038 
2039 static void
2040 lacp_dump_lacpdu(const struct lacpdu *du)
2041 {
2042 	char buf[LACP_PARTNERSTR_MAX+1];
2043 	char buf2[LACP_STATESTR_MAX+1];
2044 
2045 	printf("actor=%s\n",
2046 	    lacp_format_partner(&du->ldu_actor, buf, sizeof(buf)));
2047 	printf("actor.state=%s\n",
2048 	    lacp_format_state(du->ldu_actor.lip_state, buf2, sizeof(buf2)));
2049 	printf("partner=%s\n",
2050 	    lacp_format_partner(&du->ldu_partner, buf, sizeof(buf)));
2051 	printf("partner.state=%s\n",
2052 	    lacp_format_state(du->ldu_partner.lip_state, buf2, sizeof(buf2)));
2053 
2054 	printf("maxdelay=%d\n", ntohs(du->ldu_collector.lci_maxdelay));
2055 }
2056 
2057 static void
2058 lacp_dprintf(const struct lacp_port *lp, const char *fmt, ...)
2059 {
2060 	va_list va;
2061 
2062 	if (lp) {
2063 		printf("%s: ", lp->lp_ifp->if_xname);
2064 	}
2065 
2066 	va_start(va, fmt);
2067 	vprintf(fmt, va);
2068 	va_end(va);
2069 }
2070