1 /*	$NetBSD: ieee8023ad_lacp.c,v 1.3 2005/12/11 12:24:54 christos Exp $	*/
2 
3 /*-
4  * SPDX-License-Identifier: BSD-2-Clause-NetBSD
5  *
6  * Copyright (c)2005 YAMAMOTO Takashi,
7  * Copyright (c)2008 Andrew Thompson <thompsa@FreeBSD.org>
8  * All rights reserved.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 #include "opt_ratelimit.h"
36 
37 #include <sys/param.h>
38 #include <sys/callout.h>
39 #include <sys/eventhandler.h>
40 #include <sys/mbuf.h>
41 #include <sys/systm.h>
42 #include <sys/malloc.h>
43 #include <sys/kernel.h> /* hz */
44 #include <sys/socket.h> /* for net/if.h */
45 #include <sys/sockio.h>
46 #include <sys/sysctl.h>
47 #include <machine/stdarg.h>
48 #include <sys/lock.h>
49 #include <sys/rwlock.h>
50 #include <sys/taskqueue.h>
51 
52 #include <net/if.h>
53 #include <net/if_var.h>
54 #include <net/if_dl.h>
55 #include <net/ethernet.h>
56 #include <net/if_media.h>
57 #include <net/if_types.h>
58 
59 #include <net/if_lagg.h>
60 #include <net/ieee8023ad_lacp.h>
61 
62 /*
63  * actor system priority and port priority.
64  * XXX should be configurable.
65  */
66 
67 #define	LACP_SYSTEM_PRIO	0x8000
68 #define	LACP_PORT_PRIO		0x8000
69 
70 const uint8_t ethermulticastaddr_slowprotocols[ETHER_ADDR_LEN] =
71     { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x02 };
72 
73 static const struct tlv_template lacp_info_tlv_template[] = {
74 	{ LACP_TYPE_ACTORINFO,
75 	    sizeof(struct tlvhdr) + sizeof(struct lacp_peerinfo) },
76 	{ LACP_TYPE_PARTNERINFO,
77 	    sizeof(struct tlvhdr) + sizeof(struct lacp_peerinfo) },
78 	{ LACP_TYPE_COLLECTORINFO,
79 	    sizeof(struct tlvhdr) + sizeof(struct lacp_collectorinfo) },
80 	{ 0, 0 },
81 };
82 
83 static const struct tlv_template marker_info_tlv_template[] = {
84 	{ MARKER_TYPE_INFO,
85 	    sizeof(struct tlvhdr) + sizeof(struct lacp_markerinfo) },
86 	{ 0, 0 },
87 };
88 
89 static const struct tlv_template marker_response_tlv_template[] = {
90 	{ MARKER_TYPE_RESPONSE,
91 	    sizeof(struct tlvhdr) + sizeof(struct lacp_markerinfo) },
92 	{ 0, 0 },
93 };
94 
95 typedef void (*lacp_timer_func_t)(struct lacp_port *);
96 
97 static void	lacp_fill_actorinfo(struct lacp_port *, struct lacp_peerinfo *);
98 static void	lacp_fill_markerinfo(struct lacp_port *,
99 		    struct lacp_markerinfo *);
100 
101 static uint64_t	lacp_aggregator_bandwidth(struct lacp_aggregator *);
102 static void	lacp_suppress_distributing(struct lacp_softc *,
103 		    struct lacp_aggregator *);
104 static void	lacp_transit_expire(void *);
105 static void	lacp_update_portmap(struct lacp_softc *);
106 static void	lacp_select_active_aggregator(struct lacp_softc *);
107 static uint16_t	lacp_compose_key(struct lacp_port *);
108 static int	tlv_check(const void *, size_t, const struct tlvhdr *,
109 		    const struct tlv_template *, boolean_t);
110 static void	lacp_tick(void *);
111 
112 static void	lacp_fill_aggregator_id(struct lacp_aggregator *,
113 		    const struct lacp_port *);
114 static void	lacp_fill_aggregator_id_peer(struct lacp_peerinfo *,
115 		    const struct lacp_peerinfo *);
116 static int	lacp_aggregator_is_compatible(const struct lacp_aggregator *,
117 		    const struct lacp_port *);
118 static int	lacp_peerinfo_is_compatible(const struct lacp_peerinfo *,
119 		    const struct lacp_peerinfo *);
120 
121 static struct lacp_aggregator *lacp_aggregator_get(struct lacp_softc *,
122 		    struct lacp_port *);
123 static void	lacp_aggregator_addref(struct lacp_softc *,
124 		    struct lacp_aggregator *);
125 static void	lacp_aggregator_delref(struct lacp_softc *,
126 		    struct lacp_aggregator *);
127 
128 /* receive machine */
129 
130 static int	lacp_pdu_input(struct lacp_port *, struct mbuf *);
131 static int	lacp_marker_input(struct lacp_port *, struct mbuf *);
132 static void	lacp_sm_rx(struct lacp_port *, const struct lacpdu *);
133 static void	lacp_sm_rx_timer(struct lacp_port *);
134 static void	lacp_sm_rx_set_expired(struct lacp_port *);
135 static void	lacp_sm_rx_update_ntt(struct lacp_port *,
136 		    const struct lacpdu *);
137 static void	lacp_sm_rx_record_pdu(struct lacp_port *,
138 		    const struct lacpdu *);
139 static void	lacp_sm_rx_update_selected(struct lacp_port *,
140 		    const struct lacpdu *);
141 static void	lacp_sm_rx_record_default(struct lacp_port *);
142 static void	lacp_sm_rx_update_default_selected(struct lacp_port *);
143 static void	lacp_sm_rx_update_selected_from_peerinfo(struct lacp_port *,
144 		    const struct lacp_peerinfo *);
145 
146 /* mux machine */
147 
148 static void	lacp_sm_mux(struct lacp_port *);
149 static void	lacp_set_mux(struct lacp_port *, enum lacp_mux_state);
150 static void	lacp_sm_mux_timer(struct lacp_port *);
151 
152 /* periodic transmit machine */
153 
154 static void	lacp_sm_ptx_update_timeout(struct lacp_port *, uint8_t);
155 static void	lacp_sm_ptx_tx_schedule(struct lacp_port *);
156 static void	lacp_sm_ptx_timer(struct lacp_port *);
157 
158 /* transmit machine */
159 
160 static void	lacp_sm_tx(struct lacp_port *);
161 static void	lacp_sm_assert_ntt(struct lacp_port *);
162 
163 static void	lacp_run_timers(struct lacp_port *);
164 static int	lacp_compare_peerinfo(const struct lacp_peerinfo *,
165 		    const struct lacp_peerinfo *);
166 static int	lacp_compare_systemid(const struct lacp_systemid *,
167 		    const struct lacp_systemid *);
168 static void	lacp_port_enable(struct lacp_port *);
169 static void	lacp_port_disable(struct lacp_port *);
170 static void	lacp_select(struct lacp_port *);
171 static void	lacp_unselect(struct lacp_port *);
172 static void	lacp_disable_collecting(struct lacp_port *);
173 static void	lacp_enable_collecting(struct lacp_port *);
174 static void	lacp_disable_distributing(struct lacp_port *);
175 static void	lacp_enable_distributing(struct lacp_port *);
176 static int	lacp_xmit_lacpdu(struct lacp_port *);
177 static int	lacp_xmit_marker(struct lacp_port *);
178 
179 /* Debugging */
180 
181 static void	lacp_dump_lacpdu(const struct lacpdu *);
182 static const char *lacp_format_partner(const struct lacp_peerinfo *, char *,
183 		    size_t);
184 static const char *lacp_format_lagid(const struct lacp_peerinfo *,
185 		    const struct lacp_peerinfo *, char *, size_t);
186 static const char *lacp_format_lagid_aggregator(const struct lacp_aggregator *,
187 		    char *, size_t);
188 static const char *lacp_format_state(uint8_t, char *, size_t);
189 static const char *lacp_format_mac(const uint8_t *, char *, size_t);
190 static const char *lacp_format_systemid(const struct lacp_systemid *, char *,
191 		    size_t);
192 static const char *lacp_format_portid(const struct lacp_portid *, char *,
193 		    size_t);
194 static void	lacp_dprintf(const struct lacp_port *, const char *, ...)
195 		    __attribute__((__format__(__printf__, 2, 3)));
196 
197 VNET_DEFINE_STATIC(int, lacp_debug);
198 #define	V_lacp_debug	VNET(lacp_debug)
199 SYSCTL_NODE(_net_link_lagg, OID_AUTO, lacp, CTLFLAG_RD, 0, "ieee802.3ad");
200 SYSCTL_INT(_net_link_lagg_lacp, OID_AUTO, debug, CTLFLAG_RWTUN | CTLFLAG_VNET,
201     &VNET_NAME(lacp_debug), 0, "Enable LACP debug logging (1=debug, 2=trace)");
202 
203 VNET_DEFINE_STATIC(int, lacp_default_strict_mode) = 1;
204 SYSCTL_INT(_net_link_lagg_lacp, OID_AUTO, default_strict_mode,
205     CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(lacp_default_strict_mode), 0,
206     "LACP strict protocol compliance default");
207 
208 #define LACP_DPRINTF(a) if (V_lacp_debug & 0x01) { lacp_dprintf a ; }
209 #define LACP_TRACE(a) if (V_lacp_debug & 0x02) { lacp_dprintf(a,"%s\n",__func__); }
210 #define LACP_TPRINTF(a) if (V_lacp_debug & 0x04) { lacp_dprintf a ; }
211 
212 /*
213  * partner administration variables.
214  * XXX should be configurable.
215  */
216 
217 static const struct lacp_peerinfo lacp_partner_admin_optimistic = {
218 	.lip_systemid = { .lsi_prio = 0xffff },
219 	.lip_portid = { .lpi_prio = 0xffff },
220 	.lip_state = LACP_STATE_SYNC | LACP_STATE_AGGREGATION |
221 	    LACP_STATE_COLLECTING | LACP_STATE_DISTRIBUTING,
222 };
223 
224 static const struct lacp_peerinfo lacp_partner_admin_strict = {
225 	.lip_systemid = { .lsi_prio = 0xffff },
226 	.lip_portid = { .lpi_prio = 0xffff },
227 	.lip_state = 0,
228 };
229 
230 static const lacp_timer_func_t lacp_timer_funcs[LACP_NTIMER] = {
231 	[LACP_TIMER_CURRENT_WHILE] = lacp_sm_rx_timer,
232 	[LACP_TIMER_PERIODIC] = lacp_sm_ptx_timer,
233 	[LACP_TIMER_WAIT_WHILE] = lacp_sm_mux_timer,
234 };
235 
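/*
 * lacp_input: dispatch an incoming slow-protocols frame from a lagg port.
 * LACPDUs and marker PDUs are consumed here (NULL is returned); frames
 * with any other subtype are handed back to the caller unchanged.
 */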
236 struct mbuf *
237 lacp_input(struct lagg_port *lgp, struct mbuf *m)
238 {
239 	struct lacp_port *lp = LACP_PORT(lgp);
240 	uint8_t subtype;
241 
242 	if (m->m_pkthdr.len < sizeof(struct ether_header) + sizeof(subtype)) {
243 		m_freem(m);
244 		return (NULL);
245 	}
246 
247 	m_copydata(m, sizeof(struct ether_header), sizeof(subtype), &subtype);
248 	switch (subtype) {
249 		case SLOWPROTOCOLS_SUBTYPE_LACP:
250 			lacp_pdu_input(lp, m);
251 			return (NULL);
252 
253 		case SLOWPROTOCOLS_SUBTYPE_MARKER:
254 			lacp_marker_input(lp, m);
255 			return (NULL);
256 	}
257 
258 	/* Not a subtype we are interested in */
259 	return (m);
260 }
261 
262 /*
263  * lacp_pdu_input: process lacpdu
264  */
265 static int
266 lacp_pdu_input(struct lacp_port *lp, struct mbuf *m)
267 {
268 	struct lacp_softc *lsc = lp->lp_lsc;
269 	struct lacpdu *du;
270 	int error = 0;
271 
272 	if (m->m_pkthdr.len != sizeof(*du)) {
273 		goto bad;
274 	}
275 
276 	if ((m->m_flags & M_MCAST) == 0) {
277 		goto bad;
278 	}
279 
280 	if (m->m_len < sizeof(*du)) {
281 		m = m_pullup(m, sizeof(*du));
282 		if (m == NULL) {
283 			return (ENOMEM);
284 		}
285 	}
286 
287 	du = mtod(m, struct lacpdu *);
288 
289 	if (memcmp(&du->ldu_eh.ether_dhost,
290 	    &ethermulticastaddr_slowprotocols, ETHER_ADDR_LEN)) {
291 		goto bad;
292 	}
293 
294 	/*
295 	 * Ignore the version field for compatibility with
296 	 * future protocol revisions.
297 	 */
298 #if 0
299 	if (du->ldu_sph.sph_version != 1) {
300 		goto bad;
301 	}
302 #endif
303 
304 	/*
305 	 * Ignore the TLV types for compatibility with
306 	 * future protocol revisions.
307 	 */
308 	if (tlv_check(du, sizeof(*du), &du->ldu_tlv_actor,
309 	    lacp_info_tlv_template, FALSE)) {
310 		goto bad;
311 	}
312 
313         if (V_lacp_debug > 0) {
314 		lacp_dprintf(lp, "lacpdu receive\n");
315 		lacp_dump_lacpdu(du);
316 	}
317 
318 	if ((1 << lp->lp_ifp->if_dunit) & lp->lp_lsc->lsc_debug.lsc_rx_test) {
319 		LACP_TPRINTF((lp, "Dropping RX PDU\n"));
320 		goto bad;
321 	}
322 
323 	LACP_LOCK(lsc);
324 	lacp_sm_rx(lp, du);
325 	LACP_UNLOCK(lsc);
326 
327 	m_freem(m);
328 	return (error);
329 
330 bad:
331 	m_freem(m);
332 	return (EINVAL);
333 }
334 
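/*
 * lacp_fill_actorinfo: fill in our actor information for this port: the
 * lagg interface's MAC address and system priority, the port's if_index
 * and port priority, and the current actor state bits.
 */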
335 static void
336 lacp_fill_actorinfo(struct lacp_port *lp, struct lacp_peerinfo *info)
337 {
338 	struct lagg_port *lgp = lp->lp_lagg;
339 	struct lagg_softc *sc = lgp->lp_softc;
340 
341 	info->lip_systemid.lsi_prio = htons(LACP_SYSTEM_PRIO);
342 	memcpy(&info->lip_systemid.lsi_mac,
343 	    IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
344 	info->lip_portid.lpi_prio = htons(LACP_PORT_PRIO);
345 	info->lip_portid.lpi_portno = htons(lp->lp_ifp->if_index);
346 	info->lip_state = lp->lp_state;
347 }
348 
349 static void
350 lacp_fill_markerinfo(struct lacp_port *lp, struct lacp_markerinfo *info)
351 {
352 	struct ifnet *ifp = lp->lp_ifp;
353 
354 	/* Fill in the port index and system id (encoded as the MAC) */
355 	info->mi_rq_port = htons(ifp->if_index);
356 	memcpy(&info->mi_rq_system, lp->lp_systemid.lsi_mac, ETHER_ADDR_LEN);
357 	info->mi_rq_xid = htonl(0);
358 }
359 
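/*
 * lacp_xmit_lacpdu: build a LACPDU carrying the current actor and partner
 * information and enqueue it on the port.
 */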
360 static int
361 lacp_xmit_lacpdu(struct lacp_port *lp)
362 {
363 	struct lagg_port *lgp = lp->lp_lagg;
364 	struct mbuf *m;
365 	struct lacpdu *du;
366 	int error;
367 
368 	LACP_LOCK_ASSERT(lp->lp_lsc);
369 
370 	m = m_gethdr(M_NOWAIT, MT_DATA);
371 	if (m == NULL) {
372 		return (ENOMEM);
373 	}
374 	m->m_len = m->m_pkthdr.len = sizeof(*du);
375 
376 	du = mtod(m, struct lacpdu *);
377 	memset(du, 0, sizeof(*du));
378 
379 	memcpy(&du->ldu_eh.ether_dhost, ethermulticastaddr_slowprotocols,
380 	    ETHER_ADDR_LEN);
381 	memcpy(&du->ldu_eh.ether_shost, lgp->lp_lladdr, ETHER_ADDR_LEN);
382 	du->ldu_eh.ether_type = htons(ETHERTYPE_SLOW);
383 
384 	du->ldu_sph.sph_subtype = SLOWPROTOCOLS_SUBTYPE_LACP;
385 	du->ldu_sph.sph_version = 1;
386 
387 	TLV_SET(&du->ldu_tlv_actor, LACP_TYPE_ACTORINFO, sizeof(du->ldu_actor));
388 	du->ldu_actor = lp->lp_actor;
389 
390 	TLV_SET(&du->ldu_tlv_partner, LACP_TYPE_PARTNERINFO,
391 	    sizeof(du->ldu_partner));
392 	du->ldu_partner = lp->lp_partner;
393 
394 	TLV_SET(&du->ldu_tlv_collector, LACP_TYPE_COLLECTORINFO,
395 	    sizeof(du->ldu_collector));
396 	du->ldu_collector.lci_maxdelay = 0;
397 
398 	if (V_lacp_debug > 0) {
399 		lacp_dprintf(lp, "lacpdu transmit\n");
400 		lacp_dump_lacpdu(du);
401 	}
402 
403 	m->m_flags |= M_MCAST;
404 
405 	/*
406 	 * XXX should use a higher priority queue.
407 	 * Otherwise network congestion can break aggregation.
408 	 */
409 
410 	error = lagg_enqueue(lp->lp_ifp, m);
411 	return (error);
412 }
413 
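/*
 * lacp_xmit_marker: build a marker PDU with the next transaction id and
 * enqueue it on the port.
 */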
414 static int
415 lacp_xmit_marker(struct lacp_port *lp)
416 {
417 	struct lagg_port *lgp = lp->lp_lagg;
418 	struct mbuf *m;
419 	struct markerdu *mdu;
420 	int error;
421 
422 	LACP_LOCK_ASSERT(lp->lp_lsc);
423 
424 	m = m_gethdr(M_NOWAIT, MT_DATA);
425 	if (m == NULL) {
426 		return (ENOMEM);
427 	}
428 	m->m_len = m->m_pkthdr.len = sizeof(*mdu);
429 
430 	mdu = mtod(m, struct markerdu *);
431 	memset(mdu, 0, sizeof(*mdu));
432 
433 	memcpy(&mdu->mdu_eh.ether_dhost, ethermulticastaddr_slowprotocols,
434 	    ETHER_ADDR_LEN);
435 	memcpy(&mdu->mdu_eh.ether_shost, lgp->lp_lladdr, ETHER_ADDR_LEN);
436 	mdu->mdu_eh.ether_type = htons(ETHERTYPE_SLOW);
437 
438 	mdu->mdu_sph.sph_subtype = SLOWPROTOCOLS_SUBTYPE_MARKER;
439 	mdu->mdu_sph.sph_version = 1;
440 
441 	/* Bump the transaction id and copy over the marker info */
442 	lp->lp_marker.mi_rq_xid = htonl(ntohl(lp->lp_marker.mi_rq_xid) + 1);
443 	TLV_SET(&mdu->mdu_tlv, MARKER_TYPE_INFO, sizeof(mdu->mdu_info));
444 	mdu->mdu_info = lp->lp_marker;
445 
446 	LACP_DPRINTF((lp, "marker transmit, port=%u, sys=%6D, id=%u\n",
447 	    ntohs(mdu->mdu_info.mi_rq_port), mdu->mdu_info.mi_rq_system, ":",
448 	    ntohl(mdu->mdu_info.mi_rq_xid)));
449 
450 	m->m_flags |= M_MCAST;
451 	error = lagg_enqueue(lp->lp_ifp, m);
452 	return (error);
453 }
454 
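/*
 * lacp_linkstate: called when a port's media or link state changes.
 * Re-evaluate whether the port can aggregate (it must be a full-duplex
 * Ethernet link that is up), recompute its key, and mark the port
 * UNSELECTED if either its state or its key changed.
 */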
455 void
456 lacp_linkstate(struct lagg_port *lgp)
457 {
458 	struct lacp_port *lp = LACP_PORT(lgp);
459 	struct lacp_softc *lsc = lp->lp_lsc;
460 	struct ifnet *ifp = lgp->lp_ifp;
461 	struct ifmediareq ifmr;
462 	int error = 0;
463 	u_int media;
464 	uint8_t old_state;
465 	uint16_t old_key;
466 
467 	bzero((char *)&ifmr, sizeof(ifmr));
468 	error = (*ifp->if_ioctl)(ifp, SIOCGIFXMEDIA, (caddr_t)&ifmr);
469 	if (error != 0) {
470 		bzero((char *)&ifmr, sizeof(ifmr));
471 		error = (*ifp->if_ioctl)(ifp, SIOCGIFMEDIA, (caddr_t)&ifmr);
472 	}
473 	if (error != 0)
474 		return;
475 
476 	LACP_LOCK(lsc);
477 	media = ifmr.ifm_active;
478 	LACP_DPRINTF((lp, "media changed 0x%x -> 0x%x, ether = %d, fdx = %d, "
479 	    "link = %d\n", lp->lp_media, media, IFM_TYPE(media) == IFM_ETHER,
480 	    (media & IFM_FDX) != 0, ifp->if_link_state == LINK_STATE_UP));
481 	old_state = lp->lp_state;
482 	old_key = lp->lp_key;
483 
484 	lp->lp_media = media;
485 	/*
486 	 * If the port is not an active, full-duplex Ethernet link then it
487 	 * cannot be aggregated.
488 	 */
489 	if (IFM_TYPE(media) != IFM_ETHER || (media & IFM_FDX) == 0 ||
490 	    ifp->if_link_state != LINK_STATE_UP) {
491 		lacp_port_disable(lp);
492 	} else {
493 		lacp_port_enable(lp);
494 	}
495 	lp->lp_key = lacp_compose_key(lp);
496 
497 	if (old_state != lp->lp_state || old_key != lp->lp_key) {
498 		LACP_DPRINTF((lp, "-> UNSELECTED\n"));
499 		lp->lp_selected = LACP_UNSELECTED;
500 	}
501 	LACP_UNLOCK(lsc);
502 }
503 
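/*
 * lacp_tick: per-second callout that drives the per-port state machines:
 * run the timers, then the selection logic, mux, transmit and periodic
 * transmit machines for every aggregatable port.
 */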
504 static void
505 lacp_tick(void *arg)
506 {
507 	struct lacp_softc *lsc = arg;
508 	struct lacp_port *lp;
509 
510 	LIST_FOREACH(lp, &lsc->lsc_ports, lp_next) {
511 		if ((lp->lp_state & LACP_STATE_AGGREGATION) == 0)
512 			continue;
513 
514 		CURVNET_SET(lp->lp_ifp->if_vnet);
515 		lacp_run_timers(lp);
516 
517 		lacp_select(lp);
518 		lacp_sm_mux(lp);
519 		lacp_sm_tx(lp);
520 		lacp_sm_ptx_tx_schedule(lp);
521 		CURVNET_RESTORE();
522 	}
523 	callout_reset(&lsc->lsc_callout, hz, lacp_tick, lsc);
524 }
525 
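/*
 * lacp_port_create: attach a lagg port to LACP.  Join the slow protocols
 * multicast group, allocate the per-port state, and start the receive
 * machine in the EXPIRED state.
 */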
526 int
527 lacp_port_create(struct lagg_port *lgp)
528 {
529 	struct lagg_softc *sc = lgp->lp_softc;
530 	struct lacp_softc *lsc = LACP_SOFTC(sc);
531 	struct lacp_port *lp;
532 	struct ifnet *ifp = lgp->lp_ifp;
533 	struct sockaddr_dl sdl;
534 	struct ifmultiaddr *rifma = NULL;
535 	int error;
536 
537 	link_init_sdl(ifp, (struct sockaddr *)&sdl, IFT_ETHER);
538 	sdl.sdl_alen = ETHER_ADDR_LEN;
539 
540 	bcopy(&ethermulticastaddr_slowprotocols,
541 	    LLADDR(&sdl), ETHER_ADDR_LEN);
542 	error = if_addmulti(ifp, (struct sockaddr *)&sdl, &rifma);
543 	if (error) {
544 		printf("%s: ADDMULTI failed on %s\n", __func__,
545 		    lgp->lp_ifp->if_xname);
546 		return (error);
547 	}
548 
549 	lp = malloc(sizeof(struct lacp_port),
550 	    M_DEVBUF, M_NOWAIT|M_ZERO);
551 	if (lp == NULL)
552 		return (ENOMEM);
553 
554 	LACP_LOCK(lsc);
555 	lgp->lp_psc = lp;
556 	lp->lp_ifp = ifp;
557 	lp->lp_lagg = lgp;
558 	lp->lp_lsc = lsc;
559 	lp->lp_ifma = rifma;
560 
561 	LIST_INSERT_HEAD(&lsc->lsc_ports, lp, lp_next);
562 
563 	lacp_fill_actorinfo(lp, &lp->lp_actor);
564 	lacp_fill_markerinfo(lp, &lp->lp_marker);
565 	lp->lp_state = LACP_STATE_ACTIVITY;
566 	lp->lp_aggregator = NULL;
567 	lacp_sm_rx_set_expired(lp);
568 	LACP_UNLOCK(lsc);
569 	lacp_linkstate(lgp);
570 
571 	return (0);
572 }
573 
574 void
575 lacp_port_destroy(struct lagg_port *lgp)
576 {
577 	struct lacp_port *lp = LACP_PORT(lgp);
578 	struct lacp_softc *lsc = lp->lp_lsc;
579 	int i;
580 
581 	LACP_LOCK(lsc);
582 	for (i = 0; i < LACP_NTIMER; i++) {
583 		LACP_TIMER_DISARM(lp, i);
584 	}
585 
586 	lacp_disable_collecting(lp);
587 	lacp_disable_distributing(lp);
588 	lacp_unselect(lp);
589 
590 	LIST_REMOVE(lp, lp_next);
591 	LACP_UNLOCK(lsc);
592 
593 	/* The address may have already been removed by if_purgemaddrs() */
594 	if (!lgp->lp_detaching)
595 		if_delmulti_ifma(lp->lp_ifma);
596 
597 	free(lp, M_DEVBUF);
598 }
599 
600 void
601 lacp_req(struct lagg_softc *sc, void *data)
602 {
603 	struct lacp_opreq *req = (struct lacp_opreq *)data;
604 	struct lacp_softc *lsc = LACP_SOFTC(sc);
605 	struct lacp_aggregator *la;
606 
607 	bzero(req, sizeof(struct lacp_opreq));
608 
609 	/*
610 	 * If the LACP softc is NULL, return with the opreq structure full of
611 	 * zeros.  It is normal for the softc to be NULL while the lagg is
612 	 * being destroyed.
613 	 */
614 	if (NULL == lsc)
615 		return;
616 
617 	LACP_LOCK(lsc);
618 	la = lsc->lsc_active_aggregator;
619 	if (la != NULL) {
620 		req->actor_prio = ntohs(la->la_actor.lip_systemid.lsi_prio);
621 		memcpy(&req->actor_mac, &la->la_actor.lip_systemid.lsi_mac,
622 		    ETHER_ADDR_LEN);
623 		req->actor_key = ntohs(la->la_actor.lip_key);
624 		req->actor_portprio = ntohs(la->la_actor.lip_portid.lpi_prio);
625 		req->actor_portno = ntohs(la->la_actor.lip_portid.lpi_portno);
626 		req->actor_state = la->la_actor.lip_state;
627 
628 		req->partner_prio = ntohs(la->la_partner.lip_systemid.lsi_prio);
629 		memcpy(&req->partner_mac, &la->la_partner.lip_systemid.lsi_mac,
630 		    ETHER_ADDR_LEN);
631 		req->partner_key = ntohs(la->la_partner.lip_key);
632 		req->partner_portprio = ntohs(la->la_partner.lip_portid.lpi_prio);
633 		req->partner_portno = ntohs(la->la_partner.lip_portid.lpi_portno);
634 		req->partner_state = la->la_partner.lip_state;
635 	}
636 	LACP_UNLOCK(lsc);
637 }
638 
639 void
640 lacp_portreq(struct lagg_port *lgp, void *data)
641 {
642 	struct lacp_opreq *req = (struct lacp_opreq *)data;
643 	struct lacp_port *lp = LACP_PORT(lgp);
644 	struct lacp_softc *lsc = lp->lp_lsc;
645 
646 	LACP_LOCK(lsc);
647 	req->actor_prio = ntohs(lp->lp_actor.lip_systemid.lsi_prio);
648 	memcpy(&req->actor_mac, &lp->lp_actor.lip_systemid.lsi_mac,
649 	    ETHER_ADDR_LEN);
650 	req->actor_key = ntohs(lp->lp_actor.lip_key);
651 	req->actor_portprio = ntohs(lp->lp_actor.lip_portid.lpi_prio);
652 	req->actor_portno = ntohs(lp->lp_actor.lip_portid.lpi_portno);
653 	req->actor_state = lp->lp_actor.lip_state;
654 
655 	req->partner_prio = ntohs(lp->lp_partner.lip_systemid.lsi_prio);
656 	memcpy(&req->partner_mac, &lp->lp_partner.lip_systemid.lsi_mac,
657 	    ETHER_ADDR_LEN);
658 	req->partner_key = ntohs(lp->lp_partner.lip_key);
659 	req->partner_portprio = ntohs(lp->lp_partner.lip_portid.lpi_prio);
660 	req->partner_portno = ntohs(lp->lp_partner.lip_portid.lpi_portno);
661 	req->partner_state = lp->lp_partner.lip_state;
662 	LACP_UNLOCK(lsc);
663 }
664 
665 static void
666 lacp_disable_collecting(struct lacp_port *lp)
667 {
668 	LACP_DPRINTF((lp, "collecting disabled\n"));
669 	lp->lp_state &= ~LACP_STATE_COLLECTING;
670 }
671 
672 static void
673 lacp_enable_collecting(struct lacp_port *lp)
674 {
675 	LACP_DPRINTF((lp, "collecting enabled\n"));
676 	lp->lp_state |= LACP_STATE_COLLECTING;
677 }
678 
679 static void
680 lacp_disable_distributing(struct lacp_port *lp)
681 {
682 	struct lacp_aggregator *la = lp->lp_aggregator;
683 	struct lacp_softc *lsc = lp->lp_lsc;
684 	struct lagg_softc *sc = lsc->lsc_softc;
685 	char buf[LACP_LAGIDSTR_MAX+1];
686 
687 	LACP_LOCK_ASSERT(lsc);
688 
689 	if (la == NULL || (lp->lp_state & LACP_STATE_DISTRIBUTING) == 0) {
690 		return;
691 	}
692 
693 	KASSERT(!TAILQ_EMPTY(&la->la_ports), ("no aggregator ports"));
694 	KASSERT(la->la_nports > 0, ("nports invalid (%d)", la->la_nports));
695 	KASSERT(la->la_refcnt >= la->la_nports, ("aggregator refcnt invalid"));
696 
697 	LACP_DPRINTF((lp, "disable distributing on aggregator %s, "
698 	    "nports %d -> %d\n",
699 	    lacp_format_lagid_aggregator(la, buf, sizeof(buf)),
700 	    la->la_nports, la->la_nports - 1));
701 
702 	TAILQ_REMOVE(&la->la_ports, lp, lp_dist_q);
703 	la->la_nports--;
704 	sc->sc_active = la->la_nports;
705 
706 	if (lsc->lsc_active_aggregator == la) {
707 		lacp_suppress_distributing(lsc, la);
708 		lacp_select_active_aggregator(lsc);
709 		/* regenerate the port map; the active aggregator has changed */
710 		lacp_update_portmap(lsc);
711 	}
712 
713 	lp->lp_state &= ~LACP_STATE_DISTRIBUTING;
714 	if_link_state_change(sc->sc_ifp,
715 	    sc->sc_active ? LINK_STATE_UP : LINK_STATE_DOWN);
716 }
717 
718 static void
719 lacp_enable_distributing(struct lacp_port *lp)
720 {
721 	struct lacp_aggregator *la = lp->lp_aggregator;
722 	struct lacp_softc *lsc = lp->lp_lsc;
723 	struct lagg_softc *sc = lsc->lsc_softc;
724 	char buf[LACP_LAGIDSTR_MAX+1];
725 
726 	LACP_LOCK_ASSERT(lsc);
727 
728 	if ((lp->lp_state & LACP_STATE_DISTRIBUTING) != 0) {
729 		return;
730 	}
731 
732 	LACP_DPRINTF((lp, "enable distributing on aggregator %s, "
733 	    "nports %d -> %d\n",
734 	    lacp_format_lagid_aggregator(la, buf, sizeof(buf)),
735 	    la->la_nports, la->la_nports + 1));
736 
737 	KASSERT(la->la_refcnt > la->la_nports, ("aggregator refcnt invalid"));
738 	TAILQ_INSERT_HEAD(&la->la_ports, lp, lp_dist_q);
739 	la->la_nports++;
740 	sc->sc_active = la->la_nports;
741 
742 	lp->lp_state |= LACP_STATE_DISTRIBUTING;
743 
744 	if (lsc->lsc_active_aggregator == la) {
745 		lacp_suppress_distributing(lsc, la);
746 		lacp_update_portmap(lsc);
747 	} else
748 		/* try to become the active aggregator */
749 		lacp_select_active_aggregator(lsc);
750 
751 	if_link_state_change(sc->sc_ifp,
752 	    sc->sc_active ? LINK_STATE_UP : LINK_STATE_DOWN);
753 }
754 
755 static void
756 lacp_transit_expire(void *vp)
757 {
758 	struct lacp_softc *lsc = vp;
759 
760 	LACP_LOCK_ASSERT(lsc);
761 
762 	CURVNET_SET(lsc->lsc_softc->sc_ifp->if_vnet);
763 	LACP_TRACE(NULL);
764 	CURVNET_RESTORE();
765 
766 	lsc->lsc_suppress_distributing = FALSE;
767 }
768 
769 void
770 lacp_attach(struct lagg_softc *sc)
771 {
772 	struct lacp_softc *lsc;
773 
774 	lsc = malloc(sizeof(struct lacp_softc), M_DEVBUF, M_WAITOK | M_ZERO);
775 
776 	sc->sc_psc = lsc;
777 	lsc->lsc_softc = sc;
778 
779 	lsc->lsc_hashkey = m_ether_tcpip_hash_init();
780 	lsc->lsc_active_aggregator = NULL;
781 	lsc->lsc_strict_mode = VNET(lacp_default_strict_mode);
782 	LACP_LOCK_INIT(lsc);
783 	TAILQ_INIT(&lsc->lsc_aggregators);
784 	LIST_INIT(&lsc->lsc_ports);
785 
786 	callout_init_mtx(&lsc->lsc_transit_callout, &lsc->lsc_mtx, 0);
787 	callout_init_mtx(&lsc->lsc_callout, &lsc->lsc_mtx, 0);
788 
789 	/* if the lagg is already running then start LACP as well */
790 	if (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING)
791 		lacp_init(sc);
792 }
793 
794 void
795 lacp_detach(void *psc)
796 {
797 	struct lacp_softc *lsc = (struct lacp_softc *)psc;
798 
799 	KASSERT(TAILQ_EMPTY(&lsc->lsc_aggregators),
800 	    ("aggregators still active"));
801 	KASSERT(lsc->lsc_active_aggregator == NULL,
802 	    ("aggregator still attached"));
803 
804 	callout_drain(&lsc->lsc_transit_callout);
805 	callout_drain(&lsc->lsc_callout);
806 
807 	LACP_LOCK_DESTROY(lsc);
808 	free(lsc, M_DEVBUF);
809 }
810 
811 void
812 lacp_init(struct lagg_softc *sc)
813 {
814 	struct lacp_softc *lsc = LACP_SOFTC(sc);
815 
816 	LACP_LOCK(lsc);
817 	callout_reset(&lsc->lsc_callout, hz, lacp_tick, lsc);
818 	LACP_UNLOCK(lsc);
819 }
820 
821 void
822 lacp_stop(struct lagg_softc *sc)
823 {
824 	struct lacp_softc *lsc = LACP_SOFTC(sc);
825 
826 	LACP_LOCK(lsc);
827 	callout_stop(&lsc->lsc_transit_callout);
828 	callout_stop(&lsc->lsc_callout);
829 	LACP_UNLOCK(lsc);
830 }
831 
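/*
 * lacp_select_tx_port: pick the egress port for an outgoing mbuf by
 * hashing the flow (the mbuf's flowid if available, otherwise the
 * Ethernet/IP headers) over the active aggregator's distributing ports.
 */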
832 struct lagg_port *
833 lacp_select_tx_port(struct lagg_softc *sc, struct mbuf *m)
834 {
835 	struct lacp_softc *lsc = LACP_SOFTC(sc);
836 	struct lacp_portmap *pm;
837 	struct lacp_port *lp;
838 	struct lacp_port **map;
839 	uint32_t hash;
840 	int count;
841 
842 	if (__predict_false(lsc->lsc_suppress_distributing)) {
843 		LACP_DPRINTF((NULL, "%s: waiting transit\n", __func__));
844 		return (NULL);
845 	}
846 
847 	pm = &lsc->lsc_pmap[lsc->lsc_activemap];
848 	if (pm->pm_count == 0) {
849 		LACP_DPRINTF((NULL, "%s: no active aggregator\n", __func__));
850 		return (NULL);
851 	}
852 
853 #ifdef NUMA
854 	if ((sc->sc_opts & LAGG_OPT_USE_NUMA) &&
855 	    pm->pm_num_dom > 1 && m->m_pkthdr.numa_domain < MAXMEMDOM) {
856 		count = pm->pm_numa[m->m_pkthdr.numa_domain].count;
857 		if (count > 0) {
858 			map = pm->pm_numa[m->m_pkthdr.numa_domain].map;
859 		} else {
860 			/* No ports on this domain; use global hash. */
861 			map = pm->pm_map;
862 			count = pm->pm_count;
863 		}
864 	} else
865 #endif
866 	{
867 		map = pm->pm_map;
868 		count = pm->pm_count;
869 	}
870 	if ((sc->sc_opts & LAGG_OPT_USE_FLOWID) &&
871 	    M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
872 		hash = m->m_pkthdr.flowid >> sc->flowid_shift;
873 	else
874 		hash = m_ether_tcpip_hash(sc->sc_flags, m, lsc->lsc_hashkey);
875 
876 	hash %= count;
877 	lp = map[hash];
878 
879 	KASSERT((lp->lp_state & LACP_STATE_DISTRIBUTING) != 0,
880 	    ("aggregated port is not distributing"));
881 
882 	return (lp->lp_lagg);
883 }
884 
885 #ifdef RATELIMIT
886 struct lagg_port *
887 lacp_select_tx_port_by_hash(struct lagg_softc *sc, uint32_t flowid)
888 {
889 	struct lacp_softc *lsc = LACP_SOFTC(sc);
890 	struct lacp_portmap *pm;
891 	struct lacp_port *lp;
892 	uint32_t hash;
893 
894 	if (__predict_false(lsc->lsc_suppress_distributing)) {
895 		LACP_DPRINTF((NULL, "%s: waiting transit\n", __func__));
896 		return (NULL);
897 	}
898 
899 	pm = &lsc->lsc_pmap[lsc->lsc_activemap];
900 	if (pm->pm_count == 0) {
901 		LACP_DPRINTF((NULL, "%s: no active aggregator\n", __func__));
902 		return (NULL);
903 	}
904 
905 	hash = flowid >> sc->flowid_shift;
906 	hash %= pm->pm_count;
907 	lp = pm->pm_map[hash];
908 
909 	return (lp->lp_lagg);
910 }
911 #endif
912 
913 /*
914  * lacp_suppress_distributing: drop transmit packets for a while
915  * to preserve packet ordering.
916  */
917 
918 static void
919 lacp_suppress_distributing(struct lacp_softc *lsc, struct lacp_aggregator *la)
920 {
921 	struct lacp_port *lp;
922 
923 	if (lsc->lsc_active_aggregator != la) {
924 		return;
925 	}
926 
927 	LACP_TRACE(NULL);
928 
929 	lsc->lsc_suppress_distributing = TRUE;
930 
931 	/* send a marker frame down each port to verify the queues are empty */
932 	LIST_FOREACH(lp, &lsc->lsc_ports, lp_next) {
933 		lp->lp_flags |= LACP_PORT_MARK;
934 		lacp_xmit_marker(lp);
935 	}
936 
937 	/* set a timeout for the marker frames */
938 	callout_reset(&lsc->lsc_transit_callout,
939 	    LACP_TRANSIT_DELAY * hz / 1000, lacp_transit_expire, lsc);
940 }
941 
942 static int
943 lacp_compare_peerinfo(const struct lacp_peerinfo *a,
944     const struct lacp_peerinfo *b)
945 {
946 	return (memcmp(a, b, offsetof(struct lacp_peerinfo, lip_state)));
947 }
948 
949 static int
950 lacp_compare_systemid(const struct lacp_systemid *a,
951     const struct lacp_systemid *b)
952 {
953 	return (memcmp(a, b, sizeof(*a)));
954 }
955 
956 #if 0	/* unused */
957 static int
958 lacp_compare_portid(const struct lacp_portid *a,
959     const struct lacp_portid *b)
960 {
961 	return (memcmp(a, b, sizeof(*a)));
962 }
963 #endif
964 
965 static uint64_t
966 lacp_aggregator_bandwidth(struct lacp_aggregator *la)
967 {
968 	struct lacp_port *lp;
969 	uint64_t speed;
970 
971 	lp = TAILQ_FIRST(&la->la_ports);
972 	if (lp == NULL) {
973 		return (0);
974 	}
975 
976 	speed = ifmedia_baudrate(lp->lp_media);
977 	speed *= la->la_nports;
978 	if (speed == 0) {
979 		LACP_DPRINTF((lp, "speed 0? media=0x%x nports=%d\n",
980 		    lp->lp_media, la->la_nports));
981 	}
982 
983 	return (speed);
984 }
985 
986 /*
987  * lacp_select_active_aggregator: select the aggregator to be used to
988  * transmit packets from the lagg(4) interface.
989  */
990 
991 static void
992 lacp_select_active_aggregator(struct lacp_softc *lsc)
993 {
994 	struct lacp_aggregator *la;
995 	struct lacp_aggregator *best_la = NULL;
996 	uint64_t best_speed = 0;
997 	char buf[LACP_LAGIDSTR_MAX+1];
998 
999 	LACP_TRACE(NULL);
1000 
1001 	TAILQ_FOREACH(la, &lsc->lsc_aggregators, la_q) {
1002 		uint64_t speed;
1003 
1004 		if (la->la_nports == 0) {
1005 			continue;
1006 		}
1007 
1008 		speed = lacp_aggregator_bandwidth(la);
1009 		LACP_DPRINTF((NULL, "%s, speed=%jd, nports=%d\n",
1010 		    lacp_format_lagid_aggregator(la, buf, sizeof(buf)),
1011 		    speed, la->la_nports));
1012 
1013 		/*
1014 		 * This aggregator is chosen if the partner has a better
1015 		 * system priority, the total aggregated speed is higher,
1016 		 * or it is already the chosen aggregator.
1017 		 */
1018 		if ((best_la != NULL && LACP_SYS_PRI(la->la_partner) <
1019 		    LACP_SYS_PRI(best_la->la_partner)) ||
1020 		    speed > best_speed ||
1021 		    (speed == best_speed &&
1022 		    la == lsc->lsc_active_aggregator)) {
1023 			best_la = la;
1024 			best_speed = speed;
1025 		}
1026 	}
1027 
1028 	KASSERT(best_la == NULL || best_la->la_nports > 0,
1029 	    ("invalid aggregator refcnt"));
1030 	KASSERT(best_la == NULL || !TAILQ_EMPTY(&best_la->la_ports),
1031 	    ("invalid aggregator list"));
1032 
1033 	if (lsc->lsc_active_aggregator != best_la) {
1034 		LACP_DPRINTF((NULL, "active aggregator changed\n"));
1035 		LACP_DPRINTF((NULL, "old %s\n",
1036 		    lacp_format_lagid_aggregator(lsc->lsc_active_aggregator,
1037 		    buf, sizeof(buf))));
1038 	} else {
1039 		LACP_DPRINTF((NULL, "active aggregator not changed\n"));
1040 	}
1041 	LACP_DPRINTF((NULL, "new %s\n",
1042 	    lacp_format_lagid_aggregator(best_la, buf, sizeof(buf))));
1043 
1044 	if (lsc->lsc_active_aggregator != best_la) {
1045 		lsc->lsc_active_aggregator = best_la;
1046 		lacp_update_portmap(lsc);
1047 		if (best_la) {
1048 			lacp_suppress_distributing(lsc, best_la);
1049 		}
1050 	}
1051 }
1052 
1053 /*
1054  * Update the inactive portmap array with the new list of ports and
1055  * make it live.
1056  */
1057 static void
1058 lacp_update_portmap(struct lacp_softc *lsc)
1059 {
1060 	struct lagg_softc *sc = lsc->lsc_softc;
1061 	struct lacp_aggregator *la;
1062 	struct lacp_portmap *p;
1063 	struct lacp_port *lp;
1064 	uint64_t speed;
1065 	u_int newmap;
1066 	int i;
1067 #ifdef NUMA
1068 	int count;
1069 	uint8_t domain;
1070 #endif
1071 
1072 	newmap = lsc->lsc_activemap == 0 ? 1 : 0;
1073 	p = &lsc->lsc_pmap[newmap];
1074 	la = lsc->lsc_active_aggregator;
1075 	speed = 0;
1076 	bzero(p, sizeof(struct lacp_portmap));
1077 
1078 	if (la != NULL && la->la_nports > 0) {
1079 		p->pm_count = la->la_nports;
1080 		i = 0;
1081 		TAILQ_FOREACH(lp, &la->la_ports, lp_dist_q) {
1082 			p->pm_map[i++] = lp;
1083 #ifdef NUMA
1084 			domain = lp->lp_ifp->if_numa_domain;
1085 			if (domain >= MAXMEMDOM)
1086 				continue;
1087 			count = p->pm_numa[domain].count;
1088 			p->pm_numa[domain].map[count] = lp;
1089 			p->pm_numa[domain].count++;
1090 #endif
1091 		}
1092 		KASSERT(i == p->pm_count, ("Invalid port count"));
1093 
1094 #ifdef NUMA
1095 		for (i = 0; i < MAXMEMDOM; i++) {
1096 			if (p->pm_numa[i].count != 0)
1097 				p->pm_num_dom++;
1098 		}
1099 #endif
1100 		speed = lacp_aggregator_bandwidth(la);
1101 	}
1102 	sc->sc_ifp->if_baudrate = speed;
1103 
1104 	/* switch the active portmap over */
1105 	atomic_store_rel_int(&lsc->lsc_activemap, newmap);
1106 	LACP_DPRINTF((NULL, "Set table %d with %d ports\n",
1107 		    lsc->lsc_activemap,
1108 		    lsc->lsc_pmap[lsc->lsc_activemap].pm_count));
1109 }
1110 
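/*
 * lacp_compose_key: derive the 16-bit operational key for a port.  For
 * aggregatable ports the key combines a media speed class with bits of
 * the lagg interface's if_index; non-aggregatable ports get a key built
 * from their own if_index with the top bit set.  The key is returned in
 * network byte order.
 */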
1111 static uint16_t
1112 lacp_compose_key(struct lacp_port *lp)
1113 {
1114 	struct lagg_port *lgp = lp->lp_lagg;
1115 	struct lagg_softc *sc = lgp->lp_softc;
1116 	u_int media = lp->lp_media;
1117 	uint16_t key;
1118 
1119 	if ((lp->lp_state & LACP_STATE_AGGREGATION) == 0) {
1120 
1121 		/*
1122 		 * non-aggregatable links should have unique keys.
1123 		 *
1124 		 * XXX this isn't really unique as if_index is 16 bit.
1125 		 */
1126 
1127 		/* bit 0..14:	(some bits of) if_index of this port */
1128 		key = lp->lp_ifp->if_index;
1129 		/* bit 15:	1 */
1130 		key |= 0x8000;
1131 	} else {
1132 		u_int subtype = IFM_SUBTYPE(media);
1133 
1134 		KASSERT(IFM_TYPE(media) == IFM_ETHER, ("invalid media type"));
1135 		KASSERT((media & IFM_FDX) != 0, ("aggregating HDX interface"));
1136 
1137 		/* bit 0..4:	IFM_SUBTYPE modulo speed */
1138 		switch (subtype) {
1139 		case IFM_10_T:
1140 		case IFM_10_2:
1141 		case IFM_10_5:
1142 		case IFM_10_STP:
1143 		case IFM_10_FL:
1144 			key = IFM_10_T;
1145 			break;
1146 		case IFM_100_TX:
1147 		case IFM_100_FX:
1148 		case IFM_100_T4:
1149 		case IFM_100_VG:
1150 		case IFM_100_T2:
1151 		case IFM_100_T:
1152 		case IFM_100_SGMII:
1153 			key = IFM_100_TX;
1154 			break;
1155 		case IFM_1000_SX:
1156 		case IFM_1000_LX:
1157 		case IFM_1000_CX:
1158 		case IFM_1000_T:
1159 		case IFM_1000_KX:
1160 		case IFM_1000_SGMII:
1161 		case IFM_1000_CX_SGMII:
1162 			key = IFM_1000_SX;
1163 			break;
1164 		case IFM_10G_LR:
1165 		case IFM_10G_SR:
1166 		case IFM_10G_CX4:
1167 		case IFM_10G_TWINAX:
1168 		case IFM_10G_TWINAX_LONG:
1169 		case IFM_10G_LRM:
1170 		case IFM_10G_T:
1171 		case IFM_10G_KX4:
1172 		case IFM_10G_KR:
1173 		case IFM_10G_CR1:
1174 		case IFM_10G_ER:
1175 		case IFM_10G_SFI:
1176 		case IFM_10G_AOC:
1177 			key = IFM_10G_LR;
1178 			break;
1179 		case IFM_20G_KR2:
1180 			key = IFM_20G_KR2;
1181 			break;
1182 		case IFM_2500_KX:
1183 		case IFM_2500_T:
1184 		case IFM_2500_X:
1185 			key = IFM_2500_KX;
1186 			break;
1187 		case IFM_5000_T:
1188 		case IFM_5000_KR:
1189 		case IFM_5000_KR_S:
1190 		case IFM_5000_KR1:
1191 			key = IFM_5000_T;
1192 			break;
1193 		case IFM_50G_PCIE:
1194 		case IFM_50G_CR2:
1195 		case IFM_50G_KR2:
1196 		case IFM_50G_SR2:
1197 		case IFM_50G_LR2:
1198 		case IFM_50G_LAUI2_AC:
1199 		case IFM_50G_LAUI2:
1200 		case IFM_50G_AUI2_AC:
1201 		case IFM_50G_AUI2:
1202 		case IFM_50G_CP:
1203 		case IFM_50G_SR:
1204 		case IFM_50G_LR:
1205 		case IFM_50G_FR:
1206 		case IFM_50G_KR_PAM4:
1207 		case IFM_50G_AUI1_AC:
1208 		case IFM_50G_AUI1:
1209 			key = IFM_50G_PCIE;
1210 			break;
1211 		case IFM_56G_R4:
1212 			key = IFM_56G_R4;
1213 			break;
1214 		case IFM_25G_PCIE:
1215 		case IFM_25G_CR:
1216 		case IFM_25G_KR:
1217 		case IFM_25G_SR:
1218 		case IFM_25G_LR:
1219 		case IFM_25G_ACC:
1220 		case IFM_25G_AOC:
1221 		case IFM_25G_T:
1222 		case IFM_25G_CR_S:
1223 		case IFM_25G_CR1:
1224 		case IFM_25G_KR_S:
1225 		case IFM_25G_AUI:
1226 		case IFM_25G_KR1:
1227 			key = IFM_25G_PCIE;
1228 			break;
1229 		case IFM_40G_CR4:
1230 		case IFM_40G_SR4:
1231 		case IFM_40G_LR4:
1232 		case IFM_40G_XLPPI:
1233 		case IFM_40G_KR4:
1234 		case IFM_40G_XLAUI:
1235 		case IFM_40G_XLAUI_AC:
1236 		case IFM_40G_ER4:
1237 			key = IFM_40G_CR4;
1238 			break;
1239 		case IFM_100G_CR4:
1240 		case IFM_100G_SR4:
1241 		case IFM_100G_KR4:
1242 		case IFM_100G_LR4:
1243 		case IFM_100G_CAUI4_AC:
1244 		case IFM_100G_CAUI4:
1245 		case IFM_100G_AUI4_AC:
1246 		case IFM_100G_AUI4:
1247 		case IFM_100G_CR_PAM4:
1248 		case IFM_100G_KR_PAM4:
1249 		case IFM_100G_CP2:
1250 		case IFM_100G_SR2:
1251 		case IFM_100G_DR:
1252 		case IFM_100G_KR2_PAM4:
1253 		case IFM_100G_CAUI2_AC:
1254 		case IFM_100G_CAUI2:
1255 		case IFM_100G_AUI2_AC:
1256 		case IFM_100G_AUI2:
1257 			key = IFM_100G_CR4;
1258 			break;
1259 		case IFM_200G_CR4_PAM4:
1260 		case IFM_200G_SR4:
1261 		case IFM_200G_FR4:
1262 		case IFM_200G_LR4:
1263 		case IFM_200G_DR4:
1264 		case IFM_200G_KR4_PAM4:
1265 		case IFM_200G_AUI4_AC:
1266 		case IFM_200G_AUI4:
1267 		case IFM_200G_AUI8_AC:
1268 		case IFM_200G_AUI8:
1269 			key = IFM_200G_CR4_PAM4;
1270 			break;
1271 		case IFM_400G_FR8:
1272 		case IFM_400G_LR8:
1273 		case IFM_400G_DR4:
1274 		case IFM_400G_AUI8_AC:
1275 		case IFM_400G_AUI8:
1276 			key = IFM_400G_FR8;
1277 			break;
1278 		default:
1279 			key = subtype;
1280 			break;
1281 		}
1282 		/* bit 5..14:	(some bits of) if_index of lagg device */
1283 		key |= 0x7fe0 & ((sc->sc_ifp->if_index) << 5);
1284 		/* bit 15:	0 */
1285 	}
1286 	return (htons(key));
1287 }
1288 
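/*
 * lacp_aggregator_addref: take another reference on an aggregator on
 * behalf of a port that is joining it.
 */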
1289 static void
1290 lacp_aggregator_addref(struct lacp_softc *lsc, struct lacp_aggregator *la)
1291 {
1292 	char buf[LACP_LAGIDSTR_MAX+1];
1293 
1294 	LACP_DPRINTF((NULL, "%s: lagid=%s, refcnt %d -> %d\n",
1295 	    __func__,
1296 	    lacp_format_lagid(&la->la_actor, &la->la_partner,
1297 	    buf, sizeof(buf)),
1298 	    la->la_refcnt, la->la_refcnt + 1));
1299 
1300 	KASSERT(la->la_refcnt > 0, ("refcount <= 0"));
1301 	la->la_refcnt++;
1302 	KASSERT(la->la_refcnt > la->la_nports, ("invalid refcount"));
1303 }
1304 
1305 static void
1306 lacp_aggregator_delref(struct lacp_softc *lsc, struct lacp_aggregator *la)
1307 {
1308 	char buf[LACP_LAGIDSTR_MAX+1];
1309 
1310 	LACP_DPRINTF((NULL, "%s: lagid=%s, refcnt %d -> %d\n",
1311 	    __func__,
1312 	    lacp_format_lagid(&la->la_actor, &la->la_partner,
1313 	    buf, sizeof(buf)),
1314 	    la->la_refcnt, la->la_refcnt - 1));
1315 
1316 	KASSERT(la->la_refcnt > la->la_nports, ("invalid refcnt"));
1317 	la->la_refcnt--;
1318 	if (la->la_refcnt > 0) {
1319 		return;
1320 	}
1321 
1322 	KASSERT(la->la_refcnt == 0, ("refcount not zero"));
1323 	KASSERT(lsc->lsc_active_aggregator != la, ("aggregator active"));
1324 
1325 	TAILQ_REMOVE(&lsc->lsc_aggregators, la, la_q);
1326 
1327 	free(la, M_DEVBUF);
1328 }
1329 
1330 /*
1331  * lacp_aggregator_get: allocate an aggregator.
1332  */
1333 
1334 static struct lacp_aggregator *
1335 lacp_aggregator_get(struct lacp_softc *lsc, struct lacp_port *lp)
1336 {
1337 	struct lacp_aggregator *la;
1338 
1339 	la = malloc(sizeof(*la), M_DEVBUF, M_NOWAIT);
1340 	if (la) {
1341 		la->la_refcnt = 1;
1342 		la->la_nports = 0;
1343 		TAILQ_INIT(&la->la_ports);
1344 		la->la_pending = 0;
1345 		TAILQ_INSERT_TAIL(&lsc->lsc_aggregators, la, la_q);
1346 	}
1347 
1348 	return (la);
1349 }
1350 
1351 /*
1352  * lacp_fill_aggregator_id: set up a newly allocated aggregator from a port.
1353  */
1354 
1355 static void
1356 lacp_fill_aggregator_id(struct lacp_aggregator *la, const struct lacp_port *lp)
1357 {
1358 	lacp_fill_aggregator_id_peer(&la->la_partner, &lp->lp_partner);
1359 	lacp_fill_aggregator_id_peer(&la->la_actor, &lp->lp_actor);
1360 
1361 	la->la_actor.lip_state = lp->lp_state & LACP_STATE_AGGREGATION;
1362 }
1363 
1364 static void
1365 lacp_fill_aggregator_id_peer(struct lacp_peerinfo *lpi_aggr,
1366     const struct lacp_peerinfo *lpi_port)
1367 {
1368 	memset(lpi_aggr, 0, sizeof(*lpi_aggr));
1369 	lpi_aggr->lip_systemid = lpi_port->lip_systemid;
1370 	lpi_aggr->lip_key = lpi_port->lip_key;
1371 }
1372 
1373 /*
1374  * lacp_aggregator_is_compatible: check whether a port can join an aggregator.
1375  */
1376 
1377 static int
1378 lacp_aggregator_is_compatible(const struct lacp_aggregator *la,
1379     const struct lacp_port *lp)
1380 {
1381 	if (!(lp->lp_state & LACP_STATE_AGGREGATION) ||
1382 	    !(lp->lp_partner.lip_state & LACP_STATE_AGGREGATION)) {
1383 		return (0);
1384 	}
1385 
1386 	if (!(la->la_actor.lip_state & LACP_STATE_AGGREGATION)) {
1387 		return (0);
1388 	}
1389 
1390 	if (!lacp_peerinfo_is_compatible(&la->la_partner, &lp->lp_partner)) {
1391 		return (0);
1392 	}
1393 
1394 	if (!lacp_peerinfo_is_compatible(&la->la_actor, &lp->lp_actor)) {
1395 		return (0);
1396 	}
1397 
1398 	return (1);
1399 }
1400 
1401 static int
1402 lacp_peerinfo_is_compatible(const struct lacp_peerinfo *a,
1403     const struct lacp_peerinfo *b)
1404 {
1405 	if (memcmp(&a->lip_systemid, &b->lip_systemid,
1406 	    sizeof(a->lip_systemid))) {
1407 		return (0);
1408 	}
1409 
1410 	if (memcmp(&a->lip_key, &b->lip_key, sizeof(a->lip_key))) {
1411 		return (0);
1412 	}
1413 
1414 	return (1);
1415 }
1416 
1417 static void
1418 lacp_port_enable(struct lacp_port *lp)
1419 {
1420 	lp->lp_state |= LACP_STATE_AGGREGATION;
1421 }
1422 
1423 static void
1424 lacp_port_disable(struct lacp_port *lp)
1425 {
1426 	lacp_set_mux(lp, LACP_MUX_DETACHED);
1427 
1428 	lp->lp_state &= ~LACP_STATE_AGGREGATION;
1429 	lp->lp_selected = LACP_UNSELECTED;
1430 	lacp_sm_rx_record_default(lp);
1431 	lp->lp_partner.lip_state &= ~LACP_STATE_AGGREGATION;
1432 	lp->lp_state &= ~LACP_STATE_EXPIRED;
1433 }
1434 
1435 /*
1436  * lacp_select: select an aggregator.  Create one if necessary.
1437  */
1438 static void
1439 lacp_select(struct lacp_port *lp)
1440 {
1441 	struct lacp_softc *lsc = lp->lp_lsc;
1442 	struct lacp_aggregator *la;
1443 	char buf[LACP_LAGIDSTR_MAX+1];
1444 
1445 	if (lp->lp_aggregator) {
1446 		return;
1447 	}
1448 
1449 	/* If we haven't heard from our peer, skip this step. */
1450 	if (lp->lp_state & LACP_STATE_DEFAULTED)
1451 		return;
1452 
1453 	KASSERT(!LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE),
1454 	    ("timer_wait_while still active"));
1455 
1456 	LACP_DPRINTF((lp, "port lagid=%s\n",
1457 	    lacp_format_lagid(&lp->lp_actor, &lp->lp_partner,
1458 	    buf, sizeof(buf))));
1459 
1460 	TAILQ_FOREACH(la, &lsc->lsc_aggregators, la_q) {
1461 		if (lacp_aggregator_is_compatible(la, lp)) {
1462 			break;
1463 		}
1464 	}
1465 
1466 	if (la == NULL) {
1467 		la = lacp_aggregator_get(lsc, lp);
1468 		if (la == NULL) {
1469 			LACP_DPRINTF((lp, "aggregator creation failed\n"));
1470 
1471 			/*
1472 			 * will retry on the next tick.
1473 			 */
1474 
1475 			return;
1476 		}
1477 		lacp_fill_aggregator_id(la, lp);
1478 		LACP_DPRINTF((lp, "aggregator created\n"));
1479 	} else {
1480 		LACP_DPRINTF((lp, "compatible aggregator found\n"));
1481 		if (la->la_refcnt == LACP_MAX_PORTS)
1482 			return;
1483 		lacp_aggregator_addref(lsc, la);
1484 	}
1485 
1486 	LACP_DPRINTF((lp, "aggregator lagid=%s\n",
1487 	    lacp_format_lagid(&la->la_actor, &la->la_partner,
1488 	    buf, sizeof(buf))));
1489 
1490 	lp->lp_aggregator = la;
1491 	lp->lp_selected = LACP_SELECTED;
1492 }
1493 
1494 /*
1495  * lacp_unselect: finish unselect/detach process.
1496  */
1497 
1498 static void
1499 lacp_unselect(struct lacp_port *lp)
1500 {
1501 	struct lacp_softc *lsc = lp->lp_lsc;
1502 	struct lacp_aggregator *la = lp->lp_aggregator;
1503 
1504 	KASSERT(!LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE),
1505 	    ("timer_wait_while still active"));
1506 
1507 	if (la == NULL) {
1508 		return;
1509 	}
1510 
1511 	lp->lp_aggregator = NULL;
1512 	lacp_aggregator_delref(lsc, la);
1513 }
1514 
1515 /* mux machine */
1516 
1517 static void
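/*
 * lacp_sm_mux: run the mux state machine for a port, stepping through
 * DETACHED -> WAITING -> ATTACHED -> COLLECTING -> DISTRIBUTING (and back)
 * based on the selection result and the partner's SYNC/COLLECTING bits.
 */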
1518 lacp_sm_mux(struct lacp_port *lp)
1519 {
1520 	struct lagg_port *lgp = lp->lp_lagg;
1521 	struct lagg_softc *sc = lgp->lp_softc;
1522 	enum lacp_mux_state new_state;
1523 	boolean_t p_sync =
1524 		    (lp->lp_partner.lip_state & LACP_STATE_SYNC) != 0;
1525 	boolean_t p_collecting =
1526 	    (lp->lp_partner.lip_state & LACP_STATE_COLLECTING) != 0;
1527 	enum lacp_selected selected = lp->lp_selected;
1528 	struct lacp_aggregator *la;
1529 
1530 	if (V_lacp_debug > 1)
1531 		lacp_dprintf(lp, "%s: state= 0x%x, selected= 0x%x, "
1532 		    "p_sync= 0x%x, p_collecting= 0x%x\n", __func__,
1533 		    lp->lp_mux_state, selected, p_sync, p_collecting);
1534 
1535 re_eval:
1536 	la = lp->lp_aggregator;
1537 	KASSERT(lp->lp_mux_state == LACP_MUX_DETACHED || la != NULL,
1538 	    ("MUX not detached"));
1539 	new_state = lp->lp_mux_state;
1540 	switch (lp->lp_mux_state) {
1541 	case LACP_MUX_DETACHED:
1542 		if (selected != LACP_UNSELECTED) {
1543 			new_state = LACP_MUX_WAITING;
1544 		}
1545 		break;
1546 	case LACP_MUX_WAITING:
1547 		KASSERT(la->la_pending > 0 ||
1548 		    !LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE),
1549 		    ("timer_wait_while still active"));
1550 		if (selected == LACP_SELECTED && la->la_pending == 0) {
1551 			new_state = LACP_MUX_ATTACHED;
1552 		} else if (selected == LACP_UNSELECTED) {
1553 			new_state = LACP_MUX_DETACHED;
1554 		}
1555 		break;
1556 	case LACP_MUX_ATTACHED:
1557 		if (selected == LACP_SELECTED && p_sync) {
1558 			new_state = LACP_MUX_COLLECTING;
1559 		} else if (selected != LACP_SELECTED) {
1560 			new_state = LACP_MUX_DETACHED;
1561 		}
1562 		break;
1563 	case LACP_MUX_COLLECTING:
1564 		if (selected == LACP_SELECTED && p_sync && p_collecting) {
1565 			new_state = LACP_MUX_DISTRIBUTING;
1566 		} else if (selected != LACP_SELECTED || !p_sync) {
1567 			new_state = LACP_MUX_ATTACHED;
1568 		}
1569 		break;
1570 	case LACP_MUX_DISTRIBUTING:
1571 		if (selected != LACP_SELECTED || !p_sync || !p_collecting) {
1572 			new_state = LACP_MUX_COLLECTING;
1573 			lacp_dprintf(lp, "Interface stopped DISTRIBUTING, possible flapping\n");
1574 			sc->sc_flapping++;
1575 		}
1576 		break;
1577 	default:
1578 		panic("%s: unknown state", __func__);
1579 	}
1580 
1581 	if (lp->lp_mux_state == new_state) {
1582 		return;
1583 	}
1584 
1585 	lacp_set_mux(lp, new_state);
1586 	goto re_eval;
1587 }
1588 
1589 static void
1590 lacp_set_mux(struct lacp_port *lp, enum lacp_mux_state new_state)
1591 {
1592 	struct lacp_aggregator *la = lp->lp_aggregator;
1593 
1594 	if (lp->lp_mux_state == new_state) {
1595 		return;
1596 	}
1597 
1598 	switch (new_state) {
1599 	case LACP_MUX_DETACHED:
1600 		lp->lp_state &= ~LACP_STATE_SYNC;
1601 		lacp_disable_distributing(lp);
1602 		lacp_disable_collecting(lp);
1603 		lacp_sm_assert_ntt(lp);
1604 		/* cancel timer */
1605 		if (LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE)) {
1606 			KASSERT(la->la_pending > 0,
1607 			    ("timer_wait_while not active"));
1608 			la->la_pending--;
1609 		}
1610 		LACP_TIMER_DISARM(lp, LACP_TIMER_WAIT_WHILE);
1611 		lacp_unselect(lp);
1612 		break;
1613 	case LACP_MUX_WAITING:
1614 		LACP_TIMER_ARM(lp, LACP_TIMER_WAIT_WHILE,
1615 		    LACP_AGGREGATE_WAIT_TIME);
1616 		la->la_pending++;
1617 		break;
1618 	case LACP_MUX_ATTACHED:
1619 		lp->lp_state |= LACP_STATE_SYNC;
1620 		lacp_disable_collecting(lp);
1621 		lacp_sm_assert_ntt(lp);
1622 		break;
1623 	case LACP_MUX_COLLECTING:
1624 		lacp_enable_collecting(lp);
1625 		lacp_disable_distributing(lp);
1626 		lacp_sm_assert_ntt(lp);
1627 		break;
1628 	case LACP_MUX_DISTRIBUTING:
1629 		lacp_enable_distributing(lp);
1630 		break;
1631 	default:
1632 		panic("%s: unknown state", __func__);
1633 	}
1634 
1635 	LACP_DPRINTF((lp, "mux_state %d -> %d\n", lp->lp_mux_state, new_state));
1636 
1637 	lp->lp_mux_state = new_state;
1638 }
1639 
1640 static void
1641 lacp_sm_mux_timer(struct lacp_port *lp)
1642 {
1643 	struct lacp_aggregator *la = lp->lp_aggregator;
1644 	char buf[LACP_LAGIDSTR_MAX+1];
1645 
1646 	KASSERT(la->la_pending > 0, ("no pending event"));
1647 
1648 	LACP_DPRINTF((lp, "%s: aggregator %s, pending %d -> %d\n", __func__,
1649 	    lacp_format_lagid(&la->la_actor, &la->la_partner,
1650 	    buf, sizeof(buf)),
1651 	    la->la_pending, la->la_pending - 1));
1652 
1653 	la->la_pending--;
1654 }
1655 
1656 /* periodic transmit machine */
1657 
1658 static void
1659 lacp_sm_ptx_update_timeout(struct lacp_port *lp, uint8_t oldpstate)
1660 {
1661 	if (LACP_STATE_EQ(oldpstate, lp->lp_partner.lip_state,
1662 	    LACP_STATE_TIMEOUT)) {
1663 		return;
1664 	}
1665 
1666 	LACP_DPRINTF((lp, "partner timeout changed\n"));
1667 
1668 	/*
1669 	 * FAST_PERIODIC -> SLOW_PERIODIC
1670 	 * or
1671 	 * SLOW_PERIODIC (-> PERIODIC_TX) -> FAST_PERIODIC
1672 	 *
1673 	 * Let lacp_sm_ptx_tx_schedule() update the timeout.
1674 	 */
1675 
1676 	LACP_TIMER_DISARM(lp, LACP_TIMER_PERIODIC);
1677 
1678 	/*
1679 	 * If the timeout has been shortened, assert NTT.
1680 	 */
1681 
1682 	if ((lp->lp_partner.lip_state & LACP_STATE_TIMEOUT)) {
1683 		lacp_sm_assert_ntt(lp);
1684 	}
1685 }
1686 
1687 static void
1688 lacp_sm_ptx_tx_schedule(struct lacp_port *lp)
1689 {
1690 	int timeout;
1691 
1692 	if (!(lp->lp_state & LACP_STATE_ACTIVITY) &&
1693 	    !(lp->lp_partner.lip_state & LACP_STATE_ACTIVITY)) {
1694 
1695 		/*
1696 		 * NO_PERIODIC
1697 		 */
1698 
1699 		LACP_TIMER_DISARM(lp, LACP_TIMER_PERIODIC);
1700 		return;
1701 	}
1702 
1703 	if (LACP_TIMER_ISARMED(lp, LACP_TIMER_PERIODIC)) {
1704 		return;
1705 	}
1706 
1707 	timeout = (lp->lp_partner.lip_state & LACP_STATE_TIMEOUT) ?
1708 	    LACP_FAST_PERIODIC_TIME : LACP_SLOW_PERIODIC_TIME;
1709 
1710 	LACP_TIMER_ARM(lp, LACP_TIMER_PERIODIC, timeout);
1711 }
1712 
1713 static void
1714 lacp_sm_ptx_timer(struct lacp_port *lp)
1715 {
1716 	lacp_sm_assert_ntt(lp);
1717 }
1718 
1719 static void
1720 lacp_sm_rx(struct lacp_port *lp, const struct lacpdu *du)
1721 {
1722 	int timeout;
1723 
1724 	/*
1725 	 * check LACP_DISABLED first
1726 	 */
1727 
1728 	if (!(lp->lp_state & LACP_STATE_AGGREGATION)) {
1729 		return;
1730 	}
1731 
1732 	/*
1733 	 * check loopback condition.
1734 	 */
1735 
1736 	if (!lacp_compare_systemid(&du->ldu_actor.lip_systemid,
1737 	    &lp->lp_actor.lip_systemid)) {
1738 		return;
1739 	}
1740 
1741 	/*
1742 	 * EXPIRED, DEFAULTED, CURRENT -> CURRENT
1743 	 */
1744 
1745 	lacp_sm_rx_update_selected(lp, du);
1746 	lacp_sm_rx_update_ntt(lp, du);
1747 	lacp_sm_rx_record_pdu(lp, du);
1748 
1749 	timeout = (lp->lp_state & LACP_STATE_TIMEOUT) ?
1750 	    LACP_SHORT_TIMEOUT_TIME : LACP_LONG_TIMEOUT_TIME;
1751 	LACP_TIMER_ARM(lp, LACP_TIMER_CURRENT_WHILE, timeout);
1752 
1753 	lp->lp_state &= ~LACP_STATE_EXPIRED;
1754 
1755 	/*
1756 	 * Kick the transmit machine without waiting for the next tick.
1757 	 */
1758 
1759 	lacp_sm_tx(lp);
1760 }
1761 
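/*
 * lacp_sm_rx_set_expired: enter the EXPIRED state: assume the partner is
 * out of sync and using the short timeout, and arm the current_while
 * timer with the short timeout.
 */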
1762 static void
1763 lacp_sm_rx_set_expired(struct lacp_port *lp)
1764 {
1765 	lp->lp_partner.lip_state &= ~LACP_STATE_SYNC;
1766 	lp->lp_partner.lip_state |= LACP_STATE_TIMEOUT;
1767 	LACP_TIMER_ARM(lp, LACP_TIMER_CURRENT_WHILE, LACP_SHORT_TIMEOUT_TIME);
1768 	lp->lp_state |= LACP_STATE_EXPIRED;
1769 }
1770 
1771 static void
1772 lacp_sm_rx_timer(struct lacp_port *lp)
1773 {
1774 	if ((lp->lp_state & LACP_STATE_EXPIRED) == 0) {
1775 		/* CURRENT -> EXPIRED */
1776 		LACP_DPRINTF((lp, "%s: CURRENT -> EXPIRED\n", __func__));
1777 		lacp_sm_rx_set_expired(lp);
1778 	} else {
1779 		/* EXPIRED -> DEFAULTED */
1780 		LACP_DPRINTF((lp, "%s: EXPIRED -> DEFAULTED\n", __func__));
1781 		lacp_sm_rx_update_default_selected(lp);
1782 		lacp_sm_rx_record_default(lp);
1783 		lp->lp_state &= ~LACP_STATE_EXPIRED;
1784 	}
1785 }
1786 
1787 static void
1788 lacp_sm_rx_record_pdu(struct lacp_port *lp, const struct lacpdu *du)
1789 {
1790 	boolean_t active;
1791 	uint8_t oldpstate;
1792 	char buf[LACP_STATESTR_MAX+1];
1793 
1794 	LACP_TRACE(lp);
1795 
1796 	oldpstate = lp->lp_partner.lip_state;
1797 
1798 	active = (du->ldu_actor.lip_state & LACP_STATE_ACTIVITY)
1799 	    || ((lp->lp_state & LACP_STATE_ACTIVITY) &&
1800 	    (du->ldu_partner.lip_state & LACP_STATE_ACTIVITY));
1801 
1802 	lp->lp_partner = du->ldu_actor;
1803 	if (active &&
1804 	    ((LACP_STATE_EQ(lp->lp_state, du->ldu_partner.lip_state,
1805 	    LACP_STATE_AGGREGATION) &&
1806 	    !lacp_compare_peerinfo(&lp->lp_actor, &du->ldu_partner))
1807 	    || (du->ldu_partner.lip_state & LACP_STATE_AGGREGATION) == 0)) {
1808 		/*
1809 		 * XXX Maintain legacy behavior of leaving the
1810 		 * LACP_STATE_SYNC bit unchanged from the partner's
1811 		 * advertisement if lsc_strict_mode is false.
1812 		 * TODO: We should re-examine the concept of the "strict mode"
1813 		 * to ensure it makes sense to maintain a non-strict mode.
1814 		 */
1815 		if (lp->lp_lsc->lsc_strict_mode)
1816 			lp->lp_partner.lip_state |= LACP_STATE_SYNC;
1817 	} else {
1818 		lp->lp_partner.lip_state &= ~LACP_STATE_SYNC;
1819 	}
1820 
1821 	lp->lp_state &= ~LACP_STATE_DEFAULTED;
1822 
1823 	if (oldpstate != lp->lp_partner.lip_state) {
1824 		LACP_DPRINTF((lp, "old pstate %s\n",
1825 		    lacp_format_state(oldpstate, buf, sizeof(buf))));
1826 		LACP_DPRINTF((lp, "new pstate %s\n",
1827 		    lacp_format_state(lp->lp_partner.lip_state, buf,
1828 		    sizeof(buf))));
1829 	}
1830 
1831 	lacp_sm_ptx_update_timeout(lp, oldpstate);
1832 }
1833 
1834 static void
1835 lacp_sm_rx_update_ntt(struct lacp_port *lp, const struct lacpdu *du)
1836 {
1837 
1838 	LACP_TRACE(lp);
1839 
1840 	if (lacp_compare_peerinfo(&lp->lp_actor, &du->ldu_partner) ||
1841 	    !LACP_STATE_EQ(lp->lp_state, du->ldu_partner.lip_state,
1842 	    LACP_STATE_ACTIVITY | LACP_STATE_SYNC | LACP_STATE_AGGREGATION)) {
1843 		LACP_DPRINTF((lp, "%s: assert ntt\n", __func__));
1844 		lacp_sm_assert_ntt(lp);
1845 	}
1846 }
1847 
1848 static void
1849 lacp_sm_rx_record_default(struct lacp_port *lp)
1850 {
1851 	uint8_t oldpstate;
1852 
1853 	LACP_TRACE(lp);
1854 
1855 	oldpstate = lp->lp_partner.lip_state;
1856 	if (lp->lp_lsc->lsc_strict_mode)
1857 		lp->lp_partner = lacp_partner_admin_strict;
1858 	else
1859 		lp->lp_partner = lacp_partner_admin_optimistic;
1860 	lp->lp_state |= LACP_STATE_DEFAULTED;
1861 	lacp_sm_ptx_update_timeout(lp, oldpstate);
1862 }
1863 
1864 static void
1865 lacp_sm_rx_update_selected_from_peerinfo(struct lacp_port *lp,
1866     const struct lacp_peerinfo *info)
1867 {
1868 
1869 	LACP_TRACE(lp);
1870 
1871 	if (lacp_compare_peerinfo(&lp->lp_partner, info) ||
1872 	    !LACP_STATE_EQ(lp->lp_partner.lip_state, info->lip_state,
1873 	    LACP_STATE_AGGREGATION)) {
1874 		lp->lp_selected = LACP_UNSELECTED;
1875 		/* mux machine will clean up lp->lp_aggregator */
1876 	}
1877 }
1878 
1879 static void
1880 lacp_sm_rx_update_selected(struct lacp_port *lp, const struct lacpdu *du)
1881 {
1882 
1883 	LACP_TRACE(lp);
1884 
1885 	lacp_sm_rx_update_selected_from_peerinfo(lp, &du->ldu_actor);
1886 }
1887 
1888 static void
1889 lacp_sm_rx_update_default_selected(struct lacp_port *lp)
1890 {
1891 
1892 	LACP_TRACE(lp);
1893 
1894 	if (lp->lp_lsc->lsc_strict_mode)
1895 		lacp_sm_rx_update_selected_from_peerinfo(lp,
1896 		    &lacp_partner_admin_strict);
1897 	else
1898 		lacp_sm_rx_update_selected_from_peerinfo(lp,
1899 		    &lacp_partner_admin_optimistic);
1900 }
1901 
1902 /* transmit machine */
1903 
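/*
 * Transmit an LACPDU on the port if one is needed.  The NTT flag is
 * cancelled when the port is not aggregatable or when both the actor
 * and the partner are passive.  Transmissions are rate limited with
 * ppsratecheck(); on success LACP_PORT_NTT is cleared, on failure it is
 * left set so that the transmission is retried later.  The lsc_tx_test
 * debug mask allows PDUs to be dropped for testing.
 */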
1904 static void
1905 lacp_sm_tx(struct lacp_port *lp)
1906 {
1907 	int error = 0;
1908 
1909 	if (!(lp->lp_state & LACP_STATE_AGGREGATION)
1910 #if 1
1911 	    || (!(lp->lp_state & LACP_STATE_ACTIVITY)
1912 	    && !(lp->lp_partner.lip_state & LACP_STATE_ACTIVITY))
1913 #endif
1914 	    ) {
1915 		lp->lp_flags &= ~LACP_PORT_NTT;
1916 	}
1917 
1918 	if (!(lp->lp_flags & LACP_PORT_NTT)) {
1919 		return;
1920 	}
1921 
1922 	/* Rate limit to 3 PDUs per LACP_FAST_PERIODIC_TIME seconds */
1923 	if (ppsratecheck(&lp->lp_last_lacpdu, &lp->lp_lacpdu_sent,
1924 		    (3 / LACP_FAST_PERIODIC_TIME)) == 0) {
1925 		LACP_DPRINTF((lp, "rate limited pdu\n"));
1926 		return;
1927 	}
1928 
1929 	if (((1 << lp->lp_ifp->if_dunit) & lp->lp_lsc->lsc_debug.lsc_tx_test) == 0) {
1930 		error = lacp_xmit_lacpdu(lp);
1931 	} else {
1932 		LACP_TPRINTF((lp, "Dropping TX PDU\n"));
1933 	}
1934 
1935 	if (error == 0) {
1936 		lp->lp_flags &= ~LACP_PORT_NTT;
1937 	} else {
1938 		LACP_DPRINTF((lp, "lacpdu transmit failure, error %d\n",
1939 		    error));
1940 	}
1941 }
1942 
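/*
 * Note that an LACPDU needs to be transmitted at the next opportunity.
 */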
1943 static void
1944 lacp_sm_assert_ntt(struct lacp_port *lp)
1945 {
1946 
1947 	lp->lp_flags |= LACP_PORT_NTT;
1948 }
1949 
1950 static void
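/*
 * Count down every running per-port timer and call the corresponding
 * expiry function once a timer reaches zero.
 */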
1951 lacp_run_timers(struct lacp_port *lp)
1952 {
1953 	int i;
1954 
1955 	for (i = 0; i < LACP_NTIMER; i++) {
1956 		KASSERT(lp->lp_timer[i] >= 0,
1957 		    ("invalid timer value %d", lp->lp_timer[i]));
1958 		if (lp->lp_timer[i] == 0) {
1959 			continue;
1960 		} else if (--lp->lp_timer[i] <= 0) {
1961 			if (lacp_timer_funcs[i]) {
1962 				(*lacp_timer_funcs[i])(lp);
1963 			}
1964 		}
1965 	}
1966 }
1967 
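/*
 * Input handler for marker protocol PDUs.  A Marker Information PDU is
 * converted into a Marker Response and echoed back out the port it
 * arrived on.  A Marker Response matching the marker we last sent clears
 * LACP_PORT_MARK on the port; once no port is waiting for a response any
 * longer, distribution that was suppressed while the port queues drained
 * is re-enabled.  Malformed frames are dropped with EINVAL.
 */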
1968 int
1969 lacp_marker_input(struct lacp_port *lp, struct mbuf *m)
1970 {
1971 	struct lacp_softc *lsc = lp->lp_lsc;
1972 	struct lagg_port *lgp = lp->lp_lagg;
1973 	struct lacp_port *lp2;
1974 	struct markerdu *mdu;
1975 	int error = 0;
1976 	int pending = 0;
1977 
1978 	if (m->m_pkthdr.len != sizeof(*mdu)) {
1979 		goto bad;
1980 	}
1981 
1982 	if ((m->m_flags & M_MCAST) == 0) {
1983 		goto bad;
1984 	}
1985 
1986 	if (m->m_len < sizeof(*mdu)) {
1987 		m = m_pullup(m, sizeof(*mdu));
1988 		if (m == NULL) {
1989 			return (ENOMEM);
1990 		}
1991 	}
1992 
1993 	mdu = mtod(m, struct markerdu *);
1994 
1995 	if (memcmp(&mdu->mdu_eh.ether_dhost,
1996 	    &ethermulticastaddr_slowprotocols, ETHER_ADDR_LEN)) {
1997 		goto bad;
1998 	}
1999 
2000 	if (mdu->mdu_sph.sph_version != 1) {
2001 		goto bad;
2002 	}
2003 
2004 	switch (mdu->mdu_tlv.tlv_type) {
2005 	case MARKER_TYPE_INFO:
2006 		if (tlv_check(mdu, sizeof(*mdu), &mdu->mdu_tlv,
2007 		    marker_info_tlv_template, TRUE)) {
2008 			goto bad;
2009 		}
2010 		mdu->mdu_tlv.tlv_type = MARKER_TYPE_RESPONSE;
2011 		memcpy(&mdu->mdu_eh.ether_dhost,
2012 		    &ethermulticastaddr_slowprotocols, ETHER_ADDR_LEN);
2013 		memcpy(&mdu->mdu_eh.ether_shost,
2014 		    lgp->lp_lladdr, ETHER_ADDR_LEN);
2015 		error = lagg_enqueue(lp->lp_ifp, m);
2016 		break;
2017 
2018 	case MARKER_TYPE_RESPONSE:
2019 		if (tlv_check(mdu, sizeof(*mdu), &mdu->mdu_tlv,
2020 		    marker_response_tlv_template, TRUE)) {
2021 			goto bad;
2022 		}
2023 		LACP_DPRINTF((lp, "marker response, port=%u, sys=%6D, id=%u\n",
2024 		    ntohs(mdu->mdu_info.mi_rq_port), mdu->mdu_info.mi_rq_system,
2025 		    ":", ntohl(mdu->mdu_info.mi_rq_xid)));
2026 
2027 		/* Verify that it is the last marker we sent out */
2028 		if (memcmp(&mdu->mdu_info, &lp->lp_marker,
2029 		    sizeof(struct lacp_markerinfo)))
2030 			goto bad;
2031 
2032 		LACP_LOCK(lsc);
2033 		lp->lp_flags &= ~LACP_PORT_MARK;
2034 
2035 		if (lsc->lsc_suppress_distributing) {
2036 			/* Check if any ports are waiting for a response */
2037 			LIST_FOREACH(lp2, &lsc->lsc_ports, lp_next) {
2038 				if (lp2->lp_flags & LACP_PORT_MARK) {
2039 					pending = 1;
2040 					break;
2041 				}
2042 			}
2043 
2044 			if (pending == 0) {
2045 				/* All interface queues are clear */
2046 				LACP_DPRINTF((NULL, "queue flush complete\n"));
2047 				lsc->lsc_suppress_distributing = FALSE;
2048 			}
2049 		}
2050 		LACP_UNLOCK(lsc);
2051 		m_freem(m);
2052 		break;
2053 
2054 	default:
2055 		goto bad;
2056 	}
2057 
2058 	return (error);
2059 
2060 bad:
2061 	LACP_DPRINTF((lp, "bad marker frame\n"));
2062 	m_freem(m);
2063 	return (EINVAL);
2064 }
2065 
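/*
 * Validate a chain of TLVs against a template.  Each TLV header must fit
 * within the supplied buffer and carry the length (and, when check_type
 * is set, the type) given by the template, up to and including the
 * terminating zero entry.  Returns 0 if the chain matches and EINVAL
 * otherwise.
 */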
2066 static int
2067 tlv_check(const void *p, size_t size, const struct tlvhdr *tlv,
2068     const struct tlv_template *tmpl, boolean_t check_type)
2069 {
2070 	while (/* CONSTCOND */ 1) {
2071 		if ((const char *)tlv - (const char *)p + sizeof(*tlv) > size) {
2072 			return (EINVAL);
2073 		}
2074 		if ((check_type && tlv->tlv_type != tmpl->tmpl_type) ||
2075 		    tlv->tlv_length != tmpl->tmpl_length) {
2076 			return (EINVAL);
2077 		}
2078 		if (tmpl->tmpl_type == 0) {
2079 			break;
2080 		}
2081 		tlv = (const struct tlvhdr *)
2082 		    ((const char *)tlv + tlv->tlv_length);
2083 		tmpl++;
2084 	}
2085 
2086 	return (0);
2087 }
2088 
2089 /* Debugging */
2090 const char *
2091 lacp_format_mac(const uint8_t *mac, char *buf, size_t buflen)
2092 {
2093 	snprintf(buf, buflen, "%02X-%02X-%02X-%02X-%02X-%02X",
2094 	    (int)mac[0],
2095 	    (int)mac[1],
2096 	    (int)mac[2],
2097 	    (int)mac[3],
2098 	    (int)mac[4],
2099 	    (int)mac[5]);
2100 
2101 	return (buf);
2102 }
2103 
2104 const char *
2105 lacp_format_systemid(const struct lacp_systemid *sysid,
2106     char *buf, size_t buflen)
2107 {
2108 	char macbuf[LACP_MACSTR_MAX+1];
2109 
2110 	snprintf(buf, buflen, "%04X,%s",
2111 	    ntohs(sysid->lsi_prio),
2112 	    lacp_format_mac(sysid->lsi_mac, macbuf, sizeof(macbuf)));
2113 
2114 	return (buf);
2115 }
2116 
2117 const char *
2118 lacp_format_portid(const struct lacp_portid *portid, char *buf, size_t buflen)
2119 {
2120 	snprintf(buf, buflen, "%04X,%04X",
2121 	    ntohs(portid->lpi_prio),
2122 	    ntohs(portid->lpi_portno));
2123 
2124 	return (buf);
2125 }
2126 
2127 const char *
2128 lacp_format_partner(const struct lacp_peerinfo *peer, char *buf, size_t buflen)
2129 {
2130 	char sysid[LACP_SYSTEMIDSTR_MAX+1];
2131 	char portid[LACP_PORTIDSTR_MAX+1];
2132 
2133 	snprintf(buf, buflen, "(%s,%04X,%s)",
2134 	    lacp_format_systemid(&peer->lip_systemid, sysid, sizeof(sysid)),
2135 	    ntohs(peer->lip_key),
2136 	    lacp_format_portid(&peer->lip_portid, portid, sizeof(portid)));
2137 
2138 	return (buf);
2139 }
2140 
2141 const char *
2142 lacp_format_lagid(const struct lacp_peerinfo *a,
2143     const struct lacp_peerinfo *b, char *buf, size_t buflen)
2144 {
2145 	char astr[LACP_PARTNERSTR_MAX+1];
2146 	char bstr[LACP_PARTNERSTR_MAX+1];
2147 
2148 #if 0
2149 	/*
2150 	 * there is a convention of displaying the lower-numbered
2151 	 * peer on the left.
2152 	 */
2153 
2154 	if (lacp_compare_peerinfo(a, b) > 0) {
2155 		const struct lacp_peerinfo *t;
2156 
2157 		t = a;
2158 		a = b;
2159 		b = t;
2160 	}
2161 #endif
2162 
2163 	snprintf(buf, buflen, "[%s,%s]",
2164 	    lacp_format_partner(a, astr, sizeof(astr)),
2165 	    lacp_format_partner(b, bstr, sizeof(bstr)));
2166 
2167 	return (buf);
2168 }
2169 
2170 const char *
2171 lacp_format_lagid_aggregator(const struct lacp_aggregator *la,
2172     char *buf, size_t buflen)
2173 {
2174 	if (la == NULL) {
2175 		return ("(none)");
2176 	}
2177 
2178 	return (lacp_format_lagid(&la->la_actor, &la->la_partner, buf, buflen));
2179 }
2180 
2181 const char *
2182 lacp_format_state(uint8_t state, char *buf, size_t buflen)
2183 {
2184 	snprintf(buf, buflen, "%b", state, LACP_STATE_BITS);
2185 	return (buf);
2186 }
2187 
2188 static void
2189 lacp_dump_lacpdu(const struct lacpdu *du)
2190 {
2191 	char buf[LACP_PARTNERSTR_MAX+1];
2192 	char buf2[LACP_STATESTR_MAX+1];
2193 
2194 	printf("actor=%s\n",
2195 	    lacp_format_partner(&du->ldu_actor, buf, sizeof(buf)));
2196 	printf("actor.state=%s\n",
2197 	    lacp_format_state(du->ldu_actor.lip_state, buf2, sizeof(buf2)));
2198 	printf("partner=%s\n",
2199 	    lacp_format_partner(&du->ldu_partner, buf, sizeof(buf)));
2200 	printf("partner.state=%s\n",
2201 	    lacp_format_state(du->ldu_partner.lip_state, buf2, sizeof(buf2)));
2202 
2203 	printf("maxdelay=%d\n", ntohs(du->ldu_collector.lci_maxdelay));
2204 }
2205 
2206 static void
2207 lacp_dprintf(const struct lacp_port *lp, const char *fmt, ...)
2208 {
2209 	va_list va;
2210 
2211 	if (lp) {
2212 		printf("%s: ", lp->lp_ifp->if_xname);
2213 	}
2214 
2215 	va_start(va, fmt);
2216 	vprintf(fmt, va);
2217 	va_end(va);
2218 }
2219