1 /* $NetBSD: ieee8023ad_lacp.c,v 1.3 2005/12/11 12:24:54 christos Exp $ */
2
3 /*-
4 * SPDX-License-Identifier: BSD-2-Clause
5 *
6 * Copyright (c)2005 YAMAMOTO Takashi,
7 * Copyright (c)2008 Andrew Thompson <thompsa@FreeBSD.org>
8 * All rights reserved.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 */
31
32 #include <sys/cdefs.h>
33 #include "opt_kern_tls.h"
34 #include "opt_ratelimit.h"
35
36 #include <sys/param.h>
37 #include <sys/callout.h>
38 #include <sys/eventhandler.h>
39 #include <sys/mbuf.h>
40 #include <sys/systm.h>
41 #include <sys/malloc.h>
42 #include <sys/kernel.h> /* hz */
43 #include <sys/socket.h> /* for net/if.h */
44 #include <sys/sockio.h>
45 #include <sys/stdarg.h>
46 #include <sys/sysctl.h>
47 #include <sys/lock.h>
48 #include <sys/rwlock.h>
49 #include <sys/taskqueue.h>
50 #include <sys/time.h>
51
52 #include <net/if.h>
53 #include <net/if_var.h>
54 #include <net/if_private.h>
55 #include <net/if_dl.h>
56 #include <net/ethernet.h>
57 #include <net/infiniband.h>
58 #include <net/if_media.h>
59 #include <net/if_types.h>
60
61 #include <net/if_lagg.h>
62 #include <net/ieee8023ad_lacp.h>
63
64 /*
65 * actor system priority and port priority.
66 * XXX should be configurable.
67 */
68
69 #define LACP_SYSTEM_PRIO 0x8000
70 #define LACP_PORT_PRIO 0x8000
71
72 const uint8_t ethermulticastaddr_slowprotocols[ETHER_ADDR_LEN] =
73 { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x02 };
74
75 static const struct tlv_template lacp_info_tlv_template[] = {
76 { LACP_TYPE_ACTORINFO,
77 sizeof(struct tlvhdr) + sizeof(struct lacp_peerinfo) },
78 { LACP_TYPE_PARTNERINFO,
79 sizeof(struct tlvhdr) + sizeof(struct lacp_peerinfo) },
80 { LACP_TYPE_COLLECTORINFO,
81 sizeof(struct tlvhdr) + sizeof(struct lacp_collectorinfo) },
82 { 0, 0 },
83 };
84
85 static const struct tlv_template marker_info_tlv_template[] = {
86 { MARKER_TYPE_INFO,
87 sizeof(struct tlvhdr) + sizeof(struct lacp_markerinfo) },
88 { 0, 0 },
89 };
90
91 static const struct tlv_template marker_response_tlv_template[] = {
92 { MARKER_TYPE_RESPONSE,
93 sizeof(struct tlvhdr) + sizeof(struct lacp_markerinfo) },
94 { 0, 0 },
95 };
96
97 typedef void (*lacp_timer_func_t)(struct lacp_port *);
98
99 static void lacp_fill_actorinfo(struct lacp_port *, struct lacp_peerinfo *);
100 static void lacp_fill_markerinfo(struct lacp_port *,
101 struct lacp_markerinfo *);
102
103 static uint64_t lacp_aggregator_bandwidth(struct lacp_aggregator *);
104 static void lacp_suppress_distributing(struct lacp_softc *,
105 struct lacp_aggregator *);
106 static void lacp_transit_expire(void *);
107 static void lacp_update_portmap(struct lacp_softc *);
108 static void lacp_select_active_aggregator(struct lacp_softc *);
109 static uint16_t lacp_compose_key(struct lacp_port *);
110 static int tlv_check(const void *, size_t, const struct tlvhdr *,
111 const struct tlv_template *, boolean_t);
112 static void lacp_tick(void *);
113
114 static void lacp_fill_aggregator_id(struct lacp_aggregator *,
115 const struct lacp_port *);
116 static void lacp_fill_aggregator_id_peer(struct lacp_peerinfo *,
117 const struct lacp_peerinfo *);
118 static bool lacp_aggregator_is_compatible(const struct lacp_aggregator *,
119 const struct lacp_port *);
120 static bool lacp_peerinfo_is_compatible(const struct lacp_peerinfo *,
121 const struct lacp_peerinfo *);
122
123 static struct lacp_aggregator *lacp_aggregator_get(struct lacp_softc *,
124 struct lacp_port *);
125 static void lacp_aggregator_addref(struct lacp_softc *,
126 struct lacp_aggregator *);
127 static void lacp_aggregator_delref(struct lacp_softc *,
128 struct lacp_aggregator *);
129
130 /* receive machine */
131
132 static int lacp_pdu_input(struct lacp_port *, struct mbuf *);
133 static int lacp_marker_input(struct lacp_port *, struct mbuf *);
134 static void lacp_sm_rx(struct lacp_port *, const struct lacpdu *);
135 static void lacp_sm_rx_timer(struct lacp_port *);
136 static void lacp_sm_rx_set_expired(struct lacp_port *);
137 static void lacp_sm_rx_update_ntt(struct lacp_port *,
138 const struct lacpdu *);
139 static void lacp_sm_rx_record_pdu(struct lacp_port *,
140 const struct lacpdu *);
141 static void lacp_sm_rx_update_selected(struct lacp_port *,
142 const struct lacpdu *);
143 static void lacp_sm_rx_record_default(struct lacp_port *);
144 static void lacp_sm_rx_update_default_selected(struct lacp_port *);
145 static void lacp_sm_rx_update_selected_from_peerinfo(struct lacp_port *,
146 const struct lacp_peerinfo *);
147
148 /* mux machine */
149
150 static void lacp_sm_mux(struct lacp_port *);
151 static void lacp_set_mux(struct lacp_port *, enum lacp_mux_state);
152 static void lacp_sm_mux_timer(struct lacp_port *);
153
154 /* periodic transmit machine */
155
156 static void lacp_sm_ptx_update_timeout(struct lacp_port *, uint8_t);
157 static void lacp_sm_ptx_tx_schedule(struct lacp_port *);
158 static void lacp_sm_ptx_timer(struct lacp_port *);
159
160 /* transmit machine */
161
162 static void lacp_sm_tx(struct lacp_port *);
163 static void lacp_sm_assert_ntt(struct lacp_port *);
164
165 static void lacp_run_timers(struct lacp_port *);
166 static int lacp_compare_peerinfo(const struct lacp_peerinfo *,
167 const struct lacp_peerinfo *);
168 static int lacp_compare_systemid(const struct lacp_systemid *,
169 const struct lacp_systemid *);
170 static void lacp_port_enable(struct lacp_port *);
171 static void lacp_port_disable(struct lacp_port *);
172 static void lacp_select(struct lacp_port *);
173 static void lacp_unselect(struct lacp_port *);
174 static void lacp_disable_collecting(struct lacp_port *);
175 static void lacp_enable_collecting(struct lacp_port *);
176 static void lacp_disable_distributing(struct lacp_port *);
177 static void lacp_enable_distributing(struct lacp_port *);
178 static int lacp_xmit_lacpdu(struct lacp_port *);
179 static int lacp_xmit_marker(struct lacp_port *);
180
181 /* Debugging */
182
183 static void lacp_dump_lacpdu(const struct lacpdu *);
184 static const char *lacp_format_partner(const struct lacp_peerinfo *, char *,
185 size_t);
186 static const char *lacp_format_lagid(const struct lacp_peerinfo *,
187 const struct lacp_peerinfo *, char *, size_t);
188 static const char *lacp_format_lagid_aggregator(const struct lacp_aggregator *,
189 char *, size_t);
190 static const char *lacp_format_state(uint8_t, char *, size_t);
191 static const char *lacp_format_mac(const uint8_t *, char *, size_t);
192 static const char *lacp_format_systemid(const struct lacp_systemid *, char *,
193 size_t);
194 static const char *lacp_format_portid(const struct lacp_portid *, char *,
195 size_t);
196 static void lacp_dprintf(const struct lacp_port *, const char *, ...)
197 __attribute__((__format__(__printf__, 2, 3)));
198
199 VNET_DEFINE_STATIC(int, lacp_debug);
200 #define V_lacp_debug VNET(lacp_debug)
201 SYSCTL_NODE(_net_link_lagg, OID_AUTO, lacp, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
202 "ieee802.3ad");
203 SYSCTL_INT(_net_link_lagg_lacp, OID_AUTO, debug, CTLFLAG_RWTUN | CTLFLAG_VNET,
204 &VNET_NAME(lacp_debug), 0, "Enable LACP debug logging (1=debug, 2=trace)");
205
206 VNET_DEFINE_STATIC(int, lacp_default_strict_mode) = 1;
207 SYSCTL_INT(_net_link_lagg_lacp, OID_AUTO, default_strict_mode,
208 CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(lacp_default_strict_mode), 0,
209 "LACP strict protocol compliance default");
210 #define LACP_DPRINTF(a) if (V_lacp_debug & 0x01) { lacp_dprintf a ; }
211 #define LACP_TRACE(a) if (V_lacp_debug & 0x02) { lacp_dprintf(a,"%s\n",__func__); }
212 #define LACP_TPRINTF(a) if (V_lacp_debug & 0x04) { lacp_dprintf a ; }
213
214 /*
215 * partner administration variables.
216 * XXX should be configurable.
217 */
218
219 static const struct lacp_peerinfo lacp_partner_admin_optimistic = {
220 .lip_systemid = { .lsi_prio = 0xffff },
221 .lip_portid = { .lpi_prio = 0xffff },
222 .lip_state = LACP_STATE_SYNC | LACP_STATE_AGGREGATION |
223 LACP_STATE_COLLECTING | LACP_STATE_DISTRIBUTING,
224 };
225
226 static const struct lacp_peerinfo lacp_partner_admin_strict = {
227 .lip_systemid = { .lsi_prio = 0xffff },
228 .lip_portid = { .lpi_prio = 0xffff },
229 .lip_state = 0,
230 };
231
232 static const lacp_timer_func_t lacp_timer_funcs[LACP_NTIMER] = {
233 [LACP_TIMER_CURRENT_WHILE] = lacp_sm_rx_timer,
234 [LACP_TIMER_PERIODIC] = lacp_sm_ptx_timer,
235 [LACP_TIMER_WAIT_WHILE] = lacp_sm_mux_timer,
236 };
237
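/*
 * lacp_input: demultiplex an incoming slow protocols frame.
 * LACP and marker PDUs are consumed here; any other subtype is
 * returned to the caller for normal input processing.
 */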
238 struct mbuf *
239 lacp_input(struct lagg_port *lgp, struct mbuf *m)
240 {
241 struct lacp_port *lp = LACP_PORT(lgp);
242 uint8_t subtype;
243
244 if (m->m_pkthdr.len < sizeof(struct ether_header) + sizeof(subtype)) {
245 m_freem(m);
246 return (NULL);
247 }
248
249 m_copydata(m, sizeof(struct ether_header), sizeof(subtype), &subtype);
250 switch (subtype) {
251 case SLOWPROTOCOLS_SUBTYPE_LACP:
252 lacp_pdu_input(lp, m);
253 return (NULL);
254
255 case SLOWPROTOCOLS_SUBTYPE_MARKER:
256 lacp_marker_input(lp, m);
257 return (NULL);
258 }
259
260 /* Not a subtype we are interested in */
261 return (m);
262 }
263
264 /*
265 * lacp_pdu_input: process lacpdu
266 */
267 static int
268 lacp_pdu_input(struct lacp_port *lp, struct mbuf *m)
269 {
270 struct lacp_softc *lsc = lp->lp_lsc;
271 struct lacpdu *du;
272 int error = 0;
273
274 if (m->m_pkthdr.len != sizeof(*du)) {
275 goto bad;
276 }
277
278 if ((m->m_flags & M_MCAST) == 0) {
279 goto bad;
280 }
281
282 if (m->m_len < sizeof(*du)) {
283 m = m_pullup(m, sizeof(*du));
284 if (m == NULL) {
285 return (ENOMEM);
286 }
287 }
288
289 du = mtod(m, struct lacpdu *);
290
291 if (memcmp(&du->ldu_eh.ether_dhost,
292 &ethermulticastaddr_slowprotocols, ETHER_ADDR_LEN)) {
293 goto bad;
294 }
295
296 /*
297 * ignore the version for compatibility with
298 * future protocol revisions.
299 */
300 #if 0
301 if (du->ldu_sph.sph_version != 1) {
302 goto bad;
303 }
304 #endif
305
306 /*
307 * ignore tlv types for compatibility with
308 * future protocol revisions.
309 */
310 if (tlv_check(du, sizeof(*du), &du->ldu_tlv_actor,
311 lacp_info_tlv_template, FALSE)) {
312 goto bad;
313 }
314
315 if (V_lacp_debug > 0) {
316 lacp_dprintf(lp, "lacpdu receive\n");
317 lacp_dump_lacpdu(du);
318 }
319
320 if ((1 << lp->lp_ifp->if_dunit) & lp->lp_lsc->lsc_debug.lsc_rx_test) {
321 LACP_TPRINTF((lp, "Dropping RX PDU\n"));
322 goto bad;
323 }
324
325 LACP_LOCK(lsc);
326 lacp_sm_rx(lp, du);
327 LACP_UNLOCK(lsc);
328
329 m_freem(m);
330 return (error);
331
332 bad:
333 m_freem(m);
334 return (EINVAL);
335 }
336
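/*
 * lacp_fill_actorinfo: fill in our actor information (system id,
 * system priority, port id, port priority and state) for use in
 * transmitted lacpdus.
 */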
337 static void
338 lacp_fill_actorinfo(struct lacp_port *lp, struct lacp_peerinfo *info)
339 {
340 struct lagg_port *lgp = lp->lp_lagg;
341 struct lagg_softc *sc = lgp->lp_softc;
342
343 info->lip_systemid.lsi_prio = htons(LACP_SYSTEM_PRIO);
344 memcpy(&info->lip_systemid.lsi_mac,
345 IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
346 info->lip_portid.lpi_prio = htons(LACP_PORT_PRIO);
347 info->lip_portid.lpi_portno = htons(lp->lp_ifp->if_index);
348 info->lip_state = lp->lp_state;
349 }
350
351 static void
352 lacp_fill_markerinfo(struct lacp_port *lp, struct lacp_markerinfo *info)
353 {
354 struct ifnet *ifp = lp->lp_ifp;
355
356 /* Fill in the port index and system id (encoded as the MAC) */
357 info->mi_rq_port = htons(ifp->if_index);
358 memcpy(&info->mi_rq_system, lp->lp_systemid.lsi_mac, ETHER_ADDR_LEN);
359 info->mi_rq_xid = htonl(0);
360 }
361
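/*
 * lacp_xmit_lacpdu: build a lacpdu from the port's current actor and
 * partner information and enqueue it on the physical interface.
 */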
362 static int
363 lacp_xmit_lacpdu(struct lacp_port *lp)
364 {
365 struct lagg_port *lgp = lp->lp_lagg;
366 struct mbuf *m;
367 struct lacpdu *du;
368 int error;
369
370 LACP_LOCK_ASSERT(lp->lp_lsc);
371
372 m = m_gethdr(M_NOWAIT, MT_DATA);
373 if (m == NULL) {
374 return (ENOMEM);
375 }
376 m->m_len = m->m_pkthdr.len = sizeof(*du);
377
378 du = mtod(m, struct lacpdu *);
379 memset(du, 0, sizeof(*du));
380
381 memcpy(&du->ldu_eh.ether_dhost, ethermulticastaddr_slowprotocols,
382 ETHER_ADDR_LEN);
383 memcpy(&du->ldu_eh.ether_shost, lgp->lp_lladdr, ETHER_ADDR_LEN);
384 du->ldu_eh.ether_type = htons(ETHERTYPE_SLOW);
385
386 du->ldu_sph.sph_subtype = SLOWPROTOCOLS_SUBTYPE_LACP;
387 du->ldu_sph.sph_version = 1;
388
389 TLV_SET(&du->ldu_tlv_actor, LACP_TYPE_ACTORINFO, sizeof(du->ldu_actor));
390 du->ldu_actor = lp->lp_actor;
391
392 TLV_SET(&du->ldu_tlv_partner, LACP_TYPE_PARTNERINFO,
393 sizeof(du->ldu_partner));
394 du->ldu_partner = lp->lp_partner;
395
396 TLV_SET(&du->ldu_tlv_collector, LACP_TYPE_COLLECTORINFO,
397 sizeof(du->ldu_collector));
398 du->ldu_collector.lci_maxdelay = 0;
399
400 if (V_lacp_debug > 0) {
401 lacp_dprintf(lp, "lacpdu transmit\n");
402 lacp_dump_lacpdu(du);
403 }
404
405 m->m_flags |= M_MCAST;
406
407 /*
408 * XXX should use a higher priority queue,
409 * otherwise network congestion can break aggregation.
410 */
411
412 error = lagg_enqueue(lp->lp_ifp, m);
413 return (error);
414 }
415
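/*
 * lacp_xmit_marker: build and enqueue a marker PDU, used to verify
 * that the port's transmit queue has been flushed.
 */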
416 static int
417 lacp_xmit_marker(struct lacp_port *lp)
418 {
419 struct lagg_port *lgp = lp->lp_lagg;
420 struct mbuf *m;
421 struct markerdu *mdu;
422 int error;
423
424 LACP_LOCK_ASSERT(lp->lp_lsc);
425
426 m = m_gethdr(M_NOWAIT, MT_DATA);
427 if (m == NULL) {
428 return (ENOMEM);
429 }
430 m->m_len = m->m_pkthdr.len = sizeof(*mdu);
431
432 mdu = mtod(m, struct markerdu *);
433 memset(mdu, 0, sizeof(*mdu));
434
435 memcpy(&mdu->mdu_eh.ether_dhost, ethermulticastaddr_slowprotocols,
436 ETHER_ADDR_LEN);
437 memcpy(&mdu->mdu_eh.ether_shost, lgp->lp_lladdr, ETHER_ADDR_LEN);
438 mdu->mdu_eh.ether_type = htons(ETHERTYPE_SLOW);
439
440 mdu->mdu_sph.sph_subtype = SLOWPROTOCOLS_SUBTYPE_MARKER;
441 mdu->mdu_sph.sph_version = 1;
442
443 /* Bump the transaction id and copy over the marker info */
444 lp->lp_marker.mi_rq_xid = htonl(ntohl(lp->lp_marker.mi_rq_xid) + 1);
445 TLV_SET(&mdu->mdu_tlv, MARKER_TYPE_INFO, sizeof(mdu->mdu_info));
446 mdu->mdu_info = lp->lp_marker;
447
448 LACP_DPRINTF((lp, "marker transmit, port=%u, sys=%6D, id=%u\n",
449 ntohs(mdu->mdu_info.mi_rq_port), mdu->mdu_info.mi_rq_system, ":",
450 ntohl(mdu->mdu_info.mi_rq_xid)));
451
452 m->m_flags |= M_MCAST;
453 error = lagg_enqueue(lp->lp_ifp, m);
454 return (error);
455 }
456
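/*
 * lacp_linkstate: handle a media or link state change on a port.
 * enable or disable the port for aggregation and recompute its
 * operational key; any change forces re-selection.
 */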
457 void
458 lacp_linkstate(struct lagg_port *lgp)
459 {
460 struct lacp_port *lp = LACP_PORT(lgp);
461 struct lacp_softc *lsc = lp->lp_lsc;
462 struct ifnet *ifp = lgp->lp_ifp;
463 struct ifmediareq ifmr;
464 int error = 0;
465 u_int media;
466 uint8_t old_state;
467 uint16_t old_key;
468
469 bzero((char *)&ifmr, sizeof(ifmr));
470 error = (*ifp->if_ioctl)(ifp, SIOCGIFXMEDIA, (caddr_t)&ifmr);
471 if (error != 0) {
472 bzero((char *)&ifmr, sizeof(ifmr));
473 error = (*ifp->if_ioctl)(ifp, SIOCGIFMEDIA, (caddr_t)&ifmr);
474 }
475 if (error != 0)
476 return;
477
478 LACP_LOCK(lsc);
479 media = ifmr.ifm_active;
480 LACP_DPRINTF((lp, "media changed 0x%x -> 0x%x, ether = %d, fdx = %d, "
481 "link = %d\n", lp->lp_media, media, IFM_TYPE(media) == IFM_ETHER,
482 (media & IFM_FDX) != 0, ifp->if_link_state == LINK_STATE_UP));
483 old_state = lp->lp_state;
484 old_key = lp->lp_key;
485
486 lp->lp_media = media;
487 /*
488 * If the port is not an active, full-duplex Ethernet link then it
489 * cannot be aggregated.
490 */
491 if (IFM_TYPE(media) != IFM_ETHER || (media & IFM_FDX) == 0 ||
492 ifp->if_link_state != LINK_STATE_UP) {
493 lacp_port_disable(lp);
494 } else {
495 lacp_port_enable(lp);
496 }
497 lp->lp_key = lacp_compose_key(lp);
498
499 if (old_state != lp->lp_state || old_key != lp->lp_key) {
500 LACP_DPRINTF((lp, "-> UNSELECTED\n"));
501 lp->lp_selected = LACP_UNSELECTED;
502 }
503 LACP_UNLOCK(lsc);
504 }
505
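/*
 * lacp_tick: per-second callout.  run the timers and the selection,
 * mux, transmit and periodic transmit machines for every
 * aggregatable port.
 */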
506 static void
507 lacp_tick(void *arg)
508 {
509 struct lacp_softc *lsc = arg;
510 struct lacp_port *lp;
511
512 LIST_FOREACH(lp, &lsc->lsc_ports, lp_next) {
513 if ((lp->lp_state & LACP_STATE_AGGREGATION) == 0)
514 continue;
515
516 CURVNET_SET(lp->lp_ifp->if_vnet);
517 lacp_run_timers(lp);
518
519 lacp_select(lp);
520 lacp_sm_mux(lp);
521 lacp_sm_tx(lp);
522 lacp_sm_ptx_tx_schedule(lp);
523 CURVNET_RESTORE();
524 }
525 callout_reset(&lsc->lsc_callout, hz, lacp_tick, lsc);
526 }
527
528 int
529 lacp_port_create(struct lagg_port *lgp)
530 {
531 struct lagg_softc *sc = lgp->lp_softc;
532 struct lacp_softc *lsc = LACP_SOFTC(sc);
533 struct lacp_port *lp;
534 struct ifnet *ifp = lgp->lp_ifp;
535 struct sockaddr_dl sdl;
536 struct ifmultiaddr *rifma = NULL;
537 int error;
538
539 link_init_sdl(ifp, (struct sockaddr *)&sdl, IFT_ETHER);
540 sdl.sdl_alen = ETHER_ADDR_LEN;
541
542 bcopy(&ethermulticastaddr_slowprotocols,
543 LLADDR(&sdl), ETHER_ADDR_LEN);
544 error = if_addmulti(ifp, (struct sockaddr *)&sdl, &rifma);
545 if (error) {
546 printf("%s: ADDMULTI failed on %s\n", __func__,
547 lgp->lp_ifp->if_xname);
548 return (error);
549 }
550
551 lp = malloc(sizeof(struct lacp_port),
552 M_DEVBUF, M_NOWAIT|M_ZERO);
553 if (lp == NULL)
554 return (ENOMEM);
555
556 LACP_LOCK(lsc);
557 lgp->lp_psc = lp;
558 lp->lp_ifp = ifp;
559 lp->lp_lagg = lgp;
560 lp->lp_lsc = lsc;
561 lp->lp_ifma = rifma;
562
563 LIST_INSERT_HEAD(&lsc->lsc_ports, lp, lp_next);
564
565 lacp_fill_actorinfo(lp, &lp->lp_actor);
566 lacp_fill_markerinfo(lp, &lp->lp_marker);
567 lp->lp_state = LACP_STATE_ACTIVITY;
568 lp->lp_aggregator = NULL;
569 lacp_sm_rx_set_expired(lp);
570 LACP_UNLOCK(lsc);
571 lacp_linkstate(lgp);
572
573 return (0);
574 }
575
576 void
577 lacp_port_destroy(struct lagg_port *lgp)
578 {
579 struct lacp_port *lp = LACP_PORT(lgp);
580 struct lacp_softc *lsc = lp->lp_lsc;
581 int i;
582
583 LACP_LOCK(lsc);
584 for (i = 0; i < LACP_NTIMER; i++) {
585 LACP_TIMER_DISARM(lp, i);
586 }
587
588 lacp_disable_collecting(lp);
589 lacp_disable_distributing(lp);
590 lacp_unselect(lp);
591
592 LIST_REMOVE(lp, lp_next);
593 LACP_UNLOCK(lsc);
594
595 /* The address may have already been removed by if_purgemaddrs() */
596 if (!lgp->lp_detaching)
597 if_delmulti_ifma(lp->lp_ifma);
598
599 free(lp, M_DEVBUF);
600 }
601
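/*
 * lacp_req: report the actor and partner information of the active
 * aggregator, for the lagg ioctl path.
 */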
602 void
603 lacp_req(struct lagg_softc *sc, void *data)
604 {
605 struct lacp_opreq *req = (struct lacp_opreq *)data;
606 struct lacp_softc *lsc = LACP_SOFTC(sc);
607 struct lacp_aggregator *la;
608
609 bzero(req, sizeof(struct lacp_opreq));
610
611 /*
612 * If the LACP softc is NULL, return with the opreq structure full of
613 * zeros. It is normal for the softc to be NULL while the lagg is
614 * being destroyed.
615 */
616 if (NULL == lsc)
617 return;
618
619 la = lsc->lsc_active_aggregator;
620 LACP_LOCK(lsc);
621 if (la != NULL) {
622 req->actor_prio = ntohs(la->la_actor.lip_systemid.lsi_prio);
623 memcpy(&req->actor_mac, &la->la_actor.lip_systemid.lsi_mac,
624 ETHER_ADDR_LEN);
625 req->actor_key = ntohs(la->la_actor.lip_key);
626 req->actor_portprio = ntohs(la->la_actor.lip_portid.lpi_prio);
627 req->actor_portno = ntohs(la->la_actor.lip_portid.lpi_portno);
628 req->actor_state = la->la_actor.lip_state;
629
630 req->partner_prio = ntohs(la->la_partner.lip_systemid.lsi_prio);
631 memcpy(&req->partner_mac, &la->la_partner.lip_systemid.lsi_mac,
632 ETHER_ADDR_LEN);
633 req->partner_key = ntohs(la->la_partner.lip_key);
634 req->partner_portprio = ntohs(la->la_partner.lip_portid.lpi_prio);
635 req->partner_portno = ntohs(la->la_partner.lip_portid.lpi_portno);
636 req->partner_state = la->la_partner.lip_state;
637 }
638 LACP_UNLOCK(lsc);
639 }
640
641 void
642 lacp_portreq(struct lagg_port *lgp, void *data)
643 {
644 struct lacp_opreq *req = (struct lacp_opreq *)data;
645 struct lacp_port *lp = LACP_PORT(lgp);
646 struct lacp_softc *lsc = lp->lp_lsc;
647
648 LACP_LOCK(lsc);
649 req->actor_prio = ntohs(lp->lp_actor.lip_systemid.lsi_prio);
650 memcpy(&req->actor_mac, &lp->lp_actor.lip_systemid.lsi_mac,
651 ETHER_ADDR_LEN);
652 req->actor_key = ntohs(lp->lp_actor.lip_key);
653 req->actor_portprio = ntohs(lp->lp_actor.lip_portid.lpi_prio);
654 req->actor_portno = ntohs(lp->lp_actor.lip_portid.lpi_portno);
655 req->actor_state = lp->lp_actor.lip_state;
656
657 req->partner_prio = ntohs(lp->lp_partner.lip_systemid.lsi_prio);
658 memcpy(&req->partner_mac, &lp->lp_partner.lip_systemid.lsi_mac,
659 ETHER_ADDR_LEN);
660 req->partner_key = ntohs(lp->lp_partner.lip_key);
661 req->partner_portprio = ntohs(lp->lp_partner.lip_portid.lpi_prio);
662 req->partner_portno = ntohs(lp->lp_partner.lip_portid.lpi_portno);
663 req->partner_state = lp->lp_partner.lip_state;
664 LACP_UNLOCK(lsc);
665 }
666
667 static void
668 lacp_disable_collecting(struct lacp_port *lp)
669 {
670 LACP_DPRINTF((lp, "collecting disabled\n"));
671 lp->lp_state &= ~LACP_STATE_COLLECTING;
672 }
673
674 static void
675 lacp_enable_collecting(struct lacp_port *lp)
676 {
677 LACP_DPRINTF((lp, "collecting enabled\n"));
678 lp->lp_state |= LACP_STATE_COLLECTING;
679 }
680
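/*
 * lacp_disable_distributing: remove the port from its aggregator's
 * distributing list and update the active aggregator, port map and
 * lagg link state accordingly.
 */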
681 static void
682 lacp_disable_distributing(struct lacp_port *lp)
683 {
684 struct lacp_aggregator *la = lp->lp_aggregator;
685 struct lacp_softc *lsc = lp->lp_lsc;
686 struct lagg_softc *sc = lsc->lsc_softc;
687 char buf[LACP_LAGIDSTR_MAX+1];
688
689 LACP_LOCK_ASSERT(lsc);
690
691 if (la == NULL || (lp->lp_state & LACP_STATE_DISTRIBUTING) == 0) {
692 return;
693 }
694
695 KASSERT(!TAILQ_EMPTY(&la->la_ports), ("no aggregator ports"));
696 KASSERT(la->la_nports > 0, ("nports invalid (%d)", la->la_nports));
697 KASSERT(la->la_refcnt >= la->la_nports, ("aggregator refcnt invalid"));
698
699 LACP_DPRINTF((lp, "disable distributing on aggregator %s, "
700 "nports %d -> %d\n",
701 lacp_format_lagid_aggregator(la, buf, sizeof(buf)),
702 la->la_nports, la->la_nports - 1));
703
704 TAILQ_REMOVE(&la->la_ports, lp, lp_dist_q);
705 la->la_nports--;
706 sc->sc_active = la->la_nports;
707
708 if (lsc->lsc_active_aggregator == la) {
709 lacp_suppress_distributing(lsc, la);
710 lacp_select_active_aggregator(lsc);
711 /* regenerate the port map, the active aggregator has changed */
712 lacp_update_portmap(lsc);
713 }
714
715 lp->lp_state &= ~LACP_STATE_DISTRIBUTING;
716 if_link_state_change(sc->sc_ifp,
717 sc->sc_active ? LINK_STATE_UP : LINK_STATE_DOWN);
718 }
719
720 static void
721 lacp_enable_distributing(struct lacp_port *lp)
722 {
723 struct lacp_aggregator *la = lp->lp_aggregator;
724 struct lacp_softc *lsc = lp->lp_lsc;
725 struct lagg_softc *sc = lsc->lsc_softc;
726 char buf[LACP_LAGIDSTR_MAX+1];
727
728 LACP_LOCK_ASSERT(lsc);
729
730 if ((lp->lp_state & LACP_STATE_DISTRIBUTING) != 0) {
731 return;
732 }
733
734 LACP_DPRINTF((lp, "enable distributing on aggregator %s, "
735 "nports %d -> %d\n",
736 lacp_format_lagid_aggregator(la, buf, sizeof(buf)),
737 la->la_nports, la->la_nports + 1));
738
739 KASSERT(la->la_refcnt > la->la_nports, ("aggregator refcnt invalid"));
740 TAILQ_INSERT_HEAD(&la->la_ports, lp, lp_dist_q);
741 la->la_nports++;
742 sc->sc_active = la->la_nports;
743
744 lp->lp_state |= LACP_STATE_DISTRIBUTING;
745
746 if (lsc->lsc_active_aggregator == la) {
747 lacp_suppress_distributing(lsc, la);
748 lacp_update_portmap(lsc);
749 } else
750 /* try to become the active aggregator */
751 lacp_select_active_aggregator(lsc);
752
753 if_link_state_change(sc->sc_ifp,
754 sc->sc_active ? LINK_STATE_UP : LINK_STATE_DOWN);
755 }
756
757 static void
758 lacp_transit_expire(void *vp)
759 {
760 struct lacp_softc *lsc = vp;
761
762 LACP_LOCK_ASSERT(lsc);
763
764 CURVNET_SET(lsc->lsc_softc->sc_ifp->if_vnet);
765 LACP_TRACE(NULL);
766 CURVNET_RESTORE();
767
768 lsc->lsc_suppress_distributing = FALSE;
769 }
770
771 void
772 lacp_attach(struct lagg_softc *sc)
773 {
774 struct lacp_softc *lsc;
775
776 lsc = malloc(sizeof(struct lacp_softc), M_DEVBUF, M_WAITOK | M_ZERO);
777
778 sc->sc_psc = lsc;
779 lsc->lsc_softc = sc;
780
781 lsc->lsc_hashkey = m_ether_tcpip_hash_init();
782 lsc->lsc_active_aggregator = NULL;
783 lsc->lsc_strict_mode = VNET(lacp_default_strict_mode);
784 LACP_LOCK_INIT(lsc);
785 TAILQ_INIT(&lsc->lsc_aggregators);
786 LIST_INIT(&lsc->lsc_ports);
787
788 callout_init_mtx(&lsc->lsc_transit_callout, &lsc->lsc_mtx, 0);
789 callout_init_mtx(&lsc->lsc_callout, &lsc->lsc_mtx, 0);
790
791 /* if the lagg is already up then do the same */
792 if (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING)
793 lacp_init(sc);
794 }
795
796 void
797 lacp_detach(void *psc)
798 {
799 struct lacp_softc *lsc = (struct lacp_softc *)psc;
800
801 KASSERT(TAILQ_EMPTY(&lsc->lsc_aggregators),
802 ("aggregators still active"));
803 KASSERT(lsc->lsc_active_aggregator == NULL,
804 ("aggregator still attached"));
805
806 callout_drain(&lsc->lsc_transit_callout);
807 callout_drain(&lsc->lsc_callout);
808
809 LACP_LOCK_DESTROY(lsc);
810 free(lsc, M_DEVBUF);
811 }
812
813 void
814 lacp_init(struct lagg_softc *sc)
815 {
816 struct lacp_softc *lsc = LACP_SOFTC(sc);
817
818 LACP_LOCK(lsc);
819 callout_reset(&lsc->lsc_callout, hz, lacp_tick, lsc);
820 LACP_UNLOCK(lsc);
821 }
822
823 void
824 lacp_stop(struct lagg_softc *sc)
825 {
826 struct lacp_softc *lsc = LACP_SOFTC(sc);
827
828 LACP_LOCK(lsc);
829 callout_stop(&lsc->lsc_transit_callout);
830 callout_stop(&lsc->lsc_callout);
831 LACP_UNLOCK(lsc);
832 }
833
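/*
 * lacp_select_tx_port_by_hash: pick an egress port from the active
 * port map using the flow hash, preferring ports in the requested
 * NUMA domain when LAGG_OPT_USE_NUMA is enabled.
 */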
834 struct lagg_port *
835 lacp_select_tx_port_by_hash(struct lagg_softc *sc, uint32_t hash,
836 uint8_t numa_domain, int *err)
837 {
838 struct lacp_softc *lsc = LACP_SOFTC(sc);
839 struct lacp_portmap *pm;
840 struct lacp_port *lp;
841 struct lacp_port **map;
842 int count;
843
844 if (__predict_false(lsc->lsc_suppress_distributing)) {
845 LACP_DPRINTF((NULL, "%s: waiting transit\n", __func__));
846 *err = ENOBUFS;
847 return (NULL);
848 }
849
850 pm = &lsc->lsc_pmap[lsc->lsc_activemap];
851 if (pm->pm_count == 0) {
852 LACP_DPRINTF((NULL, "%s: no active aggregator\n", __func__));
853 *err = ENETDOWN;
854 return (NULL);
855 }
856
857 #ifdef NUMA
858 if ((sc->sc_opts & LAGG_OPT_USE_NUMA) &&
859 pm->pm_num_dom > 1 && numa_domain < MAXMEMDOM) {
860 count = pm->pm_numa[numa_domain].count;
861 if (count > 0) {
862 map = pm->pm_numa[numa_domain].map;
863 } else {
864 /* No ports on this domain; use global hash. */
865 map = pm->pm_map;
866 count = pm->pm_count;
867 }
868 } else
869 #endif
870 {
871 map = pm->pm_map;
872 count = pm->pm_count;
873 }
874
875 hash %= count;
876 lp = map[hash];
877
878 return (lp->lp_lagg);
879 }
880
881 struct lagg_port *
882 lacp_select_tx_port(struct lagg_softc *sc, struct mbuf *m, int *err)
883 {
884 struct lacp_softc *lsc = LACP_SOFTC(sc);
885 uint32_t hash;
886 uint8_t numa_domain;
887
888 if ((sc->sc_opts & LAGG_OPT_USE_FLOWID) &&
889 M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
890 hash = m->m_pkthdr.flowid >> sc->flowid_shift;
891 else
892 hash = m_ether_tcpip_hash(sc->sc_flags, m, lsc->lsc_hashkey);
893
894 numa_domain = m->m_pkthdr.numa_domain;
895 return (lacp_select_tx_port_by_hash(sc, hash, numa_domain, err));
896 }
897
898 /*
899 * lacp_suppress_distributing: drop transmit packets for a while
900 * to preserve packet ordering.
901 */
902
903 static void
904 lacp_suppress_distributing(struct lacp_softc *lsc, struct lacp_aggregator *la)
905 {
906 struct lacp_port *lp;
907
908 if (lsc->lsc_active_aggregator != la) {
909 return;
910 }
911
912 LACP_TRACE(NULL);
913
914 lsc->lsc_suppress_distributing = TRUE;
915
916 /* send a marker frame down each port to verify the queues are empty */
917 LIST_FOREACH(lp, &lsc->lsc_ports, lp_next) {
918 lp->lp_flags |= LACP_PORT_MARK;
919 if (lacp_xmit_marker(lp) != 0)
920 lp->lp_flags &= ~LACP_PORT_MARK;
921 }
922
923 /* set a timeout for the marker frames */
924 callout_reset(&lsc->lsc_transit_callout,
925 LACP_TRANSIT_DELAY * hz / 1000, lacp_transit_expire, lsc);
926 }
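/*
 * lacp_compare_peerinfo: compare two peerinfo structures, ignoring
 * the state field (only the fields before lip_state are significant).
 */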
927
928 static int
929 lacp_compare_peerinfo(const struct lacp_peerinfo *a,
930 const struct lacp_peerinfo *b)
931 {
932 return (memcmp(a, b, offsetof(struct lacp_peerinfo, lip_state)));
933 }
934
935 static int
936 lacp_compare_systemid(const struct lacp_systemid *a,
937 const struct lacp_systemid *b)
938 {
939 return (memcmp(a, b, sizeof(*a)));
940 }
941
942 #if 0 /* unused */
943 static int
944 lacp_compare_portid(const struct lacp_portid *a,
945 const struct lacp_portid *b)
946 {
947 return (memcmp(a, b, sizeof(*a)));
948 }
949 #endif
950
951 static uint64_t
952 lacp_aggregator_bandwidth(struct lacp_aggregator *la)
953 {
954 struct lacp_port *lp;
955 uint64_t speed;
956
957 lp = TAILQ_FIRST(&la->la_ports);
958 if (lp == NULL) {
959 return (0);
960 }
961
962 speed = ifmedia_baudrate(lp->lp_media);
963 speed *= la->la_nports;
964 if (speed == 0) {
965 LACP_DPRINTF((lp, "speed 0? media=0x%x nports=%d\n",
966 lp->lp_media, la->la_nports));
967 }
968
969 return (speed);
970 }
971
972 /*
973 * lacp_select_active_aggregator: select an aggregator to be used to transmit
974 * packets from the lagg(4) interface.
975 */
976
977 static void
978 lacp_select_active_aggregator(struct lacp_softc *lsc)
979 {
980 struct lacp_aggregator *la;
981 struct lacp_aggregator *best_la = NULL;
982 uint64_t best_speed = 0;
983 char buf[LACP_LAGIDSTR_MAX+1];
984
985 LACP_TRACE(NULL);
986
987 TAILQ_FOREACH(la, &lsc->lsc_aggregators, la_q) {
988 uint64_t speed;
989
990 if (la->la_nports == 0) {
991 continue;
992 }
993
994 speed = lacp_aggregator_bandwidth(la);
995 LACP_DPRINTF((NULL, "%s, speed=%jd, nports=%d\n",
996 lacp_format_lagid_aggregator(la, buf, sizeof(buf)),
997 speed, la->la_nports));
998
999 /*
1000 * This aggregator is chosen if the partner has a better
1001 * system priority, the total aggregated speed is higher,
1002 * or it is already the chosen aggregator.
1003 */
1004 if ((best_la != NULL && LACP_SYS_PRI(la->la_partner) <
1005 LACP_SYS_PRI(best_la->la_partner)) ||
1006 speed > best_speed ||
1007 (speed == best_speed &&
1008 la == lsc->lsc_active_aggregator)) {
1009 best_la = la;
1010 best_speed = speed;
1011 }
1012 }
1013
1014 KASSERT(best_la == NULL || best_la->la_nports > 0,
1015 ("invalid aggregator refcnt"));
1016 KASSERT(best_la == NULL || !TAILQ_EMPTY(&best_la->la_ports),
1017 ("invalid aggregator list"));
1018
1019 if (lsc->lsc_active_aggregator != best_la) {
1020 LACP_DPRINTF((NULL, "active aggregator changed\n"));
1021 LACP_DPRINTF((NULL, "old %s\n",
1022 lacp_format_lagid_aggregator(lsc->lsc_active_aggregator,
1023 buf, sizeof(buf))));
1024 } else {
1025 LACP_DPRINTF((NULL, "active aggregator not changed\n"));
1026 }
1027 LACP_DPRINTF((NULL, "new %s\n",
1028 lacp_format_lagid_aggregator(best_la, buf, sizeof(buf))));
1029
1030 if (lsc->lsc_active_aggregator != best_la) {
1031 lsc->lsc_active_aggregator = best_la;
1032 lacp_update_portmap(lsc);
1033 if (best_la) {
1034 lacp_suppress_distributing(lsc, best_la);
1035 }
1036 }
1037 }
1038
1039 static int
1040 lacp_pm_compare(const void *p1, const void *p2)
1041 {
1042 struct lacp_port *const *a = p1;
1043 struct lacp_port *const *b = p2;
1044 int left, right;
1045
1046 left = (*a)->lp_ifp->if_index;
1047 right = (*b)->lp_ifp->if_index;
1048 return ((left > right) - (left < right));
1049 }
1050
1051 /*
1052 * Update the inactive portmap array with the new list of ports and
1053 * make it live.
1054 */
1055 static void
1056 lacp_update_portmap(struct lacp_softc *lsc)
1057 {
1058 struct lagg_softc *sc = lsc->lsc_softc;
1059 struct lacp_aggregator *la;
1060 struct lacp_portmap *p;
1061 struct lacp_port *lp;
1062 uint64_t speed;
1063 u_int newmap;
1064 int i;
1065 #ifdef NUMA
1066 int count;
1067 uint8_t domain;
1068 #endif
1069
1070 newmap = lsc->lsc_activemap == 0 ? 1 : 0;
1071 p = &lsc->lsc_pmap[newmap];
1072 la = lsc->lsc_active_aggregator;
1073 speed = 0;
1074 bzero(p, sizeof(struct lacp_portmap));
1075
1076 if (la != NULL && la->la_nports > 0) {
1077 p->pm_count = la->la_nports;
1078 i = 0;
1079 TAILQ_FOREACH(lp, &la->la_ports, lp_dist_q) {
1080 p->pm_map[i++] = lp;
1081 #ifdef NUMA
1082 domain = lp->lp_ifp->if_numa_domain;
1083 if (domain >= MAXMEMDOM)
1084 continue;
1085 count = p->pm_numa[domain].count;
1086 p->pm_numa[domain].map[count] = lp;
1087 p->pm_numa[domain].count++;
1088 #endif
1089 }
1090 KASSERT(i == p->pm_count, ("Invalid port count"));
1091
1092 #ifdef NUMA
1093 for (i = 0; i < MAXMEMDOM; i++) {
1094 if (p->pm_numa[i].count != 0) {
1095 p->pm_num_dom++;
1096 if (p->pm_numa[i].count > 1) {
1097 qsort(&p->pm_numa[i].map[0],
1098 p->pm_numa[i].count,
1099 sizeof(p->pm_numa[i].map[0]),
1100 lacp_pm_compare);
1101 }
1102 }
1103 }
1104 #endif
1105 speed = lacp_aggregator_bandwidth(la);
1106 }
1107
1108 if (p->pm_count > 1) {
1109 qsort(&p->pm_map[0], p->pm_count,
1110 sizeof(p->pm_map[0]), lacp_pm_compare);
1111 }
1112 sc->sc_ifp->if_baudrate = speed;
1113 EVENTHANDLER_INVOKE(ifnet_event, sc->sc_ifp,
1114 IFNET_EVENT_UPDATE_BAUDRATE);
1115
1116 /* switch the active portmap over */
1117 atomic_store_rel_int(&lsc->lsc_activemap, newmap);
1118 LACP_DPRINTF((NULL, "Set table %d with %d ports\n",
1119 lsc->lsc_activemap,
1120 lsc->lsc_pmap[lsc->lsc_activemap].pm_count));
1121 }
1122
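/*
 * lacp_compose_key: derive the operational key for a port.  aggregatable
 * ports share a key built from the media speed class and the lagg
 * interface index; non-aggregatable ports get a per-port key instead.
 */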
1123 static uint16_t
1124 lacp_compose_key(struct lacp_port *lp)
1125 {
1126 struct lagg_port *lgp = lp->lp_lagg;
1127 struct lagg_softc *sc = lgp->lp_softc;
1128 u_int media = lp->lp_media;
1129 uint16_t key;
1130
1131 if ((lp->lp_state & LACP_STATE_AGGREGATION) == 0) {
1132 /*
1133 * non-aggregatable links should have unique keys.
1134 *
1135 * XXX this isn't really unique as if_index is 16 bit.
1136 */
1137
1138 /* bit 0..14: (some bits of) if_index of this port */
1139 key = lp->lp_ifp->if_index;
1140 /* bit 15: 1 */
1141 key |= 0x8000;
1142 } else {
1143 u_int subtype = IFM_SUBTYPE(media);
1144
1145 KASSERT(IFM_TYPE(media) == IFM_ETHER, ("invalid media type"));
1146 KASSERT((media & IFM_FDX) != 0, ("aggregating HDX interface"));
1147
1148 /* bit 0..4: IFM_SUBTYPE modulo speed */
1149 switch (subtype) {
1150 case IFM_10_T:
1151 case IFM_10_2:
1152 case IFM_10_5:
1153 case IFM_10_STP:
1154 case IFM_10_FL:
1155 key = IFM_10_T;
1156 break;
1157 case IFM_100_TX:
1158 case IFM_100_FX:
1159 case IFM_100_T4:
1160 case IFM_100_VG:
1161 case IFM_100_T2:
1162 case IFM_100_T:
1163 case IFM_100_SGMII:
1164 case IFM_100_BX:
1165 key = IFM_100_TX;
1166 break;
1167 case IFM_1000_SX:
1168 case IFM_1000_LX:
1169 case IFM_1000_CX:
1170 case IFM_1000_T:
1171 case IFM_1000_KX:
1172 case IFM_1000_SGMII:
1173 case IFM_1000_CX_SGMII:
1174 case IFM_1000_BX:
1175 key = IFM_1000_SX;
1176 break;
1177 case IFM_10G_LR:
1178 case IFM_10G_SR:
1179 case IFM_10G_CX4:
1180 case IFM_10G_TWINAX:
1181 case IFM_10G_TWINAX_LONG:
1182 case IFM_10G_LRM:
1183 case IFM_10G_T:
1184 case IFM_10G_KX4:
1185 case IFM_10G_KR:
1186 case IFM_10G_CR1:
1187 case IFM_10G_ER:
1188 case IFM_10G_SFI:
1189 case IFM_10G_AOC:
1190 key = IFM_10G_LR;
1191 break;
1192 case IFM_20G_KR2:
1193 key = IFM_20G_KR2;
1194 break;
1195 case IFM_2500_KX:
1196 case IFM_2500_T:
1197 case IFM_2500_X:
1198 key = IFM_2500_KX;
1199 break;
1200 case IFM_5000_T:
1201 case IFM_5000_KR:
1202 case IFM_5000_KR_S:
1203 case IFM_5000_KR1:
1204 key = IFM_5000_T;
1205 break;
1206 case IFM_50G_PCIE:
1207 case IFM_50G_CR2:
1208 case IFM_50G_KR2:
1209 case IFM_50G_KR4:
1210 case IFM_50G_SR2:
1211 case IFM_50G_LR2:
1212 case IFM_50G_LAUI2_AC:
1213 case IFM_50G_LAUI2:
1214 case IFM_50G_AUI2_AC:
1215 case IFM_50G_AUI2:
1216 case IFM_50G_CP:
1217 case IFM_50G_SR:
1218 case IFM_50G_LR:
1219 case IFM_50G_FR:
1220 case IFM_50G_KR_PAM4:
1221 case IFM_50G_AUI1_AC:
1222 case IFM_50G_AUI1:
1223 key = IFM_50G_PCIE;
1224 break;
1225 case IFM_56G_R4:
1226 key = IFM_56G_R4;
1227 break;
1228 case IFM_25G_PCIE:
1229 case IFM_25G_CR:
1230 case IFM_25G_KR:
1231 case IFM_25G_SR:
1232 case IFM_25G_LR:
1233 case IFM_25G_ACC:
1234 case IFM_25G_AOC:
1235 case IFM_25G_T:
1236 case IFM_25G_CR_S:
1237 case IFM_25G_CR1:
1238 case IFM_25G_KR_S:
1239 case IFM_25G_AUI:
1240 case IFM_25G_KR1:
1241 key = IFM_25G_PCIE;
1242 break;
1243 case IFM_40G_CR4:
1244 case IFM_40G_SR4:
1245 case IFM_40G_LR4:
1246 case IFM_40G_LM4:
1247 case IFM_40G_XLPPI:
1248 case IFM_40G_KR4:
1249 case IFM_40G_XLAUI:
1250 case IFM_40G_XLAUI_AC:
1251 case IFM_40G_ER4:
1252 key = IFM_40G_CR4;
1253 break;
1254 case IFM_100G_CR4:
1255 case IFM_100G_SR4:
1256 case IFM_100G_KR4:
1257 case IFM_100G_LR4:
1258 case IFM_100G_CAUI4_AC:
1259 case IFM_100G_CAUI4:
1260 case IFM_100G_AUI4_AC:
1261 case IFM_100G_AUI4:
1262 case IFM_100G_CR_PAM4:
1263 case IFM_100G_KR_PAM4:
1264 case IFM_100G_CP2:
1265 case IFM_100G_SR2:
1266 case IFM_100G_DR:
1267 case IFM_100G_KR2_PAM4:
1268 case IFM_100G_CAUI2_AC:
1269 case IFM_100G_CAUI2:
1270 case IFM_100G_AUI2_AC:
1271 case IFM_100G_AUI2:
1272 key = IFM_100G_CR4;
1273 break;
1274 case IFM_200G_CR4_PAM4:
1275 case IFM_200G_SR4:
1276 case IFM_200G_FR4:
1277 case IFM_200G_LR4:
1278 case IFM_200G_DR4:
1279 case IFM_200G_KR4_PAM4:
1280 case IFM_200G_AUI4_AC:
1281 case IFM_200G_AUI4:
1282 case IFM_200G_AUI8_AC:
1283 case IFM_200G_AUI8:
1284 key = IFM_200G_CR4_PAM4;
1285 break;
1286 case IFM_400G_FR8:
1287 case IFM_400G_LR8:
1288 case IFM_400G_DR4:
1289 case IFM_400G_AUI8_AC:
1290 case IFM_400G_AUI8:
1291 case IFM_400G_SR8:
1292 case IFM_400G_CR8:
1293 key = IFM_400G_FR8;
1294 break;
1295 default:
1296 key = subtype;
1297 break;
1298 }
1299 /* bit 5..14: (some bits of) if_index of lagg device */
1300 key |= 0x7fe0 & ((sc->sc_ifp->if_index) << 5);
1301 /* bit 15: 0 */
1302 }
1303 return (htons(key));
1304 }
1305
1306 static void
1307 lacp_aggregator_addref(struct lacp_softc *lsc, struct lacp_aggregator *la)
1308 {
1309 char buf[LACP_LAGIDSTR_MAX+1];
1310
1311 LACP_DPRINTF((NULL, "%s: lagid=%s, refcnt %d -> %d\n",
1312 __func__,
1313 lacp_format_lagid(&la->la_actor, &la->la_partner,
1314 buf, sizeof(buf)),
1315 la->la_refcnt, la->la_refcnt + 1));
1316
1317 KASSERT(la->la_refcnt > 0, ("refcount <= 0"));
1318 la->la_refcnt++;
1319 KASSERT(la->la_refcnt > la->la_nports, ("invalid refcount"));
1320 }
1321
1322 static void
1323 lacp_aggregator_delref(struct lacp_softc *lsc, struct lacp_aggregator *la)
1324 {
1325 char buf[LACP_LAGIDSTR_MAX+1];
1326
1327 LACP_DPRINTF((NULL, "%s: lagid=%s, refcnt %d -> %d\n",
1328 __func__,
1329 lacp_format_lagid(&la->la_actor, &la->la_partner,
1330 buf, sizeof(buf)),
1331 la->la_refcnt, la->la_refcnt - 1));
1332
1333 KASSERT(la->la_refcnt > la->la_nports, ("invalid refcnt"));
1334 la->la_refcnt--;
1335 if (la->la_refcnt > 0) {
1336 return;
1337 }
1338
1339 KASSERT(la->la_refcnt == 0, ("refcount not zero"));
1340 KASSERT(lsc->lsc_active_aggregator != la, ("aggregator active"));
1341
1342 TAILQ_REMOVE(&lsc->lsc_aggregators, la, la_q);
1343
1344 free(la, M_DEVBUF);
1345 }
1346
1347 /*
1348 * lacp_aggregator_get: allocate an aggregator.
1349 */
1350
1351 static struct lacp_aggregator *
1352 lacp_aggregator_get(struct lacp_softc *lsc, struct lacp_port *lp)
1353 {
1354 struct lacp_aggregator *la;
1355
1356 la = malloc(sizeof(*la), M_DEVBUF, M_NOWAIT);
1357 if (la) {
1358 la->la_refcnt = 1;
1359 la->la_nports = 0;
1360 TAILQ_INIT(&la->la_ports);
1361 la->la_pending = 0;
1362 TAILQ_INSERT_TAIL(&lsc->lsc_aggregators, la, la_q);
1363 }
1364
1365 return (la);
1366 }
1367
1368 /*
1369 * lacp_fill_aggregator_id: setup a newly allocated aggregator from a port.
1370 */
1371
1372 static void
1373 lacp_fill_aggregator_id(struct lacp_aggregator *la, const struct lacp_port *lp)
1374 {
1375 lacp_fill_aggregator_id_peer(&la->la_partner, &lp->lp_partner);
1376 lacp_fill_aggregator_id_peer(&la->la_actor, &lp->lp_actor);
1377
1378 la->la_actor.lip_state = lp->lp_state & LACP_STATE_AGGREGATION;
1379 }
1380
1381 static void
1382 lacp_fill_aggregator_id_peer(struct lacp_peerinfo *lpi_aggr,
1383 const struct lacp_peerinfo *lpi_port)
1384 {
1385 memset(lpi_aggr, 0, sizeof(*lpi_aggr));
1386 lpi_aggr->lip_systemid = lpi_port->lip_systemid;
1387 lpi_aggr->lip_key = lpi_port->lip_key;
1388 }
1389
1390 /*
1391 * lacp_aggregator_is_compatible: check if a port can join to an aggregator.
1392 */
1393
1394 static bool
1395 lacp_aggregator_is_compatible(const struct lacp_aggregator *la,
1396 const struct lacp_port *lp)
1397 {
1398 if (!(lp->lp_state & LACP_STATE_AGGREGATION) ||
1399 !(lp->lp_partner.lip_state & LACP_STATE_AGGREGATION)) {
1400 return (false);
1401 }
1402
1403 if (!(la->la_actor.lip_state & LACP_STATE_AGGREGATION))
1404 return (false);
1405
1406 if (!lacp_peerinfo_is_compatible(&la->la_partner, &lp->lp_partner))
1407 return (false);
1408
1409 if (!lacp_peerinfo_is_compatible(&la->la_actor, &lp->lp_actor))
1410 return (false);
1411
1412 return (true);
1413 }
1414
1415 static bool
1416 lacp_peerinfo_is_compatible(const struct lacp_peerinfo *a,
1417 const struct lacp_peerinfo *b)
1418 {
1419 if (memcmp(&a->lip_systemid, &b->lip_systemid,
1420 sizeof(a->lip_systemid)) != 0) {
1421 return (false);
1422 }
1423
1424 if (memcmp(&a->lip_key, &b->lip_key, sizeof(a->lip_key)) != 0)
1425 return (false);
1426
1427 return (true);
1428 }
1429
1430 static void
1431 lacp_port_enable(struct lacp_port *lp)
1432 {
1433 lp->lp_state |= LACP_STATE_AGGREGATION;
1434 }
1435
1436 static void
1437 lacp_port_disable(struct lacp_port *lp)
1438 {
1439 lacp_set_mux(lp, LACP_MUX_DETACHED);
1440
1441 lp->lp_state &= ~LACP_STATE_AGGREGATION;
1442 lp->lp_selected = LACP_UNSELECTED;
1443 lacp_sm_rx_record_default(lp);
1444 lp->lp_partner.lip_state &= ~LACP_STATE_AGGREGATION;
1445 lp->lp_state &= ~LACP_STATE_EXPIRED;
1446 }
1447
1448 /*
1449 * lacp_select: select an aggregator. create one if necessary.
1450 */
1451 static void
1452 lacp_select(struct lacp_port *lp)
1453 {
1454 struct lacp_softc *lsc = lp->lp_lsc;
1455 struct lacp_aggregator *la;
1456 char buf[LACP_LAGIDSTR_MAX+1];
1457
1458 if (lp->lp_aggregator) {
1459 return;
1460 }
1461
1462 /* If we haven't heard from our peer, skip this step. */
1463 if (lp->lp_state & LACP_STATE_DEFAULTED)
1464 return;
1465
1466 KASSERT(!LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE),
1467 ("timer_wait_while still active"));
1468
1469 LACP_DPRINTF((lp, "port lagid=%s\n",
1470 lacp_format_lagid(&lp->lp_actor, &lp->lp_partner,
1471 buf, sizeof(buf))));
1472
1473 TAILQ_FOREACH(la, &lsc->lsc_aggregators, la_q) {
1474 if (lacp_aggregator_is_compatible(la, lp)) {
1475 break;
1476 }
1477 }
1478
1479 if (la == NULL) {
1480 la = lacp_aggregator_get(lsc, lp);
1481 if (la == NULL) {
1482 LACP_DPRINTF((lp, "aggregator creation failed\n"));
1483
1484 /*
1485 * will retry on the next tick.
1486 */
1487
1488 return;
1489 }
1490 lacp_fill_aggregator_id(la, lp);
1491 LACP_DPRINTF((lp, "aggregator created\n"));
1492 } else {
1493 LACP_DPRINTF((lp, "compatible aggregator found\n"));
1494 if (la->la_refcnt == LACP_MAX_PORTS)
1495 return;
1496 lacp_aggregator_addref(lsc, la);
1497 }
1498
1499 LACP_DPRINTF((lp, "aggregator lagid=%s\n",
1500 lacp_format_lagid(&la->la_actor, &la->la_partner,
1501 buf, sizeof(buf))));
1502
1503 lp->lp_aggregator = la;
1504 lp->lp_selected = LACP_SELECTED;
1505 }
1506
1507 /*
1508 * lacp_unselect: finish unselect/detach process.
1509 */
1510
1511 static void
1512 lacp_unselect(struct lacp_port *lp)
1513 {
1514 struct lacp_softc *lsc = lp->lp_lsc;
1515 struct lacp_aggregator *la = lp->lp_aggregator;
1516
1517 KASSERT(!LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE),
1518 ("timer_wait_while still active"));
1519
1520 if (la == NULL) {
1521 return;
1522 }
1523
1524 lp->lp_aggregator = NULL;
1525 lacp_aggregator_delref(lsc, la);
1526 }
1527
1528 /* mux machine */
1529
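/*
 * lacp_sm_mux: mux state machine.  move the port between DETACHED,
 * WAITING, ATTACHED, COLLECTING and DISTRIBUTING based on selection
 * and the partner's SYNC/COLLECTING bits.
 */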
1530 static void
1531 lacp_sm_mux(struct lacp_port *lp)
1532 {
1533 struct lagg_port *lgp = lp->lp_lagg;
1534 struct lagg_softc *sc = lgp->lp_softc;
1535 enum lacp_mux_state new_state;
1536 boolean_t p_sync =
1537 (lp->lp_partner.lip_state & LACP_STATE_SYNC) != 0;
1538 boolean_t p_collecting =
1539 (lp->lp_partner.lip_state & LACP_STATE_COLLECTING) != 0;
1540 enum lacp_selected selected = lp->lp_selected;
1541 struct lacp_aggregator *la;
1542
1543 if (V_lacp_debug > 1)
1544 lacp_dprintf(lp, "%s: state= 0x%x, selected= 0x%x, "
1545 "p_sync= 0x%x, p_collecting= 0x%x\n", __func__,
1546 lp->lp_mux_state, selected, p_sync, p_collecting);
1547
1548 re_eval:
1549 la = lp->lp_aggregator;
1550 KASSERT(lp->lp_mux_state == LACP_MUX_DETACHED || la != NULL,
1551 ("MUX not detached"));
1552 new_state = lp->lp_mux_state;
1553 switch (lp->lp_mux_state) {
1554 case LACP_MUX_DETACHED:
1555 if (selected != LACP_UNSELECTED) {
1556 new_state = LACP_MUX_WAITING;
1557 }
1558 break;
1559 case LACP_MUX_WAITING:
1560 KASSERT(la->la_pending > 0 ||
1561 !LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE),
1562 ("timer_wait_while still active"));
1563 if (selected == LACP_SELECTED && la->la_pending == 0) {
1564 new_state = LACP_MUX_ATTACHED;
1565 } else if (selected == LACP_UNSELECTED) {
1566 new_state = LACP_MUX_DETACHED;
1567 }
1568 break;
1569 case LACP_MUX_ATTACHED:
1570 if (selected == LACP_SELECTED && p_sync) {
1571 new_state = LACP_MUX_COLLECTING;
1572 } else if (selected != LACP_SELECTED) {
1573 new_state = LACP_MUX_DETACHED;
1574 }
1575 break;
1576 case LACP_MUX_COLLECTING:
1577 if (selected == LACP_SELECTED && p_sync && p_collecting) {
1578 new_state = LACP_MUX_DISTRIBUTING;
1579 } else if (selected != LACP_SELECTED || !p_sync) {
1580 new_state = LACP_MUX_ATTACHED;
1581 }
1582 break;
1583 case LACP_MUX_DISTRIBUTING:
1584 if (selected != LACP_SELECTED || !p_sync || !p_collecting) {
1585 new_state = LACP_MUX_COLLECTING;
1586 lacp_dprintf(lp, "Interface stopped DISTRIBUTING, possible flapping\n");
1587 sc->sc_flapping++;
1588 }
1589 break;
1590 default:
1591 panic("%s: unknown state", __func__);
1592 }
1593
1594 if (lp->lp_mux_state == new_state) {
1595 return;
1596 }
1597
1598 lacp_set_mux(lp, new_state);
1599 goto re_eval;
1600 }
1601
1602 static void
1603 lacp_set_mux(struct lacp_port *lp, enum lacp_mux_state new_state)
1604 {
1605 struct lacp_aggregator *la = lp->lp_aggregator;
1606
1607 if (lp->lp_mux_state == new_state) {
1608 return;
1609 }
1610
1611 switch (new_state) {
1612 case LACP_MUX_DETACHED:
1613 lp->lp_state &= ~LACP_STATE_SYNC;
1614 lacp_disable_distributing(lp);
1615 lacp_disable_collecting(lp);
1616 lacp_sm_assert_ntt(lp);
1617 /* cancel timer */
1618 if (LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE)) {
1619 KASSERT(la->la_pending > 0,
1620 ("timer_wait_while not active"));
1621 la->la_pending--;
1622 }
1623 LACP_TIMER_DISARM(lp, LACP_TIMER_WAIT_WHILE);
1624 lacp_unselect(lp);
1625 break;
1626 case LACP_MUX_WAITING:
1627 LACP_TIMER_ARM(lp, LACP_TIMER_WAIT_WHILE,
1628 LACP_AGGREGATE_WAIT_TIME);
1629 la->la_pending++;
1630 break;
1631 case LACP_MUX_ATTACHED:
1632 lp->lp_state |= LACP_STATE_SYNC;
1633 lacp_disable_collecting(lp);
1634 lacp_sm_assert_ntt(lp);
1635 break;
1636 case LACP_MUX_COLLECTING:
1637 lacp_enable_collecting(lp);
1638 lacp_disable_distributing(lp);
1639 lacp_sm_assert_ntt(lp);
1640 break;
1641 case LACP_MUX_DISTRIBUTING:
1642 lacp_enable_distributing(lp);
1643 break;
1644 default:
1645 panic("%s: unknown state", __func__);
1646 }
1647
1648 LACP_DPRINTF((lp, "mux_state %d -> %d\n", lp->lp_mux_state, new_state));
1649
1650 lp->lp_mux_state = new_state;
1651 }
1652
1653 static void
1654 lacp_sm_mux_timer(struct lacp_port *lp)
1655 {
1656 struct lacp_aggregator *la = lp->lp_aggregator;
1657 char buf[LACP_LAGIDSTR_MAX+1];
1658
1659 KASSERT(la->la_pending > 0, ("no pending event"));
1660
1661 LACP_DPRINTF((lp, "%s: aggregator %s, pending %d -> %d\n", __func__,
1662 lacp_format_lagid(&la->la_actor, &la->la_partner,
1663 buf, sizeof(buf)),
1664 la->la_pending, la->la_pending - 1));
1665
1666 la->la_pending--;
1667 }
1668
1669 /* periodic transmit machine */
1670
1671 static void
1672 lacp_sm_ptx_update_timeout(struct lacp_port *lp, uint8_t oldpstate)
1673 {
1674 if (LACP_STATE_EQ(oldpstate, lp->lp_partner.lip_state,
1675 LACP_STATE_TIMEOUT)) {
1676 return;
1677 }
1678
1679 LACP_DPRINTF((lp, "partner timeout changed\n"));
1680
1681 /*
1682 * FAST_PERIODIC -> SLOW_PERIODIC
1683 * or
1684 * SLOW_PERIODIC (-> PERIODIC_TX) -> FAST_PERIODIC
1685 *
1686 * let lacp_sm_ptx_tx_schedule update the timeout.
1687 */
1688
1689 LACP_TIMER_DISARM(lp, LACP_TIMER_PERIODIC);
1690
1691 /*
1692 * if timeout has been shortened, assert NTT.
1693 */
1694
1695 if ((lp->lp_partner.lip_state & LACP_STATE_TIMEOUT)) {
1696 lacp_sm_assert_ntt(lp);
1697 }
1698 }
1699
1700 static void
1701 lacp_sm_ptx_tx_schedule(struct lacp_port *lp)
1702 {
1703 int timeout;
1704
1705 if (!(lp->lp_state & LACP_STATE_ACTIVITY) &&
1706 !(lp->lp_partner.lip_state & LACP_STATE_ACTIVITY)) {
1707 /*
1708 * NO_PERIODIC
1709 */
1710
1711 LACP_TIMER_DISARM(lp, LACP_TIMER_PERIODIC);
1712 return;
1713 }
1714
1715 if (LACP_TIMER_ISARMED(lp, LACP_TIMER_PERIODIC)) {
1716 return;
1717 }
1718
1719 timeout = (lp->lp_partner.lip_state & LACP_STATE_TIMEOUT) ?
1720 LACP_FAST_PERIODIC_TIME : LACP_SLOW_PERIODIC_TIME;
1721
1722 LACP_TIMER_ARM(lp, LACP_TIMER_PERIODIC, timeout);
1723 }
1724
1725 static void
1726 lacp_sm_ptx_timer(struct lacp_port *lp)
1727 {
1728 lacp_sm_assert_ntt(lp);
1729 }
1730
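/*
 * lacp_sm_rx: receive state machine.  record the partner information
 * carried in a received lacpdu, restart the current_while timer and
 * kick the transmit machine.
 */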
1731 static void
1732 lacp_sm_rx(struct lacp_port *lp, const struct lacpdu *du)
1733 {
1734 int timeout;
1735
1736 /*
1737 * check LACP_DISABLED first
1738 */
1739
1740 if (!(lp->lp_state & LACP_STATE_AGGREGATION)) {
1741 return;
1742 }
1743
1744 /*
1745 * check loopback condition.
1746 */
1747
1748 if (!lacp_compare_systemid(&du->ldu_actor.lip_systemid,
1749 &lp->lp_actor.lip_systemid)) {
1750 return;
1751 }
1752
1753 /*
1754 * EXPIRED, DEFAULTED, CURRENT -> CURRENT
1755 */
1756
1757 microuptime(&lp->lp_last_lacpdu_rx);
1758 lacp_sm_rx_update_selected(lp, du);
1759 lacp_sm_rx_update_ntt(lp, du);
1760 lacp_sm_rx_record_pdu(lp, du);
1761
1762 timeout = (lp->lp_state & LACP_STATE_TIMEOUT) ?
1763 LACP_SHORT_TIMEOUT_TIME : LACP_LONG_TIMEOUT_TIME;
1764 LACP_TIMER_ARM(lp, LACP_TIMER_CURRENT_WHILE, timeout);
1765
1766 lp->lp_state &= ~LACP_STATE_EXPIRED;
1767
1768 /*
1769 * kick the transmit machine without waiting for the next tick.
1770 */
1771
1772 lacp_sm_tx(lp);
1773 }
1774
1775 static void
1776 lacp_sm_rx_set_expired(struct lacp_port *lp)
1777 {
1778 lp->lp_partner.lip_state &= ~LACP_STATE_SYNC;
1779 lp->lp_partner.lip_state |= LACP_STATE_TIMEOUT;
1780 LACP_TIMER_ARM(lp, LACP_TIMER_CURRENT_WHILE, LACP_SHORT_TIMEOUT_TIME);
1781 lp->lp_state |= LACP_STATE_EXPIRED;
1782 }
1783
1784 static void
1785 lacp_sm_rx_timer(struct lacp_port *lp)
1786 {
1787 if ((lp->lp_state & LACP_STATE_EXPIRED) == 0) {
1788 /* CURRENT -> EXPIRED */
1789 LACP_DPRINTF((lp, "%s: CURRENT -> EXPIRED\n", __func__));
1790 lacp_sm_rx_set_expired(lp);
1791 } else {
1792 /* EXPIRED -> DEFAULTED */
1793 LACP_DPRINTF((lp, "%s: EXPIRED -> DEFAULTED\n", __func__));
1794 lacp_sm_rx_update_default_selected(lp);
1795 lacp_sm_rx_record_default(lp);
1796 lp->lp_state &= ~LACP_STATE_EXPIRED;
1797 }
1798 }
1799
1800 static void
1801 lacp_sm_rx_record_pdu(struct lacp_port *lp, const struct lacpdu *du)
1802 {
1803 boolean_t active;
1804 uint8_t oldpstate;
1805 char buf[LACP_STATESTR_MAX+1];
1806
1807 LACP_TRACE(lp);
1808
1809 oldpstate = lp->lp_partner.lip_state;
1810
1811 active = (du->ldu_actor.lip_state & LACP_STATE_ACTIVITY)
1812 || ((lp->lp_state & LACP_STATE_ACTIVITY) &&
1813 (du->ldu_partner.lip_state & LACP_STATE_ACTIVITY));
1814
1815 lp->lp_partner = du->ldu_actor;
1816 if (active &&
1817 ((LACP_STATE_EQ(lp->lp_state, du->ldu_partner.lip_state,
1818 LACP_STATE_AGGREGATION) &&
1819 !lacp_compare_peerinfo(&lp->lp_actor, &du->ldu_partner))
1820 || (du->ldu_partner.lip_state & LACP_STATE_AGGREGATION) == 0)) {
1821 /*
1822 * XXX Maintain legacy behavior of leaving the
1823 * LACP_STATE_SYNC bit unchanged from the partner's
1824 * advertisement if lsc_strict_mode is false.
1825 * TODO: We should re-examine the concept of the "strict mode"
1826 * to ensure it makes sense to maintain a non-strict mode.
1827 */
1828 if (lp->lp_lsc->lsc_strict_mode)
1829 lp->lp_partner.lip_state |= LACP_STATE_SYNC;
1830 } else {
1831 lp->lp_partner.lip_state &= ~LACP_STATE_SYNC;
1832 }
1833
1834 lp->lp_state &= ~LACP_STATE_DEFAULTED;
1835
1836 if (oldpstate != lp->lp_partner.lip_state) {
1837 LACP_DPRINTF((lp, "old pstate %s\n",
1838 lacp_format_state(oldpstate, buf, sizeof(buf))));
1839 LACP_DPRINTF((lp, "new pstate %s\n",
1840 lacp_format_state(lp->lp_partner.lip_state, buf,
1841 sizeof(buf))));
1842 }
1843
1844 lacp_sm_ptx_update_timeout(lp, oldpstate);
1845 }
1846
1847 static void
1848 lacp_sm_rx_update_ntt(struct lacp_port *lp, const struct lacpdu *du)
1849 {
1850
1851 LACP_TRACE(lp);
1852
1853 if (lacp_compare_peerinfo(&lp->lp_actor, &du->ldu_partner) ||
1854 !LACP_STATE_EQ(lp->lp_state, du->ldu_partner.lip_state,
1855 LACP_STATE_ACTIVITY | LACP_STATE_SYNC | LACP_STATE_AGGREGATION)) {
1856 LACP_DPRINTF((lp, "%s: assert ntt\n", __func__));
1857 lacp_sm_assert_ntt(lp);
1858 }
1859 }
1860
1861 static void
1862 lacp_sm_rx_record_default(struct lacp_port *lp)
1863 {
1864 uint8_t oldpstate;
1865
1866 LACP_TRACE(lp);
1867
1868 oldpstate = lp->lp_partner.lip_state;
1869 if (lp->lp_lsc->lsc_strict_mode)
1870 lp->lp_partner = lacp_partner_admin_strict;
1871 else
1872 lp->lp_partner = lacp_partner_admin_optimistic;
1873 lp->lp_state |= LACP_STATE_DEFAULTED;
1874 lacp_sm_ptx_update_timeout(lp, oldpstate);
1875 }
1876
1877 static void
1878 lacp_sm_rx_update_selected_from_peerinfo(struct lacp_port *lp,
1879 const struct lacp_peerinfo *info)
1880 {
1881
1882 LACP_TRACE(lp);
1883
1884 if (lacp_compare_peerinfo(&lp->lp_partner, info) ||
1885 !LACP_STATE_EQ(lp->lp_partner.lip_state, info->lip_state,
1886 LACP_STATE_AGGREGATION)) {
1887 lp->lp_selected = LACP_UNSELECTED;
1888 /* mux machine will clean up lp->lp_aggregator */
1889 }
1890 }
1891
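/* Update selection against the actor information in a received LACPDU. */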
static void
lacp_sm_rx_update_selected(struct lacp_port *lp, const struct lacpdu *du)
{

	LACP_TRACE(lp);

	lacp_sm_rx_update_selected_from_peerinfo(lp, &du->ldu_actor);
}

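/*
 * Update selection against the administrative defaults that
 * lacp_sm_rx_record_default() would record (strict or optimistic).
 */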
static void
lacp_sm_rx_update_default_selected(struct lacp_port *lp)
{

	LACP_TRACE(lp);

	if (lp->lp_lsc->lsc_strict_mode)
		lacp_sm_rx_update_selected_from_peerinfo(lp,
		    &lacp_partner_admin_strict);
	else
		lacp_sm_rx_update_selected_from_peerinfo(lp,
		    &lacp_partner_admin_optimistic);
}

/* transmit machine */

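/*
 * Transmit machine: send a LACPDU if NTT is pending.  NTT is dropped
 * when the port is not aggregatable or when both sides are passive;
 * transmission is rate limited, and on failure NTT stays set so the
 * PDU is retried later.
 */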
static void
lacp_sm_tx(struct lacp_port *lp)
{
	int error = 0;

	if (!(lp->lp_state & LACP_STATE_AGGREGATION)
#if 1
	    || (!(lp->lp_state & LACP_STATE_ACTIVITY)
	    && !(lp->lp_partner.lip_state & LACP_STATE_ACTIVITY))
#endif
	    ) {
		lp->lp_flags &= ~LACP_PORT_NTT;
	}

	if (!(lp->lp_flags & LACP_PORT_NTT)) {
		return;
	}

	/* Rate limit to 3 PDUs per LACP_FAST_PERIODIC_TIME */
	if (ppsratecheck(&lp->lp_last_lacpdu, &lp->lp_lacpdu_sent,
		    (3 / LACP_FAST_PERIODIC_TIME)) == 0) {
		LACP_DPRINTF((lp, "rate limited pdu\n"));
		return;
	}

	if (((1 << lp->lp_ifp->if_dunit) & lp->lp_lsc->lsc_debug.lsc_tx_test) == 0) {
		error = lacp_xmit_lacpdu(lp);
	} else {
		LACP_TPRINTF((lp, "Dropping TX PDU\n"));
	}

	if (error == 0) {
		lp->lp_flags &= ~LACP_PORT_NTT;
	} else {
		LACP_DPRINTF((lp, "lacpdu transmit failure, error %d\n",
		    error));
	}
}

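/* Request that the transmit machine send a LACPDU ("need to transmit"). */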
static void
lacp_sm_assert_ntt(struct lacp_port *lp)
{

	lp->lp_flags |= LACP_PORT_NTT;
}

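/*
 * Run the per-port timers: decrement each armed timer by one tick and
 * call its expiry handler when it reaches zero.  The current_while
 * timer is only decremented once at least a second has passed since
 * the last received LACPDU, so a port that is still hearing from its
 * partner is not expired prematurely.
 */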
static void
lacp_run_timers(struct lacp_port *lp)
{
	int i;
	struct timeval time_diff;

	for (i = 0; i < LACP_NTIMER; i++) {
		KASSERT(lp->lp_timer[i] >= 0,
		    ("invalid timer value %d", lp->lp_timer[i]));
		if (lp->lp_timer[i] == 0) {
			continue;
		} else {
			if (i == LACP_TIMER_CURRENT_WHILE) {
				microuptime(&time_diff);
				timevalsub(&time_diff, &lp->lp_last_lacpdu_rx);
				if (time_diff.tv_sec) {
					/* At least one sec has elapsed since last LACP packet. */
					--lp->lp_timer[i];
				}
			} else {
				--lp->lp_timer[i];
			}

			if ((lp->lp_timer[i] <= 0) && (lacp_timer_funcs[i])) {
				(*lacp_timer_funcs[i])(lp);
			}
		}
	}
}

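/*
 * Input handler for marker protocol frames.  Marker information
 * requests are echoed back to the peer as marker responses; a marker
 * response is matched against the last marker sent on this port and,
 * once every port has answered, the temporary suppression of
 * distributing is lifted again.
 */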
int
lacp_marker_input(struct lacp_port *lp, struct mbuf *m)
{
	struct lacp_softc *lsc = lp->lp_lsc;
	struct lagg_port *lgp = lp->lp_lagg;
	struct lacp_port *lp2;
	struct markerdu *mdu;
	int error = 0;
	int pending = 0;

	if (m->m_pkthdr.len != sizeof(*mdu)) {
		goto bad;
	}

	if ((m->m_flags & M_MCAST) == 0) {
		goto bad;
	}

	if (m->m_len < sizeof(*mdu)) {
		m = m_pullup(m, sizeof(*mdu));
		if (m == NULL) {
			return (ENOMEM);
		}
	}

	mdu = mtod(m, struct markerdu *);

	if (memcmp(&mdu->mdu_eh.ether_dhost,
	    &ethermulticastaddr_slowprotocols, ETHER_ADDR_LEN)) {
		goto bad;
	}

	if (mdu->mdu_sph.sph_version != 1) {
		goto bad;
	}

	switch (mdu->mdu_tlv.tlv_type) {
	case MARKER_TYPE_INFO:
		if (tlv_check(mdu, sizeof(*mdu), &mdu->mdu_tlv,
		    marker_info_tlv_template, TRUE)) {
			goto bad;
		}
		mdu->mdu_tlv.tlv_type = MARKER_TYPE_RESPONSE;
		memcpy(&mdu->mdu_eh.ether_dhost,
		    &ethermulticastaddr_slowprotocols, ETHER_ADDR_LEN);
		memcpy(&mdu->mdu_eh.ether_shost,
		    lgp->lp_lladdr, ETHER_ADDR_LEN);
		error = lagg_enqueue(lp->lp_ifp, m);
		break;

	case MARKER_TYPE_RESPONSE:
		if (tlv_check(mdu, sizeof(*mdu), &mdu->mdu_tlv,
		    marker_response_tlv_template, TRUE)) {
			goto bad;
		}
		LACP_DPRINTF((lp, "marker response, port=%u, sys=%6D, id=%u\n",
		    ntohs(mdu->mdu_info.mi_rq_port), mdu->mdu_info.mi_rq_system,
		    ":", ntohl(mdu->mdu_info.mi_rq_xid)));

		/* Verify that it is the last marker we sent out */
		if (memcmp(&mdu->mdu_info, &lp->lp_marker,
		    sizeof(struct lacp_markerinfo)))
			goto bad;

		LACP_LOCK(lsc);
		lp->lp_flags &= ~LACP_PORT_MARK;

		if (lsc->lsc_suppress_distributing) {
			/* Check if any ports are waiting for a response */
			LIST_FOREACH(lp2, &lsc->lsc_ports, lp_next) {
				if (lp2->lp_flags & LACP_PORT_MARK) {
					pending = 1;
					break;
				}
			}

			if (pending == 0) {
				/* All interface queues are clear */
				LACP_DPRINTF((NULL, "queue flush complete\n"));
				lsc->lsc_suppress_distributing = FALSE;
			}
		}
		LACP_UNLOCK(lsc);
		m_freem(m);
		break;

	default:
		goto bad;
	}

	return (error);

bad:
	LACP_DPRINTF((lp, "bad marker frame\n"));
	m_freem(m);
	return (EINVAL);
}

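/*
 * Walk a packed sequence of TLVs against a template, checking that
 * each entry fits within 'size' bytes of the buffer at 'p' and has
 * the expected length (and, if check_type is TRUE, type).  Returns 0
 * on a match, EINVAL otherwise.
 */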
static int
tlv_check(const void *p, size_t size, const struct tlvhdr *tlv,
    const struct tlv_template *tmpl, boolean_t check_type)
{
	while (/* CONSTCOND */ 1) {
		if ((const char *)tlv - (const char *)p + sizeof(*tlv) > size) {
			return (EINVAL);
		}
		if ((check_type && tlv->tlv_type != tmpl->tmpl_type) ||
		    tlv->tlv_length != tmpl->tmpl_length) {
			return (EINVAL);
		}
		if (tmpl->tmpl_type == 0) {
			break;
		}
		tlv = (const struct tlvhdr *)
		    ((const char *)tlv + tlv->tlv_length);
		tmpl++;
	}

	return (0);
}

/* Debugging */
const char *
lacp_format_mac(const uint8_t *mac, char *buf, size_t buflen)
{
	snprintf(buf, buflen, "%02X-%02X-%02X-%02X-%02X-%02X",
	    (int)mac[0],
	    (int)mac[1],
	    (int)mac[2],
	    (int)mac[3],
	    (int)mac[4],
	    (int)mac[5]);

	return (buf);
}

const char *
lacp_format_systemid(const struct lacp_systemid *sysid,
    char *buf, size_t buflen)
{
	char macbuf[LACP_MACSTR_MAX+1];

	snprintf(buf, buflen, "%04X,%s",
	    ntohs(sysid->lsi_prio),
	    lacp_format_mac(sysid->lsi_mac, macbuf, sizeof(macbuf)));

	return (buf);
}

const char *
lacp_format_portid(const struct lacp_portid *portid, char *buf, size_t buflen)
{
	snprintf(buf, buflen, "%04X,%04X",
	    ntohs(portid->lpi_prio),
	    ntohs(portid->lpi_portno));

	return (buf);
}

const char *
lacp_format_partner(const struct lacp_peerinfo *peer, char *buf, size_t buflen)
{
	char sysid[LACP_SYSTEMIDSTR_MAX+1];
	char portid[LACP_PORTIDSTR_MAX+1];

	snprintf(buf, buflen, "(%s,%04X,%s)",
	    lacp_format_systemid(&peer->lip_systemid, sysid, sizeof(sysid)),
	    ntohs(peer->lip_key),
	    lacp_format_portid(&peer->lip_portid, portid, sizeof(portid)));

	return (buf);
}

const char *
lacp_format_lagid(const struct lacp_peerinfo *a,
    const struct lacp_peerinfo *b, char *buf, size_t buflen)
{
	char astr[LACP_PARTNERSTR_MAX+1];
	char bstr[LACP_PARTNERSTR_MAX+1];

#if 0
	/*
	 * There is a convention to display the lower-numbered peer
	 * on the left.
	 */

	if (lacp_compare_peerinfo(a, b) > 0) {
		const struct lacp_peerinfo *t;

		t = a;
		a = b;
		b = t;
	}
#endif

	snprintf(buf, buflen, "[%s,%s]",
	    lacp_format_partner(a, astr, sizeof(astr)),
	    lacp_format_partner(b, bstr, sizeof(bstr)));

	return (buf);
}

const char *
lacp_format_lagid_aggregator(const struct lacp_aggregator *la,
    char *buf, size_t buflen)
{
	if (la == NULL) {
		return ("(none)");
	}

	return (lacp_format_lagid(&la->la_actor, &la->la_partner, buf, buflen));
}

const char *
lacp_format_state(uint8_t state, char *buf, size_t buflen)
{
	snprintf(buf, buflen, "%b", state, LACP_STATE_BITS);
	return (buf);
}

static void
lacp_dump_lacpdu(const struct lacpdu *du)
{
	char buf[LACP_PARTNERSTR_MAX+1];
	char buf2[LACP_STATESTR_MAX+1];

	printf("actor=%s\n",
	    lacp_format_partner(&du->ldu_actor, buf, sizeof(buf)));
	printf("actor.state=%s\n",
	    lacp_format_state(du->ldu_actor.lip_state, buf2, sizeof(buf2)));
	printf("partner=%s\n",
	    lacp_format_partner(&du->ldu_partner, buf, sizeof(buf)));
	printf("partner.state=%s\n",
	    lacp_format_state(du->ldu_partner.lip_state, buf2, sizeof(buf2)));

	printf("maxdelay=%d\n", ntohs(du->ldu_collector.lci_maxdelay));
}

static void
lacp_dprintf(const struct lacp_port *lp, const char *fmt, ...)
{
	va_list va;

	if (lp) {
		printf("%s: ", lp->lp_ifp->if_xname);
	}

	va_start(va, fmt);
	vprintf(fmt, va);
	va_end(va);
}
