/* $NetBSD: ieee8023ad_lacp.c,v 1.3 2005/12/11 12:24:54 christos Exp $ */

/*-
 * SPDX-License-Identifier: BSD-2-Clause-NetBSD
 *
 * Copyright (c)2005 YAMAMOTO Takashi,
 * Copyright (c)2008 Andrew Thompson <thompsa@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ratelimit.h"

#include <sys/param.h>
#include <sys/callout.h>
#include <sys/eventhandler.h>
#include <sys/mbuf.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h> /* hz */
#include <sys/socket.h> /* for net/if.h */
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <machine/stdarg.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/taskqueue.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/ethernet.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <net/if_lagg.h>
#include <net/ieee8023ad_lacp.h>

/*
 * actor system priority and port priority.
 * XXX should be configurable.
 */

#define	LACP_SYSTEM_PRIO	0x8000
#define	LACP_PORT_PRIO		0x8000

const uint8_t ethermulticastaddr_slowprotocols[ETHER_ADDR_LEN] =
    { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x02 };

static const struct tlv_template lacp_info_tlv_template[] = {
	{ LACP_TYPE_ACTORINFO,
	    sizeof(struct tlvhdr) + sizeof(struct lacp_peerinfo) },
	{ LACP_TYPE_PARTNERINFO,
	    sizeof(struct tlvhdr) + sizeof(struct lacp_peerinfo) },
	{ LACP_TYPE_COLLECTORINFO,
	    sizeof(struct tlvhdr) + sizeof(struct lacp_collectorinfo) },
	{ 0, 0 },
};

static const struct tlv_template marker_info_tlv_template[] = {
	{ MARKER_TYPE_INFO,
	    sizeof(struct tlvhdr) + sizeof(struct lacp_markerinfo) },
	{ 0, 0 },
};

static const struct tlv_template marker_response_tlv_template[] = {
	{ MARKER_TYPE_RESPONSE,
	    sizeof(struct tlvhdr) + sizeof(struct lacp_markerinfo) },
	{ 0, 0 },
};

typedef void (*lacp_timer_func_t)(struct lacp_port *);

static void	lacp_fill_actorinfo(struct lacp_port *, struct lacp_peerinfo *);
static void	lacp_fill_markerinfo(struct lacp_port *,
		    struct lacp_markerinfo *);

static uint64_t	lacp_aggregator_bandwidth(struct lacp_aggregator *);
static void	lacp_suppress_distributing(struct lacp_softc *,
		    struct lacp_aggregator *);
static void	lacp_transit_expire(void *);
static void	lacp_update_portmap(struct lacp_softc *);
static void	lacp_select_active_aggregator(struct lacp_softc *);
static uint16_t	lacp_compose_key(struct lacp_port *);
static int	tlv_check(const void *, size_t, const struct tlvhdr *,
		    const struct tlv_template *, boolean_t);
static void	lacp_tick(void *);

static void	lacp_fill_aggregator_id(struct lacp_aggregator *,
		    const struct lacp_port *);
static void	lacp_fill_aggregator_id_peer(struct lacp_peerinfo *,
		    const struct lacp_peerinfo *);
static int	lacp_aggregator_is_compatible(const struct lacp_aggregator *,
		    const struct lacp_port *);
static int	lacp_peerinfo_is_compatible(const struct lacp_peerinfo *,
		    const struct lacp_peerinfo *);

static struct lacp_aggregator *lacp_aggregator_get(struct lacp_softc *,
		    struct lacp_port *);
static void	lacp_aggregator_addref(struct lacp_softc *,
		    struct lacp_aggregator *);
static void	lacp_aggregator_delref(struct lacp_softc *,
		    struct lacp_aggregator *);

/* receive machine */

static int	lacp_pdu_input(struct lacp_port *, struct mbuf *);
static int	lacp_marker_input(struct lacp_port *, struct mbuf *);
static void	lacp_sm_rx(struct lacp_port *, const struct lacpdu *);
static void	lacp_sm_rx_timer(struct lacp_port *);
static void	lacp_sm_rx_set_expired(struct lacp_port *);
static void	lacp_sm_rx_update_ntt(struct lacp_port *,
		    const struct lacpdu *);
static void	lacp_sm_rx_record_pdu(struct lacp_port *,
		    const struct lacpdu *);
static void	lacp_sm_rx_update_selected(struct lacp_port *,
		    const struct lacpdu *);
static void	lacp_sm_rx_record_default(struct lacp_port *);
static void	lacp_sm_rx_update_default_selected(struct lacp_port *);
static void	lacp_sm_rx_update_selected_from_peerinfo(struct lacp_port *,
		    const struct lacp_peerinfo *);

/* mux machine */

static void	lacp_sm_mux(struct lacp_port *);
static void	lacp_set_mux(struct lacp_port *, enum lacp_mux_state);
static void	lacp_sm_mux_timer(struct lacp_port *);

/* periodic transmit machine */

static void	lacp_sm_ptx_update_timeout(struct lacp_port *, uint8_t);
static void	lacp_sm_ptx_tx_schedule(struct lacp_port *);
static void	lacp_sm_ptx_timer(struct lacp_port *);

/* transmit machine */

static void	lacp_sm_tx(struct lacp_port *);
static void	lacp_sm_assert_ntt(struct lacp_port *);

static void	lacp_run_timers(struct lacp_port *);
static int	lacp_compare_peerinfo(const struct lacp_peerinfo *,
		    const struct lacp_peerinfo *);
static int	lacp_compare_systemid(const struct lacp_systemid *,
		    const struct lacp_systemid *);
static void	lacp_port_enable(struct lacp_port *);
static void	lacp_port_disable(struct lacp_port *);
static void	lacp_select(struct lacp_port *);
static void	lacp_unselect(struct lacp_port *);
static void	lacp_disable_collecting(struct lacp_port *);
static void	lacp_enable_collecting(struct lacp_port *);
static void	lacp_disable_distributing(struct lacp_port *);
static void	lacp_enable_distributing(struct lacp_port *);
static int	lacp_xmit_lacpdu(struct lacp_port *);
static int	lacp_xmit_marker(struct lacp_port *);

/* Debugging */

static void	lacp_dump_lacpdu(const struct lacpdu *);
static const char *lacp_format_partner(const struct lacp_peerinfo *, char *,
		    size_t);
static const char *lacp_format_lagid(const struct lacp_peerinfo *,
		    const struct lacp_peerinfo *, char *, size_t);
static const char *lacp_format_lagid_aggregator(const struct lacp_aggregator *,
		    char *, size_t);
static const char *lacp_format_state(uint8_t, char *, size_t);
static const char *lacp_format_mac(const uint8_t *, char *, size_t);
static const char *lacp_format_systemid(const struct lacp_systemid *, char *,
		    size_t);
static const char *lacp_format_portid(const struct lacp_portid *, char *,
		    size_t);
static void	lacp_dprintf(const struct lacp_port *, const char *, ...)
		    __attribute__((__format__(__printf__, 2, 3)));

VNET_DEFINE_STATIC(int, lacp_debug);
#define	V_lacp_debug	VNET(lacp_debug)
SYSCTL_NODE(_net_link_lagg, OID_AUTO, lacp, CTLFLAG_RD, 0, "ieee802.3ad");
SYSCTL_INT(_net_link_lagg_lacp, OID_AUTO, debug, CTLFLAG_RWTUN | CTLFLAG_VNET,
    &VNET_NAME(lacp_debug), 0, "Enable LACP debug logging (1=debug, 2=trace)");

VNET_DEFINE_STATIC(int, lacp_default_strict_mode) = 1;
SYSCTL_INT(_net_link_lagg_lacp, OID_AUTO, default_strict_mode,
    CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(lacp_default_strict_mode), 0,
    "LACP strict protocol compliance default");

#define LACP_DPRINTF(a) if (V_lacp_debug & 0x01) { lacp_dprintf a ; }
#define LACP_TRACE(a) if (V_lacp_debug & 0x02) { lacp_dprintf(a,"%s\n",__func__); }
#define LACP_TPRINTF(a) if (V_lacp_debug & 0x04) { lacp_dprintf a ; }

/*
 * partner administration variables.
 * XXX should be configurable.
 */

static const struct lacp_peerinfo lacp_partner_admin_optimistic = {
	.lip_systemid = { .lsi_prio = 0xffff },
	.lip_portid = { .lpi_prio = 0xffff },
	.lip_state = LACP_STATE_SYNC | LACP_STATE_AGGREGATION |
	    LACP_STATE_COLLECTING | LACP_STATE_DISTRIBUTING,
};

static const struct lacp_peerinfo lacp_partner_admin_strict = {
	.lip_systemid = { .lsi_prio = 0xffff },
	.lip_portid = { .lpi_prio = 0xffff },
	.lip_state = 0,
};

static const lacp_timer_func_t lacp_timer_funcs[LACP_NTIMER] = {
	[LACP_TIMER_CURRENT_WHILE] = lacp_sm_rx_timer,
	[LACP_TIMER_PERIODIC] = lacp_sm_ptx_timer,
	[LACP_TIMER_WAIT_WHILE] = lacp_sm_mux_timer,
};

struct mbuf *
lacp_input(struct lagg_port *lgp, struct mbuf *m)
{
	struct lacp_port *lp = LACP_PORT(lgp);
	uint8_t subtype;

	if (m->m_pkthdr.len < sizeof(struct ether_header) + sizeof(subtype)) {
		m_freem(m);
		return (NULL);
	}

	m_copydata(m, sizeof(struct ether_header), sizeof(subtype), &subtype);
	switch (subtype) {
	case SLOWPROTOCOLS_SUBTYPE_LACP:
		lacp_pdu_input(lp, m);
		return (NULL);

	case SLOWPROTOCOLS_SUBTYPE_MARKER:
		lacp_marker_input(lp, m);
		return (NULL);
	}

	/* Not a subtype we are interested in */
	return (m);
}

/*
 * lacp_pdu_input: process lacpdu
 */
static int
lacp_pdu_input(struct lacp_port *lp, struct mbuf *m)
{
	struct lacp_softc *lsc = lp->lp_lsc;
	struct lacpdu *du;
	int error = 0;

	if (m->m_pkthdr.len != sizeof(*du)) {
		goto bad;
	}

	if ((m->m_flags & M_MCAST) == 0) {
		goto bad;
	}

	if (m->m_len < sizeof(*du)) {
		m = m_pullup(m, sizeof(*du));
		if (m == NULL) {
			return (ENOMEM);
		}
	}

	du = mtod(m, struct lacpdu *);

	if (memcmp(&du->ldu_eh.ether_dhost,
	    &ethermulticastaddr_slowprotocols, ETHER_ADDR_LEN)) {
		goto bad;
	}

	/*
	 * ignore the version for compatibility with
	 * the future protocol revisions.
	 */
#if 0
	if (du->ldu_sph.sph_version != 1) {
		goto bad;
	}
#endif

	/*
	 * ignore tlv types for compatibility with
	 * the future protocol revisions.
	 */
	if (tlv_check(du, sizeof(*du), &du->ldu_tlv_actor,
	    lacp_info_tlv_template, FALSE)) {
		goto bad;
	}

	if (V_lacp_debug > 0) {
		lacp_dprintf(lp, "lacpdu receive\n");
		lacp_dump_lacpdu(du);
	}

	if ((1 << lp->lp_ifp->if_dunit) & lp->lp_lsc->lsc_debug.lsc_rx_test) {
		LACP_TPRINTF((lp, "Dropping RX PDU\n"));
		goto bad;
	}

	LACP_LOCK(lsc);
	lacp_sm_rx(lp, du);
	LACP_UNLOCK(lsc);

	m_freem(m);
	return (error);

bad:
	m_freem(m);
	return (EINVAL);
}

static void
lacp_fill_actorinfo(struct lacp_port *lp, struct lacp_peerinfo *info)
{
	struct lagg_port *lgp = lp->lp_lagg;
	struct lagg_softc *sc = lgp->lp_softc;

	info->lip_systemid.lsi_prio = htons(LACP_SYSTEM_PRIO);
	memcpy(&info->lip_systemid.lsi_mac,
	    IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
	info->lip_portid.lpi_prio = htons(LACP_PORT_PRIO);
	info->lip_portid.lpi_portno = htons(lp->lp_ifp->if_index);
	info->lip_state = lp->lp_state;
}

static void
lacp_fill_markerinfo(struct lacp_port *lp, struct lacp_markerinfo *info)
{
	struct ifnet *ifp = lp->lp_ifp;

	/* Fill in the port index and system id (encoded as the MAC) */
	info->mi_rq_port = htons(ifp->if_index);
	memcpy(&info->mi_rq_system, lp->lp_systemid.lsi_mac, ETHER_ADDR_LEN);
	info->mi_rq_xid = htonl(0);
}

static int
lacp_xmit_lacpdu(struct lacp_port *lp)
{
	struct lagg_port *lgp = lp->lp_lagg;
	struct mbuf *m;
	struct lacpdu *du;
	int error;

	LACP_LOCK_ASSERT(lp->lp_lsc);

	m = m_gethdr(M_NOWAIT, MT_DATA);
	if (m == NULL) {
		return (ENOMEM);
	}
	m->m_len = m->m_pkthdr.len = sizeof(*du);

	du = mtod(m, struct lacpdu *);
	memset(du, 0, sizeof(*du));

	memcpy(&du->ldu_eh.ether_dhost, ethermulticastaddr_slowprotocols,
	    ETHER_ADDR_LEN);
	memcpy(&du->ldu_eh.ether_shost, lgp->lp_lladdr, ETHER_ADDR_LEN);
	du->ldu_eh.ether_type = htons(ETHERTYPE_SLOW);

	du->ldu_sph.sph_subtype = SLOWPROTOCOLS_SUBTYPE_LACP;
	du->ldu_sph.sph_version = 1;

	TLV_SET(&du->ldu_tlv_actor, LACP_TYPE_ACTORINFO, sizeof(du->ldu_actor));
	du->ldu_actor = lp->lp_actor;

	TLV_SET(&du->ldu_tlv_partner, LACP_TYPE_PARTNERINFO,
	    sizeof(du->ldu_partner));
	du->ldu_partner = lp->lp_partner;

	TLV_SET(&du->ldu_tlv_collector, LACP_TYPE_COLLECTORINFO,
	    sizeof(du->ldu_collector));
	du->ldu_collector.lci_maxdelay = 0;

	if (V_lacp_debug > 0) {
		lacp_dprintf(lp, "lacpdu transmit\n");
		lacp_dump_lacpdu(du);
	}

	m->m_flags |= M_MCAST;

	/*
	 * XXX should use higher priority queue.
	 * otherwise network congestion can break aggregation.
	 */

	error = lagg_enqueue(lp->lp_ifp, m);
	return (error);
}

static int
lacp_xmit_marker(struct lacp_port *lp)
{
	struct lagg_port *lgp = lp->lp_lagg;
	struct mbuf *m;
	struct markerdu *mdu;
	int error;

	LACP_LOCK_ASSERT(lp->lp_lsc);

	m = m_gethdr(M_NOWAIT, MT_DATA);
	if (m == NULL) {
		return (ENOMEM);
	}
	m->m_len = m->m_pkthdr.len = sizeof(*mdu);

	mdu = mtod(m, struct markerdu *);
	memset(mdu, 0, sizeof(*mdu));

	memcpy(&mdu->mdu_eh.ether_dhost, ethermulticastaddr_slowprotocols,
	    ETHER_ADDR_LEN);
	memcpy(&mdu->mdu_eh.ether_shost, lgp->lp_lladdr, ETHER_ADDR_LEN);
	mdu->mdu_eh.ether_type = htons(ETHERTYPE_SLOW);

	mdu->mdu_sph.sph_subtype = SLOWPROTOCOLS_SUBTYPE_MARKER;
	mdu->mdu_sph.sph_version = 1;

	/* Bump the transaction id and copy over the marker info */
	lp->lp_marker.mi_rq_xid = htonl(ntohl(lp->lp_marker.mi_rq_xid) + 1);
	TLV_SET(&mdu->mdu_tlv, MARKER_TYPE_INFO, sizeof(mdu->mdu_info));
	mdu->mdu_info = lp->lp_marker;

	LACP_DPRINTF((lp, "marker transmit, port=%u, sys=%6D, id=%u\n",
	    ntohs(mdu->mdu_info.mi_rq_port), mdu->mdu_info.mi_rq_system, ":",
	    ntohl(mdu->mdu_info.mi_rq_xid)));

	m->m_flags |= M_MCAST;
	error = lagg_enqueue(lp->lp_ifp, m);
	return (error);
}

void
lacp_linkstate(struct lagg_port *lgp)
{
	struct lacp_port *lp = LACP_PORT(lgp);
	struct lacp_softc *lsc = lp->lp_lsc;
	struct ifnet *ifp = lgp->lp_ifp;
	struct ifmediareq ifmr;
	int error = 0;
	u_int media;
	uint8_t old_state;
	uint16_t old_key;

	bzero((char *)&ifmr, sizeof(ifmr));
	error = (*ifp->if_ioctl)(ifp, SIOCGIFXMEDIA, (caddr_t)&ifmr);
	if (error != 0) {
		bzero((char *)&ifmr, sizeof(ifmr));
		error = (*ifp->if_ioctl)(ifp, SIOCGIFMEDIA, (caddr_t)&ifmr);
	}
	if (error != 0)
		return;

	LACP_LOCK(lsc);
	media = ifmr.ifm_active;
	LACP_DPRINTF((lp, "media changed 0x%x -> 0x%x, ether = %d, fdx = %d, "
	    "link = %d\n", lp->lp_media, media, IFM_TYPE(media) == IFM_ETHER,
	    (media & IFM_FDX) != 0, ifp->if_link_state == LINK_STATE_UP));
	old_state = lp->lp_state;
	old_key = lp->lp_key;

	lp->lp_media = media;
	/*
	 * If the port is not an active full duplex Ethernet link then it can
	 * not be aggregated.
	 */
	if (IFM_TYPE(media) != IFM_ETHER || (media & IFM_FDX) == 0 ||
	    ifp->if_link_state != LINK_STATE_UP) {
		lacp_port_disable(lp);
	} else {
		lacp_port_enable(lp);
	}
	lp->lp_key = lacp_compose_key(lp);

	if (old_state != lp->lp_state || old_key != lp->lp_key) {
		LACP_DPRINTF((lp, "-> UNSELECTED\n"));
		lp->lp_selected = LACP_UNSELECTED;
	}
	LACP_UNLOCK(lsc);
}

static void
lacp_tick(void *arg)
{
	struct lacp_softc *lsc = arg;
	struct lacp_port *lp;

	LIST_FOREACH(lp, &lsc->lsc_ports, lp_next) {
		if ((lp->lp_state & LACP_STATE_AGGREGATION) == 0)
			continue;

		CURVNET_SET(lp->lp_ifp->if_vnet);
		lacp_run_timers(lp);

		lacp_select(lp);
		lacp_sm_mux(lp);
		lacp_sm_tx(lp);
		lacp_sm_ptx_tx_schedule(lp);
		CURVNET_RESTORE();
	}
	callout_reset(&lsc->lsc_callout, hz, lacp_tick, lsc);
}

int
lacp_port_create(struct lagg_port *lgp)
{
	struct lagg_softc *sc = lgp->lp_softc;
	struct lacp_softc *lsc = LACP_SOFTC(sc);
	struct lacp_port *lp;
	struct ifnet *ifp = lgp->lp_ifp;
	struct sockaddr_dl sdl;
	struct ifmultiaddr *rifma = NULL;
	int error;

	link_init_sdl(ifp, (struct sockaddr *)&sdl, IFT_ETHER);
	sdl.sdl_alen = ETHER_ADDR_LEN;

	bcopy(&ethermulticastaddr_slowprotocols,
	    LLADDR(&sdl), ETHER_ADDR_LEN);
	error = if_addmulti(ifp, (struct sockaddr *)&sdl, &rifma);
	if (error) {
		printf("%s: ADDMULTI failed on %s\n", __func__,
		    lgp->lp_ifp->if_xname);
		return (error);
	}

	lp = malloc(sizeof(struct lacp_port),
	    M_DEVBUF, M_NOWAIT|M_ZERO);
	if (lp == NULL)
		return (ENOMEM);

	LACP_LOCK(lsc);
	lgp->lp_psc = lp;
	lp->lp_ifp = ifp;
	lp->lp_lagg = lgp;
	lp->lp_lsc = lsc;
	lp->lp_ifma = rifma;

	LIST_INSERT_HEAD(&lsc->lsc_ports, lp, lp_next);

	lacp_fill_actorinfo(lp, &lp->lp_actor);
	lacp_fill_markerinfo(lp, &lp->lp_marker);
	lp->lp_state = LACP_STATE_ACTIVITY;
	lp->lp_aggregator = NULL;
	lacp_sm_rx_set_expired(lp);
	LACP_UNLOCK(lsc);
	lacp_linkstate(lgp);

	return (0);
}

void
lacp_port_destroy(struct lagg_port *lgp)
{
	struct lacp_port *lp = LACP_PORT(lgp);
	struct lacp_softc *lsc = lp->lp_lsc;
	int i;

	LACP_LOCK(lsc);
	for (i = 0; i < LACP_NTIMER; i++) {
		LACP_TIMER_DISARM(lp, i);
	}

	lacp_disable_collecting(lp);
	lacp_disable_distributing(lp);
	lacp_unselect(lp);

	LIST_REMOVE(lp, lp_next);
	LACP_UNLOCK(lsc);

	/* The address may have already been removed by if_purgemaddrs() */
	if (!lgp->lp_detaching)
		if_delmulti_ifma(lp->lp_ifma);

	free(lp, M_DEVBUF);
}

void
lacp_req(struct lagg_softc *sc, void *data)
{
	struct lacp_opreq *req = (struct lacp_opreq *)data;
	struct lacp_softc *lsc = LACP_SOFTC(sc);
	struct lacp_aggregator *la;

	bzero(req, sizeof(struct lacp_opreq));

	/*
	 * If the LACP softc is NULL, return with the opreq structure full of
	 * zeros.  It is normal for the softc to be NULL while the lagg is
	 * being destroyed.
	 */
	if (NULL == lsc)
		return;

	la = lsc->lsc_active_aggregator;
	LACP_LOCK(lsc);
	if (la != NULL) {
		req->actor_prio = ntohs(la->la_actor.lip_systemid.lsi_prio);
		memcpy(&req->actor_mac, &la->la_actor.lip_systemid.lsi_mac,
		    ETHER_ADDR_LEN);
		req->actor_key = ntohs(la->la_actor.lip_key);
		req->actor_portprio = ntohs(la->la_actor.lip_portid.lpi_prio);
		req->actor_portno = ntohs(la->la_actor.lip_portid.lpi_portno);
		req->actor_state = la->la_actor.lip_state;

		req->partner_prio = ntohs(la->la_partner.lip_systemid.lsi_prio);
		memcpy(&req->partner_mac, &la->la_partner.lip_systemid.lsi_mac,
		    ETHER_ADDR_LEN);
		req->partner_key = ntohs(la->la_partner.lip_key);
		req->partner_portprio = ntohs(la->la_partner.lip_portid.lpi_prio);
		req->partner_portno = ntohs(la->la_partner.lip_portid.lpi_portno);
		req->partner_state = la->la_partner.lip_state;
	}
	LACP_UNLOCK(lsc);
}

void
lacp_portreq(struct lagg_port *lgp, void *data)
{
	struct lacp_opreq *req = (struct lacp_opreq *)data;
	struct lacp_port *lp = LACP_PORT(lgp);
	struct lacp_softc *lsc = lp->lp_lsc;

	LACP_LOCK(lsc);
	req->actor_prio = ntohs(lp->lp_actor.lip_systemid.lsi_prio);
	memcpy(&req->actor_mac, &lp->lp_actor.lip_systemid.lsi_mac,
	    ETHER_ADDR_LEN);
	req->actor_key = ntohs(lp->lp_actor.lip_key);
	req->actor_portprio = ntohs(lp->lp_actor.lip_portid.lpi_prio);
	req->actor_portno = ntohs(lp->lp_actor.lip_portid.lpi_portno);
	req->actor_state = lp->lp_actor.lip_state;

	req->partner_prio = ntohs(lp->lp_partner.lip_systemid.lsi_prio);
	memcpy(&req->partner_mac, &lp->lp_partner.lip_systemid.lsi_mac,
	    ETHER_ADDR_LEN);
	req->partner_key = ntohs(lp->lp_partner.lip_key);
	req->partner_portprio = ntohs(lp->lp_partner.lip_portid.lpi_prio);
	req->partner_portno = ntohs(lp->lp_partner.lip_portid.lpi_portno);
	req->partner_state = lp->lp_partner.lip_state;
	LACP_UNLOCK(lsc);
}

static void
lacp_disable_collecting(struct lacp_port *lp)
{
	LACP_DPRINTF((lp, "collecting disabled\n"));
	lp->lp_state &= ~LACP_STATE_COLLECTING;
}

static void
lacp_enable_collecting(struct lacp_port *lp)
{
	LACP_DPRINTF((lp, "collecting enabled\n"));
	lp->lp_state |= LACP_STATE_COLLECTING;
}

static void
lacp_disable_distributing(struct lacp_port *lp)
{
	struct lacp_aggregator *la = lp->lp_aggregator;
	struct lacp_softc *lsc = lp->lp_lsc;
	struct lagg_softc *sc = lsc->lsc_softc;
	char buf[LACP_LAGIDSTR_MAX+1];

	LACP_LOCK_ASSERT(lsc);

	if (la == NULL || (lp->lp_state & LACP_STATE_DISTRIBUTING) == 0) {
		return;
	}

	KASSERT(!TAILQ_EMPTY(&la->la_ports), ("no aggregator ports"));
	KASSERT(la->la_nports > 0, ("nports invalid (%d)", la->la_nports));
	KASSERT(la->la_refcnt >= la->la_nports, ("aggregator refcnt invalid"));

	LACP_DPRINTF((lp, "disable distributing on aggregator %s, "
	    "nports %d -> %d\n",
	    lacp_format_lagid_aggregator(la, buf, sizeof(buf)),
	    la->la_nports, la->la_nports - 1));

	TAILQ_REMOVE(&la->la_ports, lp, lp_dist_q);
	la->la_nports--;
	sc->sc_active = la->la_nports;

	if (lsc->lsc_active_aggregator == la) {
		lacp_suppress_distributing(lsc, la);
		lacp_select_active_aggregator(lsc);
		/* regenerate the port map, the active aggregator has changed */
		lacp_update_portmap(lsc);
	}

	lp->lp_state &= ~LACP_STATE_DISTRIBUTING;
	if_link_state_change(sc->sc_ifp,
	    sc->sc_active ? LINK_STATE_UP : LINK_STATE_DOWN);
}

static void
lacp_enable_distributing(struct lacp_port *lp)
{
	struct lacp_aggregator *la = lp->lp_aggregator;
	struct lacp_softc *lsc = lp->lp_lsc;
	struct lagg_softc *sc = lsc->lsc_softc;
	char buf[LACP_LAGIDSTR_MAX+1];

	LACP_LOCK_ASSERT(lsc);

	if ((lp->lp_state & LACP_STATE_DISTRIBUTING) != 0) {
		return;
	}

	LACP_DPRINTF((lp, "enable distributing on aggregator %s, "
	    "nports %d -> %d\n",
	    lacp_format_lagid_aggregator(la, buf, sizeof(buf)),
	    la->la_nports, la->la_nports + 1));

	KASSERT(la->la_refcnt > la->la_nports, ("aggregator refcnt invalid"));
	TAILQ_INSERT_HEAD(&la->la_ports, lp, lp_dist_q);
	la->la_nports++;
	sc->sc_active = la->la_nports;

	lp->lp_state |= LACP_STATE_DISTRIBUTING;

	if (lsc->lsc_active_aggregator == la) {
		lacp_suppress_distributing(lsc, la);
		lacp_update_portmap(lsc);
	} else
		/* try to become the active aggregator */
		lacp_select_active_aggregator(lsc);

	if_link_state_change(sc->sc_ifp,
	    sc->sc_active ? LINK_STATE_UP : LINK_STATE_DOWN);
}

static void
lacp_transit_expire(void *vp)
{
	struct lacp_softc *lsc = vp;

	LACP_LOCK_ASSERT(lsc);

	CURVNET_SET(lsc->lsc_softc->sc_ifp->if_vnet);
	LACP_TRACE(NULL);
	CURVNET_RESTORE();

	lsc->lsc_suppress_distributing = FALSE;
}

void
lacp_attach(struct lagg_softc *sc)
{
	struct lacp_softc *lsc;

	lsc = malloc(sizeof(struct lacp_softc), M_DEVBUF, M_WAITOK | M_ZERO);

	sc->sc_psc = lsc;
	lsc->lsc_softc = sc;

	lsc->lsc_hashkey = m_ether_tcpip_hash_init();
	lsc->lsc_active_aggregator = NULL;
	lsc->lsc_strict_mode = VNET(lacp_default_strict_mode);
	LACP_LOCK_INIT(lsc);
	TAILQ_INIT(&lsc->lsc_aggregators);
	LIST_INIT(&lsc->lsc_ports);

	callout_init_mtx(&lsc->lsc_transit_callout, &lsc->lsc_mtx, 0);
	callout_init_mtx(&lsc->lsc_callout, &lsc->lsc_mtx, 0);

	/* if the lagg is already up then do the same */
	if (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING)
		lacp_init(sc);
}

void
lacp_detach(void *psc)
{
	struct lacp_softc *lsc = (struct lacp_softc *)psc;

	KASSERT(TAILQ_EMPTY(&lsc->lsc_aggregators),
	    ("aggregators still active"));
	KASSERT(lsc->lsc_active_aggregator == NULL,
	    ("aggregator still attached"));

	callout_drain(&lsc->lsc_transit_callout);
	callout_drain(&lsc->lsc_callout);

	LACP_LOCK_DESTROY(lsc);
	free(lsc, M_DEVBUF);
}

void
lacp_init(struct lagg_softc *sc)
{
	struct lacp_softc *lsc = LACP_SOFTC(sc);

	LACP_LOCK(lsc);
	callout_reset(&lsc->lsc_callout, hz, lacp_tick, lsc);
	LACP_UNLOCK(lsc);
}

void
lacp_stop(struct lagg_softc *sc)
{
	struct lacp_softc *lsc = LACP_SOFTC(sc);

	LACP_LOCK(lsc);
	callout_stop(&lsc->lsc_transit_callout);
	callout_stop(&lsc->lsc_callout);
	LACP_UNLOCK(lsc);
}

struct lagg_port *
lacp_select_tx_port(struct lagg_softc *sc, struct mbuf *m)
{
	struct lacp_softc *lsc = LACP_SOFTC(sc);
	struct lacp_portmap *pm;
	struct lacp_port *lp;
	uint32_t hash;

	if (__predict_false(lsc->lsc_suppress_distributing)) {
		LACP_DPRINTF((NULL, "%s: waiting transit\n", __func__));
		return (NULL);
	}

	pm = &lsc->lsc_pmap[lsc->lsc_activemap];
	if (pm->pm_count == 0) {
		LACP_DPRINTF((NULL, "%s: no active aggregator\n", __func__));
		return (NULL);
	}

	if ((sc->sc_opts & LAGG_OPT_USE_FLOWID) &&
	    M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
		hash = m->m_pkthdr.flowid >> sc->flowid_shift;
	else
		hash = m_ether_tcpip_hash(sc->sc_flags, m, lsc->lsc_hashkey);
	hash %= pm->pm_count;
	lp = pm->pm_map[hash];

	KASSERT((lp->lp_state & LACP_STATE_DISTRIBUTING) != 0,
	    ("aggregated port is not distributing"));

	return (lp->lp_lagg);
}

#ifdef RATELIMIT
struct lagg_port *
lacp_select_tx_port_by_hash(struct lagg_softc *sc, uint32_t flowid)
{
	struct lacp_softc *lsc = LACP_SOFTC(sc);
	struct lacp_portmap *pm;
	struct lacp_port *lp;
	uint32_t hash;

	if (__predict_false(lsc->lsc_suppress_distributing)) {
		LACP_DPRINTF((NULL, "%s: waiting transit\n", __func__));
		return (NULL);
	}

	pm = &lsc->lsc_pmap[lsc->lsc_activemap];
	if (pm->pm_count == 0) {
		LACP_DPRINTF((NULL, "%s: no active aggregator\n", __func__));
		return (NULL);
	}

	hash = flowid >> sc->flowid_shift;
	hash %= pm->pm_count;
	lp = pm->pm_map[hash];

	return (lp->lp_lagg);
}
#endif

/*
 * lacp_suppress_distributing: drop transmit packets for a while
 * to preserve packet ordering.
 */

static void
lacp_suppress_distributing(struct lacp_softc *lsc, struct lacp_aggregator *la)
{
	struct lacp_port *lp;

	if (lsc->lsc_active_aggregator != la) {
		return;
	}

	LACP_TRACE(NULL);

	lsc->lsc_suppress_distributing = TRUE;

	/* send a marker frame down each port to verify the queues are empty */
	LIST_FOREACH(lp, &lsc->lsc_ports, lp_next) {
		lp->lp_flags |= LACP_PORT_MARK;
		lacp_xmit_marker(lp);
	}

	/* set a timeout for the marker frames */
	callout_reset(&lsc->lsc_transit_callout,
	    LACP_TRANSIT_DELAY * hz / 1000, lacp_transit_expire, lsc);
}

static int
lacp_compare_peerinfo(const struct lacp_peerinfo *a,
    const struct lacp_peerinfo *b)
{
	return (memcmp(a, b, offsetof(struct lacp_peerinfo, lip_state)));
}

static int
lacp_compare_systemid(const struct lacp_systemid *a,
    const struct lacp_systemid *b)
{
	return (memcmp(a, b, sizeof(*a)));
}

#if 0	/* unused */
static int
lacp_compare_portid(const struct lacp_portid *a,
    const struct lacp_portid *b)
{
	return (memcmp(a, b, sizeof(*a)));
}
#endif

static uint64_t
lacp_aggregator_bandwidth(struct lacp_aggregator *la)
{
	struct lacp_port *lp;
	uint64_t speed;

	lp = TAILQ_FIRST(&la->la_ports);
	if (lp == NULL) {
		return (0);
	}

	speed = ifmedia_baudrate(lp->lp_media);
	speed *= la->la_nports;
	if (speed == 0) {
		LACP_DPRINTF((lp, "speed 0? media=0x%x nports=%d\n",
		    lp->lp_media, la->la_nports));
	}

	return (speed);
}

/*
 * lacp_select_active_aggregator: select an aggregator to be used to transmit
 * packets from lagg(4) interface.
 */

static void
lacp_select_active_aggregator(struct lacp_softc *lsc)
{
	struct lacp_aggregator *la;
	struct lacp_aggregator *best_la = NULL;
	uint64_t best_speed = 0;
	char buf[LACP_LAGIDSTR_MAX+1];

	LACP_TRACE(NULL);

	TAILQ_FOREACH(la, &lsc->lsc_aggregators, la_q) {
		uint64_t speed;

		if (la->la_nports == 0) {
			continue;
		}

		speed = lacp_aggregator_bandwidth(la);
		LACP_DPRINTF((NULL, "%s, speed=%jd, nports=%d\n",
		    lacp_format_lagid_aggregator(la, buf, sizeof(buf)),
		    speed, la->la_nports));

		/*
		 * This aggregator is chosen if the partner has a better
		 * system priority, the total aggregated speed is higher,
		 * or it is already the chosen aggregator.
		 */
		if ((best_la != NULL && LACP_SYS_PRI(la->la_partner) <
		    LACP_SYS_PRI(best_la->la_partner)) ||
		    speed > best_speed ||
		    (speed == best_speed &&
		    la == lsc->lsc_active_aggregator)) {
			best_la = la;
			best_speed = speed;
		}
	}

	KASSERT(best_la == NULL || best_la->la_nports > 0,
	    ("invalid aggregator refcnt"));
	KASSERT(best_la == NULL || !TAILQ_EMPTY(&best_la->la_ports),
	    ("invalid aggregator list"));

	if (lsc->lsc_active_aggregator != best_la) {
		LACP_DPRINTF((NULL, "active aggregator changed\n"));
		LACP_DPRINTF((NULL, "old %s\n",
		    lacp_format_lagid_aggregator(lsc->lsc_active_aggregator,
		    buf, sizeof(buf))));
	} else {
		LACP_DPRINTF((NULL, "active aggregator not changed\n"));
	}
	LACP_DPRINTF((NULL, "new %s\n",
	    lacp_format_lagid_aggregator(best_la, buf, sizeof(buf))));

	if (lsc->lsc_active_aggregator != best_la) {
		lsc->lsc_active_aggregator = best_la;
		lacp_update_portmap(lsc);
		if (best_la) {
			lacp_suppress_distributing(lsc, best_la);
		}
	}
}

/*
 * Update the inactive portmap array with the new list of ports and
 * make it live.
 */
static void
lacp_update_portmap(struct lacp_softc *lsc)
{
	struct lagg_softc *sc = lsc->lsc_softc;
	struct lacp_aggregator *la;
	struct lacp_portmap *p;
	struct lacp_port *lp;
	uint64_t speed;
	u_int newmap;
	int i;

	newmap = lsc->lsc_activemap == 0 ? 1 : 0;
	p = &lsc->lsc_pmap[newmap];
	la = lsc->lsc_active_aggregator;
	speed = 0;
	bzero(p, sizeof(struct lacp_portmap));

	if (la != NULL && la->la_nports > 0) {
		p->pm_count = la->la_nports;
		i = 0;
		TAILQ_FOREACH(lp, &la->la_ports, lp_dist_q)
			p->pm_map[i++] = lp;
		KASSERT(i == p->pm_count, ("Invalid port count"));
		speed = lacp_aggregator_bandwidth(la);
	}
	sc->sc_ifp->if_baudrate = speed;

	/* switch the active portmap over */
	atomic_store_rel_int(&lsc->lsc_activemap, newmap);
	LACP_DPRINTF((NULL, "Set table %d with %d ports\n",
	    lsc->lsc_activemap,
	    lsc->lsc_pmap[lsc->lsc_activemap].pm_count));
}

static uint16_t
lacp_compose_key(struct lacp_port *lp)
{
	struct lagg_port *lgp = lp->lp_lagg;
	struct lagg_softc *sc = lgp->lp_softc;
	u_int media = lp->lp_media;
	uint16_t key;

	if ((lp->lp_state & LACP_STATE_AGGREGATION) == 0) {

		/*
		 * non-aggregatable links should have unique keys.
		 *
		 * XXX this isn't really unique as if_index is 16 bit.
		 */

		/* bit 0..14:	(some bits of) if_index of this port */
		key = lp->lp_ifp->if_index;
		/* bit 15:	1 */
		key |= 0x8000;
	} else {
		u_int subtype = IFM_SUBTYPE(media);

		KASSERT(IFM_TYPE(media) == IFM_ETHER, ("invalid media type"));
		KASSERT((media & IFM_FDX) != 0, ("aggregating HDX interface"));

		/* bit 0..4:	IFM_SUBTYPE modulo speed */
		switch (subtype) {
		case IFM_10_T:
		case IFM_10_2:
		case IFM_10_5:
		case IFM_10_STP:
		case IFM_10_FL:
			key = IFM_10_T;
			break;
		case IFM_100_TX:
		case IFM_100_FX:
		case IFM_100_T4:
		case IFM_100_VG:
		case IFM_100_T2:
		case IFM_100_T:
		case IFM_100_SGMII:
			key = IFM_100_TX;
			break;
		case IFM_1000_SX:
		case IFM_1000_LX:
		case IFM_1000_CX:
		case IFM_1000_T:
		case IFM_1000_KX:
		case IFM_1000_SGMII:
		case IFM_1000_CX_SGMII:
			key = IFM_1000_SX;
			break;
		case IFM_10G_LR:
		case IFM_10G_SR:
		case IFM_10G_CX4:
		case IFM_10G_TWINAX:
		case IFM_10G_TWINAX_LONG:
		case IFM_10G_LRM:
		case IFM_10G_T:
		case IFM_10G_KX4:
		case IFM_10G_KR:
		case IFM_10G_CR1:
		case IFM_10G_ER:
		case IFM_10G_SFI:
		case IFM_10G_AOC:
			key = IFM_10G_LR;
			break;
		case IFM_20G_KR2:
			key = IFM_20G_KR2;
			break;
		case IFM_2500_KX:
		case IFM_2500_T:
		case IFM_2500_X:
			key = IFM_2500_KX;
			break;
		case IFM_5000_T:
		case IFM_5000_KR:
		case IFM_5000_KR_S:
		case IFM_5000_KR1:
			key = IFM_5000_T;
			break;
		case IFM_50G_PCIE:
		case IFM_50G_CR2:
		case IFM_50G_KR2:
		case IFM_50G_SR2:
		case IFM_50G_LR2:
		case IFM_50G_LAUI2_AC:
		case IFM_50G_LAUI2:
		case IFM_50G_AUI2_AC:
		case IFM_50G_AUI2:
		case IFM_50G_CP:
		case IFM_50G_SR:
		case IFM_50G_LR:
		case IFM_50G_FR:
		case IFM_50G_KR_PAM4:
		case IFM_50G_AUI1_AC:
		case IFM_50G_AUI1:
			key = IFM_50G_PCIE;
			break;
		case IFM_56G_R4:
			key = IFM_56G_R4;
			break;
		case IFM_25G_PCIE:
		case IFM_25G_CR:
		case IFM_25G_KR:
		case IFM_25G_SR:
		case IFM_25G_LR:
		case IFM_25G_ACC:
		case IFM_25G_AOC:
		case IFM_25G_T:
		case IFM_25G_CR_S:
		case IFM_25G_CR1:
		case IFM_25G_KR_S:
		case IFM_25G_AUI:
		case IFM_25G_KR1:
			key = IFM_25G_PCIE;
			break;
		case IFM_40G_CR4:
		case IFM_40G_SR4:
		case IFM_40G_LR4:
		case IFM_40G_XLPPI:
		case IFM_40G_KR4:
		case IFM_40G_XLAUI:
		case IFM_40G_XLAUI_AC:
		case IFM_40G_ER4:
			key = IFM_40G_CR4;
			break;
		case IFM_100G_CR4:
		case IFM_100G_SR4:
		case IFM_100G_KR4:
		case IFM_100G_LR4:
		case IFM_100G_CAUI4_AC:
		case IFM_100G_CAUI4:
		case IFM_100G_AUI4_AC:
		case IFM_100G_AUI4:
		case IFM_100G_CR_PAM4:
		case IFM_100G_KR_PAM4:
		case IFM_100G_CP2:
		case IFM_100G_SR2:
		case IFM_100G_DR:
		case IFM_100G_KR2_PAM4:
		case IFM_100G_CAUI2_AC:
		case IFM_100G_CAUI2:
		case IFM_100G_AUI2_AC:
		case IFM_100G_AUI2:
			key = IFM_100G_CR4;
			break;
		case IFM_200G_CR4_PAM4:
		case IFM_200G_SR4:
		case IFM_200G_FR4:
		case IFM_200G_LR4:
		case IFM_200G_DR4:
		case IFM_200G_KR4_PAM4:
		case IFM_200G_AUI4_AC:
		case IFM_200G_AUI4:
		case IFM_200G_AUI8_AC:
		case IFM_200G_AUI8:
			key = IFM_200G_CR4_PAM4;
			break;
		case IFM_400G_FR8:
		case IFM_400G_LR8:
		case IFM_400G_DR4:
		case IFM_400G_AUI8_AC:
		case IFM_400G_AUI8:
			key = IFM_400G_FR8;
			break;
		default:
			key = subtype;
			break;
		}
		/* bit 5..14:	(some bits of) if_index of lagg device */
		key |= 0x7fe0 & ((sc->sc_ifp->if_index) << 5);
		/* bit 15:	0 */
	}
	return (htons(key));
}

static void
lacp_aggregator_addref(struct lacp_softc *lsc, struct lacp_aggregator *la)
{
	char buf[LACP_LAGIDSTR_MAX+1];

	LACP_DPRINTF((NULL, "%s: lagid=%s, refcnt %d -> %d\n",
	    __func__,
	    lacp_format_lagid(&la->la_actor, &la->la_partner,
	    buf, sizeof(buf)),
	    la->la_refcnt, la->la_refcnt + 1));

	KASSERT(la->la_refcnt > 0, ("refcount <= 0"));
	la->la_refcnt++;
	KASSERT(la->la_refcnt > la->la_nports, ("invalid refcount"));
}

static void
lacp_aggregator_delref(struct lacp_softc *lsc, struct lacp_aggregator *la)
{
	char buf[LACP_LAGIDSTR_MAX+1];

	LACP_DPRINTF((NULL, "%s: lagid=%s, refcnt %d -> %d\n",
	    __func__,
	    lacp_format_lagid(&la->la_actor, &la->la_partner,
	    buf, sizeof(buf)),
	    la->la_refcnt, la->la_refcnt - 1));

	KASSERT(la->la_refcnt > la->la_nports, ("invalid refcnt"));
	la->la_refcnt--;
	if (la->la_refcnt > 0) {
		return;
	}

	KASSERT(la->la_refcnt == 0, ("refcount not zero"));
	KASSERT(lsc->lsc_active_aggregator != la, ("aggregator active"));

	TAILQ_REMOVE(&lsc->lsc_aggregators, la, la_q);

	free(la, M_DEVBUF);
}

/*
 * lacp_aggregator_get: allocate an aggregator.
 */

static struct lacp_aggregator *
lacp_aggregator_get(struct lacp_softc *lsc, struct lacp_port *lp)
{
	struct lacp_aggregator *la;

	la = malloc(sizeof(*la), M_DEVBUF, M_NOWAIT);
	if (la) {
		la->la_refcnt = 1;
		la->la_nports = 0;
		TAILQ_INIT(&la->la_ports);
		la->la_pending = 0;
		TAILQ_INSERT_TAIL(&lsc->lsc_aggregators, la, la_q);
	}

	return (la);
}

/*
 * lacp_fill_aggregator_id: setup a newly allocated aggregator from a port.
 */

static void
lacp_fill_aggregator_id(struct lacp_aggregator *la, const struct lacp_port *lp)
{
	lacp_fill_aggregator_id_peer(&la->la_partner, &lp->lp_partner);
	lacp_fill_aggregator_id_peer(&la->la_actor, &lp->lp_actor);

	la->la_actor.lip_state = lp->lp_state & LACP_STATE_AGGREGATION;
}

static void
lacp_fill_aggregator_id_peer(struct lacp_peerinfo *lpi_aggr,
    const struct lacp_peerinfo *lpi_port)
{
	memset(lpi_aggr, 0, sizeof(*lpi_aggr));
	lpi_aggr->lip_systemid = lpi_port->lip_systemid;
	lpi_aggr->lip_key = lpi_port->lip_key;
}

/*
 * lacp_aggregator_is_compatible: check if a port can join an aggregator.
 */

static int
lacp_aggregator_is_compatible(const struct lacp_aggregator *la,
    const struct lacp_port *lp)
{
	if (!(lp->lp_state & LACP_STATE_AGGREGATION) ||
	    !(lp->lp_partner.lip_state & LACP_STATE_AGGREGATION)) {
		return (0);
	}

	if (!(la->la_actor.lip_state & LACP_STATE_AGGREGATION)) {
		return (0);
	}

	if (!lacp_peerinfo_is_compatible(&la->la_partner, &lp->lp_partner)) {
		return (0);
	}

	if (!lacp_peerinfo_is_compatible(&la->la_actor, &lp->lp_actor)) {
		return (0);
	}

	return (1);
}

static int
lacp_peerinfo_is_compatible(const struct lacp_peerinfo *a,
    const struct lacp_peerinfo *b)
{
	if (memcmp(&a->lip_systemid, &b->lip_systemid,
	    sizeof(a->lip_systemid))) {
		return (0);
	}

	if (memcmp(&a->lip_key, &b->lip_key, sizeof(a->lip_key))) {
		return (0);
	}

	return (1);
}

static void
lacp_port_enable(struct lacp_port *lp)
{
	lp->lp_state |= LACP_STATE_AGGREGATION;
}

static void
lacp_port_disable(struct lacp_port *lp)
{
	lacp_set_mux(lp, LACP_MUX_DETACHED);

	lp->lp_state &= ~LACP_STATE_AGGREGATION;
	lp->lp_selected = LACP_UNSELECTED;
	lacp_sm_rx_record_default(lp);
	lp->lp_partner.lip_state &= ~LACP_STATE_AGGREGATION;
	lp->lp_state &= ~LACP_STATE_EXPIRED;
}

/*
 * lacp_select: select an aggregator.  create one if necessary.
 */
static void
lacp_select(struct lacp_port *lp)
{
	struct lacp_softc *lsc = lp->lp_lsc;
	struct lacp_aggregator *la;
	char buf[LACP_LAGIDSTR_MAX+1];

	if (lp->lp_aggregator) {
		return;
	}

	/* If we haven't heard from our peer, skip this step. */
	if (lp->lp_state & LACP_STATE_DEFAULTED)
		return;

	KASSERT(!LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE),
	    ("timer_wait_while still active"));

	LACP_DPRINTF((lp, "port lagid=%s\n",
	    lacp_format_lagid(&lp->lp_actor, &lp->lp_partner,
	    buf, sizeof(buf))));

	TAILQ_FOREACH(la, &lsc->lsc_aggregators, la_q) {
		if (lacp_aggregator_is_compatible(la, lp)) {
			break;
		}
	}

	if (la == NULL) {
		la = lacp_aggregator_get(lsc, lp);
		if (la == NULL) {
			LACP_DPRINTF((lp, "aggregator creation failed\n"));

			/*
			 * will retry on the next tick.
			 */

			return;
		}
		lacp_fill_aggregator_id(la, lp);
		LACP_DPRINTF((lp, "aggregator created\n"));
	} else {
		LACP_DPRINTF((lp, "compatible aggregator found\n"));
		if (la->la_refcnt == LACP_MAX_PORTS)
			return;
		lacp_aggregator_addref(lsc, la);
	}

	LACP_DPRINTF((lp, "aggregator lagid=%s\n",
	    lacp_format_lagid(&la->la_actor, &la->la_partner,
	    buf, sizeof(buf))));

	lp->lp_aggregator = la;
	lp->lp_selected = LACP_SELECTED;
}

/*
 * lacp_unselect: finish unselect/detach process.
 */

static void
lacp_unselect(struct lacp_port *lp)
{
	struct lacp_softc *lsc = lp->lp_lsc;
	struct lacp_aggregator *la = lp->lp_aggregator;

	KASSERT(!LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE),
	    ("timer_wait_while still active"));

	if (la == NULL) {
		return;
	}

	lp->lp_aggregator = NULL;
	lacp_aggregator_delref(lsc, la);
}

/* mux machine */

static void
lacp_sm_mux(struct lacp_port *lp)
{
	struct lagg_port *lgp = lp->lp_lagg;
	struct lagg_softc *sc = lgp->lp_softc;
	enum lacp_mux_state new_state;
	boolean_t p_sync =
	    (lp->lp_partner.lip_state & LACP_STATE_SYNC) != 0;
	boolean_t p_collecting =
	    (lp->lp_partner.lip_state & LACP_STATE_COLLECTING) != 0;
	enum lacp_selected selected = lp->lp_selected;
	struct lacp_aggregator *la;

	if (V_lacp_debug > 1)
		lacp_dprintf(lp, "%s: state= 0x%x, selected= 0x%x, "
		    "p_sync= 0x%x, p_collecting= 0x%x\n", __func__,
		    lp->lp_mux_state, selected, p_sync, p_collecting);

re_eval:
	la = lp->lp_aggregator;
	KASSERT(lp->lp_mux_state == LACP_MUX_DETACHED || la != NULL,
	    ("MUX not detached"));
	new_state = lp->lp_mux_state;
	switch (lp->lp_mux_state) {
	case LACP_MUX_DETACHED:
		if (selected != LACP_UNSELECTED) {
			new_state = LACP_MUX_WAITING;
		}
		break;
	case LACP_MUX_WAITING:
		KASSERT(la->la_pending > 0 ||
		    !LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE),
		    ("timer_wait_while still active"));
		if (selected == LACP_SELECTED && la->la_pending == 0) {
			new_state = LACP_MUX_ATTACHED;
		} else if (selected == LACP_UNSELECTED) {
			new_state = LACP_MUX_DETACHED;
		}
		break;
	case LACP_MUX_ATTACHED:
		if (selected == LACP_SELECTED && p_sync) {
			new_state = LACP_MUX_COLLECTING;
		} else if (selected != LACP_SELECTED) {
			new_state = LACP_MUX_DETACHED;
		}
		break;
	case LACP_MUX_COLLECTING:
		if (selected == LACP_SELECTED && p_sync && p_collecting) {
			new_state = LACP_MUX_DISTRIBUTING;
		} else if (selected != LACP_SELECTED || !p_sync) {
			new_state = LACP_MUX_ATTACHED;
		}
		break;
	case LACP_MUX_DISTRIBUTING:
		if (selected != LACP_SELECTED || !p_sync || !p_collecting) {
			new_state = LACP_MUX_COLLECTING;
			lacp_dprintf(lp, "Interface stopped DISTRIBUTING, possible flapping\n");
			sc->sc_flapping++;
		}
		break;
	default:
		panic("%s: unknown state", __func__);
	}

	if (lp->lp_mux_state == new_state) {
		return;
	}

	lacp_set_mux(lp, new_state);
	goto re_eval;
}

static void
lacp_set_mux(struct lacp_port *lp, enum lacp_mux_state new_state)
{
	struct lacp_aggregator *la = lp->lp_aggregator;

	if (lp->lp_mux_state == new_state) {
		return;
	}

	switch (new_state) {
	case LACP_MUX_DETACHED:
		lp->lp_state &= ~LACP_STATE_SYNC;
		lacp_disable_distributing(lp);
		lacp_disable_collecting(lp);
		lacp_sm_assert_ntt(lp);
		/* cancel timer */
		if (LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE)) {
			KASSERT(la->la_pending > 0,
			    ("timer_wait_while not active"));
			la->la_pending--;
		}
		LACP_TIMER_DISARM(lp, LACP_TIMER_WAIT_WHILE);
		lacp_unselect(lp);
		break;
	case LACP_MUX_WAITING:
		LACP_TIMER_ARM(lp, LACP_TIMER_WAIT_WHILE,
		    LACP_AGGREGATE_WAIT_TIME);
		la->la_pending++;
		break;
	case LACP_MUX_ATTACHED:
		lp->lp_state |= LACP_STATE_SYNC;
		lacp_disable_collecting(lp);
		lacp_sm_assert_ntt(lp);
		break;
	case LACP_MUX_COLLECTING:
		lacp_enable_collecting(lp);
		lacp_disable_distributing(lp);
		lacp_sm_assert_ntt(lp);
		break;
	case LACP_MUX_DISTRIBUTING:
		lacp_enable_distributing(lp);
		break;
	default:
		panic("%s: unknown state", __func__);
	}

	LACP_DPRINTF((lp, "mux_state %d -> %d\n", lp->lp_mux_state, new_state));

	lp->lp_mux_state = new_state;
}

static void
lacp_sm_mux_timer(struct lacp_port *lp)
{
	struct lacp_aggregator *la = lp->lp_aggregator;
	char buf[LACP_LAGIDSTR_MAX+1];

	KASSERT(la->la_pending > 0, ("no pending event"));

	LACP_DPRINTF((lp, "%s: aggregator %s, pending %d -> %d\n", __func__,
	    lacp_format_lagid(&la->la_actor, &la->la_partner,
	    buf, sizeof(buf)),
	    la->la_pending, la->la_pending - 1));

	la->la_pending--;
}

/* periodic transmit machine */

static void
lacp_sm_ptx_update_timeout(struct lacp_port *lp, uint8_t oldpstate)
{
	if (LACP_STATE_EQ(oldpstate, lp->lp_partner.lip_state,
	    LACP_STATE_TIMEOUT)) {
		return;
	}

	LACP_DPRINTF((lp, "partner timeout changed\n"));

	/*
	 * FAST_PERIODIC -> SLOW_PERIODIC
	 * or
	 * SLOW_PERIODIC (-> PERIODIC_TX) -> FAST_PERIODIC
	 *
	 * let lacp_sm_ptx_tx_schedule update the timeout.
	 */

	LACP_TIMER_DISARM(lp, LACP_TIMER_PERIODIC);

	/*
	 * if timeout has been shortened, assert NTT.
	 */

	if ((lp->lp_partner.lip_state & LACP_STATE_TIMEOUT)) {
		lacp_sm_assert_ntt(lp);
	}
}

static void
lacp_sm_ptx_tx_schedule(struct lacp_port *lp)
{
	int timeout;

	if (!(lp->lp_state & LACP_STATE_ACTIVITY) &&
	    !(lp->lp_partner.lip_state & LACP_STATE_ACTIVITY)) {

		/*
		 * NO_PERIODIC
		 */

		LACP_TIMER_DISARM(lp, LACP_TIMER_PERIODIC);
		return;
	}

	if (LACP_TIMER_ISARMED(lp, LACP_TIMER_PERIODIC)) {
		return;
	}

	timeout = (lp->lp_partner.lip_state & LACP_STATE_TIMEOUT) ?
	    LACP_FAST_PERIODIC_TIME : LACP_SLOW_PERIODIC_TIME;

	LACP_TIMER_ARM(lp, LACP_TIMER_PERIODIC, timeout);
}

static void
lacp_sm_ptx_timer(struct lacp_port *lp)
{
	lacp_sm_assert_ntt(lp);
}

static void
lacp_sm_rx(struct lacp_port *lp, const struct lacpdu *du)
{
	int timeout;

	/*
	 * check LACP_DISABLED first
	 */

	if (!(lp->lp_state & LACP_STATE_AGGREGATION)) {
		return;
	}

	/*
	 * check loopback condition.
	 */

	if (!lacp_compare_systemid(&du->ldu_actor.lip_systemid,
	    &lp->lp_actor.lip_systemid)) {
		return;
	}

	/*
	 * EXPIRED, DEFAULTED, CURRENT -> CURRENT
	 */

	lacp_sm_rx_update_selected(lp, du);
	lacp_sm_rx_update_ntt(lp, du);
	lacp_sm_rx_record_pdu(lp, du);

	timeout = (lp->lp_state & LACP_STATE_TIMEOUT) ?
	    LACP_SHORT_TIMEOUT_TIME : LACP_LONG_TIMEOUT_TIME;
	LACP_TIMER_ARM(lp, LACP_TIMER_CURRENT_WHILE, timeout);

	lp->lp_state &= ~LACP_STATE_EXPIRED;

	/*
	 * kick the transmit machine without waiting for the next tick.
	 */

	lacp_sm_tx(lp);
}

static void
lacp_sm_rx_set_expired(struct lacp_port *lp)
{
	lp->lp_partner.lip_state &= ~LACP_STATE_SYNC;
	lp->lp_partner.lip_state |= LACP_STATE_TIMEOUT;
	LACP_TIMER_ARM(lp, LACP_TIMER_CURRENT_WHILE, LACP_SHORT_TIMEOUT_TIME);
	lp->lp_state |= LACP_STATE_EXPIRED;
}

static void
lacp_sm_rx_timer(struct lacp_port *lp)
{
	if ((lp->lp_state & LACP_STATE_EXPIRED) == 0) {
		/* CURRENT -> EXPIRED */
		LACP_DPRINTF((lp, "%s: CURRENT -> EXPIRED\n", __func__));
		lacp_sm_rx_set_expired(lp);
	} else {
		/* EXPIRED -> DEFAULTED */
		LACP_DPRINTF((lp, "%s: EXPIRED -> DEFAULTED\n", __func__));
		lacp_sm_rx_update_default_selected(lp);
		lacp_sm_rx_record_default(lp);
		lp->lp_state &= ~LACP_STATE_EXPIRED;
	}
}

static void
lacp_sm_rx_record_pdu(struct lacp_port *lp, const struct lacpdu *du)
{
	boolean_t active;
	uint8_t oldpstate;
	char buf[LACP_STATESTR_MAX+1];

	LACP_TRACE(lp);

	oldpstate = lp->lp_partner.lip_state;

	active = (du->ldu_actor.lip_state & LACP_STATE_ACTIVITY)
	    || ((lp->lp_state & LACP_STATE_ACTIVITY) &&
	    (du->ldu_partner.lip_state & LACP_STATE_ACTIVITY));

	lp->lp_partner = du->ldu_actor;
	if (active &&
	    ((LACP_STATE_EQ(lp->lp_state, du->ldu_partner.lip_state,
	    LACP_STATE_AGGREGATION) &&
	    !lacp_compare_peerinfo(&lp->lp_actor, &du->ldu_partner))
	    || (du->ldu_partner.lip_state & LACP_STATE_AGGREGATION) == 0)) {
		/*
		 * XXX Maintain legacy behavior of leaving the
		 * LACP_STATE_SYNC bit unchanged from the partner's
		 * advertisement if lsc_strict_mode is false.
		 * TODO: We should re-examine the concept of the "strict mode"
		 * to ensure it makes sense to maintain a non-strict mode.
		 */
		if (lp->lp_lsc->lsc_strict_mode)
			lp->lp_partner.lip_state |= LACP_STATE_SYNC;
	} else {
		lp->lp_partner.lip_state &= ~LACP_STATE_SYNC;
	}

	lp->lp_state &= ~LACP_STATE_DEFAULTED;

	if (oldpstate != lp->lp_partner.lip_state) {
		LACP_DPRINTF((lp, "old pstate %s\n",
		    lacp_format_state(oldpstate, buf, sizeof(buf))));
		LACP_DPRINTF((lp, "new pstate %s\n",
		    lacp_format_state(lp->lp_partner.lip_state, buf,
		    sizeof(buf))));
	}

	lacp_sm_ptx_update_timeout(lp, oldpstate);
}

static void
lacp_sm_rx_update_ntt(struct lacp_port *lp, const struct lacpdu *du)
{

	LACP_TRACE(lp);

	if (lacp_compare_peerinfo(&lp->lp_actor, &du->ldu_partner) ||
	    !LACP_STATE_EQ(lp->lp_state, du->ldu_partner.lip_state,
	    LACP_STATE_ACTIVITY | LACP_STATE_SYNC | LACP_STATE_AGGREGATION)) {
		LACP_DPRINTF((lp, "%s: assert ntt\n", __func__));
		lacp_sm_assert_ntt(lp);
	}
}

static void
lacp_sm_rx_record_default(struct lacp_port *lp)
{
	uint8_t oldpstate;

	LACP_TRACE(lp);

	oldpstate = lp->lp_partner.lip_state;
	if (lp->lp_lsc->lsc_strict_mode)
		lp->lp_partner = lacp_partner_admin_strict;
	else
		lp->lp_partner = lacp_partner_admin_optimistic;
	lp->lp_state |= LACP_STATE_DEFAULTED;
	lacp_sm_ptx_update_timeout(lp, oldpstate);
}

static void
lacp_sm_rx_update_selected_from_peerinfo(struct lacp_port *lp,
    const struct lacp_peerinfo *info)
{

	LACP_TRACE(lp);

	if (lacp_compare_peerinfo(&lp->lp_partner, info) ||
	    !LACP_STATE_EQ(lp->lp_partner.lip_state, info->lip_state,
	    LACP_STATE_AGGREGATION)) {
		lp->lp_selected = LACP_UNSELECTED;
		/* mux machine will clean up lp->lp_aggregator */
	}
}

static void
lacp_sm_rx_update_selected(struct lacp_port *lp, const struct lacpdu *du)
{

	LACP_TRACE(lp);

	lacp_sm_rx_update_selected_from_peerinfo(lp, &du->ldu_actor);
}

static void
lacp_sm_rx_update_default_selected(struct lacp_port *lp)
{

	LACP_TRACE(lp);

	if (lp->lp_lsc->lsc_strict_mode)
		lacp_sm_rx_update_selected_from_peerinfo(lp,
		    &lacp_partner_admin_strict);
	else
		lacp_sm_rx_update_selected_from_peerinfo(lp,
		    &lacp_partner_admin_optimistic);
}

/* transmit machine */

static void
lacp_sm_tx(struct lacp_port *lp)
{
	int error = 0;

	if (!(lp->lp_state & LACP_STATE_AGGREGATION)
#if 1
	    || (!(lp->lp_state & LACP_STATE_ACTIVITY)
	    && !(lp->lp_partner.lip_state & LACP_STATE_ACTIVITY))
#endif
	    ) {
		lp->lp_flags &= ~LACP_PORT_NTT;
	}

	if (!(lp->lp_flags & LACP_PORT_NTT)) {
		return;
	}

	/* Rate limit to 3 PDUs per LACP_FAST_PERIODIC_TIME */
	if (ppsratecheck(&lp->lp_last_lacpdu, &lp->lp_lacpdu_sent,
	    (3 / LACP_FAST_PERIODIC_TIME)) == 0) {
		LACP_DPRINTF((lp, "rate limited pdu\n"));
		return;
	}

	if (((1 << lp->lp_ifp->if_dunit) & lp->lp_lsc->lsc_debug.lsc_tx_test) == 0) {
		error = lacp_xmit_lacpdu(lp);
	} else {
		LACP_TPRINTF((lp, "Dropping TX PDU\n"));
	}

	if (error == 0) {
		lp->lp_flags &= ~LACP_PORT_NTT;
	} else {
		LACP_DPRINTF((lp, "lacpdu transmit failure, error %d\n",
		    error));
	}
}

static void
lacp_sm_assert_ntt(struct lacp_port *lp)
{

	lp->lp_flags |= LACP_PORT_NTT;
}
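
/*
 * Editor's note on timer bookkeeping: lacp_tick() reschedules itself with a
 * period of hz, so lacp_run_timers() below decrements the per-port timers
 * roughly once per second and the timeout constants passed to
 * LACP_TIMER_ARM() are effectively seconds (assuming the usual definition
 * of the LACP_TIMER_* macros in ieee8023ad_lacp.h).
 */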

static void
lacp_run_timers(struct lacp_port *lp)
{
	int i;

	for (i = 0; i < LACP_NTIMER; i++) {
		KASSERT(lp->lp_timer[i] >= 0,
		    ("invalid timer value %d", lp->lp_timer[i]));
		if (lp->lp_timer[i] == 0) {
			continue;
		} else if (--lp->lp_timer[i] <= 0) {
			if (lacp_timer_funcs[i]) {
				(*lacp_timer_funcs[i])(lp);
			}
		}
	}
}

int
lacp_marker_input(struct lacp_port *lp, struct mbuf *m)
{
	struct lacp_softc *lsc = lp->lp_lsc;
	struct lagg_port *lgp = lp->lp_lagg;
	struct lacp_port *lp2;
	struct markerdu *mdu;
	int error = 0;
	int pending = 0;

	if (m->m_pkthdr.len != sizeof(*mdu)) {
		goto bad;
	}

	if ((m->m_flags & M_MCAST) == 0) {
		goto bad;
	}

	if (m->m_len < sizeof(*mdu)) {
		m = m_pullup(m, sizeof(*mdu));
		if (m == NULL) {
			return (ENOMEM);
		}
	}

	mdu = mtod(m, struct markerdu *);

	if (memcmp(&mdu->mdu_eh.ether_dhost,
	    &ethermulticastaddr_slowprotocols, ETHER_ADDR_LEN)) {
		goto bad;
	}

	if (mdu->mdu_sph.sph_version != 1) {
		goto bad;
	}

	switch (mdu->mdu_tlv.tlv_type) {
	case MARKER_TYPE_INFO:
		if (tlv_check(mdu, sizeof(*mdu), &mdu->mdu_tlv,
		    marker_info_tlv_template, TRUE)) {
			goto bad;
		}
		mdu->mdu_tlv.tlv_type = MARKER_TYPE_RESPONSE;
		memcpy(&mdu->mdu_eh.ether_dhost,
		    &ethermulticastaddr_slowprotocols, ETHER_ADDR_LEN);
		memcpy(&mdu->mdu_eh.ether_shost,
		    lgp->lp_lladdr, ETHER_ADDR_LEN);
		error = lagg_enqueue(lp->lp_ifp, m);
		break;

	case MARKER_TYPE_RESPONSE:
		if (tlv_check(mdu, sizeof(*mdu), &mdu->mdu_tlv,
		    marker_response_tlv_template, TRUE)) {
			goto bad;
		}
		LACP_DPRINTF((lp, "marker response, port=%u, sys=%6D, id=%u\n",
		    ntohs(mdu->mdu_info.mi_rq_port), mdu->mdu_info.mi_rq_system,
		    ":", ntohl(mdu->mdu_info.mi_rq_xid)));

		/* Verify that it is the last marker we sent out */
		if (memcmp(&mdu->mdu_info, &lp->lp_marker,
		    sizeof(struct lacp_markerinfo)))
			goto bad;

		LACP_LOCK(lsc);
		lp->lp_flags &= ~LACP_PORT_MARK;

		if (lsc->lsc_suppress_distributing) {
			/* Check if any ports are waiting for a response */
			LIST_FOREACH(lp2, &lsc->lsc_ports, lp_next) {
				if (lp2->lp_flags & LACP_PORT_MARK) {
					pending = 1;
					break;
				}
			}

			if (pending == 0) {
				/* All interface queues are clear */
				LACP_DPRINTF((NULL, "queue flush complete\n"));
				lsc->lsc_suppress_distributing = FALSE;
			}
		}
		LACP_UNLOCK(lsc);
		m_freem(m);
		break;

	default:
		goto bad;
	}

	return (error);

bad:
	LACP_DPRINTF((lp, "bad marker frame\n"));
	m_freem(m);
	return (EINVAL);
}

static int
tlv_check(const void *p, size_t size, const struct tlvhdr *tlv,
    const struct tlv_template *tmpl, boolean_t check_type)
{
	while (/* CONSTCOND */ 1) {
		if ((const char *)tlv - (const char *)p + sizeof(*tlv) > size) {
			return (EINVAL);
		}
		if ((check_type && tlv->tlv_type != tmpl->tmpl_type) ||
		    tlv->tlv_length != tmpl->tmpl_length) {
			return (EINVAL);
		}
		if (tmpl->tmpl_type == 0) {
			break;
		}
		tlv = (const struct tlvhdr *)
		    ((const char *)tlv + tlv->tlv_length);
		tmpl++;
	}

	return (0);
}
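
/*
 * Editor's note: the TLV chain that tlv_check() walks for a received LACPDU
 * mirrors lacp_info_tlv_template[] above:
 *
 *	actor info	(LACP_TYPE_ACTORINFO,     tlvhdr + lacp_peerinfo)
 *	partner info	(LACP_TYPE_PARTNERINFO,   tlvhdr + lacp_peerinfo)
 *	collector info	(LACP_TYPE_COLLECTORINFO, tlvhdr + lacp_collectorinfo)
 *	terminator	(type 0, length 0)
 *
 * Type checking is skipped for LACPDUs (check_type == FALSE) so that future
 * protocol revisions that add TLV types are still accepted; marker frames
 * are checked strictly (check_type == TRUE).
 */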

/* Debugging */

const char *
lacp_format_mac(const uint8_t *mac, char *buf, size_t buflen)
{
	snprintf(buf, buflen, "%02X-%02X-%02X-%02X-%02X-%02X",
	    (int)mac[0],
	    (int)mac[1],
	    (int)mac[2],
	    (int)mac[3],
	    (int)mac[4],
	    (int)mac[5]);

	return (buf);
}

const char *
lacp_format_systemid(const struct lacp_systemid *sysid,
    char *buf, size_t buflen)
{
	char macbuf[LACP_MACSTR_MAX+1];

	snprintf(buf, buflen, "%04X,%s",
	    ntohs(sysid->lsi_prio),
	    lacp_format_mac(sysid->lsi_mac, macbuf, sizeof(macbuf)));

	return (buf);
}

const char *
lacp_format_portid(const struct lacp_portid *portid, char *buf, size_t buflen)
{
	snprintf(buf, buflen, "%04X,%04X",
	    ntohs(portid->lpi_prio),
	    ntohs(portid->lpi_portno));

	return (buf);
}

const char *
lacp_format_partner(const struct lacp_peerinfo *peer, char *buf, size_t buflen)
{
	char sysid[LACP_SYSTEMIDSTR_MAX+1];
	char portid[LACP_PORTIDSTR_MAX+1];

	snprintf(buf, buflen, "(%s,%04X,%s)",
	    lacp_format_systemid(&peer->lip_systemid, sysid, sizeof(sysid)),
	    ntohs(peer->lip_key),
	    lacp_format_portid(&peer->lip_portid, portid, sizeof(portid)));

	return (buf);
}

const char *
lacp_format_lagid(const struct lacp_peerinfo *a,
    const struct lacp_peerinfo *b, char *buf, size_t buflen)
{
	char astr[LACP_PARTNERSTR_MAX+1];
	char bstr[LACP_PARTNERSTR_MAX+1];

#if 0
	/*
	 * there's a convention to display small numbered peer
	 * in the left.
	 */

	if (lacp_compare_peerinfo(a, b) > 0) {
		const struct lacp_peerinfo *t;

		t = a;
		a = b;
		b = t;
	}
#endif

	snprintf(buf, buflen, "[%s,%s]",
	    lacp_format_partner(a, astr, sizeof(astr)),
	    lacp_format_partner(b, bstr, sizeof(bstr)));

	return (buf);
}

const char *
lacp_format_lagid_aggregator(const struct lacp_aggregator *la,
    char *buf, size_t buflen)
{
	if (la == NULL) {
		return ("(none)");
	}

	return (lacp_format_lagid(&la->la_actor, &la->la_partner, buf, buflen));
}

const char *
lacp_format_state(uint8_t state, char *buf, size_t buflen)
{
	snprintf(buf, buflen, "%b", state, LACP_STATE_BITS);
	return (buf);
}

static void
lacp_dump_lacpdu(const struct lacpdu *du)
{
	char buf[LACP_PARTNERSTR_MAX+1];
	char buf2[LACP_STATESTR_MAX+1];

	printf("actor=%s\n",
	    lacp_format_partner(&du->ldu_actor, buf, sizeof(buf)));
	printf("actor.state=%s\n",
	    lacp_format_state(du->ldu_actor.lip_state, buf2, sizeof(buf2)));
	printf("partner=%s\n",
	    lacp_format_partner(&du->ldu_partner, buf, sizeof(buf)));
	printf("partner.state=%s\n",
	    lacp_format_state(du->ldu_partner.lip_state, buf2, sizeof(buf2)));

	printf("maxdelay=%d\n", ntohs(du->ldu_collector.lci_maxdelay));
}

static void
lacp_dprintf(const struct lacp_port *lp, const char *fmt, ...)
{
	va_list va;

	if (lp) {
		printf("%s: ", lp->lp_ifp->if_xname);
	}

	va_start(va, fmt);
	vprintf(fmt, va);
	va_end(va);
}
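
/*
 * Editor's note (illustrative sketch): the debug and strict-mode knobs
 * declared near the top of this file are plain sysctl integers, so they can
 * be toggled at runtime, e.g. with sysctl(8):
 *
 *	sysctl net.link.lagg.lacp.debug=1	# LACP_DPRINTF output
 *	sysctl net.link.lagg.lacp.debug=3	# also LACP_TRACE function tracing
 *	sysctl net.link.lagg.lacp.default_strict_mode=0
 *
 * The exact invocation is an example only; the OID names follow from the
 * SYSCTL_NODE/SYSCTL_INT declarations above.
 */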