1 /* $NetBSD: ieee8023ad_lacp.c,v 1.3 2005/12/11 12:24:54 christos Exp $ */ 2 3 /*- 4 * SPDX-License-Identifier: BSD-2-Clause-NetBSD 5 * 6 * Copyright (c)2005 YAMAMOTO Takashi, 7 * Copyright (c)2008 Andrew Thompson <thompsa@FreeBSD.org> 8 * All rights reserved. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 29 * SUCH DAMAGE. 30 */ 31 32 #include <sys/cdefs.h> 33 __FBSDID("$FreeBSD$"); 34 35 #include "opt_kern_tls.h" 36 #include "opt_ratelimit.h" 37 38 #include <sys/param.h> 39 #include <sys/callout.h> 40 #include <sys/eventhandler.h> 41 #include <sys/mbuf.h> 42 #include <sys/systm.h> 43 #include <sys/malloc.h> 44 #include <sys/kernel.h> /* hz */ 45 #include <sys/socket.h> /* for net/if.h */ 46 #include <sys/sockio.h> 47 #include <sys/sysctl.h> 48 #include <machine/stdarg.h> 49 #include <sys/lock.h> 50 #include <sys/rwlock.h> 51 #include <sys/taskqueue.h> 52 53 #include <net/if.h> 54 #include <net/if_var.h> 55 #include <net/if_dl.h> 56 #include <net/ethernet.h> 57 #include <net/if_media.h> 58 #include <net/if_types.h> 59 60 #include <net/if_lagg.h> 61 #include <net/ieee8023ad_lacp.h> 62 63 /* 64 * actor system priority and port priority. 65 * XXX should be configurable. 
66 */ 67 68 #define LACP_SYSTEM_PRIO 0x8000 69 #define LACP_PORT_PRIO 0x8000 70 71 const uint8_t ethermulticastaddr_slowprotocols[ETHER_ADDR_LEN] = 72 { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x02 }; 73 74 static const struct tlv_template lacp_info_tlv_template[] = { 75 { LACP_TYPE_ACTORINFO, 76 sizeof(struct tlvhdr) + sizeof(struct lacp_peerinfo) }, 77 { LACP_TYPE_PARTNERINFO, 78 sizeof(struct tlvhdr) + sizeof(struct lacp_peerinfo) }, 79 { LACP_TYPE_COLLECTORINFO, 80 sizeof(struct tlvhdr) + sizeof(struct lacp_collectorinfo) }, 81 { 0, 0 }, 82 }; 83 84 static const struct tlv_template marker_info_tlv_template[] = { 85 { MARKER_TYPE_INFO, 86 sizeof(struct tlvhdr) + sizeof(struct lacp_markerinfo) }, 87 { 0, 0 }, 88 }; 89 90 static const struct tlv_template marker_response_tlv_template[] = { 91 { MARKER_TYPE_RESPONSE, 92 sizeof(struct tlvhdr) + sizeof(struct lacp_markerinfo) }, 93 { 0, 0 }, 94 }; 95 96 typedef void (*lacp_timer_func_t)(struct lacp_port *); 97 98 static void lacp_fill_actorinfo(struct lacp_port *, struct lacp_peerinfo *); 99 static void lacp_fill_markerinfo(struct lacp_port *, 100 struct lacp_markerinfo *); 101 102 static uint64_t lacp_aggregator_bandwidth(struct lacp_aggregator *); 103 static void lacp_suppress_distributing(struct lacp_softc *, 104 struct lacp_aggregator *); 105 static void lacp_transit_expire(void *); 106 static void lacp_update_portmap(struct lacp_softc *); 107 static void lacp_select_active_aggregator(struct lacp_softc *); 108 static uint16_t lacp_compose_key(struct lacp_port *); 109 static int tlv_check(const void *, size_t, const struct tlvhdr *, 110 const struct tlv_template *, boolean_t); 111 static void lacp_tick(void *); 112 113 static void lacp_fill_aggregator_id(struct lacp_aggregator *, 114 const struct lacp_port *); 115 static void lacp_fill_aggregator_id_peer(struct lacp_peerinfo *, 116 const struct lacp_peerinfo *); 117 static int lacp_aggregator_is_compatible(const struct lacp_aggregator *, 118 const struct lacp_port *); 119 static int lacp_peerinfo_is_compatible(const struct lacp_peerinfo *, 120 const struct lacp_peerinfo *); 121 122 static struct lacp_aggregator *lacp_aggregator_get(struct lacp_softc *, 123 struct lacp_port *); 124 static void lacp_aggregator_addref(struct lacp_softc *, 125 struct lacp_aggregator *); 126 static void lacp_aggregator_delref(struct lacp_softc *, 127 struct lacp_aggregator *); 128 129 /* receive machine */ 130 131 static int lacp_pdu_input(struct lacp_port *, struct mbuf *); 132 static int lacp_marker_input(struct lacp_port *, struct mbuf *); 133 static void lacp_sm_rx(struct lacp_port *, const struct lacpdu *); 134 static void lacp_sm_rx_timer(struct lacp_port *); 135 static void lacp_sm_rx_set_expired(struct lacp_port *); 136 static void lacp_sm_rx_update_ntt(struct lacp_port *, 137 const struct lacpdu *); 138 static void lacp_sm_rx_record_pdu(struct lacp_port *, 139 const struct lacpdu *); 140 static void lacp_sm_rx_update_selected(struct lacp_port *, 141 const struct lacpdu *); 142 static void lacp_sm_rx_record_default(struct lacp_port *); 143 static void lacp_sm_rx_update_default_selected(struct lacp_port *); 144 static void lacp_sm_rx_update_selected_from_peerinfo(struct lacp_port *, 145 const struct lacp_peerinfo *); 146 147 /* mux machine */ 148 149 static void lacp_sm_mux(struct lacp_port *); 150 static void lacp_set_mux(struct lacp_port *, enum lacp_mux_state); 151 static void lacp_sm_mux_timer(struct lacp_port *); 152 153 /* periodic transmit machine */ 154 155 static void lacp_sm_ptx_update_timeout(struct 
lacp_port *, uint8_t); 156 static void lacp_sm_ptx_tx_schedule(struct lacp_port *); 157 static void lacp_sm_ptx_timer(struct lacp_port *); 158 159 /* transmit machine */ 160 161 static void lacp_sm_tx(struct lacp_port *); 162 static void lacp_sm_assert_ntt(struct lacp_port *); 163 164 static void lacp_run_timers(struct lacp_port *); 165 static int lacp_compare_peerinfo(const struct lacp_peerinfo *, 166 const struct lacp_peerinfo *); 167 static int lacp_compare_systemid(const struct lacp_systemid *, 168 const struct lacp_systemid *); 169 static void lacp_port_enable(struct lacp_port *); 170 static void lacp_port_disable(struct lacp_port *); 171 static void lacp_select(struct lacp_port *); 172 static void lacp_unselect(struct lacp_port *); 173 static void lacp_disable_collecting(struct lacp_port *); 174 static void lacp_enable_collecting(struct lacp_port *); 175 static void lacp_disable_distributing(struct lacp_port *); 176 static void lacp_enable_distributing(struct lacp_port *); 177 static int lacp_xmit_lacpdu(struct lacp_port *); 178 static int lacp_xmit_marker(struct lacp_port *); 179 180 /* Debugging */ 181 182 static void lacp_dump_lacpdu(const struct lacpdu *); 183 static const char *lacp_format_partner(const struct lacp_peerinfo *, char *, 184 size_t); 185 static const char *lacp_format_lagid(const struct lacp_peerinfo *, 186 const struct lacp_peerinfo *, char *, size_t); 187 static const char *lacp_format_lagid_aggregator(const struct lacp_aggregator *, 188 char *, size_t); 189 static const char *lacp_format_state(uint8_t, char *, size_t); 190 static const char *lacp_format_mac(const uint8_t *, char *, size_t); 191 static const char *lacp_format_systemid(const struct lacp_systemid *, char *, 192 size_t); 193 static const char *lacp_format_portid(const struct lacp_portid *, char *, 194 size_t); 195 static void lacp_dprintf(const struct lacp_port *, const char *, ...) 196 __attribute__((__format__(__printf__, 2, 3))); 197 198 VNET_DEFINE_STATIC(int, lacp_debug); 199 #define V_lacp_debug VNET(lacp_debug) 200 SYSCTL_NODE(_net_link_lagg, OID_AUTO, lacp, CTLFLAG_RD, 0, "ieee802.3ad"); 201 SYSCTL_INT(_net_link_lagg_lacp, OID_AUTO, debug, CTLFLAG_RWTUN | CTLFLAG_VNET, 202 &VNET_NAME(lacp_debug), 0, "Enable LACP debug logging (1=debug, 2=trace)"); 203 204 VNET_DEFINE_STATIC(int, lacp_default_strict_mode) = 1; 205 SYSCTL_INT(_net_link_lagg_lacp, OID_AUTO, default_strict_mode, 206 CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(lacp_default_strict_mode), 0, 207 "LACP strict protocol compliance default"); 208 209 #define LACP_DPRINTF(a) if (V_lacp_debug & 0x01) { lacp_dprintf a ; } 210 #define LACP_TRACE(a) if (V_lacp_debug & 0x02) { lacp_dprintf(a,"%s\n",__func__); } 211 #define LACP_TPRINTF(a) if (V_lacp_debug & 0x04) { lacp_dprintf a ; } 212 213 /* 214 * partner administration variables. 215 * XXX should be configurable. 
 */

static const struct lacp_peerinfo lacp_partner_admin_optimistic = {
	.lip_systemid = { .lsi_prio = 0xffff },
	.lip_portid = { .lpi_prio = 0xffff },
	.lip_state = LACP_STATE_SYNC | LACP_STATE_AGGREGATION |
	    LACP_STATE_COLLECTING | LACP_STATE_DISTRIBUTING,
};

static const struct lacp_peerinfo lacp_partner_admin_strict = {
	.lip_systemid = { .lsi_prio = 0xffff },
	.lip_portid = { .lpi_prio = 0xffff },
	.lip_state = 0,
};

static const lacp_timer_func_t lacp_timer_funcs[LACP_NTIMER] = {
	[LACP_TIMER_CURRENT_WHILE] = lacp_sm_rx_timer,
	[LACP_TIMER_PERIODIC] = lacp_sm_ptx_timer,
	[LACP_TIMER_WAIT_WHILE] = lacp_sm_mux_timer,
};

struct mbuf *
lacp_input(struct lagg_port *lgp, struct mbuf *m)
{
	struct lacp_port *lp = LACP_PORT(lgp);
	uint8_t subtype;

	if (m->m_pkthdr.len < sizeof(struct ether_header) + sizeof(subtype)) {
		m_freem(m);
		return (NULL);
	}

	m_copydata(m, sizeof(struct ether_header), sizeof(subtype), &subtype);
	switch (subtype) {
	case SLOWPROTOCOLS_SUBTYPE_LACP:
		lacp_pdu_input(lp, m);
		return (NULL);

	case SLOWPROTOCOLS_SUBTYPE_MARKER:
		lacp_marker_input(lp, m);
		return (NULL);
	}

	/* Not a subtype we are interested in */
	return (m);
}

/*
 * lacp_pdu_input: process lacpdu
 */
static int
lacp_pdu_input(struct lacp_port *lp, struct mbuf *m)
{
	struct lacp_softc *lsc = lp->lp_lsc;
	struct lacpdu *du;
	int error = 0;

	if (m->m_pkthdr.len != sizeof(*du)) {
		goto bad;
	}

	if ((m->m_flags & M_MCAST) == 0) {
		goto bad;
	}

	if (m->m_len < sizeof(*du)) {
		m = m_pullup(m, sizeof(*du));
		if (m == NULL) {
			return (ENOMEM);
		}
	}

	du = mtod(m, struct lacpdu *);

	if (memcmp(&du->ldu_eh.ether_dhost,
	    &ethermulticastaddr_slowprotocols, ETHER_ADDR_LEN)) {
		goto bad;
	}

	/*
	 * ignore the version for compatibility with
	 * the future protocol revisions.
	 */
#if 0
	if (du->ldu_sph.sph_version != 1) {
		goto bad;
	}
#endif

	/*
	 * ignore tlv types for compatibility with
	 * the future protocol revisions.
	 */
	if (tlv_check(du, sizeof(*du), &du->ldu_tlv_actor,
	    lacp_info_tlv_template, FALSE)) {
		goto bad;
	}

	if (V_lacp_debug > 0) {
		lacp_dprintf(lp, "lacpdu receive\n");
		lacp_dump_lacpdu(du);
	}

	if ((1 << lp->lp_ifp->if_dunit) & lp->lp_lsc->lsc_debug.lsc_rx_test) {
		LACP_TPRINTF((lp, "Dropping RX PDU\n"));
		goto bad;
	}

	LACP_LOCK(lsc);
	lacp_sm_rx(lp, du);
	LACP_UNLOCK(lsc);

	m_freem(m);
	return (error);

bad:
	m_freem(m);
	return (EINVAL);
}
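/*
 * Informational sketch of the LACPDU layout validated above, assuming the
 * structure definitions in ieee8023ad_lacp.h:
 *
 *	ether header           (dst 01:80:c2:00:00:02, type ETHERTYPE_SLOW)
 *	slow protocols header  (subtype LACP, version 1)
 *	TLV: LACP_TYPE_ACTORINFO      (tlvhdr + lacp_peerinfo)
 *	TLV: LACP_TYPE_PARTNERINFO    (tlvhdr + lacp_peerinfo)
 *	TLV: LACP_TYPE_COLLECTORINFO  (tlvhdr + lacp_collectorinfo)
 *
 * tlv_check() (defined near the end of this file) only verifies the TLV
 * lengths against lacp_info_tlv_template here; the types are deliberately
 * ignored for compatibility with future protocol revisions.
 */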
static void
lacp_fill_actorinfo(struct lacp_port *lp, struct lacp_peerinfo *info)
{
	struct lagg_port *lgp = lp->lp_lagg;
	struct lagg_softc *sc = lgp->lp_softc;

	info->lip_systemid.lsi_prio = htons(LACP_SYSTEM_PRIO);
	memcpy(&info->lip_systemid.lsi_mac,
	    IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
	info->lip_portid.lpi_prio = htons(LACP_PORT_PRIO);
	info->lip_portid.lpi_portno = htons(lp->lp_ifp->if_index);
	info->lip_state = lp->lp_state;
}

static void
lacp_fill_markerinfo(struct lacp_port *lp, struct lacp_markerinfo *info)
{
	struct ifnet *ifp = lp->lp_ifp;

	/* Fill in the port index and system id (encoded as the MAC) */
	info->mi_rq_port = htons(ifp->if_index);
	memcpy(&info->mi_rq_system, lp->lp_systemid.lsi_mac, ETHER_ADDR_LEN);
	info->mi_rq_xid = htonl(0);
}

static int
lacp_xmit_lacpdu(struct lacp_port *lp)
{
	struct lagg_port *lgp = lp->lp_lagg;
	struct mbuf *m;
	struct lacpdu *du;
	int error;

	LACP_LOCK_ASSERT(lp->lp_lsc);

	m = m_gethdr(M_NOWAIT, MT_DATA);
	if (m == NULL) {
		return (ENOMEM);
	}
	m->m_len = m->m_pkthdr.len = sizeof(*du);

	du = mtod(m, struct lacpdu *);
	memset(du, 0, sizeof(*du));

	memcpy(&du->ldu_eh.ether_dhost, ethermulticastaddr_slowprotocols,
	    ETHER_ADDR_LEN);
	memcpy(&du->ldu_eh.ether_shost, lgp->lp_lladdr, ETHER_ADDR_LEN);
	du->ldu_eh.ether_type = htons(ETHERTYPE_SLOW);

	du->ldu_sph.sph_subtype = SLOWPROTOCOLS_SUBTYPE_LACP;
	du->ldu_sph.sph_version = 1;

	TLV_SET(&du->ldu_tlv_actor, LACP_TYPE_ACTORINFO, sizeof(du->ldu_actor));
	du->ldu_actor = lp->lp_actor;

	TLV_SET(&du->ldu_tlv_partner, LACP_TYPE_PARTNERINFO,
	    sizeof(du->ldu_partner));
	du->ldu_partner = lp->lp_partner;

	TLV_SET(&du->ldu_tlv_collector, LACP_TYPE_COLLECTORINFO,
	    sizeof(du->ldu_collector));
	du->ldu_collector.lci_maxdelay = 0;

	if (V_lacp_debug > 0) {
		lacp_dprintf(lp, "lacpdu transmit\n");
		lacp_dump_lacpdu(du);
	}

	m->m_flags |= M_MCAST;

	/*
	 * XXX should use higher priority queue.
	 * otherwise network congestion can break aggregation.
	 */

	error = lagg_enqueue(lp->lp_ifp, m);
	return (error);
}
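/*
 * Marker protocol note (informational): lacp_xmit_marker() below is used
 * by lacp_suppress_distributing() to flush the member interfaces' queues
 * when the active aggregator changes.  Each transmission bumps the request
 * transaction id (mi_rq_xid), and lacp_marker_input() only clears
 * LACP_PORT_MARK when the echoed marker response matches the last request
 * sent on that port.
 */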
static int
lacp_xmit_marker(struct lacp_port *lp)
{
	struct lagg_port *lgp = lp->lp_lagg;
	struct mbuf *m;
	struct markerdu *mdu;
	int error;

	LACP_LOCK_ASSERT(lp->lp_lsc);

	m = m_gethdr(M_NOWAIT, MT_DATA);
	if (m == NULL) {
		return (ENOMEM);
	}
	m->m_len = m->m_pkthdr.len = sizeof(*mdu);

	mdu = mtod(m, struct markerdu *);
	memset(mdu, 0, sizeof(*mdu));

	memcpy(&mdu->mdu_eh.ether_dhost, ethermulticastaddr_slowprotocols,
	    ETHER_ADDR_LEN);
	memcpy(&mdu->mdu_eh.ether_shost, lgp->lp_lladdr, ETHER_ADDR_LEN);
	mdu->mdu_eh.ether_type = htons(ETHERTYPE_SLOW);

	mdu->mdu_sph.sph_subtype = SLOWPROTOCOLS_SUBTYPE_MARKER;
	mdu->mdu_sph.sph_version = 1;

	/* Bump the transaction id and copy over the marker info */
	lp->lp_marker.mi_rq_xid = htonl(ntohl(lp->lp_marker.mi_rq_xid) + 1);
	TLV_SET(&mdu->mdu_tlv, MARKER_TYPE_INFO, sizeof(mdu->mdu_info));
	mdu->mdu_info = lp->lp_marker;

	LACP_DPRINTF((lp, "marker transmit, port=%u, sys=%6D, id=%u\n",
	    ntohs(mdu->mdu_info.mi_rq_port), mdu->mdu_info.mi_rq_system, ":",
	    ntohl(mdu->mdu_info.mi_rq_xid)));

	m->m_flags |= M_MCAST;
	error = lagg_enqueue(lp->lp_ifp, m);
	return (error);
}

void
lacp_linkstate(struct lagg_port *lgp)
{
	struct lacp_port *lp = LACP_PORT(lgp);
	struct lacp_softc *lsc = lp->lp_lsc;
	struct ifnet *ifp = lgp->lp_ifp;
	struct ifmediareq ifmr;
	int error = 0;
	u_int media;
	uint8_t old_state;
	uint16_t old_key;

	bzero((char *)&ifmr, sizeof(ifmr));
	error = (*ifp->if_ioctl)(ifp, SIOCGIFXMEDIA, (caddr_t)&ifmr);
	if (error != 0) {
		bzero((char *)&ifmr, sizeof(ifmr));
		error = (*ifp->if_ioctl)(ifp, SIOCGIFMEDIA, (caddr_t)&ifmr);
	}
	if (error != 0)
		return;

	LACP_LOCK(lsc);
	media = ifmr.ifm_active;
	LACP_DPRINTF((lp, "media changed 0x%x -> 0x%x, ether = %d, fdx = %d, "
	    "link = %d\n", lp->lp_media, media, IFM_TYPE(media) == IFM_ETHER,
	    (media & IFM_FDX) != 0, ifp->if_link_state == LINK_STATE_UP));
	old_state = lp->lp_state;
	old_key = lp->lp_key;

	lp->lp_media = media;
	/*
	 * If the port is not an active full duplex Ethernet link then it can
	 * not be aggregated.
	 */
	if (IFM_TYPE(media) != IFM_ETHER || (media & IFM_FDX) == 0 ||
	    ifp->if_link_state != LINK_STATE_UP) {
		lacp_port_disable(lp);
	} else {
		lacp_port_enable(lp);
	}
	lp->lp_key = lacp_compose_key(lp);

	if (old_state != lp->lp_state || old_key != lp->lp_key) {
		LACP_DPRINTF((lp, "-> UNSELECTED\n"));
		lp->lp_selected = LACP_UNSELECTED;
	}
	LACP_UNLOCK(lsc);
}
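/*
 * Informational note on the tick below: lacp_tick() reschedules itself with
 * a one second period (callout_reset(..., hz, ...)) and drives the receive,
 * selection, mux, transmit and periodic-transmit machines for every port.
 * Ports whose link is down or half duplex have LACP_STATE_AGGREGATION
 * cleared by lacp_port_disable() via lacp_linkstate() above and are simply
 * skipped.
 */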
static void
lacp_tick(void *arg)
{
	struct lacp_softc *lsc = arg;
	struct lacp_port *lp;

	LIST_FOREACH(lp, &lsc->lsc_ports, lp_next) {
		if ((lp->lp_state & LACP_STATE_AGGREGATION) == 0)
			continue;

		CURVNET_SET(lp->lp_ifp->if_vnet);
		lacp_run_timers(lp);

		lacp_select(lp);
		lacp_sm_mux(lp);
		lacp_sm_tx(lp);
		lacp_sm_ptx_tx_schedule(lp);
		CURVNET_RESTORE();
	}
	callout_reset(&lsc->lsc_callout, hz, lacp_tick, lsc);
}

int
lacp_port_create(struct lagg_port *lgp)
{
	struct lagg_softc *sc = lgp->lp_softc;
	struct lacp_softc *lsc = LACP_SOFTC(sc);
	struct lacp_port *lp;
	struct ifnet *ifp = lgp->lp_ifp;
	struct sockaddr_dl sdl;
	struct ifmultiaddr *rifma = NULL;
	int error;

	link_init_sdl(ifp, (struct sockaddr *)&sdl, IFT_ETHER);
	sdl.sdl_alen = ETHER_ADDR_LEN;

	bcopy(&ethermulticastaddr_slowprotocols,
	    LLADDR(&sdl), ETHER_ADDR_LEN);
	error = if_addmulti(ifp, (struct sockaddr *)&sdl, &rifma);
	if (error) {
		printf("%s: ADDMULTI failed on %s\n", __func__,
		    lgp->lp_ifp->if_xname);
		return (error);
	}

	lp = malloc(sizeof(struct lacp_port),
	    M_DEVBUF, M_NOWAIT|M_ZERO);
	if (lp == NULL)
		return (ENOMEM);

	LACP_LOCK(lsc);
	lgp->lp_psc = lp;
	lp->lp_ifp = ifp;
	lp->lp_lagg = lgp;
	lp->lp_lsc = lsc;
	lp->lp_ifma = rifma;

	LIST_INSERT_HEAD(&lsc->lsc_ports, lp, lp_next);

	lacp_fill_actorinfo(lp, &lp->lp_actor);
	lacp_fill_markerinfo(lp, &lp->lp_marker);
	lp->lp_state = LACP_STATE_ACTIVITY;
	lp->lp_aggregator = NULL;
	lacp_sm_rx_set_expired(lp);
	LACP_UNLOCK(lsc);
	lacp_linkstate(lgp);

	return (0);
}

void
lacp_port_destroy(struct lagg_port *lgp)
{
	struct lacp_port *lp = LACP_PORT(lgp);
	struct lacp_softc *lsc = lp->lp_lsc;
	int i;

	LACP_LOCK(lsc);
	for (i = 0; i < LACP_NTIMER; i++) {
		LACP_TIMER_DISARM(lp, i);
	}

	lacp_disable_collecting(lp);
	lacp_disable_distributing(lp);
	lacp_unselect(lp);

	LIST_REMOVE(lp, lp_next);
	LACP_UNLOCK(lsc);

	/* The address may have already been removed by if_purgemaddrs() */
	if (!lgp->lp_detaching)
		if_delmulti_ifma(lp->lp_ifma);

	free(lp, M_DEVBUF);
}
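/*
 * Informational note, hedged: lacp_req() and lacp_portreq() below copy the
 * active aggregator's and the port's actor/partner information into a
 * struct lacp_opreq, converting the on-wire (network byte order) fields to
 * host order.  This is the status that lagg(4) exports to userland, e.g.
 * what ifconfig displays for a "laggproto lacp" interface.
 */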
void
lacp_req(struct lagg_softc *sc, void *data)
{
	struct lacp_opreq *req = (struct lacp_opreq *)data;
	struct lacp_softc *lsc = LACP_SOFTC(sc);
	struct lacp_aggregator *la;

	bzero(req, sizeof(struct lacp_opreq));

	/*
	 * If the LACP softc is NULL, return with the opreq structure full of
	 * zeros.  It is normal for the softc to be NULL while the lagg is
	 * being destroyed.
	 */
	if (NULL == lsc)
		return;

	la = lsc->lsc_active_aggregator;
	LACP_LOCK(lsc);
	if (la != NULL) {
		req->actor_prio = ntohs(la->la_actor.lip_systemid.lsi_prio);
		memcpy(&req->actor_mac, &la->la_actor.lip_systemid.lsi_mac,
		    ETHER_ADDR_LEN);
		req->actor_key = ntohs(la->la_actor.lip_key);
		req->actor_portprio = ntohs(la->la_actor.lip_portid.lpi_prio);
		req->actor_portno = ntohs(la->la_actor.lip_portid.lpi_portno);
		req->actor_state = la->la_actor.lip_state;

		req->partner_prio = ntohs(la->la_partner.lip_systemid.lsi_prio);
		memcpy(&req->partner_mac, &la->la_partner.lip_systemid.lsi_mac,
		    ETHER_ADDR_LEN);
		req->partner_key = ntohs(la->la_partner.lip_key);
		req->partner_portprio = ntohs(la->la_partner.lip_portid.lpi_prio);
		req->partner_portno = ntohs(la->la_partner.lip_portid.lpi_portno);
		req->partner_state = la->la_partner.lip_state;
	}
	LACP_UNLOCK(lsc);
}

void
lacp_portreq(struct lagg_port *lgp, void *data)
{
	struct lacp_opreq *req = (struct lacp_opreq *)data;
	struct lacp_port *lp = LACP_PORT(lgp);
	struct lacp_softc *lsc = lp->lp_lsc;

	LACP_LOCK(lsc);
	req->actor_prio = ntohs(lp->lp_actor.lip_systemid.lsi_prio);
	memcpy(&req->actor_mac, &lp->lp_actor.lip_systemid.lsi_mac,
	    ETHER_ADDR_LEN);
	req->actor_key = ntohs(lp->lp_actor.lip_key);
	req->actor_portprio = ntohs(lp->lp_actor.lip_portid.lpi_prio);
	req->actor_portno = ntohs(lp->lp_actor.lip_portid.lpi_portno);
	req->actor_state = lp->lp_actor.lip_state;

	req->partner_prio = ntohs(lp->lp_partner.lip_systemid.lsi_prio);
	memcpy(&req->partner_mac, &lp->lp_partner.lip_systemid.lsi_mac,
	    ETHER_ADDR_LEN);
	req->partner_key = ntohs(lp->lp_partner.lip_key);
	req->partner_portprio = ntohs(lp->lp_partner.lip_portid.lpi_prio);
	req->partner_portno = ntohs(lp->lp_partner.lip_portid.lpi_portno);
	req->partner_state = lp->lp_partner.lip_state;
	LACP_UNLOCK(lsc);
}

static void
lacp_disable_collecting(struct lacp_port *lp)
{
	LACP_DPRINTF((lp, "collecting disabled\n"));
	lp->lp_state &= ~LACP_STATE_COLLECTING;
}

static void
lacp_enable_collecting(struct lacp_port *lp)
{
	LACP_DPRINTF((lp, "collecting enabled\n"));
	lp->lp_state |= LACP_STATE_COLLECTING;
}

static void
lacp_disable_distributing(struct lacp_port *lp)
{
	struct lacp_aggregator *la = lp->lp_aggregator;
	struct lacp_softc *lsc = lp->lp_lsc;
	struct lagg_softc *sc = lsc->lsc_softc;
	char buf[LACP_LAGIDSTR_MAX+1];

	LACP_LOCK_ASSERT(lsc);

	if (la == NULL || (lp->lp_state & LACP_STATE_DISTRIBUTING) == 0) {
		return;
	}

	KASSERT(!TAILQ_EMPTY(&la->la_ports), ("no aggregator ports"));
	KASSERT(la->la_nports > 0, ("nports invalid (%d)", la->la_nports));
	KASSERT(la->la_refcnt >= la->la_nports, ("aggregator refcnt invalid"));

	LACP_DPRINTF((lp, "disable distributing on aggregator %s, "
	    "nports %d -> %d\n",
	    lacp_format_lagid_aggregator(la, buf, sizeof(buf)),
	    la->la_nports, la->la_nports - 1));

	TAILQ_REMOVE(&la->la_ports, lp, lp_dist_q);
	la->la_nports--;
	sc->sc_active = la->la_nports;

	if (lsc->lsc_active_aggregator == la) {
		lacp_suppress_distributing(lsc, la);
		lacp_select_active_aggregator(lsc);
		/* regenerate the port map, the active aggregator has changed */
		lacp_update_portmap(lsc);
	}

	lp->lp_state &= ~LACP_STATE_DISTRIBUTING;

if_link_state_change(sc->sc_ifp, 716 sc->sc_active ? LINK_STATE_UP : LINK_STATE_DOWN); 717 } 718 719 static void 720 lacp_enable_distributing(struct lacp_port *lp) 721 { 722 struct lacp_aggregator *la = lp->lp_aggregator; 723 struct lacp_softc *lsc = lp->lp_lsc; 724 struct lagg_softc *sc = lsc->lsc_softc; 725 char buf[LACP_LAGIDSTR_MAX+1]; 726 727 LACP_LOCK_ASSERT(lsc); 728 729 if ((lp->lp_state & LACP_STATE_DISTRIBUTING) != 0) { 730 return; 731 } 732 733 LACP_DPRINTF((lp, "enable distributing on aggregator %s, " 734 "nports %d -> %d\n", 735 lacp_format_lagid_aggregator(la, buf, sizeof(buf)), 736 la->la_nports, la->la_nports + 1)); 737 738 KASSERT(la->la_refcnt > la->la_nports, ("aggregator refcnt invalid")); 739 TAILQ_INSERT_HEAD(&la->la_ports, lp, lp_dist_q); 740 la->la_nports++; 741 sc->sc_active = la->la_nports; 742 743 lp->lp_state |= LACP_STATE_DISTRIBUTING; 744 745 if (lsc->lsc_active_aggregator == la) { 746 lacp_suppress_distributing(lsc, la); 747 lacp_update_portmap(lsc); 748 } else 749 /* try to become the active aggregator */ 750 lacp_select_active_aggregator(lsc); 751 752 if_link_state_change(sc->sc_ifp, 753 sc->sc_active ? LINK_STATE_UP : LINK_STATE_DOWN); 754 } 755 756 static void 757 lacp_transit_expire(void *vp) 758 { 759 struct lacp_softc *lsc = vp; 760 761 LACP_LOCK_ASSERT(lsc); 762 763 CURVNET_SET(lsc->lsc_softc->sc_ifp->if_vnet); 764 LACP_TRACE(NULL); 765 CURVNET_RESTORE(); 766 767 lsc->lsc_suppress_distributing = FALSE; 768 } 769 770 void 771 lacp_attach(struct lagg_softc *sc) 772 { 773 struct lacp_softc *lsc; 774 775 lsc = malloc(sizeof(struct lacp_softc), M_DEVBUF, M_WAITOK | M_ZERO); 776 777 sc->sc_psc = lsc; 778 lsc->lsc_softc = sc; 779 780 lsc->lsc_hashkey = m_ether_tcpip_hash_init(); 781 lsc->lsc_active_aggregator = NULL; 782 lsc->lsc_strict_mode = VNET(lacp_default_strict_mode); 783 LACP_LOCK_INIT(lsc); 784 TAILQ_INIT(&lsc->lsc_aggregators); 785 LIST_INIT(&lsc->lsc_ports); 786 787 callout_init_mtx(&lsc->lsc_transit_callout, &lsc->lsc_mtx, 0); 788 callout_init_mtx(&lsc->lsc_callout, &lsc->lsc_mtx, 0); 789 790 /* if the lagg is already up then do the same */ 791 if (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) 792 lacp_init(sc); 793 } 794 795 void 796 lacp_detach(void *psc) 797 { 798 struct lacp_softc *lsc = (struct lacp_softc *)psc; 799 800 KASSERT(TAILQ_EMPTY(&lsc->lsc_aggregators), 801 ("aggregators still active")); 802 KASSERT(lsc->lsc_active_aggregator == NULL, 803 ("aggregator still attached")); 804 805 callout_drain(&lsc->lsc_transit_callout); 806 callout_drain(&lsc->lsc_callout); 807 808 LACP_LOCK_DESTROY(lsc); 809 free(lsc, M_DEVBUF); 810 } 811 812 void 813 lacp_init(struct lagg_softc *sc) 814 { 815 struct lacp_softc *lsc = LACP_SOFTC(sc); 816 817 LACP_LOCK(lsc); 818 callout_reset(&lsc->lsc_callout, hz, lacp_tick, lsc); 819 LACP_UNLOCK(lsc); 820 } 821 822 void 823 lacp_stop(struct lagg_softc *sc) 824 { 825 struct lacp_softc *lsc = LACP_SOFTC(sc); 826 827 LACP_LOCK(lsc); 828 callout_stop(&lsc->lsc_transit_callout); 829 callout_stop(&lsc->lsc_callout); 830 LACP_UNLOCK(lsc); 831 } 832 833 struct lagg_port * 834 lacp_select_tx_port(struct lagg_softc *sc, struct mbuf *m) 835 { 836 struct lacp_softc *lsc = LACP_SOFTC(sc); 837 struct lacp_portmap *pm; 838 struct lacp_port *lp; 839 struct lacp_port **map; 840 uint32_t hash; 841 int count; 842 843 if (__predict_false(lsc->lsc_suppress_distributing)) { 844 LACP_DPRINTF((NULL, "%s: waiting transit\n", __func__)); 845 return (NULL); 846 } 847 848 pm = &lsc->lsc_pmap[lsc->lsc_activemap]; 849 if (pm->pm_count == 0) { 
850 LACP_DPRINTF((NULL, "%s: no active aggregator\n", __func__)); 851 return (NULL); 852 } 853 854 #ifdef NUMA 855 if ((sc->sc_opts & LAGG_OPT_USE_NUMA) && 856 pm->pm_num_dom > 1 && m->m_pkthdr.numa_domain < MAXMEMDOM) { 857 count = pm->pm_numa[m->m_pkthdr.numa_domain].count; 858 if (count > 0) { 859 map = pm->pm_numa[m->m_pkthdr.numa_domain].map; 860 } else { 861 /* No ports on this domain; use global hash. */ 862 map = pm->pm_map; 863 count = pm->pm_count; 864 } 865 } else 866 #endif 867 { 868 map = pm->pm_map; 869 count = pm->pm_count; 870 } 871 if ((sc->sc_opts & LAGG_OPT_USE_FLOWID) && 872 M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) 873 hash = m->m_pkthdr.flowid >> sc->flowid_shift; 874 else 875 hash = m_ether_tcpip_hash(sc->sc_flags, m, lsc->lsc_hashkey); 876 877 hash %= count; 878 lp = map[hash]; 879 880 KASSERT((lp->lp_state & LACP_STATE_DISTRIBUTING) != 0, 881 ("aggregated port is not distributing")); 882 883 return (lp->lp_lagg); 884 } 885 886 #if defined(RATELIMIT) || defined(KERN_TLS) 887 struct lagg_port * 888 lacp_select_tx_port_by_hash(struct lagg_softc *sc, uint32_t flowid) 889 { 890 struct lacp_softc *lsc = LACP_SOFTC(sc); 891 struct lacp_portmap *pm; 892 struct lacp_port *lp; 893 uint32_t hash; 894 895 if (__predict_false(lsc->lsc_suppress_distributing)) { 896 LACP_DPRINTF((NULL, "%s: waiting transit\n", __func__)); 897 return (NULL); 898 } 899 900 pm = &lsc->lsc_pmap[lsc->lsc_activemap]; 901 if (pm->pm_count == 0) { 902 LACP_DPRINTF((NULL, "%s: no active aggregator\n", __func__)); 903 return (NULL); 904 } 905 906 hash = flowid >> sc->flowid_shift; 907 hash %= pm->pm_count; 908 lp = pm->pm_map[hash]; 909 910 return (lp->lp_lagg); 911 } 912 #endif 913 914 /* 915 * lacp_suppress_distributing: drop transmit packets for a while 916 * to preserve packet ordering. 917 */ 918 919 static void 920 lacp_suppress_distributing(struct lacp_softc *lsc, struct lacp_aggregator *la) 921 { 922 struct lacp_port *lp; 923 924 if (lsc->lsc_active_aggregator != la) { 925 return; 926 } 927 928 LACP_TRACE(NULL); 929 930 lsc->lsc_suppress_distributing = TRUE; 931 932 /* send a marker frame down each port to verify the queues are empty */ 933 LIST_FOREACH(lp, &lsc->lsc_ports, lp_next) { 934 lp->lp_flags |= LACP_PORT_MARK; 935 lacp_xmit_marker(lp); 936 } 937 938 /* set a timeout for the marker frames */ 939 callout_reset(&lsc->lsc_transit_callout, 940 LACP_TRANSIT_DELAY * hz / 1000, lacp_transit_expire, lsc); 941 } 942 943 static int 944 lacp_compare_peerinfo(const struct lacp_peerinfo *a, 945 const struct lacp_peerinfo *b) 946 { 947 return (memcmp(a, b, offsetof(struct lacp_peerinfo, lip_state))); 948 } 949 950 static int 951 lacp_compare_systemid(const struct lacp_systemid *a, 952 const struct lacp_systemid *b) 953 { 954 return (memcmp(a, b, sizeof(*a))); 955 } 956 957 #if 0 /* unused */ 958 static int 959 lacp_compare_portid(const struct lacp_portid *a, 960 const struct lacp_portid *b) 961 { 962 return (memcmp(a, b, sizeof(*a))); 963 } 964 #endif 965 966 static uint64_t 967 lacp_aggregator_bandwidth(struct lacp_aggregator *la) 968 { 969 struct lacp_port *lp; 970 uint64_t speed; 971 972 lp = TAILQ_FIRST(&la->la_ports); 973 if (lp == NULL) { 974 return (0); 975 } 976 977 speed = ifmedia_baudrate(lp->lp_media); 978 speed *= la->la_nports; 979 if (speed == 0) { 980 LACP_DPRINTF((lp, "speed 0? 
media=0x%x nports=%d\n", 981 lp->lp_media, la->la_nports)); 982 } 983 984 return (speed); 985 } 986 987 /* 988 * lacp_select_active_aggregator: select an aggregator to be used to transmit 989 * packets from lagg(4) interface. 990 */ 991 992 static void 993 lacp_select_active_aggregator(struct lacp_softc *lsc) 994 { 995 struct lacp_aggregator *la; 996 struct lacp_aggregator *best_la = NULL; 997 uint64_t best_speed = 0; 998 char buf[LACP_LAGIDSTR_MAX+1]; 999 1000 LACP_TRACE(NULL); 1001 1002 TAILQ_FOREACH(la, &lsc->lsc_aggregators, la_q) { 1003 uint64_t speed; 1004 1005 if (la->la_nports == 0) { 1006 continue; 1007 } 1008 1009 speed = lacp_aggregator_bandwidth(la); 1010 LACP_DPRINTF((NULL, "%s, speed=%jd, nports=%d\n", 1011 lacp_format_lagid_aggregator(la, buf, sizeof(buf)), 1012 speed, la->la_nports)); 1013 1014 /* 1015 * This aggregator is chosen if the partner has a better 1016 * system priority or, the total aggregated speed is higher 1017 * or, it is already the chosen aggregator 1018 */ 1019 if ((best_la != NULL && LACP_SYS_PRI(la->la_partner) < 1020 LACP_SYS_PRI(best_la->la_partner)) || 1021 speed > best_speed || 1022 (speed == best_speed && 1023 la == lsc->lsc_active_aggregator)) { 1024 best_la = la; 1025 best_speed = speed; 1026 } 1027 } 1028 1029 KASSERT(best_la == NULL || best_la->la_nports > 0, 1030 ("invalid aggregator refcnt")); 1031 KASSERT(best_la == NULL || !TAILQ_EMPTY(&best_la->la_ports), 1032 ("invalid aggregator list")); 1033 1034 if (lsc->lsc_active_aggregator != best_la) { 1035 LACP_DPRINTF((NULL, "active aggregator changed\n")); 1036 LACP_DPRINTF((NULL, "old %s\n", 1037 lacp_format_lagid_aggregator(lsc->lsc_active_aggregator, 1038 buf, sizeof(buf)))); 1039 } else { 1040 LACP_DPRINTF((NULL, "active aggregator not changed\n")); 1041 } 1042 LACP_DPRINTF((NULL, "new %s\n", 1043 lacp_format_lagid_aggregator(best_la, buf, sizeof(buf)))); 1044 1045 if (lsc->lsc_active_aggregator != best_la) { 1046 lsc->lsc_active_aggregator = best_la; 1047 lacp_update_portmap(lsc); 1048 if (best_la) { 1049 lacp_suppress_distributing(lsc, best_la); 1050 } 1051 } 1052 } 1053 1054 /* 1055 * Updated the inactive portmap array with the new list of ports and 1056 * make it live. 1057 */ 1058 static void 1059 lacp_update_portmap(struct lacp_softc *lsc) 1060 { 1061 struct lagg_softc *sc = lsc->lsc_softc; 1062 struct lacp_aggregator *la; 1063 struct lacp_portmap *p; 1064 struct lacp_port *lp; 1065 uint64_t speed; 1066 u_int newmap; 1067 int i; 1068 #ifdef NUMA 1069 int count; 1070 uint8_t domain; 1071 #endif 1072 1073 newmap = lsc->lsc_activemap == 0 ? 
1 : 0; 1074 p = &lsc->lsc_pmap[newmap]; 1075 la = lsc->lsc_active_aggregator; 1076 speed = 0; 1077 bzero(p, sizeof(struct lacp_portmap)); 1078 1079 if (la != NULL && la->la_nports > 0) { 1080 p->pm_count = la->la_nports; 1081 i = 0; 1082 TAILQ_FOREACH(lp, &la->la_ports, lp_dist_q) { 1083 p->pm_map[i++] = lp; 1084 #ifdef NUMA 1085 domain = lp->lp_ifp->if_numa_domain; 1086 if (domain >= MAXMEMDOM) 1087 continue; 1088 count = p->pm_numa[domain].count; 1089 p->pm_numa[domain].map[count] = lp; 1090 p->pm_numa[domain].count++; 1091 #endif 1092 } 1093 KASSERT(i == p->pm_count, ("Invalid port count")); 1094 1095 #ifdef NUMA 1096 for (i = 0; i < MAXMEMDOM; i++) { 1097 if (p->pm_numa[i].count != 0) 1098 p->pm_num_dom++; 1099 } 1100 #endif 1101 speed = lacp_aggregator_bandwidth(la); 1102 } 1103 sc->sc_ifp->if_baudrate = speed; 1104 1105 /* switch the active portmap over */ 1106 atomic_store_rel_int(&lsc->lsc_activemap, newmap); 1107 LACP_DPRINTF((NULL, "Set table %d with %d ports\n", 1108 lsc->lsc_activemap, 1109 lsc->lsc_pmap[lsc->lsc_activemap].pm_count)); 1110 } 1111 1112 static uint16_t 1113 lacp_compose_key(struct lacp_port *lp) 1114 { 1115 struct lagg_port *lgp = lp->lp_lagg; 1116 struct lagg_softc *sc = lgp->lp_softc; 1117 u_int media = lp->lp_media; 1118 uint16_t key; 1119 1120 if ((lp->lp_state & LACP_STATE_AGGREGATION) == 0) { 1121 1122 /* 1123 * non-aggregatable links should have unique keys. 1124 * 1125 * XXX this isn't really unique as if_index is 16 bit. 1126 */ 1127 1128 /* bit 0..14: (some bits of) if_index of this port */ 1129 key = lp->lp_ifp->if_index; 1130 /* bit 15: 1 */ 1131 key |= 0x8000; 1132 } else { 1133 u_int subtype = IFM_SUBTYPE(media); 1134 1135 KASSERT(IFM_TYPE(media) == IFM_ETHER, ("invalid media type")); 1136 KASSERT((media & IFM_FDX) != 0, ("aggregating HDX interface")); 1137 1138 /* bit 0..4: IFM_SUBTYPE modulo speed */ 1139 switch (subtype) { 1140 case IFM_10_T: 1141 case IFM_10_2: 1142 case IFM_10_5: 1143 case IFM_10_STP: 1144 case IFM_10_FL: 1145 key = IFM_10_T; 1146 break; 1147 case IFM_100_TX: 1148 case IFM_100_FX: 1149 case IFM_100_T4: 1150 case IFM_100_VG: 1151 case IFM_100_T2: 1152 case IFM_100_T: 1153 case IFM_100_SGMII: 1154 key = IFM_100_TX; 1155 break; 1156 case IFM_1000_SX: 1157 case IFM_1000_LX: 1158 case IFM_1000_CX: 1159 case IFM_1000_T: 1160 case IFM_1000_KX: 1161 case IFM_1000_SGMII: 1162 case IFM_1000_CX_SGMII: 1163 key = IFM_1000_SX; 1164 break; 1165 case IFM_10G_LR: 1166 case IFM_10G_SR: 1167 case IFM_10G_CX4: 1168 case IFM_10G_TWINAX: 1169 case IFM_10G_TWINAX_LONG: 1170 case IFM_10G_LRM: 1171 case IFM_10G_T: 1172 case IFM_10G_KX4: 1173 case IFM_10G_KR: 1174 case IFM_10G_CR1: 1175 case IFM_10G_ER: 1176 case IFM_10G_SFI: 1177 case IFM_10G_AOC: 1178 key = IFM_10G_LR; 1179 break; 1180 case IFM_20G_KR2: 1181 key = IFM_20G_KR2; 1182 break; 1183 case IFM_2500_KX: 1184 case IFM_2500_T: 1185 case IFM_2500_X: 1186 key = IFM_2500_KX; 1187 break; 1188 case IFM_5000_T: 1189 case IFM_5000_KR: 1190 case IFM_5000_KR_S: 1191 case IFM_5000_KR1: 1192 key = IFM_5000_T; 1193 break; 1194 case IFM_50G_PCIE: 1195 case IFM_50G_CR2: 1196 case IFM_50G_KR2: 1197 case IFM_50G_SR2: 1198 case IFM_50G_LR2: 1199 case IFM_50G_LAUI2_AC: 1200 case IFM_50G_LAUI2: 1201 case IFM_50G_AUI2_AC: 1202 case IFM_50G_AUI2: 1203 case IFM_50G_CP: 1204 case IFM_50G_SR: 1205 case IFM_50G_LR: 1206 case IFM_50G_FR: 1207 case IFM_50G_KR_PAM4: 1208 case IFM_50G_AUI1_AC: 1209 case IFM_50G_AUI1: 1210 key = IFM_50G_PCIE; 1211 break; 1212 case IFM_56G_R4: 1213 key = IFM_56G_R4; 1214 break; 1215 case 
IFM_25G_PCIE: 1216 case IFM_25G_CR: 1217 case IFM_25G_KR: 1218 case IFM_25G_SR: 1219 case IFM_25G_LR: 1220 case IFM_25G_ACC: 1221 case IFM_25G_AOC: 1222 case IFM_25G_T: 1223 case IFM_25G_CR_S: 1224 case IFM_25G_CR1: 1225 case IFM_25G_KR_S: 1226 case IFM_25G_AUI: 1227 case IFM_25G_KR1: 1228 key = IFM_25G_PCIE; 1229 break; 1230 case IFM_40G_CR4: 1231 case IFM_40G_SR4: 1232 case IFM_40G_LR4: 1233 case IFM_40G_XLPPI: 1234 case IFM_40G_KR4: 1235 case IFM_40G_XLAUI: 1236 case IFM_40G_XLAUI_AC: 1237 case IFM_40G_ER4: 1238 key = IFM_40G_CR4; 1239 break; 1240 case IFM_100G_CR4: 1241 case IFM_100G_SR4: 1242 case IFM_100G_KR4: 1243 case IFM_100G_LR4: 1244 case IFM_100G_CAUI4_AC: 1245 case IFM_100G_CAUI4: 1246 case IFM_100G_AUI4_AC: 1247 case IFM_100G_AUI4: 1248 case IFM_100G_CR_PAM4: 1249 case IFM_100G_KR_PAM4: 1250 case IFM_100G_CP2: 1251 case IFM_100G_SR2: 1252 case IFM_100G_DR: 1253 case IFM_100G_KR2_PAM4: 1254 case IFM_100G_CAUI2_AC: 1255 case IFM_100G_CAUI2: 1256 case IFM_100G_AUI2_AC: 1257 case IFM_100G_AUI2: 1258 key = IFM_100G_CR4; 1259 break; 1260 case IFM_200G_CR4_PAM4: 1261 case IFM_200G_SR4: 1262 case IFM_200G_FR4: 1263 case IFM_200G_LR4: 1264 case IFM_200G_DR4: 1265 case IFM_200G_KR4_PAM4: 1266 case IFM_200G_AUI4_AC: 1267 case IFM_200G_AUI4: 1268 case IFM_200G_AUI8_AC: 1269 case IFM_200G_AUI8: 1270 key = IFM_200G_CR4_PAM4; 1271 break; 1272 case IFM_400G_FR8: 1273 case IFM_400G_LR8: 1274 case IFM_400G_DR4: 1275 case IFM_400G_AUI8_AC: 1276 case IFM_400G_AUI8: 1277 key = IFM_400G_FR8; 1278 break; 1279 default: 1280 key = subtype; 1281 break; 1282 } 1283 /* bit 5..14: (some bits of) if_index of lagg device */ 1284 key |= 0x7fe0 & ((sc->sc_ifp->if_index) << 5); 1285 /* bit 15: 0 */ 1286 } 1287 return (htons(key)); 1288 } 1289 1290 static void 1291 lacp_aggregator_addref(struct lacp_softc *lsc, struct lacp_aggregator *la) 1292 { 1293 char buf[LACP_LAGIDSTR_MAX+1]; 1294 1295 LACP_DPRINTF((NULL, "%s: lagid=%s, refcnt %d -> %d\n", 1296 __func__, 1297 lacp_format_lagid(&la->la_actor, &la->la_partner, 1298 buf, sizeof(buf)), 1299 la->la_refcnt, la->la_refcnt + 1)); 1300 1301 KASSERT(la->la_refcnt > 0, ("refcount <= 0")); 1302 la->la_refcnt++; 1303 KASSERT(la->la_refcnt > la->la_nports, ("invalid refcount")); 1304 } 1305 1306 static void 1307 lacp_aggregator_delref(struct lacp_softc *lsc, struct lacp_aggregator *la) 1308 { 1309 char buf[LACP_LAGIDSTR_MAX+1]; 1310 1311 LACP_DPRINTF((NULL, "%s: lagid=%s, refcnt %d -> %d\n", 1312 __func__, 1313 lacp_format_lagid(&la->la_actor, &la->la_partner, 1314 buf, sizeof(buf)), 1315 la->la_refcnt, la->la_refcnt - 1)); 1316 1317 KASSERT(la->la_refcnt > la->la_nports, ("invalid refcnt")); 1318 la->la_refcnt--; 1319 if (la->la_refcnt > 0) { 1320 return; 1321 } 1322 1323 KASSERT(la->la_refcnt == 0, ("refcount not zero")); 1324 KASSERT(lsc->lsc_active_aggregator != la, ("aggregator active")); 1325 1326 TAILQ_REMOVE(&lsc->lsc_aggregators, la, la_q); 1327 1328 free(la, M_DEVBUF); 1329 } 1330 1331 /* 1332 * lacp_aggregator_get: allocate an aggregator. 1333 */ 1334 1335 static struct lacp_aggregator * 1336 lacp_aggregator_get(struct lacp_softc *lsc, struct lacp_port *lp) 1337 { 1338 struct lacp_aggregator *la; 1339 1340 la = malloc(sizeof(*la), M_DEVBUF, M_NOWAIT); 1341 if (la) { 1342 la->la_refcnt = 1; 1343 la->la_nports = 0; 1344 TAILQ_INIT(&la->la_ports); 1345 la->la_pending = 0; 1346 TAILQ_INSERT_TAIL(&lsc->lsc_aggregators, la, la_q); 1347 } 1348 1349 return (la); 1350 } 1351 1352 /* 1353 * lacp_fill_aggregator_id: setup a newly allocated aggregator from a port. 
1354 */ 1355 1356 static void 1357 lacp_fill_aggregator_id(struct lacp_aggregator *la, const struct lacp_port *lp) 1358 { 1359 lacp_fill_aggregator_id_peer(&la->la_partner, &lp->lp_partner); 1360 lacp_fill_aggregator_id_peer(&la->la_actor, &lp->lp_actor); 1361 1362 la->la_actor.lip_state = lp->lp_state & LACP_STATE_AGGREGATION; 1363 } 1364 1365 static void 1366 lacp_fill_aggregator_id_peer(struct lacp_peerinfo *lpi_aggr, 1367 const struct lacp_peerinfo *lpi_port) 1368 { 1369 memset(lpi_aggr, 0, sizeof(*lpi_aggr)); 1370 lpi_aggr->lip_systemid = lpi_port->lip_systemid; 1371 lpi_aggr->lip_key = lpi_port->lip_key; 1372 } 1373 1374 /* 1375 * lacp_aggregator_is_compatible: check if a port can join to an aggregator. 1376 */ 1377 1378 static int 1379 lacp_aggregator_is_compatible(const struct lacp_aggregator *la, 1380 const struct lacp_port *lp) 1381 { 1382 if (!(lp->lp_state & LACP_STATE_AGGREGATION) || 1383 !(lp->lp_partner.lip_state & LACP_STATE_AGGREGATION)) { 1384 return (0); 1385 } 1386 1387 if (!(la->la_actor.lip_state & LACP_STATE_AGGREGATION)) { 1388 return (0); 1389 } 1390 1391 if (!lacp_peerinfo_is_compatible(&la->la_partner, &lp->lp_partner)) { 1392 return (0); 1393 } 1394 1395 if (!lacp_peerinfo_is_compatible(&la->la_actor, &lp->lp_actor)) { 1396 return (0); 1397 } 1398 1399 return (1); 1400 } 1401 1402 static int 1403 lacp_peerinfo_is_compatible(const struct lacp_peerinfo *a, 1404 const struct lacp_peerinfo *b) 1405 { 1406 if (memcmp(&a->lip_systemid, &b->lip_systemid, 1407 sizeof(a->lip_systemid))) { 1408 return (0); 1409 } 1410 1411 if (memcmp(&a->lip_key, &b->lip_key, sizeof(a->lip_key))) { 1412 return (0); 1413 } 1414 1415 return (1); 1416 } 1417 1418 static void 1419 lacp_port_enable(struct lacp_port *lp) 1420 { 1421 lp->lp_state |= LACP_STATE_AGGREGATION; 1422 } 1423 1424 static void 1425 lacp_port_disable(struct lacp_port *lp) 1426 { 1427 lacp_set_mux(lp, LACP_MUX_DETACHED); 1428 1429 lp->lp_state &= ~LACP_STATE_AGGREGATION; 1430 lp->lp_selected = LACP_UNSELECTED; 1431 lacp_sm_rx_record_default(lp); 1432 lp->lp_partner.lip_state &= ~LACP_STATE_AGGREGATION; 1433 lp->lp_state &= ~LACP_STATE_EXPIRED; 1434 } 1435 1436 /* 1437 * lacp_select: select an aggregator. create one if necessary. 1438 */ 1439 static void 1440 lacp_select(struct lacp_port *lp) 1441 { 1442 struct lacp_softc *lsc = lp->lp_lsc; 1443 struct lacp_aggregator *la; 1444 char buf[LACP_LAGIDSTR_MAX+1]; 1445 1446 if (lp->lp_aggregator) { 1447 return; 1448 } 1449 1450 /* If we haven't heard from our peer, skip this step. */ 1451 if (lp->lp_state & LACP_STATE_DEFAULTED) 1452 return; 1453 1454 KASSERT(!LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE), 1455 ("timer_wait_while still active")); 1456 1457 LACP_DPRINTF((lp, "port lagid=%s\n", 1458 lacp_format_lagid(&lp->lp_actor, &lp->lp_partner, 1459 buf, sizeof(buf)))); 1460 1461 TAILQ_FOREACH(la, &lsc->lsc_aggregators, la_q) { 1462 if (lacp_aggregator_is_compatible(la, lp)) { 1463 break; 1464 } 1465 } 1466 1467 if (la == NULL) { 1468 la = lacp_aggregator_get(lsc, lp); 1469 if (la == NULL) { 1470 LACP_DPRINTF((lp, "aggregator creation failed\n")); 1471 1472 /* 1473 * will retry on the next tick. 
1474 */ 1475 1476 return; 1477 } 1478 lacp_fill_aggregator_id(la, lp); 1479 LACP_DPRINTF((lp, "aggregator created\n")); 1480 } else { 1481 LACP_DPRINTF((lp, "compatible aggregator found\n")); 1482 if (la->la_refcnt == LACP_MAX_PORTS) 1483 return; 1484 lacp_aggregator_addref(lsc, la); 1485 } 1486 1487 LACP_DPRINTF((lp, "aggregator lagid=%s\n", 1488 lacp_format_lagid(&la->la_actor, &la->la_partner, 1489 buf, sizeof(buf)))); 1490 1491 lp->lp_aggregator = la; 1492 lp->lp_selected = LACP_SELECTED; 1493 } 1494 1495 /* 1496 * lacp_unselect: finish unselect/detach process. 1497 */ 1498 1499 static void 1500 lacp_unselect(struct lacp_port *lp) 1501 { 1502 struct lacp_softc *lsc = lp->lp_lsc; 1503 struct lacp_aggregator *la = lp->lp_aggregator; 1504 1505 KASSERT(!LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE), 1506 ("timer_wait_while still active")); 1507 1508 if (la == NULL) { 1509 return; 1510 } 1511 1512 lp->lp_aggregator = NULL; 1513 lacp_aggregator_delref(lsc, la); 1514 } 1515 1516 /* mux machine */ 1517 1518 static void 1519 lacp_sm_mux(struct lacp_port *lp) 1520 { 1521 struct lagg_port *lgp = lp->lp_lagg; 1522 struct lagg_softc *sc = lgp->lp_softc; 1523 enum lacp_mux_state new_state; 1524 boolean_t p_sync = 1525 (lp->lp_partner.lip_state & LACP_STATE_SYNC) != 0; 1526 boolean_t p_collecting = 1527 (lp->lp_partner.lip_state & LACP_STATE_COLLECTING) != 0; 1528 enum lacp_selected selected = lp->lp_selected; 1529 struct lacp_aggregator *la; 1530 1531 if (V_lacp_debug > 1) 1532 lacp_dprintf(lp, "%s: state= 0x%x, selected= 0x%x, " 1533 "p_sync= 0x%x, p_collecting= 0x%x\n", __func__, 1534 lp->lp_mux_state, selected, p_sync, p_collecting); 1535 1536 re_eval: 1537 la = lp->lp_aggregator; 1538 KASSERT(lp->lp_mux_state == LACP_MUX_DETACHED || la != NULL, 1539 ("MUX not detached")); 1540 new_state = lp->lp_mux_state; 1541 switch (lp->lp_mux_state) { 1542 case LACP_MUX_DETACHED: 1543 if (selected != LACP_UNSELECTED) { 1544 new_state = LACP_MUX_WAITING; 1545 } 1546 break; 1547 case LACP_MUX_WAITING: 1548 KASSERT(la->la_pending > 0 || 1549 !LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE), 1550 ("timer_wait_while still active")); 1551 if (selected == LACP_SELECTED && la->la_pending == 0) { 1552 new_state = LACP_MUX_ATTACHED; 1553 } else if (selected == LACP_UNSELECTED) { 1554 new_state = LACP_MUX_DETACHED; 1555 } 1556 break; 1557 case LACP_MUX_ATTACHED: 1558 if (selected == LACP_SELECTED && p_sync) { 1559 new_state = LACP_MUX_COLLECTING; 1560 } else if (selected != LACP_SELECTED) { 1561 new_state = LACP_MUX_DETACHED; 1562 } 1563 break; 1564 case LACP_MUX_COLLECTING: 1565 if (selected == LACP_SELECTED && p_sync && p_collecting) { 1566 new_state = LACP_MUX_DISTRIBUTING; 1567 } else if (selected != LACP_SELECTED || !p_sync) { 1568 new_state = LACP_MUX_ATTACHED; 1569 } 1570 break; 1571 case LACP_MUX_DISTRIBUTING: 1572 if (selected != LACP_SELECTED || !p_sync || !p_collecting) { 1573 new_state = LACP_MUX_COLLECTING; 1574 lacp_dprintf(lp, "Interface stopped DISTRIBUTING, possible flapping\n"); 1575 sc->sc_flapping++; 1576 } 1577 break; 1578 default: 1579 panic("%s: unknown state", __func__); 1580 } 1581 1582 if (lp->lp_mux_state == new_state) { 1583 return; 1584 } 1585 1586 lacp_set_mux(lp, new_state); 1587 goto re_eval; 1588 } 1589 1590 static void 1591 lacp_set_mux(struct lacp_port *lp, enum lacp_mux_state new_state) 1592 { 1593 struct lacp_aggregator *la = lp->lp_aggregator; 1594 1595 if (lp->lp_mux_state == new_state) { 1596 return; 1597 } 1598 1599 switch (new_state) { 1600 case LACP_MUX_DETACHED: 1601 
lp->lp_state &= ~LACP_STATE_SYNC; 1602 lacp_disable_distributing(lp); 1603 lacp_disable_collecting(lp); 1604 lacp_sm_assert_ntt(lp); 1605 /* cancel timer */ 1606 if (LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE)) { 1607 KASSERT(la->la_pending > 0, 1608 ("timer_wait_while not active")); 1609 la->la_pending--; 1610 } 1611 LACP_TIMER_DISARM(lp, LACP_TIMER_WAIT_WHILE); 1612 lacp_unselect(lp); 1613 break; 1614 case LACP_MUX_WAITING: 1615 LACP_TIMER_ARM(lp, LACP_TIMER_WAIT_WHILE, 1616 LACP_AGGREGATE_WAIT_TIME); 1617 la->la_pending++; 1618 break; 1619 case LACP_MUX_ATTACHED: 1620 lp->lp_state |= LACP_STATE_SYNC; 1621 lacp_disable_collecting(lp); 1622 lacp_sm_assert_ntt(lp); 1623 break; 1624 case LACP_MUX_COLLECTING: 1625 lacp_enable_collecting(lp); 1626 lacp_disable_distributing(lp); 1627 lacp_sm_assert_ntt(lp); 1628 break; 1629 case LACP_MUX_DISTRIBUTING: 1630 lacp_enable_distributing(lp); 1631 break; 1632 default: 1633 panic("%s: unknown state", __func__); 1634 } 1635 1636 LACP_DPRINTF((lp, "mux_state %d -> %d\n", lp->lp_mux_state, new_state)); 1637 1638 lp->lp_mux_state = new_state; 1639 } 1640 1641 static void 1642 lacp_sm_mux_timer(struct lacp_port *lp) 1643 { 1644 struct lacp_aggregator *la = lp->lp_aggregator; 1645 char buf[LACP_LAGIDSTR_MAX+1]; 1646 1647 KASSERT(la->la_pending > 0, ("no pending event")); 1648 1649 LACP_DPRINTF((lp, "%s: aggregator %s, pending %d -> %d\n", __func__, 1650 lacp_format_lagid(&la->la_actor, &la->la_partner, 1651 buf, sizeof(buf)), 1652 la->la_pending, la->la_pending - 1)); 1653 1654 la->la_pending--; 1655 } 1656 1657 /* periodic transmit machine */ 1658 1659 static void 1660 lacp_sm_ptx_update_timeout(struct lacp_port *lp, uint8_t oldpstate) 1661 { 1662 if (LACP_STATE_EQ(oldpstate, lp->lp_partner.lip_state, 1663 LACP_STATE_TIMEOUT)) { 1664 return; 1665 } 1666 1667 LACP_DPRINTF((lp, "partner timeout changed\n")); 1668 1669 /* 1670 * FAST_PERIODIC -> SLOW_PERIODIC 1671 * or 1672 * SLOW_PERIODIC (-> PERIODIC_TX) -> FAST_PERIODIC 1673 * 1674 * let lacp_sm_ptx_tx_schedule to update timeout. 1675 */ 1676 1677 LACP_TIMER_DISARM(lp, LACP_TIMER_PERIODIC); 1678 1679 /* 1680 * if timeout has been shortened, assert NTT. 1681 */ 1682 1683 if ((lp->lp_partner.lip_state & LACP_STATE_TIMEOUT)) { 1684 lacp_sm_assert_ntt(lp); 1685 } 1686 } 1687 1688 static void 1689 lacp_sm_ptx_tx_schedule(struct lacp_port *lp) 1690 { 1691 int timeout; 1692 1693 if (!(lp->lp_state & LACP_STATE_ACTIVITY) && 1694 !(lp->lp_partner.lip_state & LACP_STATE_ACTIVITY)) { 1695 1696 /* 1697 * NO_PERIODIC 1698 */ 1699 1700 LACP_TIMER_DISARM(lp, LACP_TIMER_PERIODIC); 1701 return; 1702 } 1703 1704 if (LACP_TIMER_ISARMED(lp, LACP_TIMER_PERIODIC)) { 1705 return; 1706 } 1707 1708 timeout = (lp->lp_partner.lip_state & LACP_STATE_TIMEOUT) ? 1709 LACP_FAST_PERIODIC_TIME : LACP_SLOW_PERIODIC_TIME; 1710 1711 LACP_TIMER_ARM(lp, LACP_TIMER_PERIODIC, timeout); 1712 } 1713 1714 static void 1715 lacp_sm_ptx_timer(struct lacp_port *lp) 1716 { 1717 lacp_sm_assert_ntt(lp); 1718 } 1719 1720 static void 1721 lacp_sm_rx(struct lacp_port *lp, const struct lacpdu *du) 1722 { 1723 int timeout; 1724 1725 /* 1726 * check LACP_DISABLED first 1727 */ 1728 1729 if (!(lp->lp_state & LACP_STATE_AGGREGATION)) { 1730 return; 1731 } 1732 1733 /* 1734 * check loopback condition. 
1735 */ 1736 1737 if (!lacp_compare_systemid(&du->ldu_actor.lip_systemid, 1738 &lp->lp_actor.lip_systemid)) { 1739 return; 1740 } 1741 1742 /* 1743 * EXPIRED, DEFAULTED, CURRENT -> CURRENT 1744 */ 1745 1746 lacp_sm_rx_update_selected(lp, du); 1747 lacp_sm_rx_update_ntt(lp, du); 1748 lacp_sm_rx_record_pdu(lp, du); 1749 1750 timeout = (lp->lp_state & LACP_STATE_TIMEOUT) ? 1751 LACP_SHORT_TIMEOUT_TIME : LACP_LONG_TIMEOUT_TIME; 1752 LACP_TIMER_ARM(lp, LACP_TIMER_CURRENT_WHILE, timeout); 1753 1754 lp->lp_state &= ~LACP_STATE_EXPIRED; 1755 1756 /* 1757 * kick transmit machine without waiting the next tick. 1758 */ 1759 1760 lacp_sm_tx(lp); 1761 } 1762 1763 static void 1764 lacp_sm_rx_set_expired(struct lacp_port *lp) 1765 { 1766 lp->lp_partner.lip_state &= ~LACP_STATE_SYNC; 1767 lp->lp_partner.lip_state |= LACP_STATE_TIMEOUT; 1768 LACP_TIMER_ARM(lp, LACP_TIMER_CURRENT_WHILE, LACP_SHORT_TIMEOUT_TIME); 1769 lp->lp_state |= LACP_STATE_EXPIRED; 1770 } 1771 1772 static void 1773 lacp_sm_rx_timer(struct lacp_port *lp) 1774 { 1775 if ((lp->lp_state & LACP_STATE_EXPIRED) == 0) { 1776 /* CURRENT -> EXPIRED */ 1777 LACP_DPRINTF((lp, "%s: CURRENT -> EXPIRED\n", __func__)); 1778 lacp_sm_rx_set_expired(lp); 1779 } else { 1780 /* EXPIRED -> DEFAULTED */ 1781 LACP_DPRINTF((lp, "%s: EXPIRED -> DEFAULTED\n", __func__)); 1782 lacp_sm_rx_update_default_selected(lp); 1783 lacp_sm_rx_record_default(lp); 1784 lp->lp_state &= ~LACP_STATE_EXPIRED; 1785 } 1786 } 1787 1788 static void 1789 lacp_sm_rx_record_pdu(struct lacp_port *lp, const struct lacpdu *du) 1790 { 1791 boolean_t active; 1792 uint8_t oldpstate; 1793 char buf[LACP_STATESTR_MAX+1]; 1794 1795 LACP_TRACE(lp); 1796 1797 oldpstate = lp->lp_partner.lip_state; 1798 1799 active = (du->ldu_actor.lip_state & LACP_STATE_ACTIVITY) 1800 || ((lp->lp_state & LACP_STATE_ACTIVITY) && 1801 (du->ldu_partner.lip_state & LACP_STATE_ACTIVITY)); 1802 1803 lp->lp_partner = du->ldu_actor; 1804 if (active && 1805 ((LACP_STATE_EQ(lp->lp_state, du->ldu_partner.lip_state, 1806 LACP_STATE_AGGREGATION) && 1807 !lacp_compare_peerinfo(&lp->lp_actor, &du->ldu_partner)) 1808 || (du->ldu_partner.lip_state & LACP_STATE_AGGREGATION) == 0)) { 1809 /* 1810 * XXX Maintain legacy behavior of leaving the 1811 * LACP_STATE_SYNC bit unchanged from the partner's 1812 * advertisement if lsc_strict_mode is false. 1813 * TODO: We should re-examine the concept of the "strict mode" 1814 * to ensure it makes sense to maintain a non-strict mode. 
1815 */ 1816 if (lp->lp_lsc->lsc_strict_mode) 1817 lp->lp_partner.lip_state |= LACP_STATE_SYNC; 1818 } else { 1819 lp->lp_partner.lip_state &= ~LACP_STATE_SYNC; 1820 } 1821 1822 lp->lp_state &= ~LACP_STATE_DEFAULTED; 1823 1824 if (oldpstate != lp->lp_partner.lip_state) { 1825 LACP_DPRINTF((lp, "old pstate %s\n", 1826 lacp_format_state(oldpstate, buf, sizeof(buf)))); 1827 LACP_DPRINTF((lp, "new pstate %s\n", 1828 lacp_format_state(lp->lp_partner.lip_state, buf, 1829 sizeof(buf)))); 1830 } 1831 1832 lacp_sm_ptx_update_timeout(lp, oldpstate); 1833 } 1834 1835 static void 1836 lacp_sm_rx_update_ntt(struct lacp_port *lp, const struct lacpdu *du) 1837 { 1838 1839 LACP_TRACE(lp); 1840 1841 if (lacp_compare_peerinfo(&lp->lp_actor, &du->ldu_partner) || 1842 !LACP_STATE_EQ(lp->lp_state, du->ldu_partner.lip_state, 1843 LACP_STATE_ACTIVITY | LACP_STATE_SYNC | LACP_STATE_AGGREGATION)) { 1844 LACP_DPRINTF((lp, "%s: assert ntt\n", __func__)); 1845 lacp_sm_assert_ntt(lp); 1846 } 1847 } 1848 1849 static void 1850 lacp_sm_rx_record_default(struct lacp_port *lp) 1851 { 1852 uint8_t oldpstate; 1853 1854 LACP_TRACE(lp); 1855 1856 oldpstate = lp->lp_partner.lip_state; 1857 if (lp->lp_lsc->lsc_strict_mode) 1858 lp->lp_partner = lacp_partner_admin_strict; 1859 else 1860 lp->lp_partner = lacp_partner_admin_optimistic; 1861 lp->lp_state |= LACP_STATE_DEFAULTED; 1862 lacp_sm_ptx_update_timeout(lp, oldpstate); 1863 } 1864 1865 static void 1866 lacp_sm_rx_update_selected_from_peerinfo(struct lacp_port *lp, 1867 const struct lacp_peerinfo *info) 1868 { 1869 1870 LACP_TRACE(lp); 1871 1872 if (lacp_compare_peerinfo(&lp->lp_partner, info) || 1873 !LACP_STATE_EQ(lp->lp_partner.lip_state, info->lip_state, 1874 LACP_STATE_AGGREGATION)) { 1875 lp->lp_selected = LACP_UNSELECTED; 1876 /* mux machine will clean up lp->lp_aggregator */ 1877 } 1878 } 1879 1880 static void 1881 lacp_sm_rx_update_selected(struct lacp_port *lp, const struct lacpdu *du) 1882 { 1883 1884 LACP_TRACE(lp); 1885 1886 lacp_sm_rx_update_selected_from_peerinfo(lp, &du->ldu_actor); 1887 } 1888 1889 static void 1890 lacp_sm_rx_update_default_selected(struct lacp_port *lp) 1891 { 1892 1893 LACP_TRACE(lp); 1894 1895 if (lp->lp_lsc->lsc_strict_mode) 1896 lacp_sm_rx_update_selected_from_peerinfo(lp, 1897 &lacp_partner_admin_strict); 1898 else 1899 lacp_sm_rx_update_selected_from_peerinfo(lp, 1900 &lacp_partner_admin_optimistic); 1901 } 1902 1903 /* transmit machine */ 1904 1905 static void 1906 lacp_sm_tx(struct lacp_port *lp) 1907 { 1908 int error = 0; 1909 1910 if (!(lp->lp_state & LACP_STATE_AGGREGATION) 1911 #if 1 1912 || (!(lp->lp_state & LACP_STATE_ACTIVITY) 1913 && !(lp->lp_partner.lip_state & LACP_STATE_ACTIVITY)) 1914 #endif 1915 ) { 1916 lp->lp_flags &= ~LACP_PORT_NTT; 1917 } 1918 1919 if (!(lp->lp_flags & LACP_PORT_NTT)) { 1920 return; 1921 } 1922 1923 /* Rate limit to 3 PDUs per LACP_FAST_PERIODIC_TIME */ 1924 if (ppsratecheck(&lp->lp_last_lacpdu, &lp->lp_lacpdu_sent, 1925 (3 / LACP_FAST_PERIODIC_TIME)) == 0) { 1926 LACP_DPRINTF((lp, "rate limited pdu\n")); 1927 return; 1928 } 1929 1930 if (((1 << lp->lp_ifp->if_dunit) & lp->lp_lsc->lsc_debug.lsc_tx_test) == 0) { 1931 error = lacp_xmit_lacpdu(lp); 1932 } else { 1933 LACP_TPRINTF((lp, "Dropping TX PDU\n")); 1934 } 1935 1936 if (error == 0) { 1937 lp->lp_flags &= ~LACP_PORT_NTT; 1938 } else { 1939 LACP_DPRINTF((lp, "lacpdu transmit failure, error %d\n", 1940 error)); 1941 } 1942 } 1943 1944 static void 1945 lacp_sm_assert_ntt(struct lacp_port *lp) 1946 { 1947 1948 lp->lp_flags |= LACP_PORT_NTT; 1949 } 
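/*
 * Timer note (informational): because lacp_tick() reschedules itself once
 * per second, the lp_timer[] slots armed with LACP_TIMER_ARM() effectively
 * count seconds.  lacp_run_timers() below decrements each armed slot on
 * every tick and calls the matching handler from lacp_timer_funcs[] when
 * it reaches zero.
 */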
static void
lacp_run_timers(struct lacp_port *lp)
{
	int i;

	for (i = 0; i < LACP_NTIMER; i++) {
		KASSERT(lp->lp_timer[i] >= 0,
		    ("invalid timer value %d", lp->lp_timer[i]));
		if (lp->lp_timer[i] == 0) {
			continue;
		} else if (--lp->lp_timer[i] <= 0) {
			if (lacp_timer_funcs[i]) {
				(*lacp_timer_funcs[i])(lp);
			}
		}
	}
}

int
lacp_marker_input(struct lacp_port *lp, struct mbuf *m)
{
	struct lacp_softc *lsc = lp->lp_lsc;
	struct lagg_port *lgp = lp->lp_lagg;
	struct lacp_port *lp2;
	struct markerdu *mdu;
	int error = 0;
	int pending = 0;

	if (m->m_pkthdr.len != sizeof(*mdu)) {
		goto bad;
	}

	if ((m->m_flags & M_MCAST) == 0) {
		goto bad;
	}

	if (m->m_len < sizeof(*mdu)) {
		m = m_pullup(m, sizeof(*mdu));
		if (m == NULL) {
			return (ENOMEM);
		}
	}

	mdu = mtod(m, struct markerdu *);

	if (memcmp(&mdu->mdu_eh.ether_dhost,
	    &ethermulticastaddr_slowprotocols, ETHER_ADDR_LEN)) {
		goto bad;
	}

	if (mdu->mdu_sph.sph_version != 1) {
		goto bad;
	}

	switch (mdu->mdu_tlv.tlv_type) {
	case MARKER_TYPE_INFO:
		if (tlv_check(mdu, sizeof(*mdu), &mdu->mdu_tlv,
		    marker_info_tlv_template, TRUE)) {
			goto bad;
		}
		mdu->mdu_tlv.tlv_type = MARKER_TYPE_RESPONSE;
		memcpy(&mdu->mdu_eh.ether_dhost,
		    &ethermulticastaddr_slowprotocols, ETHER_ADDR_LEN);
		memcpy(&mdu->mdu_eh.ether_shost,
		    lgp->lp_lladdr, ETHER_ADDR_LEN);
		error = lagg_enqueue(lp->lp_ifp, m);
		break;

	case MARKER_TYPE_RESPONSE:
		if (tlv_check(mdu, sizeof(*mdu), &mdu->mdu_tlv,
		    marker_response_tlv_template, TRUE)) {
			goto bad;
		}
		LACP_DPRINTF((lp, "marker response, port=%u, sys=%6D, id=%u\n",
		    ntohs(mdu->mdu_info.mi_rq_port), mdu->mdu_info.mi_rq_system,
		    ":", ntohl(mdu->mdu_info.mi_rq_xid)));

		/* Verify that it is the last marker we sent out */
		if (memcmp(&mdu->mdu_info, &lp->lp_marker,
		    sizeof(struct lacp_markerinfo)))
			goto bad;

		LACP_LOCK(lsc);
		lp->lp_flags &= ~LACP_PORT_MARK;

		if (lsc->lsc_suppress_distributing) {
			/* Check if any ports are waiting for a response */
			LIST_FOREACH(lp2, &lsc->lsc_ports, lp_next) {
				if (lp2->lp_flags & LACP_PORT_MARK) {
					pending = 1;
					break;
				}
			}

			if (pending == 0) {
				/* All interface queues are clear */
				LACP_DPRINTF((NULL, "queue flush complete\n"));
				lsc->lsc_suppress_distributing = FALSE;
			}
		}
		LACP_UNLOCK(lsc);
		m_freem(m);
		break;

	default:
		goto bad;
	}

	return (error);

bad:
	LACP_DPRINTF((lp, "bad marker frame\n"));
	m_freem(m);
	return (EINVAL);
}

static int
tlv_check(const void *p, size_t size, const struct tlvhdr *tlv,
    const struct tlv_template *tmpl, boolean_t check_type)
{
	while (/* CONSTCOND */ 1) {
		if ((const char *)tlv - (const char *)p + sizeof(*tlv) > size) {
			return (EINVAL);
		}
		if ((check_type && tlv->tlv_type != tmpl->tmpl_type) ||
		    tlv->tlv_length != tmpl->tmpl_length) {
			return (EINVAL);
		}
		if (tmpl->tmpl_type == 0) {
			break;
		}
		tlv = (const struct tlvhdr *)
		    ((const char *)tlv + tlv->tlv_length);
		tmpl++;
	}

	return (0);
}

/* Debugging */
const char *
lacp_format_mac(const uint8_t *mac, char *buf, size_t
buflen) 2093 { 2094 snprintf(buf, buflen, "%02X-%02X-%02X-%02X-%02X-%02X", 2095 (int)mac[0], 2096 (int)mac[1], 2097 (int)mac[2], 2098 (int)mac[3], 2099 (int)mac[4], 2100 (int)mac[5]); 2101 2102 return (buf); 2103 } 2104 2105 const char * 2106 lacp_format_systemid(const struct lacp_systemid *sysid, 2107 char *buf, size_t buflen) 2108 { 2109 char macbuf[LACP_MACSTR_MAX+1]; 2110 2111 snprintf(buf, buflen, "%04X,%s", 2112 ntohs(sysid->lsi_prio), 2113 lacp_format_mac(sysid->lsi_mac, macbuf, sizeof(macbuf))); 2114 2115 return (buf); 2116 } 2117 2118 const char * 2119 lacp_format_portid(const struct lacp_portid *portid, char *buf, size_t buflen) 2120 { 2121 snprintf(buf, buflen, "%04X,%04X", 2122 ntohs(portid->lpi_prio), 2123 ntohs(portid->lpi_portno)); 2124 2125 return (buf); 2126 } 2127 2128 const char * 2129 lacp_format_partner(const struct lacp_peerinfo *peer, char *buf, size_t buflen) 2130 { 2131 char sysid[LACP_SYSTEMIDSTR_MAX+1]; 2132 char portid[LACP_PORTIDSTR_MAX+1]; 2133 2134 snprintf(buf, buflen, "(%s,%04X,%s)", 2135 lacp_format_systemid(&peer->lip_systemid, sysid, sizeof(sysid)), 2136 ntohs(peer->lip_key), 2137 lacp_format_portid(&peer->lip_portid, portid, sizeof(portid))); 2138 2139 return (buf); 2140 } 2141 2142 const char * 2143 lacp_format_lagid(const struct lacp_peerinfo *a, 2144 const struct lacp_peerinfo *b, char *buf, size_t buflen) 2145 { 2146 char astr[LACP_PARTNERSTR_MAX+1]; 2147 char bstr[LACP_PARTNERSTR_MAX+1]; 2148 2149 #if 0 2150 /* 2151 * there's a convention to display small numbered peer 2152 * in the left. 2153 */ 2154 2155 if (lacp_compare_peerinfo(a, b) > 0) { 2156 const struct lacp_peerinfo *t; 2157 2158 t = a; 2159 a = b; 2160 b = t; 2161 } 2162 #endif 2163 2164 snprintf(buf, buflen, "[%s,%s]", 2165 lacp_format_partner(a, astr, sizeof(astr)), 2166 lacp_format_partner(b, bstr, sizeof(bstr))); 2167 2168 return (buf); 2169 } 2170 2171 const char * 2172 lacp_format_lagid_aggregator(const struct lacp_aggregator *la, 2173 char *buf, size_t buflen) 2174 { 2175 if (la == NULL) { 2176 return ("(none)"); 2177 } 2178 2179 return (lacp_format_lagid(&la->la_actor, &la->la_partner, buf, buflen)); 2180 } 2181 2182 const char * 2183 lacp_format_state(uint8_t state, char *buf, size_t buflen) 2184 { 2185 snprintf(buf, buflen, "%b", state, LACP_STATE_BITS); 2186 return (buf); 2187 } 2188 2189 static void 2190 lacp_dump_lacpdu(const struct lacpdu *du) 2191 { 2192 char buf[LACP_PARTNERSTR_MAX+1]; 2193 char buf2[LACP_STATESTR_MAX+1]; 2194 2195 printf("actor=%s\n", 2196 lacp_format_partner(&du->ldu_actor, buf, sizeof(buf))); 2197 printf("actor.state=%s\n", 2198 lacp_format_state(du->ldu_actor.lip_state, buf2, sizeof(buf2))); 2199 printf("partner=%s\n", 2200 lacp_format_partner(&du->ldu_partner, buf, sizeof(buf))); 2201 printf("partner.state=%s\n", 2202 lacp_format_state(du->ldu_partner.lip_state, buf2, sizeof(buf2))); 2203 2204 printf("maxdelay=%d\n", ntohs(du->ldu_collector.lci_maxdelay)); 2205 } 2206 2207 static void 2208 lacp_dprintf(const struct lacp_port *lp, const char *fmt, ...) 2209 { 2210 va_list va; 2211 2212 if (lp) { 2213 printf("%s: ", lp->lp_ifp->if_xname); 2214 } 2215 2216 va_start(va, fmt); 2217 vprintf(fmt, va); 2218 va_end(va); 2219 } 2220
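/*
 * Debugging note (informational): the formatting helpers above are driven
 * by the LACP_DPRINTF/LACP_TRACE/LACP_TPRINTF macros near the top of this
 * file, which are gated on bits 0x01, 0x02 and 0x04 of the
 * net.link.lagg.lacp.debug sysctl/tunable.  lacp_format_state() relies on
 * the kernel printf "%b" conversion together with the LACP_STATE_BITS
 * format string from the LACP header to decode the state bits.
 */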