1 /* $NetBSD: ieee8023ad_lacp.c,v 1.3 2005/12/11 12:24:54 christos Exp $ */ 2 3 /*- 4 * SPDX-License-Identifier: BSD-2-Clause-NetBSD 5 * 6 * Copyright (c)2005 YAMAMOTO Takashi, 7 * Copyright (c)2008 Andrew Thompson <thompsa@FreeBSD.org> 8 * All rights reserved. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 29 * SUCH DAMAGE. 30 */ 31 32 #include <sys/cdefs.h> 33 __FBSDID("$FreeBSD$"); 34 35 #include "opt_kern_tls.h" 36 #include "opt_ratelimit.h" 37 38 #include <sys/param.h> 39 #include <sys/callout.h> 40 #include <sys/eventhandler.h> 41 #include <sys/mbuf.h> 42 #include <sys/systm.h> 43 #include <sys/malloc.h> 44 #include <sys/kernel.h> /* hz */ 45 #include <sys/socket.h> /* for net/if.h */ 46 #include <sys/sockio.h> 47 #include <sys/sysctl.h> 48 #include <machine/stdarg.h> 49 #include <sys/lock.h> 50 #include <sys/rwlock.h> 51 #include <sys/taskqueue.h> 52 #include <sys/time.h> 53 54 #include <net/if.h> 55 #include <net/if_var.h> 56 #include <net/if_private.h> 57 #include <net/if_dl.h> 58 #include <net/ethernet.h> 59 #include <net/infiniband.h> 60 #include <net/if_media.h> 61 #include <net/if_types.h> 62 63 #include <net/if_lagg.h> 64 #include <net/ieee8023ad_lacp.h> 65 66 /* 67 * actor system priority and port priority. 68 * XXX should be configurable. 
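 *
 * Both are 16-bit priorities in which a numerically lower value wins;
 * the system priority is paired with the interface MAC to form the
 * system ID, and the port priority with the port number to form the
 * port ID, so a partner advertising e.g. 0x0100 outranks our 0x8000
 * default below.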
69 */ 70 71 #define LACP_SYSTEM_PRIO 0x8000 72 #define LACP_PORT_PRIO 0x8000 73 74 const uint8_t ethermulticastaddr_slowprotocols[ETHER_ADDR_LEN] = 75 { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x02 }; 76 77 static const struct tlv_template lacp_info_tlv_template[] = { 78 { LACP_TYPE_ACTORINFO, 79 sizeof(struct tlvhdr) + sizeof(struct lacp_peerinfo) }, 80 { LACP_TYPE_PARTNERINFO, 81 sizeof(struct tlvhdr) + sizeof(struct lacp_peerinfo) }, 82 { LACP_TYPE_COLLECTORINFO, 83 sizeof(struct tlvhdr) + sizeof(struct lacp_collectorinfo) }, 84 { 0, 0 }, 85 }; 86 87 static const struct tlv_template marker_info_tlv_template[] = { 88 { MARKER_TYPE_INFO, 89 sizeof(struct tlvhdr) + sizeof(struct lacp_markerinfo) }, 90 { 0, 0 }, 91 }; 92 93 static const struct tlv_template marker_response_tlv_template[] = { 94 { MARKER_TYPE_RESPONSE, 95 sizeof(struct tlvhdr) + sizeof(struct lacp_markerinfo) }, 96 { 0, 0 }, 97 }; 98 99 typedef void (*lacp_timer_func_t)(struct lacp_port *); 100 101 static void lacp_fill_actorinfo(struct lacp_port *, struct lacp_peerinfo *); 102 static void lacp_fill_markerinfo(struct lacp_port *, 103 struct lacp_markerinfo *); 104 105 static uint64_t lacp_aggregator_bandwidth(struct lacp_aggregator *); 106 static void lacp_suppress_distributing(struct lacp_softc *, 107 struct lacp_aggregator *); 108 static void lacp_transit_expire(void *); 109 static void lacp_update_portmap(struct lacp_softc *); 110 static void lacp_select_active_aggregator(struct lacp_softc *); 111 static uint16_t lacp_compose_key(struct lacp_port *); 112 static int tlv_check(const void *, size_t, const struct tlvhdr *, 113 const struct tlv_template *, boolean_t); 114 static void lacp_tick(void *); 115 116 static void lacp_fill_aggregator_id(struct lacp_aggregator *, 117 const struct lacp_port *); 118 static void lacp_fill_aggregator_id_peer(struct lacp_peerinfo *, 119 const struct lacp_peerinfo *); 120 static int lacp_aggregator_is_compatible(const struct lacp_aggregator *, 121 const struct lacp_port *); 122 static int lacp_peerinfo_is_compatible(const struct lacp_peerinfo *, 123 const struct lacp_peerinfo *); 124 125 static struct lacp_aggregator *lacp_aggregator_get(struct lacp_softc *, 126 struct lacp_port *); 127 static void lacp_aggregator_addref(struct lacp_softc *, 128 struct lacp_aggregator *); 129 static void lacp_aggregator_delref(struct lacp_softc *, 130 struct lacp_aggregator *); 131 132 /* receive machine */ 133 134 static int lacp_pdu_input(struct lacp_port *, struct mbuf *); 135 static int lacp_marker_input(struct lacp_port *, struct mbuf *); 136 static void lacp_sm_rx(struct lacp_port *, const struct lacpdu *); 137 static void lacp_sm_rx_timer(struct lacp_port *); 138 static void lacp_sm_rx_set_expired(struct lacp_port *); 139 static void lacp_sm_rx_update_ntt(struct lacp_port *, 140 const struct lacpdu *); 141 static void lacp_sm_rx_record_pdu(struct lacp_port *, 142 const struct lacpdu *); 143 static void lacp_sm_rx_update_selected(struct lacp_port *, 144 const struct lacpdu *); 145 static void lacp_sm_rx_record_default(struct lacp_port *); 146 static void lacp_sm_rx_update_default_selected(struct lacp_port *); 147 static void lacp_sm_rx_update_selected_from_peerinfo(struct lacp_port *, 148 const struct lacp_peerinfo *); 149 150 /* mux machine */ 151 152 static void lacp_sm_mux(struct lacp_port *); 153 static void lacp_set_mux(struct lacp_port *, enum lacp_mux_state); 154 static void lacp_sm_mux_timer(struct lacp_port *); 155 156 /* periodic transmit machine */ 157 158 static void lacp_sm_ptx_update_timeout(struct 
lacp_port *, uint8_t); 159 static void lacp_sm_ptx_tx_schedule(struct lacp_port *); 160 static void lacp_sm_ptx_timer(struct lacp_port *); 161 162 /* transmit machine */ 163 164 static void lacp_sm_tx(struct lacp_port *); 165 static void lacp_sm_assert_ntt(struct lacp_port *); 166 167 static void lacp_run_timers(struct lacp_port *); 168 static int lacp_compare_peerinfo(const struct lacp_peerinfo *, 169 const struct lacp_peerinfo *); 170 static int lacp_compare_systemid(const struct lacp_systemid *, 171 const struct lacp_systemid *); 172 static void lacp_port_enable(struct lacp_port *); 173 static void lacp_port_disable(struct lacp_port *); 174 static void lacp_select(struct lacp_port *); 175 static void lacp_unselect(struct lacp_port *); 176 static void lacp_disable_collecting(struct lacp_port *); 177 static void lacp_enable_collecting(struct lacp_port *); 178 static void lacp_disable_distributing(struct lacp_port *); 179 static void lacp_enable_distributing(struct lacp_port *); 180 static int lacp_xmit_lacpdu(struct lacp_port *); 181 static int lacp_xmit_marker(struct lacp_port *); 182 183 /* Debugging */ 184 185 static void lacp_dump_lacpdu(const struct lacpdu *); 186 static const char *lacp_format_partner(const struct lacp_peerinfo *, char *, 187 size_t); 188 static const char *lacp_format_lagid(const struct lacp_peerinfo *, 189 const struct lacp_peerinfo *, char *, size_t); 190 static const char *lacp_format_lagid_aggregator(const struct lacp_aggregator *, 191 char *, size_t); 192 static const char *lacp_format_state(uint8_t, char *, size_t); 193 static const char *lacp_format_mac(const uint8_t *, char *, size_t); 194 static const char *lacp_format_systemid(const struct lacp_systemid *, char *, 195 size_t); 196 static const char *lacp_format_portid(const struct lacp_portid *, char *, 197 size_t); 198 static void lacp_dprintf(const struct lacp_port *, const char *, ...) 199 __attribute__((__format__(__printf__, 2, 3))); 200 201 VNET_DEFINE_STATIC(int, lacp_debug); 202 #define V_lacp_debug VNET(lacp_debug) 203 SYSCTL_NODE(_net_link_lagg, OID_AUTO, lacp, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 204 "ieee802.3ad"); 205 SYSCTL_INT(_net_link_lagg_lacp, OID_AUTO, debug, CTLFLAG_RWTUN | CTLFLAG_VNET, 206 &VNET_NAME(lacp_debug), 0, "Enable LACP debug logging (1=debug, 2=trace)"); 207 208 VNET_DEFINE_STATIC(int, lacp_default_strict_mode) = 1; 209 SYSCTL_INT(_net_link_lagg_lacp, OID_AUTO, default_strict_mode, 210 CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(lacp_default_strict_mode), 0, 211 "LACP strict protocol compliance default"); 212 #define LACP_DPRINTF(a) if (V_lacp_debug & 0x01) { lacp_dprintf a ; } 213 #define LACP_TRACE(a) if (V_lacp_debug & 0x02) { lacp_dprintf(a,"%s\n",__func__); } 214 #define LACP_TPRINTF(a) if (V_lacp_debug & 0x04) { lacp_dprintf a ; } 215 216 /* 217 * partner administration variables. 218 * XXX should be configurable. 
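 *
 * These are the defaults installed while no partner information has been
 * received (the DEFAULTED state).  The "optimistic" partner pretends the
 * peer is in sync and willing to aggregate, collect and distribute, so a
 * port without a LACP peer can still carry traffic; the "strict" partner
 * advertises no state bits, so such a port stays down until real LACPDUs
 * arrive.  The choice follows lsc_strict_mode, seeded from the
 * net.link.lagg.lacp.default_strict_mode sysctl above.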
 */

static const struct lacp_peerinfo lacp_partner_admin_optimistic = {
	.lip_systemid = { .lsi_prio = 0xffff },
	.lip_portid = { .lpi_prio = 0xffff },
	.lip_state = LACP_STATE_SYNC | LACP_STATE_AGGREGATION |
	    LACP_STATE_COLLECTING | LACP_STATE_DISTRIBUTING,
};

static const struct lacp_peerinfo lacp_partner_admin_strict = {
	.lip_systemid = { .lsi_prio = 0xffff },
	.lip_portid = { .lpi_prio = 0xffff },
	.lip_state = 0,
};

static const lacp_timer_func_t lacp_timer_funcs[LACP_NTIMER] = {
	[LACP_TIMER_CURRENT_WHILE] = lacp_sm_rx_timer,
	[LACP_TIMER_PERIODIC] = lacp_sm_ptx_timer,
	[LACP_TIMER_WAIT_WHILE] = lacp_sm_mux_timer,
};

struct mbuf *
lacp_input(struct lagg_port *lgp, struct mbuf *m)
{
	struct lacp_port *lp = LACP_PORT(lgp);
	uint8_t subtype;

	if (m->m_pkthdr.len < sizeof(struct ether_header) + sizeof(subtype)) {
		m_freem(m);
		return (NULL);
	}

	m_copydata(m, sizeof(struct ether_header), sizeof(subtype), &subtype);
	switch (subtype) {
	case SLOWPROTOCOLS_SUBTYPE_LACP:
		lacp_pdu_input(lp, m);
		return (NULL);

	case SLOWPROTOCOLS_SUBTYPE_MARKER:
		lacp_marker_input(lp, m);
		return (NULL);
	}

	/* Not a subtype we are interested in */
	return (m);
}

/*
 * lacp_pdu_input: process lacpdu
 */
static int
lacp_pdu_input(struct lacp_port *lp, struct mbuf *m)
{
	struct lacp_softc *lsc = lp->lp_lsc;
	struct lacpdu *du;
	int error = 0;

	if (m->m_pkthdr.len != sizeof(*du)) {
		goto bad;
	}

	if ((m->m_flags & M_MCAST) == 0) {
		goto bad;
	}

	if (m->m_len < sizeof(*du)) {
		m = m_pullup(m, sizeof(*du));
		if (m == NULL) {
			return (ENOMEM);
		}
	}

	du = mtod(m, struct lacpdu *);

	if (memcmp(&du->ldu_eh.ether_dhost,
	    &ethermulticastaddr_slowprotocols, ETHER_ADDR_LEN)) {
		goto bad;
	}

	/*
	 * ignore the version for compatibility with
	 * the future protocol revisions.
	 */
#if 0
	if (du->ldu_sph.sph_version != 1) {
		goto bad;
	}
#endif

	/*
	 * ignore tlv types for compatibility with
	 * the future protocol revisions.
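 *
 * Passing check_type == FALSE below makes tlv_check() compare only the
 * TLV lengths, so a revision that renumbers these TLVs is still accepted
 * as long as their sizes and ordering are unchanged.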
311 */ 312 if (tlv_check(du, sizeof(*du), &du->ldu_tlv_actor, 313 lacp_info_tlv_template, FALSE)) { 314 goto bad; 315 } 316 317 if (V_lacp_debug > 0) { 318 lacp_dprintf(lp, "lacpdu receive\n"); 319 lacp_dump_lacpdu(du); 320 } 321 322 if ((1 << lp->lp_ifp->if_dunit) & lp->lp_lsc->lsc_debug.lsc_rx_test) { 323 LACP_TPRINTF((lp, "Dropping RX PDU\n")); 324 goto bad; 325 } 326 327 LACP_LOCK(lsc); 328 lacp_sm_rx(lp, du); 329 LACP_UNLOCK(lsc); 330 331 m_freem(m); 332 return (error); 333 334 bad: 335 m_freem(m); 336 return (EINVAL); 337 } 338 339 static void 340 lacp_fill_actorinfo(struct lacp_port *lp, struct lacp_peerinfo *info) 341 { 342 struct lagg_port *lgp = lp->lp_lagg; 343 struct lagg_softc *sc = lgp->lp_softc; 344 345 info->lip_systemid.lsi_prio = htons(LACP_SYSTEM_PRIO); 346 memcpy(&info->lip_systemid.lsi_mac, 347 IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN); 348 info->lip_portid.lpi_prio = htons(LACP_PORT_PRIO); 349 info->lip_portid.lpi_portno = htons(lp->lp_ifp->if_index); 350 info->lip_state = lp->lp_state; 351 } 352 353 static void 354 lacp_fill_markerinfo(struct lacp_port *lp, struct lacp_markerinfo *info) 355 { 356 struct ifnet *ifp = lp->lp_ifp; 357 358 /* Fill in the port index and system id (encoded as the MAC) */ 359 info->mi_rq_port = htons(ifp->if_index); 360 memcpy(&info->mi_rq_system, lp->lp_systemid.lsi_mac, ETHER_ADDR_LEN); 361 info->mi_rq_xid = htonl(0); 362 } 363 364 static int 365 lacp_xmit_lacpdu(struct lacp_port *lp) 366 { 367 struct lagg_port *lgp = lp->lp_lagg; 368 struct mbuf *m; 369 struct lacpdu *du; 370 int error; 371 372 LACP_LOCK_ASSERT(lp->lp_lsc); 373 374 m = m_gethdr(M_NOWAIT, MT_DATA); 375 if (m == NULL) { 376 return (ENOMEM); 377 } 378 m->m_len = m->m_pkthdr.len = sizeof(*du); 379 380 du = mtod(m, struct lacpdu *); 381 memset(du, 0, sizeof(*du)); 382 383 memcpy(&du->ldu_eh.ether_dhost, ethermulticastaddr_slowprotocols, 384 ETHER_ADDR_LEN); 385 memcpy(&du->ldu_eh.ether_shost, lgp->lp_lladdr, ETHER_ADDR_LEN); 386 du->ldu_eh.ether_type = htons(ETHERTYPE_SLOW); 387 388 du->ldu_sph.sph_subtype = SLOWPROTOCOLS_SUBTYPE_LACP; 389 du->ldu_sph.sph_version = 1; 390 391 TLV_SET(&du->ldu_tlv_actor, LACP_TYPE_ACTORINFO, sizeof(du->ldu_actor)); 392 du->ldu_actor = lp->lp_actor; 393 394 TLV_SET(&du->ldu_tlv_partner, LACP_TYPE_PARTNERINFO, 395 sizeof(du->ldu_partner)); 396 du->ldu_partner = lp->lp_partner; 397 398 TLV_SET(&du->ldu_tlv_collector, LACP_TYPE_COLLECTORINFO, 399 sizeof(du->ldu_collector)); 400 du->ldu_collector.lci_maxdelay = 0; 401 402 if (V_lacp_debug > 0) { 403 lacp_dprintf(lp, "lacpdu transmit\n"); 404 lacp_dump_lacpdu(du); 405 } 406 407 m->m_flags |= M_MCAST; 408 409 /* 410 * XXX should use higher priority queue. 411 * otherwise network congestion can break aggregation. 
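 *
 * lagg_enqueue() hands the LACPDU to the member interface's normal
 * transmit path, so under sustained congestion it can be dropped like
 * any other frame.  If enough consecutive LACPDUs are lost, the
 * partner's current_while timer expires (nominally 3 seconds with the
 * short timeout, 90 seconds with the long one) and the link drops out
 * of the aggregate.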
412 */ 413 414 error = lagg_enqueue(lp->lp_ifp, m); 415 return (error); 416 } 417 418 static int 419 lacp_xmit_marker(struct lacp_port *lp) 420 { 421 struct lagg_port *lgp = lp->lp_lagg; 422 struct mbuf *m; 423 struct markerdu *mdu; 424 int error; 425 426 LACP_LOCK_ASSERT(lp->lp_lsc); 427 428 m = m_gethdr(M_NOWAIT, MT_DATA); 429 if (m == NULL) { 430 return (ENOMEM); 431 } 432 m->m_len = m->m_pkthdr.len = sizeof(*mdu); 433 434 mdu = mtod(m, struct markerdu *); 435 memset(mdu, 0, sizeof(*mdu)); 436 437 memcpy(&mdu->mdu_eh.ether_dhost, ethermulticastaddr_slowprotocols, 438 ETHER_ADDR_LEN); 439 memcpy(&mdu->mdu_eh.ether_shost, lgp->lp_lladdr, ETHER_ADDR_LEN); 440 mdu->mdu_eh.ether_type = htons(ETHERTYPE_SLOW); 441 442 mdu->mdu_sph.sph_subtype = SLOWPROTOCOLS_SUBTYPE_MARKER; 443 mdu->mdu_sph.sph_version = 1; 444 445 /* Bump the transaction id and copy over the marker info */ 446 lp->lp_marker.mi_rq_xid = htonl(ntohl(lp->lp_marker.mi_rq_xid) + 1); 447 TLV_SET(&mdu->mdu_tlv, MARKER_TYPE_INFO, sizeof(mdu->mdu_info)); 448 mdu->mdu_info = lp->lp_marker; 449 450 LACP_DPRINTF((lp, "marker transmit, port=%u, sys=%6D, id=%u\n", 451 ntohs(mdu->mdu_info.mi_rq_port), mdu->mdu_info.mi_rq_system, ":", 452 ntohl(mdu->mdu_info.mi_rq_xid))); 453 454 m->m_flags |= M_MCAST; 455 error = lagg_enqueue(lp->lp_ifp, m); 456 return (error); 457 } 458 459 void 460 lacp_linkstate(struct lagg_port *lgp) 461 { 462 struct lacp_port *lp = LACP_PORT(lgp); 463 struct lacp_softc *lsc = lp->lp_lsc; 464 struct ifnet *ifp = lgp->lp_ifp; 465 struct ifmediareq ifmr; 466 int error = 0; 467 u_int media; 468 uint8_t old_state; 469 uint16_t old_key; 470 471 bzero((char *)&ifmr, sizeof(ifmr)); 472 error = (*ifp->if_ioctl)(ifp, SIOCGIFXMEDIA, (caddr_t)&ifmr); 473 if (error != 0) { 474 bzero((char *)&ifmr, sizeof(ifmr)); 475 error = (*ifp->if_ioctl)(ifp, SIOCGIFMEDIA, (caddr_t)&ifmr); 476 } 477 if (error != 0) 478 return; 479 480 LACP_LOCK(lsc); 481 media = ifmr.ifm_active; 482 LACP_DPRINTF((lp, "media changed 0x%x -> 0x%x, ether = %d, fdx = %d, " 483 "link = %d\n", lp->lp_media, media, IFM_TYPE(media) == IFM_ETHER, 484 (media & IFM_FDX) != 0, ifp->if_link_state == LINK_STATE_UP)); 485 old_state = lp->lp_state; 486 old_key = lp->lp_key; 487 488 lp->lp_media = media; 489 /* 490 * If the port is not an active full duplex Ethernet link then it can 491 * not be aggregated. 
 */
	if (IFM_TYPE(media) != IFM_ETHER || (media & IFM_FDX) == 0 ||
	    ifp->if_link_state != LINK_STATE_UP) {
		lacp_port_disable(lp);
	} else {
		lacp_port_enable(lp);
	}
	lp->lp_key = lacp_compose_key(lp);

	if (old_state != lp->lp_state || old_key != lp->lp_key) {
		LACP_DPRINTF((lp, "-> UNSELECTED\n"));
		lp->lp_selected = LACP_UNSELECTED;
	}
	LACP_UNLOCK(lsc);
}

static void
lacp_tick(void *arg)
{
	struct lacp_softc *lsc = arg;
	struct lacp_port *lp;

	LIST_FOREACH(lp, &lsc->lsc_ports, lp_next) {
		if ((lp->lp_state & LACP_STATE_AGGREGATION) == 0)
			continue;

		CURVNET_SET(lp->lp_ifp->if_vnet);
		lacp_run_timers(lp);

		lacp_select(lp);
		lacp_sm_mux(lp);
		lacp_sm_tx(lp);
		lacp_sm_ptx_tx_schedule(lp);
		CURVNET_RESTORE();
	}
	callout_reset(&lsc->lsc_callout, hz, lacp_tick, lsc);
}

int
lacp_port_create(struct lagg_port *lgp)
{
	struct lagg_softc *sc = lgp->lp_softc;
	struct lacp_softc *lsc = LACP_SOFTC(sc);
	struct lacp_port *lp;
	struct ifnet *ifp = lgp->lp_ifp;
	struct sockaddr_dl sdl;
	struct ifmultiaddr *rifma = NULL;
	int error;

	link_init_sdl(ifp, (struct sockaddr *)&sdl, IFT_ETHER);
	sdl.sdl_alen = ETHER_ADDR_LEN;

	bcopy(&ethermulticastaddr_slowprotocols,
	    LLADDR(&sdl), ETHER_ADDR_LEN);
	error = if_addmulti(ifp, (struct sockaddr *)&sdl, &rifma);
	if (error) {
		printf("%s: ADDMULTI failed on %s\n", __func__,
		    lgp->lp_ifp->if_xname);
		return (error);
	}

	lp = malloc(sizeof(struct lacp_port),
	    M_DEVBUF, M_NOWAIT|M_ZERO);
	if (lp == NULL)
		return (ENOMEM);

	LACP_LOCK(lsc);
	lgp->lp_psc = lp;
	lp->lp_ifp = ifp;
	lp->lp_lagg = lgp;
	lp->lp_lsc = lsc;
	lp->lp_ifma = rifma;

	LIST_INSERT_HEAD(&lsc->lsc_ports, lp, lp_next);

	lacp_fill_actorinfo(lp, &lp->lp_actor);
	lacp_fill_markerinfo(lp, &lp->lp_marker);
	lp->lp_state = LACP_STATE_ACTIVITY;
	lp->lp_aggregator = NULL;
	lacp_sm_rx_set_expired(lp);
	LACP_UNLOCK(lsc);
	lacp_linkstate(lgp);

	return (0);
}

void
lacp_port_destroy(struct lagg_port *lgp)
{
	struct lacp_port *lp = LACP_PORT(lgp);
	struct lacp_softc *lsc = lp->lp_lsc;
	int i;

	LACP_LOCK(lsc);
	for (i = 0; i < LACP_NTIMER; i++) {
		LACP_TIMER_DISARM(lp, i);
	}

	lacp_disable_collecting(lp);
	lacp_disable_distributing(lp);
	lacp_unselect(lp);

	LIST_REMOVE(lp, lp_next);
	LACP_UNLOCK(lsc);

	/* The address may have already been removed by if_purgemaddrs() */
	if (!lgp->lp_detaching)
		if_delmulti_ifma(lp->lp_ifma);

	free(lp, M_DEVBUF);
}

void
lacp_req(struct lagg_softc *sc, void *data)
{
	struct lacp_opreq *req = (struct lacp_opreq *)data;
	struct lacp_softc *lsc = LACP_SOFTC(sc);
	struct lacp_aggregator *la;

	bzero(req, sizeof(struct lacp_opreq));

	/*
	 * If the LACP softc is NULL, return with the opreq structure full of
	 * zeros.  It is normal for the softc to be NULL while the lagg is
	 * being destroyed.
617 */ 618 if (NULL == lsc) 619 return; 620 621 la = lsc->lsc_active_aggregator; 622 LACP_LOCK(lsc); 623 if (la != NULL) { 624 req->actor_prio = ntohs(la->la_actor.lip_systemid.lsi_prio); 625 memcpy(&req->actor_mac, &la->la_actor.lip_systemid.lsi_mac, 626 ETHER_ADDR_LEN); 627 req->actor_key = ntohs(la->la_actor.lip_key); 628 req->actor_portprio = ntohs(la->la_actor.lip_portid.lpi_prio); 629 req->actor_portno = ntohs(la->la_actor.lip_portid.lpi_portno); 630 req->actor_state = la->la_actor.lip_state; 631 632 req->partner_prio = ntohs(la->la_partner.lip_systemid.lsi_prio); 633 memcpy(&req->partner_mac, &la->la_partner.lip_systemid.lsi_mac, 634 ETHER_ADDR_LEN); 635 req->partner_key = ntohs(la->la_partner.lip_key); 636 req->partner_portprio = ntohs(la->la_partner.lip_portid.lpi_prio); 637 req->partner_portno = ntohs(la->la_partner.lip_portid.lpi_portno); 638 req->partner_state = la->la_partner.lip_state; 639 } 640 LACP_UNLOCK(lsc); 641 } 642 643 void 644 lacp_portreq(struct lagg_port *lgp, void *data) 645 { 646 struct lacp_opreq *req = (struct lacp_opreq *)data; 647 struct lacp_port *lp = LACP_PORT(lgp); 648 struct lacp_softc *lsc = lp->lp_lsc; 649 650 LACP_LOCK(lsc); 651 req->actor_prio = ntohs(lp->lp_actor.lip_systemid.lsi_prio); 652 memcpy(&req->actor_mac, &lp->lp_actor.lip_systemid.lsi_mac, 653 ETHER_ADDR_LEN); 654 req->actor_key = ntohs(lp->lp_actor.lip_key); 655 req->actor_portprio = ntohs(lp->lp_actor.lip_portid.lpi_prio); 656 req->actor_portno = ntohs(lp->lp_actor.lip_portid.lpi_portno); 657 req->actor_state = lp->lp_actor.lip_state; 658 659 req->partner_prio = ntohs(lp->lp_partner.lip_systemid.lsi_prio); 660 memcpy(&req->partner_mac, &lp->lp_partner.lip_systemid.lsi_mac, 661 ETHER_ADDR_LEN); 662 req->partner_key = ntohs(lp->lp_partner.lip_key); 663 req->partner_portprio = ntohs(lp->lp_partner.lip_portid.lpi_prio); 664 req->partner_portno = ntohs(lp->lp_partner.lip_portid.lpi_portno); 665 req->partner_state = lp->lp_partner.lip_state; 666 LACP_UNLOCK(lsc); 667 } 668 669 static void 670 lacp_disable_collecting(struct lacp_port *lp) 671 { 672 LACP_DPRINTF((lp, "collecting disabled\n")); 673 lp->lp_state &= ~LACP_STATE_COLLECTING; 674 } 675 676 static void 677 lacp_enable_collecting(struct lacp_port *lp) 678 { 679 LACP_DPRINTF((lp, "collecting enabled\n")); 680 lp->lp_state |= LACP_STATE_COLLECTING; 681 } 682 683 static void 684 lacp_disable_distributing(struct lacp_port *lp) 685 { 686 struct lacp_aggregator *la = lp->lp_aggregator; 687 struct lacp_softc *lsc = lp->lp_lsc; 688 struct lagg_softc *sc = lsc->lsc_softc; 689 char buf[LACP_LAGIDSTR_MAX+1]; 690 691 LACP_LOCK_ASSERT(lsc); 692 693 if (la == NULL || (lp->lp_state & LACP_STATE_DISTRIBUTING) == 0) { 694 return; 695 } 696 697 KASSERT(!TAILQ_EMPTY(&la->la_ports), ("no aggregator ports")); 698 KASSERT(la->la_nports > 0, ("nports invalid (%d)", la->la_nports)); 699 KASSERT(la->la_refcnt >= la->la_nports, ("aggregator refcnt invalid")); 700 701 LACP_DPRINTF((lp, "disable distributing on aggregator %s, " 702 "nports %d -> %d\n", 703 lacp_format_lagid_aggregator(la, buf, sizeof(buf)), 704 la->la_nports, la->la_nports - 1)); 705 706 TAILQ_REMOVE(&la->la_ports, lp, lp_dist_q); 707 la->la_nports--; 708 sc->sc_active = la->la_nports; 709 710 if (lsc->lsc_active_aggregator == la) { 711 lacp_suppress_distributing(lsc, la); 712 lacp_select_active_aggregator(lsc); 713 /* regenerate the port map, the active aggregator has changed */ 714 lacp_update_portmap(lsc); 715 } 716 717 lp->lp_state &= ~LACP_STATE_DISTRIBUTING; 718 
if_link_state_change(sc->sc_ifp, 719 sc->sc_active ? LINK_STATE_UP : LINK_STATE_DOWN); 720 } 721 722 static void 723 lacp_enable_distributing(struct lacp_port *lp) 724 { 725 struct lacp_aggregator *la = lp->lp_aggregator; 726 struct lacp_softc *lsc = lp->lp_lsc; 727 struct lagg_softc *sc = lsc->lsc_softc; 728 char buf[LACP_LAGIDSTR_MAX+1]; 729 730 LACP_LOCK_ASSERT(lsc); 731 732 if ((lp->lp_state & LACP_STATE_DISTRIBUTING) != 0) { 733 return; 734 } 735 736 LACP_DPRINTF((lp, "enable distributing on aggregator %s, " 737 "nports %d -> %d\n", 738 lacp_format_lagid_aggregator(la, buf, sizeof(buf)), 739 la->la_nports, la->la_nports + 1)); 740 741 KASSERT(la->la_refcnt > la->la_nports, ("aggregator refcnt invalid")); 742 TAILQ_INSERT_HEAD(&la->la_ports, lp, lp_dist_q); 743 la->la_nports++; 744 sc->sc_active = la->la_nports; 745 746 lp->lp_state |= LACP_STATE_DISTRIBUTING; 747 748 if (lsc->lsc_active_aggregator == la) { 749 lacp_suppress_distributing(lsc, la); 750 lacp_update_portmap(lsc); 751 } else 752 /* try to become the active aggregator */ 753 lacp_select_active_aggregator(lsc); 754 755 if_link_state_change(sc->sc_ifp, 756 sc->sc_active ? LINK_STATE_UP : LINK_STATE_DOWN); 757 } 758 759 static void 760 lacp_transit_expire(void *vp) 761 { 762 struct lacp_softc *lsc = vp; 763 764 LACP_LOCK_ASSERT(lsc); 765 766 CURVNET_SET(lsc->lsc_softc->sc_ifp->if_vnet); 767 LACP_TRACE(NULL); 768 CURVNET_RESTORE(); 769 770 lsc->lsc_suppress_distributing = FALSE; 771 } 772 773 void 774 lacp_attach(struct lagg_softc *sc) 775 { 776 struct lacp_softc *lsc; 777 778 lsc = malloc(sizeof(struct lacp_softc), M_DEVBUF, M_WAITOK | M_ZERO); 779 780 sc->sc_psc = lsc; 781 lsc->lsc_softc = sc; 782 783 lsc->lsc_hashkey = m_ether_tcpip_hash_init(); 784 lsc->lsc_active_aggregator = NULL; 785 lsc->lsc_strict_mode = VNET(lacp_default_strict_mode); 786 LACP_LOCK_INIT(lsc); 787 TAILQ_INIT(&lsc->lsc_aggregators); 788 LIST_INIT(&lsc->lsc_ports); 789 790 callout_init_mtx(&lsc->lsc_transit_callout, &lsc->lsc_mtx, 0); 791 callout_init_mtx(&lsc->lsc_callout, &lsc->lsc_mtx, 0); 792 793 /* if the lagg is already up then do the same */ 794 if (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) 795 lacp_init(sc); 796 } 797 798 void 799 lacp_detach(void *psc) 800 { 801 struct lacp_softc *lsc = (struct lacp_softc *)psc; 802 803 KASSERT(TAILQ_EMPTY(&lsc->lsc_aggregators), 804 ("aggregators still active")); 805 KASSERT(lsc->lsc_active_aggregator == NULL, 806 ("aggregator still attached")); 807 808 callout_drain(&lsc->lsc_transit_callout); 809 callout_drain(&lsc->lsc_callout); 810 811 LACP_LOCK_DESTROY(lsc); 812 free(lsc, M_DEVBUF); 813 } 814 815 void 816 lacp_init(struct lagg_softc *sc) 817 { 818 struct lacp_softc *lsc = LACP_SOFTC(sc); 819 820 LACP_LOCK(lsc); 821 callout_reset(&lsc->lsc_callout, hz, lacp_tick, lsc); 822 LACP_UNLOCK(lsc); 823 } 824 825 void 826 lacp_stop(struct lagg_softc *sc) 827 { 828 struct lacp_softc *lsc = LACP_SOFTC(sc); 829 830 LACP_LOCK(lsc); 831 callout_stop(&lsc->lsc_transit_callout); 832 callout_stop(&lsc->lsc_callout); 833 LACP_UNLOCK(lsc); 834 } 835 836 struct lagg_port * 837 lacp_select_tx_port_by_hash(struct lagg_softc *sc, uint32_t hash, 838 uint8_t numa_domain, int *err) 839 { 840 struct lacp_softc *lsc = LACP_SOFTC(sc); 841 struct lacp_portmap *pm; 842 struct lacp_port *lp; 843 struct lacp_port **map; 844 int count; 845 846 if (__predict_false(lsc->lsc_suppress_distributing)) { 847 LACP_DPRINTF((NULL, "%s: waiting transit\n", __func__)); 848 *err = ENOBUFS; 849 return (NULL); 850 } 851 852 pm = 
&lsc->lsc_pmap[lsc->lsc_activemap]; 853 if (pm->pm_count == 0) { 854 LACP_DPRINTF((NULL, "%s: no active aggregator\n", __func__)); 855 *err = ENETDOWN; 856 return (NULL); 857 } 858 859 #ifdef NUMA 860 if ((sc->sc_opts & LAGG_OPT_USE_NUMA) && 861 pm->pm_num_dom > 1 && numa_domain < MAXMEMDOM) { 862 count = pm->pm_numa[numa_domain].count; 863 if (count > 0) { 864 map = pm->pm_numa[numa_domain].map; 865 } else { 866 /* No ports on this domain; use global hash. */ 867 map = pm->pm_map; 868 count = pm->pm_count; 869 } 870 } else 871 #endif 872 { 873 map = pm->pm_map; 874 count = pm->pm_count; 875 } 876 877 hash %= count; 878 lp = map[hash]; 879 880 return (lp->lp_lagg); 881 } 882 883 struct lagg_port * 884 lacp_select_tx_port(struct lagg_softc *sc, struct mbuf *m, int *err) 885 { 886 struct lacp_softc *lsc = LACP_SOFTC(sc); 887 uint32_t hash; 888 uint8_t numa_domain; 889 890 if ((sc->sc_opts & LAGG_OPT_USE_FLOWID) && 891 M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) 892 hash = m->m_pkthdr.flowid >> sc->flowid_shift; 893 else 894 hash = m_ether_tcpip_hash(sc->sc_flags, m, lsc->lsc_hashkey); 895 896 numa_domain = m->m_pkthdr.numa_domain; 897 return (lacp_select_tx_port_by_hash(sc, hash, numa_domain, err)); 898 } 899 900 /* 901 * lacp_suppress_distributing: drop transmit packets for a while 902 * to preserve packet ordering. 903 */ 904 905 static void 906 lacp_suppress_distributing(struct lacp_softc *lsc, struct lacp_aggregator *la) 907 { 908 struct lacp_port *lp; 909 910 if (lsc->lsc_active_aggregator != la) { 911 return; 912 } 913 914 LACP_TRACE(NULL); 915 916 lsc->lsc_suppress_distributing = TRUE; 917 918 /* send a marker frame down each port to verify the queues are empty */ 919 LIST_FOREACH(lp, &lsc->lsc_ports, lp_next) { 920 lp->lp_flags |= LACP_PORT_MARK; 921 if (lacp_xmit_marker(lp) != 0) 922 lp->lp_flags &= ~LACP_PORT_MARK; 923 } 924 925 /* set a timeout for the marker frames */ 926 callout_reset(&lsc->lsc_transit_callout, 927 LACP_TRANSIT_DELAY * hz / 1000, lacp_transit_expire, lsc); 928 } 929 930 static int 931 lacp_compare_peerinfo(const struct lacp_peerinfo *a, 932 const struct lacp_peerinfo *b) 933 { 934 return (memcmp(a, b, offsetof(struct lacp_peerinfo, lip_state))); 935 } 936 937 static int 938 lacp_compare_systemid(const struct lacp_systemid *a, 939 const struct lacp_systemid *b) 940 { 941 return (memcmp(a, b, sizeof(*a))); 942 } 943 944 #if 0 /* unused */ 945 static int 946 lacp_compare_portid(const struct lacp_portid *a, 947 const struct lacp_portid *b) 948 { 949 return (memcmp(a, b, sizeof(*a))); 950 } 951 #endif 952 953 static uint64_t 954 lacp_aggregator_bandwidth(struct lacp_aggregator *la) 955 { 956 struct lacp_port *lp; 957 uint64_t speed; 958 959 lp = TAILQ_FIRST(&la->la_ports); 960 if (lp == NULL) { 961 return (0); 962 } 963 964 speed = ifmedia_baudrate(lp->lp_media); 965 speed *= la->la_nports; 966 if (speed == 0) { 967 LACP_DPRINTF((lp, "speed 0? media=0x%x nports=%d\n", 968 lp->lp_media, la->la_nports)); 969 } 970 971 return (speed); 972 } 973 974 /* 975 * lacp_select_active_aggregator: select an aggregator to be used to transmit 976 * packets from lagg(4) interface. 
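 *
 * As implemented below, an aggregator replaces the current best if its
 * partner advertises a better (numerically lower) system priority, or if
 * its total bandwidth is higher, or if the speeds tie and it is already
 * the active aggregator; e.g. an aggregator of two 10Gb/s ports (20Gb/s
 * total) is preferred over one of three 1Gb/s ports.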
977 */ 978 979 static void 980 lacp_select_active_aggregator(struct lacp_softc *lsc) 981 { 982 struct lacp_aggregator *la; 983 struct lacp_aggregator *best_la = NULL; 984 uint64_t best_speed = 0; 985 char buf[LACP_LAGIDSTR_MAX+1]; 986 987 LACP_TRACE(NULL); 988 989 TAILQ_FOREACH(la, &lsc->lsc_aggregators, la_q) { 990 uint64_t speed; 991 992 if (la->la_nports == 0) { 993 continue; 994 } 995 996 speed = lacp_aggregator_bandwidth(la); 997 LACP_DPRINTF((NULL, "%s, speed=%jd, nports=%d\n", 998 lacp_format_lagid_aggregator(la, buf, sizeof(buf)), 999 speed, la->la_nports)); 1000 1001 /* 1002 * This aggregator is chosen if the partner has a better 1003 * system priority or, the total aggregated speed is higher 1004 * or, it is already the chosen aggregator 1005 */ 1006 if ((best_la != NULL && LACP_SYS_PRI(la->la_partner) < 1007 LACP_SYS_PRI(best_la->la_partner)) || 1008 speed > best_speed || 1009 (speed == best_speed && 1010 la == lsc->lsc_active_aggregator)) { 1011 best_la = la; 1012 best_speed = speed; 1013 } 1014 } 1015 1016 KASSERT(best_la == NULL || best_la->la_nports > 0, 1017 ("invalid aggregator refcnt")); 1018 KASSERT(best_la == NULL || !TAILQ_EMPTY(&best_la->la_ports), 1019 ("invalid aggregator list")); 1020 1021 if (lsc->lsc_active_aggregator != best_la) { 1022 LACP_DPRINTF((NULL, "active aggregator changed\n")); 1023 LACP_DPRINTF((NULL, "old %s\n", 1024 lacp_format_lagid_aggregator(lsc->lsc_active_aggregator, 1025 buf, sizeof(buf)))); 1026 } else { 1027 LACP_DPRINTF((NULL, "active aggregator not changed\n")); 1028 } 1029 LACP_DPRINTF((NULL, "new %s\n", 1030 lacp_format_lagid_aggregator(best_la, buf, sizeof(buf)))); 1031 1032 if (lsc->lsc_active_aggregator != best_la) { 1033 lsc->lsc_active_aggregator = best_la; 1034 lacp_update_portmap(lsc); 1035 if (best_la) { 1036 lacp_suppress_distributing(lsc, best_la); 1037 } 1038 } 1039 } 1040 1041 /* 1042 * Updated the inactive portmap array with the new list of ports and 1043 * make it live. 1044 */ 1045 static void 1046 lacp_update_portmap(struct lacp_softc *lsc) 1047 { 1048 struct lagg_softc *sc = lsc->lsc_softc; 1049 struct lacp_aggregator *la; 1050 struct lacp_portmap *p; 1051 struct lacp_port *lp; 1052 uint64_t speed; 1053 u_int newmap; 1054 int i; 1055 #ifdef NUMA 1056 int count; 1057 uint8_t domain; 1058 #endif 1059 1060 newmap = lsc->lsc_activemap == 0 ? 
1 : 0; 1061 p = &lsc->lsc_pmap[newmap]; 1062 la = lsc->lsc_active_aggregator; 1063 speed = 0; 1064 bzero(p, sizeof(struct lacp_portmap)); 1065 1066 if (la != NULL && la->la_nports > 0) { 1067 p->pm_count = la->la_nports; 1068 i = 0; 1069 TAILQ_FOREACH(lp, &la->la_ports, lp_dist_q) { 1070 p->pm_map[i++] = lp; 1071 #ifdef NUMA 1072 domain = lp->lp_ifp->if_numa_domain; 1073 if (domain >= MAXMEMDOM) 1074 continue; 1075 count = p->pm_numa[domain].count; 1076 p->pm_numa[domain].map[count] = lp; 1077 p->pm_numa[domain].count++; 1078 #endif 1079 } 1080 KASSERT(i == p->pm_count, ("Invalid port count")); 1081 1082 #ifdef NUMA 1083 for (i = 0; i < MAXMEMDOM; i++) { 1084 if (p->pm_numa[i].count != 0) 1085 p->pm_num_dom++; 1086 } 1087 #endif 1088 speed = lacp_aggregator_bandwidth(la); 1089 } 1090 sc->sc_ifp->if_baudrate = speed; 1091 EVENTHANDLER_INVOKE(ifnet_event, sc->sc_ifp, 1092 IFNET_EVENT_UPDATE_BAUDRATE); 1093 1094 /* switch the active portmap over */ 1095 atomic_store_rel_int(&lsc->lsc_activemap, newmap); 1096 LACP_DPRINTF((NULL, "Set table %d with %d ports\n", 1097 lsc->lsc_activemap, 1098 lsc->lsc_pmap[lsc->lsc_activemap].pm_count)); 1099 } 1100 1101 static uint16_t 1102 lacp_compose_key(struct lacp_port *lp) 1103 { 1104 struct lagg_port *lgp = lp->lp_lagg; 1105 struct lagg_softc *sc = lgp->lp_softc; 1106 u_int media = lp->lp_media; 1107 uint16_t key; 1108 1109 if ((lp->lp_state & LACP_STATE_AGGREGATION) == 0) { 1110 /* 1111 * non-aggregatable links should have unique keys. 1112 * 1113 * XXX this isn't really unique as if_index is 16 bit. 1114 */ 1115 1116 /* bit 0..14: (some bits of) if_index of this port */ 1117 key = lp->lp_ifp->if_index; 1118 /* bit 15: 1 */ 1119 key |= 0x8000; 1120 } else { 1121 u_int subtype = IFM_SUBTYPE(media); 1122 1123 KASSERT(IFM_TYPE(media) == IFM_ETHER, ("invalid media type")); 1124 KASSERT((media & IFM_FDX) != 0, ("aggregating HDX interface")); 1125 1126 /* bit 0..4: IFM_SUBTYPE modulo speed */ 1127 switch (subtype) { 1128 case IFM_10_T: 1129 case IFM_10_2: 1130 case IFM_10_5: 1131 case IFM_10_STP: 1132 case IFM_10_FL: 1133 key = IFM_10_T; 1134 break; 1135 case IFM_100_TX: 1136 case IFM_100_FX: 1137 case IFM_100_T4: 1138 case IFM_100_VG: 1139 case IFM_100_T2: 1140 case IFM_100_T: 1141 case IFM_100_SGMII: 1142 key = IFM_100_TX; 1143 break; 1144 case IFM_1000_SX: 1145 case IFM_1000_LX: 1146 case IFM_1000_CX: 1147 case IFM_1000_T: 1148 case IFM_1000_KX: 1149 case IFM_1000_SGMII: 1150 case IFM_1000_CX_SGMII: 1151 key = IFM_1000_SX; 1152 break; 1153 case IFM_10G_LR: 1154 case IFM_10G_SR: 1155 case IFM_10G_CX4: 1156 case IFM_10G_TWINAX: 1157 case IFM_10G_TWINAX_LONG: 1158 case IFM_10G_LRM: 1159 case IFM_10G_T: 1160 case IFM_10G_KX4: 1161 case IFM_10G_KR: 1162 case IFM_10G_CR1: 1163 case IFM_10G_ER: 1164 case IFM_10G_SFI: 1165 case IFM_10G_AOC: 1166 key = IFM_10G_LR; 1167 break; 1168 case IFM_20G_KR2: 1169 key = IFM_20G_KR2; 1170 break; 1171 case IFM_2500_KX: 1172 case IFM_2500_T: 1173 case IFM_2500_X: 1174 key = IFM_2500_KX; 1175 break; 1176 case IFM_5000_T: 1177 case IFM_5000_KR: 1178 case IFM_5000_KR_S: 1179 case IFM_5000_KR1: 1180 key = IFM_5000_T; 1181 break; 1182 case IFM_50G_PCIE: 1183 case IFM_50G_CR2: 1184 case IFM_50G_KR2: 1185 case IFM_50G_KR4: 1186 case IFM_50G_SR2: 1187 case IFM_50G_LR2: 1188 case IFM_50G_LAUI2_AC: 1189 case IFM_50G_LAUI2: 1190 case IFM_50G_AUI2_AC: 1191 case IFM_50G_AUI2: 1192 case IFM_50G_CP: 1193 case IFM_50G_SR: 1194 case IFM_50G_LR: 1195 case IFM_50G_FR: 1196 case IFM_50G_KR_PAM4: 1197 case IFM_50G_AUI1_AC: 1198 case IFM_50G_AUI1: 
1199 key = IFM_50G_PCIE; 1200 break; 1201 case IFM_56G_R4: 1202 key = IFM_56G_R4; 1203 break; 1204 case IFM_25G_PCIE: 1205 case IFM_25G_CR: 1206 case IFM_25G_KR: 1207 case IFM_25G_SR: 1208 case IFM_25G_LR: 1209 case IFM_25G_ACC: 1210 case IFM_25G_AOC: 1211 case IFM_25G_T: 1212 case IFM_25G_CR_S: 1213 case IFM_25G_CR1: 1214 case IFM_25G_KR_S: 1215 case IFM_25G_AUI: 1216 case IFM_25G_KR1: 1217 key = IFM_25G_PCIE; 1218 break; 1219 case IFM_40G_CR4: 1220 case IFM_40G_SR4: 1221 case IFM_40G_LR4: 1222 case IFM_40G_LM4: 1223 case IFM_40G_XLPPI: 1224 case IFM_40G_KR4: 1225 case IFM_40G_XLAUI: 1226 case IFM_40G_XLAUI_AC: 1227 case IFM_40G_ER4: 1228 key = IFM_40G_CR4; 1229 break; 1230 case IFM_100G_CR4: 1231 case IFM_100G_SR4: 1232 case IFM_100G_KR4: 1233 case IFM_100G_LR4: 1234 case IFM_100G_CAUI4_AC: 1235 case IFM_100G_CAUI4: 1236 case IFM_100G_AUI4_AC: 1237 case IFM_100G_AUI4: 1238 case IFM_100G_CR_PAM4: 1239 case IFM_100G_KR_PAM4: 1240 case IFM_100G_CP2: 1241 case IFM_100G_SR2: 1242 case IFM_100G_DR: 1243 case IFM_100G_KR2_PAM4: 1244 case IFM_100G_CAUI2_AC: 1245 case IFM_100G_CAUI2: 1246 case IFM_100G_AUI2_AC: 1247 case IFM_100G_AUI2: 1248 key = IFM_100G_CR4; 1249 break; 1250 case IFM_200G_CR4_PAM4: 1251 case IFM_200G_SR4: 1252 case IFM_200G_FR4: 1253 case IFM_200G_LR4: 1254 case IFM_200G_DR4: 1255 case IFM_200G_KR4_PAM4: 1256 case IFM_200G_AUI4_AC: 1257 case IFM_200G_AUI4: 1258 case IFM_200G_AUI8_AC: 1259 case IFM_200G_AUI8: 1260 key = IFM_200G_CR4_PAM4; 1261 break; 1262 case IFM_400G_FR8: 1263 case IFM_400G_LR8: 1264 case IFM_400G_DR4: 1265 case IFM_400G_AUI8_AC: 1266 case IFM_400G_AUI8: 1267 key = IFM_400G_FR8; 1268 break; 1269 default: 1270 key = subtype; 1271 break; 1272 } 1273 /* bit 5..14: (some bits of) if_index of lagg device */ 1274 key |= 0x7fe0 & ((sc->sc_ifp->if_index) << 5); 1275 /* bit 15: 0 */ 1276 } 1277 return (htons(key)); 1278 } 1279 1280 static void 1281 lacp_aggregator_addref(struct lacp_softc *lsc, struct lacp_aggregator *la) 1282 { 1283 char buf[LACP_LAGIDSTR_MAX+1]; 1284 1285 LACP_DPRINTF((NULL, "%s: lagid=%s, refcnt %d -> %d\n", 1286 __func__, 1287 lacp_format_lagid(&la->la_actor, &la->la_partner, 1288 buf, sizeof(buf)), 1289 la->la_refcnt, la->la_refcnt + 1)); 1290 1291 KASSERT(la->la_refcnt > 0, ("refcount <= 0")); 1292 la->la_refcnt++; 1293 KASSERT(la->la_refcnt > la->la_nports, ("invalid refcount")); 1294 } 1295 1296 static void 1297 lacp_aggregator_delref(struct lacp_softc *lsc, struct lacp_aggregator *la) 1298 { 1299 char buf[LACP_LAGIDSTR_MAX+1]; 1300 1301 LACP_DPRINTF((NULL, "%s: lagid=%s, refcnt %d -> %d\n", 1302 __func__, 1303 lacp_format_lagid(&la->la_actor, &la->la_partner, 1304 buf, sizeof(buf)), 1305 la->la_refcnt, la->la_refcnt - 1)); 1306 1307 KASSERT(la->la_refcnt > la->la_nports, ("invalid refcnt")); 1308 la->la_refcnt--; 1309 if (la->la_refcnt > 0) { 1310 return; 1311 } 1312 1313 KASSERT(la->la_refcnt == 0, ("refcount not zero")); 1314 KASSERT(lsc->lsc_active_aggregator != la, ("aggregator active")); 1315 1316 TAILQ_REMOVE(&lsc->lsc_aggregators, la, la_q); 1317 1318 free(la, M_DEVBUF); 1319 } 1320 1321 /* 1322 * lacp_aggregator_get: allocate an aggregator. 
1323 */ 1324 1325 static struct lacp_aggregator * 1326 lacp_aggregator_get(struct lacp_softc *lsc, struct lacp_port *lp) 1327 { 1328 struct lacp_aggregator *la; 1329 1330 la = malloc(sizeof(*la), M_DEVBUF, M_NOWAIT); 1331 if (la) { 1332 la->la_refcnt = 1; 1333 la->la_nports = 0; 1334 TAILQ_INIT(&la->la_ports); 1335 la->la_pending = 0; 1336 TAILQ_INSERT_TAIL(&lsc->lsc_aggregators, la, la_q); 1337 } 1338 1339 return (la); 1340 } 1341 1342 /* 1343 * lacp_fill_aggregator_id: setup a newly allocated aggregator from a port. 1344 */ 1345 1346 static void 1347 lacp_fill_aggregator_id(struct lacp_aggregator *la, const struct lacp_port *lp) 1348 { 1349 lacp_fill_aggregator_id_peer(&la->la_partner, &lp->lp_partner); 1350 lacp_fill_aggregator_id_peer(&la->la_actor, &lp->lp_actor); 1351 1352 la->la_actor.lip_state = lp->lp_state & LACP_STATE_AGGREGATION; 1353 } 1354 1355 static void 1356 lacp_fill_aggregator_id_peer(struct lacp_peerinfo *lpi_aggr, 1357 const struct lacp_peerinfo *lpi_port) 1358 { 1359 memset(lpi_aggr, 0, sizeof(*lpi_aggr)); 1360 lpi_aggr->lip_systemid = lpi_port->lip_systemid; 1361 lpi_aggr->lip_key = lpi_port->lip_key; 1362 } 1363 1364 /* 1365 * lacp_aggregator_is_compatible: check if a port can join to an aggregator. 1366 */ 1367 1368 static int 1369 lacp_aggregator_is_compatible(const struct lacp_aggregator *la, 1370 const struct lacp_port *lp) 1371 { 1372 if (!(lp->lp_state & LACP_STATE_AGGREGATION) || 1373 !(lp->lp_partner.lip_state & LACP_STATE_AGGREGATION)) { 1374 return (0); 1375 } 1376 1377 if (!(la->la_actor.lip_state & LACP_STATE_AGGREGATION)) { 1378 return (0); 1379 } 1380 1381 if (!lacp_peerinfo_is_compatible(&la->la_partner, &lp->lp_partner)) { 1382 return (0); 1383 } 1384 1385 if (!lacp_peerinfo_is_compatible(&la->la_actor, &lp->lp_actor)) { 1386 return (0); 1387 } 1388 1389 return (1); 1390 } 1391 1392 static int 1393 lacp_peerinfo_is_compatible(const struct lacp_peerinfo *a, 1394 const struct lacp_peerinfo *b) 1395 { 1396 if (memcmp(&a->lip_systemid, &b->lip_systemid, 1397 sizeof(a->lip_systemid))) { 1398 return (0); 1399 } 1400 1401 if (memcmp(&a->lip_key, &b->lip_key, sizeof(a->lip_key))) { 1402 return (0); 1403 } 1404 1405 return (1); 1406 } 1407 1408 static void 1409 lacp_port_enable(struct lacp_port *lp) 1410 { 1411 lp->lp_state |= LACP_STATE_AGGREGATION; 1412 } 1413 1414 static void 1415 lacp_port_disable(struct lacp_port *lp) 1416 { 1417 lacp_set_mux(lp, LACP_MUX_DETACHED); 1418 1419 lp->lp_state &= ~LACP_STATE_AGGREGATION; 1420 lp->lp_selected = LACP_UNSELECTED; 1421 lacp_sm_rx_record_default(lp); 1422 lp->lp_partner.lip_state &= ~LACP_STATE_AGGREGATION; 1423 lp->lp_state &= ~LACP_STATE_EXPIRED; 1424 } 1425 1426 /* 1427 * lacp_select: select an aggregator. create one if necessary. 1428 */ 1429 static void 1430 lacp_select(struct lacp_port *lp) 1431 { 1432 struct lacp_softc *lsc = lp->lp_lsc; 1433 struct lacp_aggregator *la; 1434 char buf[LACP_LAGIDSTR_MAX+1]; 1435 1436 if (lp->lp_aggregator) { 1437 return; 1438 } 1439 1440 /* If we haven't heard from our peer, skip this step. 
*/ 1441 if (lp->lp_state & LACP_STATE_DEFAULTED) 1442 return; 1443 1444 KASSERT(!LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE), 1445 ("timer_wait_while still active")); 1446 1447 LACP_DPRINTF((lp, "port lagid=%s\n", 1448 lacp_format_lagid(&lp->lp_actor, &lp->lp_partner, 1449 buf, sizeof(buf)))); 1450 1451 TAILQ_FOREACH(la, &lsc->lsc_aggregators, la_q) { 1452 if (lacp_aggregator_is_compatible(la, lp)) { 1453 break; 1454 } 1455 } 1456 1457 if (la == NULL) { 1458 la = lacp_aggregator_get(lsc, lp); 1459 if (la == NULL) { 1460 LACP_DPRINTF((lp, "aggregator creation failed\n")); 1461 1462 /* 1463 * will retry on the next tick. 1464 */ 1465 1466 return; 1467 } 1468 lacp_fill_aggregator_id(la, lp); 1469 LACP_DPRINTF((lp, "aggregator created\n")); 1470 } else { 1471 LACP_DPRINTF((lp, "compatible aggregator found\n")); 1472 if (la->la_refcnt == LACP_MAX_PORTS) 1473 return; 1474 lacp_aggregator_addref(lsc, la); 1475 } 1476 1477 LACP_DPRINTF((lp, "aggregator lagid=%s\n", 1478 lacp_format_lagid(&la->la_actor, &la->la_partner, 1479 buf, sizeof(buf)))); 1480 1481 lp->lp_aggregator = la; 1482 lp->lp_selected = LACP_SELECTED; 1483 } 1484 1485 /* 1486 * lacp_unselect: finish unselect/detach process. 1487 */ 1488 1489 static void 1490 lacp_unselect(struct lacp_port *lp) 1491 { 1492 struct lacp_softc *lsc = lp->lp_lsc; 1493 struct lacp_aggregator *la = lp->lp_aggregator; 1494 1495 KASSERT(!LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE), 1496 ("timer_wait_while still active")); 1497 1498 if (la == NULL) { 1499 return; 1500 } 1501 1502 lp->lp_aggregator = NULL; 1503 lacp_aggregator_delref(lsc, la); 1504 } 1505 1506 /* mux machine */ 1507 1508 static void 1509 lacp_sm_mux(struct lacp_port *lp) 1510 { 1511 struct lagg_port *lgp = lp->lp_lagg; 1512 struct lagg_softc *sc = lgp->lp_softc; 1513 enum lacp_mux_state new_state; 1514 boolean_t p_sync = 1515 (lp->lp_partner.lip_state & LACP_STATE_SYNC) != 0; 1516 boolean_t p_collecting = 1517 (lp->lp_partner.lip_state & LACP_STATE_COLLECTING) != 0; 1518 enum lacp_selected selected = lp->lp_selected; 1519 struct lacp_aggregator *la; 1520 1521 if (V_lacp_debug > 1) 1522 lacp_dprintf(lp, "%s: state= 0x%x, selected= 0x%x, " 1523 "p_sync= 0x%x, p_collecting= 0x%x\n", __func__, 1524 lp->lp_mux_state, selected, p_sync, p_collecting); 1525 1526 re_eval: 1527 la = lp->lp_aggregator; 1528 KASSERT(lp->lp_mux_state == LACP_MUX_DETACHED || la != NULL, 1529 ("MUX not detached")); 1530 new_state = lp->lp_mux_state; 1531 switch (lp->lp_mux_state) { 1532 case LACP_MUX_DETACHED: 1533 if (selected != LACP_UNSELECTED) { 1534 new_state = LACP_MUX_WAITING; 1535 } 1536 break; 1537 case LACP_MUX_WAITING: 1538 KASSERT(la->la_pending > 0 || 1539 !LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE), 1540 ("timer_wait_while still active")); 1541 if (selected == LACP_SELECTED && la->la_pending == 0) { 1542 new_state = LACP_MUX_ATTACHED; 1543 } else if (selected == LACP_UNSELECTED) { 1544 new_state = LACP_MUX_DETACHED; 1545 } 1546 break; 1547 case LACP_MUX_ATTACHED: 1548 if (selected == LACP_SELECTED && p_sync) { 1549 new_state = LACP_MUX_COLLECTING; 1550 } else if (selected != LACP_SELECTED) { 1551 new_state = LACP_MUX_DETACHED; 1552 } 1553 break; 1554 case LACP_MUX_COLLECTING: 1555 if (selected == LACP_SELECTED && p_sync && p_collecting) { 1556 new_state = LACP_MUX_DISTRIBUTING; 1557 } else if (selected != LACP_SELECTED || !p_sync) { 1558 new_state = LACP_MUX_ATTACHED; 1559 } 1560 break; 1561 case LACP_MUX_DISTRIBUTING: 1562 if (selected != LACP_SELECTED || !p_sync || !p_collecting) { 1563 new_state = 
LACP_MUX_COLLECTING; 1564 lacp_dprintf(lp, "Interface stopped DISTRIBUTING, possible flapping\n"); 1565 sc->sc_flapping++; 1566 } 1567 break; 1568 default: 1569 panic("%s: unknown state", __func__); 1570 } 1571 1572 if (lp->lp_mux_state == new_state) { 1573 return; 1574 } 1575 1576 lacp_set_mux(lp, new_state); 1577 goto re_eval; 1578 } 1579 1580 static void 1581 lacp_set_mux(struct lacp_port *lp, enum lacp_mux_state new_state) 1582 { 1583 struct lacp_aggregator *la = lp->lp_aggregator; 1584 1585 if (lp->lp_mux_state == new_state) { 1586 return; 1587 } 1588 1589 switch (new_state) { 1590 case LACP_MUX_DETACHED: 1591 lp->lp_state &= ~LACP_STATE_SYNC; 1592 lacp_disable_distributing(lp); 1593 lacp_disable_collecting(lp); 1594 lacp_sm_assert_ntt(lp); 1595 /* cancel timer */ 1596 if (LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE)) { 1597 KASSERT(la->la_pending > 0, 1598 ("timer_wait_while not active")); 1599 la->la_pending--; 1600 } 1601 LACP_TIMER_DISARM(lp, LACP_TIMER_WAIT_WHILE); 1602 lacp_unselect(lp); 1603 break; 1604 case LACP_MUX_WAITING: 1605 LACP_TIMER_ARM(lp, LACP_TIMER_WAIT_WHILE, 1606 LACP_AGGREGATE_WAIT_TIME); 1607 la->la_pending++; 1608 break; 1609 case LACP_MUX_ATTACHED: 1610 lp->lp_state |= LACP_STATE_SYNC; 1611 lacp_disable_collecting(lp); 1612 lacp_sm_assert_ntt(lp); 1613 break; 1614 case LACP_MUX_COLLECTING: 1615 lacp_enable_collecting(lp); 1616 lacp_disable_distributing(lp); 1617 lacp_sm_assert_ntt(lp); 1618 break; 1619 case LACP_MUX_DISTRIBUTING: 1620 lacp_enable_distributing(lp); 1621 break; 1622 default: 1623 panic("%s: unknown state", __func__); 1624 } 1625 1626 LACP_DPRINTF((lp, "mux_state %d -> %d\n", lp->lp_mux_state, new_state)); 1627 1628 lp->lp_mux_state = new_state; 1629 } 1630 1631 static void 1632 lacp_sm_mux_timer(struct lacp_port *lp) 1633 { 1634 struct lacp_aggregator *la = lp->lp_aggregator; 1635 char buf[LACP_LAGIDSTR_MAX+1]; 1636 1637 KASSERT(la->la_pending > 0, ("no pending event")); 1638 1639 LACP_DPRINTF((lp, "%s: aggregator %s, pending %d -> %d\n", __func__, 1640 lacp_format_lagid(&la->la_actor, &la->la_partner, 1641 buf, sizeof(buf)), 1642 la->la_pending, la->la_pending - 1)); 1643 1644 la->la_pending--; 1645 } 1646 1647 /* periodic transmit machine */ 1648 1649 static void 1650 lacp_sm_ptx_update_timeout(struct lacp_port *lp, uint8_t oldpstate) 1651 { 1652 if (LACP_STATE_EQ(oldpstate, lp->lp_partner.lip_state, 1653 LACP_STATE_TIMEOUT)) { 1654 return; 1655 } 1656 1657 LACP_DPRINTF((lp, "partner timeout changed\n")); 1658 1659 /* 1660 * FAST_PERIODIC -> SLOW_PERIODIC 1661 * or 1662 * SLOW_PERIODIC (-> PERIODIC_TX) -> FAST_PERIODIC 1663 * 1664 * let lacp_sm_ptx_tx_schedule to update timeout. 1665 */ 1666 1667 LACP_TIMER_DISARM(lp, LACP_TIMER_PERIODIC); 1668 1669 /* 1670 * if timeout has been shortened, assert NTT. 1671 */ 1672 1673 if ((lp->lp_partner.lip_state & LACP_STATE_TIMEOUT)) { 1674 lacp_sm_assert_ntt(lp); 1675 } 1676 } 1677 1678 static void 1679 lacp_sm_ptx_tx_schedule(struct lacp_port *lp) 1680 { 1681 int timeout; 1682 1683 if (!(lp->lp_state & LACP_STATE_ACTIVITY) && 1684 !(lp->lp_partner.lip_state & LACP_STATE_ACTIVITY)) { 1685 /* 1686 * NO_PERIODIC 1687 */ 1688 1689 LACP_TIMER_DISARM(lp, LACP_TIMER_PERIODIC); 1690 return; 1691 } 1692 1693 if (LACP_TIMER_ISARMED(lp, LACP_TIMER_PERIODIC)) { 1694 return; 1695 } 1696 1697 timeout = (lp->lp_partner.lip_state & LACP_STATE_TIMEOUT) ? 
1698 LACP_FAST_PERIODIC_TIME : LACP_SLOW_PERIODIC_TIME; 1699 1700 LACP_TIMER_ARM(lp, LACP_TIMER_PERIODIC, timeout); 1701 } 1702 1703 static void 1704 lacp_sm_ptx_timer(struct lacp_port *lp) 1705 { 1706 lacp_sm_assert_ntt(lp); 1707 } 1708 1709 static void 1710 lacp_sm_rx(struct lacp_port *lp, const struct lacpdu *du) 1711 { 1712 int timeout; 1713 1714 /* 1715 * check LACP_DISABLED first 1716 */ 1717 1718 if (!(lp->lp_state & LACP_STATE_AGGREGATION)) { 1719 return; 1720 } 1721 1722 /* 1723 * check loopback condition. 1724 */ 1725 1726 if (!lacp_compare_systemid(&du->ldu_actor.lip_systemid, 1727 &lp->lp_actor.lip_systemid)) { 1728 return; 1729 } 1730 1731 /* 1732 * EXPIRED, DEFAULTED, CURRENT -> CURRENT 1733 */ 1734 1735 microuptime(&lp->lp_last_lacpdu_rx); 1736 lacp_sm_rx_update_selected(lp, du); 1737 lacp_sm_rx_update_ntt(lp, du); 1738 lacp_sm_rx_record_pdu(lp, du); 1739 1740 timeout = (lp->lp_state & LACP_STATE_TIMEOUT) ? 1741 LACP_SHORT_TIMEOUT_TIME : LACP_LONG_TIMEOUT_TIME; 1742 LACP_TIMER_ARM(lp, LACP_TIMER_CURRENT_WHILE, timeout); 1743 1744 lp->lp_state &= ~LACP_STATE_EXPIRED; 1745 1746 /* 1747 * kick transmit machine without waiting the next tick. 1748 */ 1749 1750 lacp_sm_tx(lp); 1751 } 1752 1753 static void 1754 lacp_sm_rx_set_expired(struct lacp_port *lp) 1755 { 1756 lp->lp_partner.lip_state &= ~LACP_STATE_SYNC; 1757 lp->lp_partner.lip_state |= LACP_STATE_TIMEOUT; 1758 LACP_TIMER_ARM(lp, LACP_TIMER_CURRENT_WHILE, LACP_SHORT_TIMEOUT_TIME); 1759 lp->lp_state |= LACP_STATE_EXPIRED; 1760 } 1761 1762 static void 1763 lacp_sm_rx_timer(struct lacp_port *lp) 1764 { 1765 if ((lp->lp_state & LACP_STATE_EXPIRED) == 0) { 1766 /* CURRENT -> EXPIRED */ 1767 LACP_DPRINTF((lp, "%s: CURRENT -> EXPIRED\n", __func__)); 1768 lacp_sm_rx_set_expired(lp); 1769 } else { 1770 /* EXPIRED -> DEFAULTED */ 1771 LACP_DPRINTF((lp, "%s: EXPIRED -> DEFAULTED\n", __func__)); 1772 lacp_sm_rx_update_default_selected(lp); 1773 lacp_sm_rx_record_default(lp); 1774 lp->lp_state &= ~LACP_STATE_EXPIRED; 1775 } 1776 } 1777 1778 static void 1779 lacp_sm_rx_record_pdu(struct lacp_port *lp, const struct lacpdu *du) 1780 { 1781 boolean_t active; 1782 uint8_t oldpstate; 1783 char buf[LACP_STATESTR_MAX+1]; 1784 1785 LACP_TRACE(lp); 1786 1787 oldpstate = lp->lp_partner.lip_state; 1788 1789 active = (du->ldu_actor.lip_state & LACP_STATE_ACTIVITY) 1790 || ((lp->lp_state & LACP_STATE_ACTIVITY) && 1791 (du->ldu_partner.lip_state & LACP_STATE_ACTIVITY)); 1792 1793 lp->lp_partner = du->ldu_actor; 1794 if (active && 1795 ((LACP_STATE_EQ(lp->lp_state, du->ldu_partner.lip_state, 1796 LACP_STATE_AGGREGATION) && 1797 !lacp_compare_peerinfo(&lp->lp_actor, &du->ldu_partner)) 1798 || (du->ldu_partner.lip_state & LACP_STATE_AGGREGATION) == 0)) { 1799 /* 1800 * XXX Maintain legacy behavior of leaving the 1801 * LACP_STATE_SYNC bit unchanged from the partner's 1802 * advertisement if lsc_strict_mode is false. 1803 * TODO: We should re-examine the concept of the "strict mode" 1804 * to ensure it makes sense to maintain a non-strict mode. 
1805 */ 1806 if (lp->lp_lsc->lsc_strict_mode) 1807 lp->lp_partner.lip_state |= LACP_STATE_SYNC; 1808 } else { 1809 lp->lp_partner.lip_state &= ~LACP_STATE_SYNC; 1810 } 1811 1812 lp->lp_state &= ~LACP_STATE_DEFAULTED; 1813 1814 if (oldpstate != lp->lp_partner.lip_state) { 1815 LACP_DPRINTF((lp, "old pstate %s\n", 1816 lacp_format_state(oldpstate, buf, sizeof(buf)))); 1817 LACP_DPRINTF((lp, "new pstate %s\n", 1818 lacp_format_state(lp->lp_partner.lip_state, buf, 1819 sizeof(buf)))); 1820 } 1821 1822 lacp_sm_ptx_update_timeout(lp, oldpstate); 1823 } 1824 1825 static void 1826 lacp_sm_rx_update_ntt(struct lacp_port *lp, const struct lacpdu *du) 1827 { 1828 1829 LACP_TRACE(lp); 1830 1831 if (lacp_compare_peerinfo(&lp->lp_actor, &du->ldu_partner) || 1832 !LACP_STATE_EQ(lp->lp_state, du->ldu_partner.lip_state, 1833 LACP_STATE_ACTIVITY | LACP_STATE_SYNC | LACP_STATE_AGGREGATION)) { 1834 LACP_DPRINTF((lp, "%s: assert ntt\n", __func__)); 1835 lacp_sm_assert_ntt(lp); 1836 } 1837 } 1838 1839 static void 1840 lacp_sm_rx_record_default(struct lacp_port *lp) 1841 { 1842 uint8_t oldpstate; 1843 1844 LACP_TRACE(lp); 1845 1846 oldpstate = lp->lp_partner.lip_state; 1847 if (lp->lp_lsc->lsc_strict_mode) 1848 lp->lp_partner = lacp_partner_admin_strict; 1849 else 1850 lp->lp_partner = lacp_partner_admin_optimistic; 1851 lp->lp_state |= LACP_STATE_DEFAULTED; 1852 lacp_sm_ptx_update_timeout(lp, oldpstate); 1853 } 1854 1855 static void 1856 lacp_sm_rx_update_selected_from_peerinfo(struct lacp_port *lp, 1857 const struct lacp_peerinfo *info) 1858 { 1859 1860 LACP_TRACE(lp); 1861 1862 if (lacp_compare_peerinfo(&lp->lp_partner, info) || 1863 !LACP_STATE_EQ(lp->lp_partner.lip_state, info->lip_state, 1864 LACP_STATE_AGGREGATION)) { 1865 lp->lp_selected = LACP_UNSELECTED; 1866 /* mux machine will clean up lp->lp_aggregator */ 1867 } 1868 } 1869 1870 static void 1871 lacp_sm_rx_update_selected(struct lacp_port *lp, const struct lacpdu *du) 1872 { 1873 1874 LACP_TRACE(lp); 1875 1876 lacp_sm_rx_update_selected_from_peerinfo(lp, &du->ldu_actor); 1877 } 1878 1879 static void 1880 lacp_sm_rx_update_default_selected(struct lacp_port *lp) 1881 { 1882 1883 LACP_TRACE(lp); 1884 1885 if (lp->lp_lsc->lsc_strict_mode) 1886 lacp_sm_rx_update_selected_from_peerinfo(lp, 1887 &lacp_partner_admin_strict); 1888 else 1889 lacp_sm_rx_update_selected_from_peerinfo(lp, 1890 &lacp_partner_admin_optimistic); 1891 } 1892 1893 /* transmit machine */ 1894 1895 static void 1896 lacp_sm_tx(struct lacp_port *lp) 1897 { 1898 int error = 0; 1899 1900 if (!(lp->lp_state & LACP_STATE_AGGREGATION) 1901 #if 1 1902 || (!(lp->lp_state & LACP_STATE_ACTIVITY) 1903 && !(lp->lp_partner.lip_state & LACP_STATE_ACTIVITY)) 1904 #endif 1905 ) { 1906 lp->lp_flags &= ~LACP_PORT_NTT; 1907 } 1908 1909 if (!(lp->lp_flags & LACP_PORT_NTT)) { 1910 return; 1911 } 1912 1913 /* Rate limit to 3 PDUs per LACP_FAST_PERIODIC_TIME */ 1914 if (ppsratecheck(&lp->lp_last_lacpdu, &lp->lp_lacpdu_sent, 1915 (3 / LACP_FAST_PERIODIC_TIME)) == 0) { 1916 LACP_DPRINTF((lp, "rate limited pdu\n")); 1917 return; 1918 } 1919 1920 if (((1 << lp->lp_ifp->if_dunit) & lp->lp_lsc->lsc_debug.lsc_tx_test) == 0) { 1921 error = lacp_xmit_lacpdu(lp); 1922 } else { 1923 LACP_TPRINTF((lp, "Dropping TX PDU\n")); 1924 } 1925 1926 if (error == 0) { 1927 lp->lp_flags &= ~LACP_PORT_NTT; 1928 } else { 1929 LACP_DPRINTF((lp, "lacpdu transmit failure, error %d\n", 1930 error)); 1931 } 1932 } 1933 1934 static void 1935 lacp_sm_assert_ntt(struct lacp_port *lp) 1936 { 1937 1938 lp->lp_flags |= LACP_PORT_NTT; 1939 } 
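/*
 * lacp_run_timers() below implements the per-port timers as one-second
 * countdowns driven from lacp_tick().  The fragment under #if 0 is a
 * stand-alone illustration of that pattern (an array of counters,
 * decremented once per tick, callback on expiry); the toy_* names are
 * invented for the example and are not part of this file.
 */
#if 0
#include <stdio.h>

#define TOY_NTIMER	3

struct toy_port;
typedef void (*toy_timer_func_t)(struct toy_port *);

struct toy_port {
	int	timer[TOY_NTIMER];	/* seconds until expiry, 0 = disarmed */
};

static void
toy_expired(struct toy_port *p)
{
	(void)p;
	printf("timer expired\n");
}

static const toy_timer_func_t toy_timer_funcs[TOY_NTIMER] = {
	toy_expired, toy_expired, toy_expired,
};

/* Called once per second, like lacp_run_timers() from lacp_tick(). */
static void
toy_run_timers(struct toy_port *p)
{
	int i;

	for (i = 0; i < TOY_NTIMER; i++) {
		if (p->timer[i] == 0)
			continue;		/* disarmed */
		if (--p->timer[i] == 0 && toy_timer_funcs[i] != NULL)
			(*toy_timer_funcs[i])(p);
	}
}

int
main(void)
{
	struct toy_port p = { .timer = { 2, 0, 1 } };
	int tick;

	for (tick = 0; tick < 3; tick++)
		toy_run_timers(&p);	/* timer 2 fires on the first pass, timer 0 on the second */
	return (0);
}
#endif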
static void
lacp_run_timers(struct lacp_port *lp)
{
	int i;
	struct timeval time_diff;

	for (i = 0; i < LACP_NTIMER; i++) {
		KASSERT(lp->lp_timer[i] >= 0,
		    ("invalid timer value %d", lp->lp_timer[i]));
		if (lp->lp_timer[i] == 0) {
			continue;
		} else {
			if (i == LACP_TIMER_CURRENT_WHILE) {
				microuptime(&time_diff);
				timevalsub(&time_diff, &lp->lp_last_lacpdu_rx);
				if (time_diff.tv_sec) {
					/* At least one sec has elapsed since last LACP packet. */
					--lp->lp_timer[i];
				}
			} else {
				--lp->lp_timer[i];
			}

			if ((lp->lp_timer[i] <= 0) && (lacp_timer_funcs[i])) {
				(*lacp_timer_funcs[i])(lp);
			}
		}
	}
}

static int
lacp_marker_input(struct lacp_port *lp, struct mbuf *m)
{
	struct lacp_softc *lsc = lp->lp_lsc;
	struct lagg_port *lgp = lp->lp_lagg;
	struct lacp_port *lp2;
	struct markerdu *mdu;
	int error = 0;
	int pending = 0;

	if (m->m_pkthdr.len != sizeof(*mdu)) {
		goto bad;
	}

	if ((m->m_flags & M_MCAST) == 0) {
		goto bad;
	}

	if (m->m_len < sizeof(*mdu)) {
		m = m_pullup(m, sizeof(*mdu));
		if (m == NULL) {
			return (ENOMEM);
		}
	}

	mdu = mtod(m, struct markerdu *);

	if (memcmp(&mdu->mdu_eh.ether_dhost,
	    &ethermulticastaddr_slowprotocols, ETHER_ADDR_LEN)) {
		goto bad;
	}

	if (mdu->mdu_sph.sph_version != 1) {
		goto bad;
	}

	switch (mdu->mdu_tlv.tlv_type) {
	case MARKER_TYPE_INFO:
		if (tlv_check(mdu, sizeof(*mdu), &mdu->mdu_tlv,
		    marker_info_tlv_template, TRUE)) {
			goto bad;
		}
		mdu->mdu_tlv.tlv_type = MARKER_TYPE_RESPONSE;
		memcpy(&mdu->mdu_eh.ether_dhost,
		    &ethermulticastaddr_slowprotocols, ETHER_ADDR_LEN);
		memcpy(&mdu->mdu_eh.ether_shost,
		    lgp->lp_lladdr, ETHER_ADDR_LEN);
		error = lagg_enqueue(lp->lp_ifp, m);
		break;

	case MARKER_TYPE_RESPONSE:
		if (tlv_check(mdu, sizeof(*mdu), &mdu->mdu_tlv,
		    marker_response_tlv_template, TRUE)) {
			goto bad;
		}
		LACP_DPRINTF((lp, "marker response, port=%u, sys=%6D, id=%u\n",
		    ntohs(mdu->mdu_info.mi_rq_port), mdu->mdu_info.mi_rq_system,
		    ":", ntohl(mdu->mdu_info.mi_rq_xid)));

		/* Verify that it is the last marker we sent out */
		if (memcmp(&mdu->mdu_info, &lp->lp_marker,
		    sizeof(struct lacp_markerinfo)))
			goto bad;

		LACP_LOCK(lsc);
		lp->lp_flags &= ~LACP_PORT_MARK;

		if (lsc->lsc_suppress_distributing) {
			/* Check if any ports are waiting for a response */
			LIST_FOREACH(lp2, &lsc->lsc_ports, lp_next) {
				if (lp2->lp_flags & LACP_PORT_MARK) {
					pending = 1;
					break;
				}
			}

			if (pending == 0) {
				/* All interface queues are clear */
				LACP_DPRINTF((NULL, "queue flush complete\n"));
				lsc->lsc_suppress_distributing = FALSE;
			}
		}
		LACP_UNLOCK(lsc);
		m_freem(m);
		break;

	default:
		goto bad;
	}

	return (error);

bad:
	LACP_DPRINTF((lp, "bad marker frame\n"));
	m_freem(m);
	return (EINVAL);
}

static int
tlv_check(const void *p, size_t size, const struct tlvhdr *tlv,
    const struct tlv_template *tmpl, boolean_t check_type)
{
	while (/* CONSTCOND */ 1) {
		if ((const char *)tlv - (const char *)p + sizeof(*tlv) > size) {
			return (EINVAL);
		}
		if ((check_type && tlv->tlv_type != tmpl->tmpl_type) ||
		    tlv->tlv_length !=
tmpl->tmpl_length) { 2079 return (EINVAL); 2080 } 2081 if (tmpl->tmpl_type == 0) { 2082 break; 2083 } 2084 tlv = (const struct tlvhdr *) 2085 ((const char *)tlv + tlv->tlv_length); 2086 tmpl++; 2087 } 2088 2089 return (0); 2090 } 2091 2092 /* Debugging */ 2093 const char * 2094 lacp_format_mac(const uint8_t *mac, char *buf, size_t buflen) 2095 { 2096 snprintf(buf, buflen, "%02X-%02X-%02X-%02X-%02X-%02X", 2097 (int)mac[0], 2098 (int)mac[1], 2099 (int)mac[2], 2100 (int)mac[3], 2101 (int)mac[4], 2102 (int)mac[5]); 2103 2104 return (buf); 2105 } 2106 2107 const char * 2108 lacp_format_systemid(const struct lacp_systemid *sysid, 2109 char *buf, size_t buflen) 2110 { 2111 char macbuf[LACP_MACSTR_MAX+1]; 2112 2113 snprintf(buf, buflen, "%04X,%s", 2114 ntohs(sysid->lsi_prio), 2115 lacp_format_mac(sysid->lsi_mac, macbuf, sizeof(macbuf))); 2116 2117 return (buf); 2118 } 2119 2120 const char * 2121 lacp_format_portid(const struct lacp_portid *portid, char *buf, size_t buflen) 2122 { 2123 snprintf(buf, buflen, "%04X,%04X", 2124 ntohs(portid->lpi_prio), 2125 ntohs(portid->lpi_portno)); 2126 2127 return (buf); 2128 } 2129 2130 const char * 2131 lacp_format_partner(const struct lacp_peerinfo *peer, char *buf, size_t buflen) 2132 { 2133 char sysid[LACP_SYSTEMIDSTR_MAX+1]; 2134 char portid[LACP_PORTIDSTR_MAX+1]; 2135 2136 snprintf(buf, buflen, "(%s,%04X,%s)", 2137 lacp_format_systemid(&peer->lip_systemid, sysid, sizeof(sysid)), 2138 ntohs(peer->lip_key), 2139 lacp_format_portid(&peer->lip_portid, portid, sizeof(portid))); 2140 2141 return (buf); 2142 } 2143 2144 const char * 2145 lacp_format_lagid(const struct lacp_peerinfo *a, 2146 const struct lacp_peerinfo *b, char *buf, size_t buflen) 2147 { 2148 char astr[LACP_PARTNERSTR_MAX+1]; 2149 char bstr[LACP_PARTNERSTR_MAX+1]; 2150 2151 #if 0 2152 /* 2153 * there's a convention to display small numbered peer 2154 * in the left. 2155 */ 2156 2157 if (lacp_compare_peerinfo(a, b) > 0) { 2158 const struct lacp_peerinfo *t; 2159 2160 t = a; 2161 a = b; 2162 b = t; 2163 } 2164 #endif 2165 2166 snprintf(buf, buflen, "[%s,%s]", 2167 lacp_format_partner(a, astr, sizeof(astr)), 2168 lacp_format_partner(b, bstr, sizeof(bstr))); 2169 2170 return (buf); 2171 } 2172 2173 const char * 2174 lacp_format_lagid_aggregator(const struct lacp_aggregator *la, 2175 char *buf, size_t buflen) 2176 { 2177 if (la == NULL) { 2178 return ("(none)"); 2179 } 2180 2181 return (lacp_format_lagid(&la->la_actor, &la->la_partner, buf, buflen)); 2182 } 2183 2184 const char * 2185 lacp_format_state(uint8_t state, char *buf, size_t buflen) 2186 { 2187 snprintf(buf, buflen, "%b", state, LACP_STATE_BITS); 2188 return (buf); 2189 } 2190 2191 static void 2192 lacp_dump_lacpdu(const struct lacpdu *du) 2193 { 2194 char buf[LACP_PARTNERSTR_MAX+1]; 2195 char buf2[LACP_STATESTR_MAX+1]; 2196 2197 printf("actor=%s\n", 2198 lacp_format_partner(&du->ldu_actor, buf, sizeof(buf))); 2199 printf("actor.state=%s\n", 2200 lacp_format_state(du->ldu_actor.lip_state, buf2, sizeof(buf2))); 2201 printf("partner=%s\n", 2202 lacp_format_partner(&du->ldu_partner, buf, sizeof(buf))); 2203 printf("partner.state=%s\n", 2204 lacp_format_state(du->ldu_partner.lip_state, buf2, sizeof(buf2))); 2205 2206 printf("maxdelay=%d\n", ntohs(du->ldu_collector.lci_maxdelay)); 2207 } 2208 2209 static void 2210 lacp_dprintf(const struct lacp_port *lp, const char *fmt, ...) 2211 { 2212 va_list va; 2213 2214 if (lp) { 2215 printf("%s: ", lp->lp_ifp->if_xname); 2216 } 2217 2218 va_start(va, fmt); 2219 vprintf(fmt, va); 2220 va_end(va); 2221 } 2222
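/*
 * For reference, the fragment below (not compiled) is a self-contained
 * userspace version of the TLV walk performed by tlv_check() above: each
 * (type, length) header is validated against a template describing the
 * expected sequence, stopping at the terminator entry.  The toy_* names
 * are invented for the example; only the walking logic mirrors tlv_check().
 */
#if 0
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct toy_tlvhdr {
	uint8_t	tlv_type;
	uint8_t	tlv_length;	/* includes this two-byte header */
};

struct toy_tlv_template {
	int	tmpl_type;
	int	tmpl_length;
};

/* Same walk as tlv_check(), with type checking always enabled. */
static int
toy_tlv_check(const void *p, size_t size, const struct toy_tlvhdr *tlv,
    const struct toy_tlv_template *tmpl)
{
	for (;;) {
		if ((const char *)tlv - (const char *)p + sizeof(*tlv) > size)
			return (EINVAL);
		if (tlv->tlv_type != tmpl->tmpl_type ||
		    tlv->tlv_length != tmpl->tmpl_length)
			return (EINVAL);
		if (tmpl->tmpl_type == 0)
			break;
		tlv = (const struct toy_tlvhdr *)
		    ((const char *)tlv + tlv->tlv_length);
		tmpl++;
	}
	return (0);
}

int
main(void)
{
	/* One 4-byte info TLV (type 1) followed by a terminator. */
	static const uint8_t pdu[] = { 0x01, 0x04, 0xaa, 0xbb, 0x00, 0x00 };
	static const struct toy_tlv_template tmpl[] = {
		{ 1, 4 },
		{ 0, 0 },
	};

	printf("tlv_check: %d\n", toy_tlv_check(pdu, sizeof(pdu),
	    (const struct toy_tlvhdr *)pdu, tmpl));
	return (0);
}
#endif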