1 /* $NetBSD: ieee8023ad_lacp.c,v 1.3 2005/12/11 12:24:54 christos Exp $ */ 2 3 /*- 4 * SPDX-License-Identifier: BSD-2-Clause 5 * 6 * Copyright (c)2005 YAMAMOTO Takashi, 7 * Copyright (c)2008 Andrew Thompson <thompsa@FreeBSD.org> 8 * All rights reserved. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 29 * SUCH DAMAGE. 30 */ 31 32 #include <sys/cdefs.h> 33 #include "opt_kern_tls.h" 34 #include "opt_ratelimit.h" 35 36 #include <sys/param.h> 37 #include <sys/callout.h> 38 #include <sys/eventhandler.h> 39 #include <sys/mbuf.h> 40 #include <sys/systm.h> 41 #include <sys/malloc.h> 42 #include <sys/kernel.h> /* hz */ 43 #include <sys/socket.h> /* for net/if.h */ 44 #include <sys/sockio.h> 45 #include <sys/stdarg.h> 46 #include <sys/sysctl.h> 47 #include <sys/lock.h> 48 #include <sys/rwlock.h> 49 #include <sys/taskqueue.h> 50 #include <sys/time.h> 51 52 #include <net/if.h> 53 #include <net/if_var.h> 54 #include <net/if_private.h> 55 #include <net/if_dl.h> 56 #include <net/ethernet.h> 57 #include <net/infiniband.h> 58 #include <net/if_media.h> 59 #include <net/if_types.h> 60 61 #include <net/if_lagg.h> 62 #include <net/ieee8023ad_lacp.h> 63 64 /* 65 * actor system priority and port priority. 66 * XXX should be configurable. 
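 * Lower numeric values win priority comparisons; both are fixed at the
 * conventional mid-range value 0x8000 below.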
67 */ 68 69 #define LACP_SYSTEM_PRIO 0x8000 70 #define LACP_PORT_PRIO 0x8000 71 72 const uint8_t ethermulticastaddr_slowprotocols[ETHER_ADDR_LEN] = 73 { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x02 }; 74 75 static const struct tlv_template lacp_info_tlv_template[] = { 76 { LACP_TYPE_ACTORINFO, 77 sizeof(struct tlvhdr) + sizeof(struct lacp_peerinfo) }, 78 { LACP_TYPE_PARTNERINFO, 79 sizeof(struct tlvhdr) + sizeof(struct lacp_peerinfo) }, 80 { LACP_TYPE_COLLECTORINFO, 81 sizeof(struct tlvhdr) + sizeof(struct lacp_collectorinfo) }, 82 { 0, 0 }, 83 }; 84 85 static const struct tlv_template marker_info_tlv_template[] = { 86 { MARKER_TYPE_INFO, 87 sizeof(struct tlvhdr) + sizeof(struct lacp_markerinfo) }, 88 { 0, 0 }, 89 }; 90 91 static const struct tlv_template marker_response_tlv_template[] = { 92 { MARKER_TYPE_RESPONSE, 93 sizeof(struct tlvhdr) + sizeof(struct lacp_markerinfo) }, 94 { 0, 0 }, 95 }; 96 97 typedef void (*lacp_timer_func_t)(struct lacp_port *); 98 99 static void lacp_fill_actorinfo(struct lacp_port *, struct lacp_peerinfo *); 100 static void lacp_fill_markerinfo(struct lacp_port *, 101 struct lacp_markerinfo *); 102 103 static uint64_t lacp_aggregator_bandwidth(struct lacp_aggregator *); 104 static void lacp_suppress_distributing(struct lacp_softc *, 105 struct lacp_aggregator *); 106 static void lacp_transit_expire(void *); 107 static void lacp_update_portmap(struct lacp_softc *); 108 static void lacp_select_active_aggregator(struct lacp_softc *); 109 static uint16_t lacp_compose_key(struct lacp_port *); 110 static int tlv_check(const void *, size_t, const struct tlvhdr *, 111 const struct tlv_template *, boolean_t); 112 static void lacp_tick(void *); 113 114 static void lacp_fill_aggregator_id(struct lacp_aggregator *, 115 const struct lacp_port *); 116 static void lacp_fill_aggregator_id_peer(struct lacp_peerinfo *, 117 const struct lacp_peerinfo *); 118 static bool lacp_aggregator_is_compatible(const struct lacp_aggregator *, 119 const struct lacp_port *); 120 static bool lacp_peerinfo_is_compatible(const struct lacp_peerinfo *, 121 const struct lacp_peerinfo *); 122 123 static struct lacp_aggregator *lacp_aggregator_get(struct lacp_softc *, 124 struct lacp_port *); 125 static void lacp_aggregator_addref(struct lacp_softc *, 126 struct lacp_aggregator *); 127 static void lacp_aggregator_delref(struct lacp_softc *, 128 struct lacp_aggregator *); 129 130 /* receive machine */ 131 132 static int lacp_pdu_input(struct lacp_port *, struct mbuf *); 133 static int lacp_marker_input(struct lacp_port *, struct mbuf *); 134 static void lacp_sm_rx(struct lacp_port *, const struct lacpdu *); 135 static void lacp_sm_rx_timer(struct lacp_port *); 136 static void lacp_sm_rx_set_expired(struct lacp_port *); 137 static void lacp_sm_rx_update_ntt(struct lacp_port *, 138 const struct lacpdu *); 139 static void lacp_sm_rx_record_pdu(struct lacp_port *, 140 const struct lacpdu *); 141 static void lacp_sm_rx_update_selected(struct lacp_port *, 142 const struct lacpdu *); 143 static void lacp_sm_rx_record_default(struct lacp_port *); 144 static void lacp_sm_rx_update_default_selected(struct lacp_port *); 145 static void lacp_sm_rx_update_selected_from_peerinfo(struct lacp_port *, 146 const struct lacp_peerinfo *); 147 148 /* mux machine */ 149 150 static void lacp_sm_mux(struct lacp_port *); 151 static void lacp_set_mux(struct lacp_port *, enum lacp_mux_state); 152 static void lacp_sm_mux_timer(struct lacp_port *); 153 154 /* periodic transmit machine */ 155 156 static void lacp_sm_ptx_update_timeout(struct 
lacp_port *, uint8_t); 157 static void lacp_sm_ptx_tx_schedule(struct lacp_port *); 158 static void lacp_sm_ptx_timer(struct lacp_port *); 159 160 /* transmit machine */ 161 162 static void lacp_sm_tx(struct lacp_port *); 163 static void lacp_sm_assert_ntt(struct lacp_port *); 164 165 static void lacp_run_timers(struct lacp_port *); 166 static int lacp_compare_peerinfo(const struct lacp_peerinfo *, 167 const struct lacp_peerinfo *); 168 static int lacp_compare_systemid(const struct lacp_systemid *, 169 const struct lacp_systemid *); 170 static void lacp_port_enable(struct lacp_port *); 171 static void lacp_port_disable(struct lacp_port *); 172 static void lacp_select(struct lacp_port *); 173 static void lacp_unselect(struct lacp_port *); 174 static void lacp_disable_collecting(struct lacp_port *); 175 static void lacp_enable_collecting(struct lacp_port *); 176 static void lacp_disable_distributing(struct lacp_port *); 177 static void lacp_enable_distributing(struct lacp_port *); 178 static int lacp_xmit_lacpdu(struct lacp_port *); 179 static int lacp_xmit_marker(struct lacp_port *); 180 181 /* Debugging */ 182 183 static void lacp_dump_lacpdu(const struct lacpdu *); 184 static const char *lacp_format_partner(const struct lacp_peerinfo *, char *, 185 size_t); 186 static const char *lacp_format_lagid(const struct lacp_peerinfo *, 187 const struct lacp_peerinfo *, char *, size_t); 188 static const char *lacp_format_lagid_aggregator(const struct lacp_aggregator *, 189 char *, size_t); 190 static const char *lacp_format_state(uint8_t, char *, size_t); 191 static const char *lacp_format_mac(const uint8_t *, char *, size_t); 192 static const char *lacp_format_systemid(const struct lacp_systemid *, char *, 193 size_t); 194 static const char *lacp_format_portid(const struct lacp_portid *, char *, 195 size_t); 196 static void lacp_dprintf(const struct lacp_port *, const char *, ...) 197 __attribute__((__format__(__printf__, 2, 3))); 198 199 VNET_DEFINE_STATIC(int, lacp_debug); 200 #define V_lacp_debug VNET(lacp_debug) 201 SYSCTL_NODE(_net_link_lagg, OID_AUTO, lacp, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 202 "ieee802.3ad"); 203 SYSCTL_INT(_net_link_lagg_lacp, OID_AUTO, debug, CTLFLAG_RWTUN | CTLFLAG_VNET, 204 &VNET_NAME(lacp_debug), 0, "Enable LACP debug logging (1=debug, 2=trace)"); 205 206 VNET_DEFINE_STATIC(int, lacp_default_strict_mode) = 1; 207 SYSCTL_INT(_net_link_lagg_lacp, OID_AUTO, default_strict_mode, 208 CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(lacp_default_strict_mode), 0, 209 "LACP strict protocol compliance default"); 210 #define LACP_DPRINTF(a) if (V_lacp_debug & 0x01) { lacp_dprintf a ; } 211 #define LACP_TRACE(a) if (V_lacp_debug & 0x02) { lacp_dprintf(a,"%s\n",__func__); } 212 #define LACP_TPRINTF(a) if (V_lacp_debug & 0x04) { lacp_dprintf a ; } 213 214 /* 215 * partner administration variables. 216 * XXX should be configurable. 
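 * The optimistic set (used when strict mode is off) pretends the partner
 * is in sync and willing to aggregate, collect and distribute; the strict
 * set assumes nothing about the partner.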
217 */ 218 219 static const struct lacp_peerinfo lacp_partner_admin_optimistic = { 220 .lip_systemid = { .lsi_prio = 0xffff }, 221 .lip_portid = { .lpi_prio = 0xffff }, 222 .lip_state = LACP_STATE_SYNC | LACP_STATE_AGGREGATION | 223 LACP_STATE_COLLECTING | LACP_STATE_DISTRIBUTING, 224 }; 225 226 static const struct lacp_peerinfo lacp_partner_admin_strict = { 227 .lip_systemid = { .lsi_prio = 0xffff }, 228 .lip_portid = { .lpi_prio = 0xffff }, 229 .lip_state = 0, 230 }; 231 232 static const lacp_timer_func_t lacp_timer_funcs[LACP_NTIMER] = { 233 [LACP_TIMER_CURRENT_WHILE] = lacp_sm_rx_timer, 234 [LACP_TIMER_PERIODIC] = lacp_sm_ptx_timer, 235 [LACP_TIMER_WAIT_WHILE] = lacp_sm_mux_timer, 236 }; 237 238 struct mbuf * 239 lacp_input(struct lagg_port *lgp, struct mbuf *m) 240 { 241 struct lacp_port *lp = LACP_PORT(lgp); 242 uint8_t subtype; 243 244 if (m->m_pkthdr.len < sizeof(struct ether_header) + sizeof(subtype)) { 245 m_freem(m); 246 return (NULL); 247 } 248 249 m_copydata(m, sizeof(struct ether_header), sizeof(subtype), &subtype); 250 switch (subtype) { 251 case SLOWPROTOCOLS_SUBTYPE_LACP: 252 lacp_pdu_input(lp, m); 253 return (NULL); 254 255 case SLOWPROTOCOLS_SUBTYPE_MARKER: 256 lacp_marker_input(lp, m); 257 return (NULL); 258 } 259 260 /* Not a subtype we are interested in */ 261 return (m); 262 } 263 264 /* 265 * lacp_pdu_input: process lacpdu 266 */ 267 static int 268 lacp_pdu_input(struct lacp_port *lp, struct mbuf *m) 269 { 270 struct lacp_softc *lsc = lp->lp_lsc; 271 struct lacpdu *du; 272 int error = 0; 273 274 if (m->m_pkthdr.len != sizeof(*du)) { 275 goto bad; 276 } 277 278 if ((m->m_flags & M_MCAST) == 0) { 279 goto bad; 280 } 281 282 if (m->m_len < sizeof(*du)) { 283 m = m_pullup(m, sizeof(*du)); 284 if (m == NULL) { 285 return (ENOMEM); 286 } 287 } 288 289 du = mtod(m, struct lacpdu *); 290 291 if (memcmp(&du->ldu_eh.ether_dhost, 292 &ethermulticastaddr_slowprotocols, ETHER_ADDR_LEN)) { 293 goto bad; 294 } 295 296 /* 297 * ignore the version for compatibility with 298 * the future protocol revisions. 299 */ 300 #if 0 301 if (du->ldu_sph.sph_version != 1) { 302 goto bad; 303 } 304 #endif 305 306 /* 307 * ignore tlv types for compatibility with 308 * the future protocol revisions.
309 */ 310 if (tlv_check(du, sizeof(*du), &du->ldu_tlv_actor, 311 lacp_info_tlv_template, FALSE)) { 312 goto bad; 313 } 314 315 if (V_lacp_debug > 0) { 316 lacp_dprintf(lp, "lacpdu receive\n"); 317 lacp_dump_lacpdu(du); 318 } 319 320 if ((1 << lp->lp_ifp->if_dunit) & lp->lp_lsc->lsc_debug.lsc_rx_test) { 321 LACP_TPRINTF((lp, "Dropping RX PDU\n")); 322 goto bad; 323 } 324 325 LACP_LOCK(lsc); 326 lacp_sm_rx(lp, du); 327 LACP_UNLOCK(lsc); 328 329 m_freem(m); 330 return (error); 331 332 bad: 333 m_freem(m); 334 return (EINVAL); 335 } 336 337 static void 338 lacp_fill_actorinfo(struct lacp_port *lp, struct lacp_peerinfo *info) 339 { 340 struct lagg_port *lgp = lp->lp_lagg; 341 struct lagg_softc *sc = lgp->lp_softc; 342 343 info->lip_systemid.lsi_prio = htons(LACP_SYSTEM_PRIO); 344 memcpy(&info->lip_systemid.lsi_mac, 345 IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN); 346 info->lip_portid.lpi_prio = htons(LACP_PORT_PRIO); 347 info->lip_portid.lpi_portno = htons(lp->lp_ifp->if_index); 348 info->lip_state = lp->lp_state; 349 } 350 351 static void 352 lacp_fill_markerinfo(struct lacp_port *lp, struct lacp_markerinfo *info) 353 { 354 struct ifnet *ifp = lp->lp_ifp; 355 356 /* Fill in the port index and system id (encoded as the MAC) */ 357 info->mi_rq_port = htons(ifp->if_index); 358 memcpy(&info->mi_rq_system, lp->lp_systemid.lsi_mac, ETHER_ADDR_LEN); 359 info->mi_rq_xid = htonl(0); 360 } 361 362 static int 363 lacp_xmit_lacpdu(struct lacp_port *lp) 364 { 365 struct lagg_port *lgp = lp->lp_lagg; 366 struct mbuf *m; 367 struct lacpdu *du; 368 int error; 369 370 LACP_LOCK_ASSERT(lp->lp_lsc); 371 372 m = m_gethdr(M_NOWAIT, MT_DATA); 373 if (m == NULL) { 374 return (ENOMEM); 375 } 376 m->m_len = m->m_pkthdr.len = sizeof(*du); 377 378 du = mtod(m, struct lacpdu *); 379 memset(du, 0, sizeof(*du)); 380 381 memcpy(&du->ldu_eh.ether_dhost, ethermulticastaddr_slowprotocols, 382 ETHER_ADDR_LEN); 383 memcpy(&du->ldu_eh.ether_shost, lgp->lp_lladdr, ETHER_ADDR_LEN); 384 du->ldu_eh.ether_type = htons(ETHERTYPE_SLOW); 385 386 du->ldu_sph.sph_subtype = SLOWPROTOCOLS_SUBTYPE_LACP; 387 du->ldu_sph.sph_version = 1; 388 389 TLV_SET(&du->ldu_tlv_actor, LACP_TYPE_ACTORINFO, sizeof(du->ldu_actor)); 390 du->ldu_actor = lp->lp_actor; 391 392 TLV_SET(&du->ldu_tlv_partner, LACP_TYPE_PARTNERINFO, 393 sizeof(du->ldu_partner)); 394 du->ldu_partner = lp->lp_partner; 395 396 TLV_SET(&du->ldu_tlv_collector, LACP_TYPE_COLLECTORINFO, 397 sizeof(du->ldu_collector)); 398 du->ldu_collector.lci_maxdelay = 0; 399 400 if (V_lacp_debug > 0) { 401 lacp_dprintf(lp, "lacpdu transmit\n"); 402 lacp_dump_lacpdu(du); 403 } 404 405 m->m_flags |= M_MCAST; 406 407 /* 408 * XXX should use higher priority queue. 409 * otherwise network congestion can break aggregation. 
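 * (LACPDUs currently share the regular transmit path via lagg_enqueue().)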
410 */ 411 412 error = lagg_enqueue(lp->lp_ifp, m); 413 return (error); 414 } 415 416 static int 417 lacp_xmit_marker(struct lacp_port *lp) 418 { 419 struct lagg_port *lgp = lp->lp_lagg; 420 struct mbuf *m; 421 struct markerdu *mdu; 422 int error; 423 424 LACP_LOCK_ASSERT(lp->lp_lsc); 425 426 m = m_gethdr(M_NOWAIT, MT_DATA); 427 if (m == NULL) { 428 return (ENOMEM); 429 } 430 m->m_len = m->m_pkthdr.len = sizeof(*mdu); 431 432 mdu = mtod(m, struct markerdu *); 433 memset(mdu, 0, sizeof(*mdu)); 434 435 memcpy(&mdu->mdu_eh.ether_dhost, ethermulticastaddr_slowprotocols, 436 ETHER_ADDR_LEN); 437 memcpy(&mdu->mdu_eh.ether_shost, lgp->lp_lladdr, ETHER_ADDR_LEN); 438 mdu->mdu_eh.ether_type = htons(ETHERTYPE_SLOW); 439 440 mdu->mdu_sph.sph_subtype = SLOWPROTOCOLS_SUBTYPE_MARKER; 441 mdu->mdu_sph.sph_version = 1; 442 443 /* Bump the transaction id and copy over the marker info */ 444 lp->lp_marker.mi_rq_xid = htonl(ntohl(lp->lp_marker.mi_rq_xid) + 1); 445 TLV_SET(&mdu->mdu_tlv, MARKER_TYPE_INFO, sizeof(mdu->mdu_info)); 446 mdu->mdu_info = lp->lp_marker; 447 448 LACP_DPRINTF((lp, "marker transmit, port=%u, sys=%6D, id=%u\n", 449 ntohs(mdu->mdu_info.mi_rq_port), mdu->mdu_info.mi_rq_system, ":", 450 ntohl(mdu->mdu_info.mi_rq_xid))); 451 452 m->m_flags |= M_MCAST; 453 error = lagg_enqueue(lp->lp_ifp, m); 454 return (error); 455 } 456 457 void 458 lacp_linkstate(struct lagg_port *lgp) 459 { 460 struct lacp_port *lp = LACP_PORT(lgp); 461 struct lacp_softc *lsc = lp->lp_lsc; 462 struct ifnet *ifp = lgp->lp_ifp; 463 struct ifmediareq ifmr; 464 int error = 0; 465 u_int media; 466 uint8_t old_state; 467 uint16_t old_key; 468 469 bzero((char *)&ifmr, sizeof(ifmr)); 470 error = (*ifp->if_ioctl)(ifp, SIOCGIFXMEDIA, (caddr_t)&ifmr); 471 if (error != 0) { 472 bzero((char *)&ifmr, sizeof(ifmr)); 473 error = (*ifp->if_ioctl)(ifp, SIOCGIFMEDIA, (caddr_t)&ifmr); 474 } 475 if (error != 0) 476 return; 477 478 LACP_LOCK(lsc); 479 media = ifmr.ifm_active; 480 LACP_DPRINTF((lp, "media changed 0x%x -> 0x%x, ether = %d, fdx = %d, " 481 "link = %d\n", lp->lp_media, media, IFM_TYPE(media) == IFM_ETHER, 482 (media & IFM_FDX) != 0, ifp->if_link_state == LINK_STATE_UP)); 483 old_state = lp->lp_state; 484 old_key = lp->lp_key; 485 486 lp->lp_media = media; 487 /* 488 * If the port is not an active full duplex Ethernet link then it can 489 * not be aggregated. 
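 * (802.3ad aggregation is defined only for full duplex point-to-point
 * links.)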
490 */ 491 if (IFM_TYPE(media) != IFM_ETHER || (media & IFM_FDX) == 0 || 492 ifp->if_link_state != LINK_STATE_UP) { 493 lacp_port_disable(lp); 494 } else { 495 lacp_port_enable(lp); 496 } 497 lp->lp_key = lacp_compose_key(lp); 498 499 if (old_state != lp->lp_state || old_key != lp->lp_key) { 500 LACP_DPRINTF((lp, "-> UNSELECTED\n")); 501 lp->lp_selected = LACP_UNSELECTED; 502 } 503 LACP_UNLOCK(lsc); 504 } 505 506 static void 507 lacp_tick(void *arg) 508 { 509 struct lacp_softc *lsc = arg; 510 struct lacp_port *lp; 511 512 LIST_FOREACH(lp, &lsc->lsc_ports, lp_next) { 513 if ((lp->lp_state & LACP_STATE_AGGREGATION) == 0) 514 continue; 515 516 CURVNET_SET(lp->lp_ifp->if_vnet); 517 lacp_run_timers(lp); 518 519 lacp_select(lp); 520 lacp_sm_mux(lp); 521 lacp_sm_tx(lp); 522 lacp_sm_ptx_tx_schedule(lp); 523 CURVNET_RESTORE(); 524 } 525 callout_reset(&lsc->lsc_callout, hz, lacp_tick, lsc); 526 } 527 528 int 529 lacp_port_create(struct lagg_port *lgp) 530 { 531 struct lagg_softc *sc = lgp->lp_softc; 532 struct lacp_softc *lsc = LACP_SOFTC(sc); 533 struct lacp_port *lp; 534 struct ifnet *ifp = lgp->lp_ifp; 535 struct sockaddr_dl sdl; 536 struct ifmultiaddr *rifma = NULL; 537 int error; 538 539 link_init_sdl(ifp, (struct sockaddr *)&sdl, IFT_ETHER); 540 sdl.sdl_alen = ETHER_ADDR_LEN; 541 542 bcopy(&ethermulticastaddr_slowprotocols, 543 LLADDR(&sdl), ETHER_ADDR_LEN); 544 error = if_addmulti(ifp, (struct sockaddr *)&sdl, &rifma); 545 if (error) { 546 printf("%s: ADDMULTI failed on %s\n", __func__, 547 lgp->lp_ifp->if_xname); 548 return (error); 549 } 550 551 lp = malloc(sizeof(struct lacp_port), 552 M_DEVBUF, M_NOWAIT|M_ZERO); 553 if (lp == NULL) 554 return (ENOMEM); 555 556 LACP_LOCK(lsc); 557 lgp->lp_psc = lp; 558 lp->lp_ifp = ifp; 559 lp->lp_lagg = lgp; 560 lp->lp_lsc = lsc; 561 lp->lp_ifma = rifma; 562 563 LIST_INSERT_HEAD(&lsc->lsc_ports, lp, lp_next); 564 565 lacp_fill_actorinfo(lp, &lp->lp_actor); 566 lacp_fill_markerinfo(lp, &lp->lp_marker); 567 lp->lp_state = LACP_STATE_ACTIVITY; 568 lp->lp_aggregator = NULL; 569 lacp_sm_rx_set_expired(lp); 570 LACP_UNLOCK(lsc); 571 lacp_linkstate(lgp); 572 573 return (0); 574 } 575 576 void 577 lacp_port_destroy(struct lagg_port *lgp) 578 { 579 struct lacp_port *lp = LACP_PORT(lgp); 580 struct lacp_softc *lsc = lp->lp_lsc; 581 int i; 582 583 LACP_LOCK(lsc); 584 for (i = 0; i < LACP_NTIMER; i++) { 585 LACP_TIMER_DISARM(lp, i); 586 } 587 588 lacp_disable_collecting(lp); 589 lacp_disable_distributing(lp); 590 lacp_unselect(lp); 591 592 LIST_REMOVE(lp, lp_next); 593 LACP_UNLOCK(lsc); 594 595 /* The address may have already been removed by if_purgemaddrs() */ 596 if (!lgp->lp_detaching) 597 if_delmulti_ifma(lp->lp_ifma); 598 599 free(lp, M_DEVBUF); 600 } 601 602 void 603 lacp_req(struct lagg_softc *sc, void *data) 604 { 605 struct lacp_opreq *req = (struct lacp_opreq *)data; 606 struct lacp_softc *lsc = LACP_SOFTC(sc); 607 struct lacp_aggregator *la; 608 609 bzero(req, sizeof(struct lacp_opreq)); 610 611 /* 612 * If the LACP softc is NULL, return with the opreq structure full of 613 * zeros. It is normal for the softc to be NULL while the lagg is 614 * being destroyed.
615 */ 616 if (NULL == lsc) 617 return; 618 619 la = lsc->lsc_active_aggregator; 620 LACP_LOCK(lsc); 621 if (la != NULL) { 622 req->actor_prio = ntohs(la->la_actor.lip_systemid.lsi_prio); 623 memcpy(&req->actor_mac, &la->la_actor.lip_systemid.lsi_mac, 624 ETHER_ADDR_LEN); 625 req->actor_key = ntohs(la->la_actor.lip_key); 626 req->actor_portprio = ntohs(la->la_actor.lip_portid.lpi_prio); 627 req->actor_portno = ntohs(la->la_actor.lip_portid.lpi_portno); 628 req->actor_state = la->la_actor.lip_state; 629 630 req->partner_prio = ntohs(la->la_partner.lip_systemid.lsi_prio); 631 memcpy(&req->partner_mac, &la->la_partner.lip_systemid.lsi_mac, 632 ETHER_ADDR_LEN); 633 req->partner_key = ntohs(la->la_partner.lip_key); 634 req->partner_portprio = ntohs(la->la_partner.lip_portid.lpi_prio); 635 req->partner_portno = ntohs(la->la_partner.lip_portid.lpi_portno); 636 req->partner_state = la->la_partner.lip_state; 637 } 638 LACP_UNLOCK(lsc); 639 } 640 641 void 642 lacp_portreq(struct lagg_port *lgp, void *data) 643 { 644 struct lacp_opreq *req = (struct lacp_opreq *)data; 645 struct lacp_port *lp = LACP_PORT(lgp); 646 struct lacp_softc *lsc = lp->lp_lsc; 647 648 LACP_LOCK(lsc); 649 req->actor_prio = ntohs(lp->lp_actor.lip_systemid.lsi_prio); 650 memcpy(&req->actor_mac, &lp->lp_actor.lip_systemid.lsi_mac, 651 ETHER_ADDR_LEN); 652 req->actor_key = ntohs(lp->lp_actor.lip_key); 653 req->actor_portprio = ntohs(lp->lp_actor.lip_portid.lpi_prio); 654 req->actor_portno = ntohs(lp->lp_actor.lip_portid.lpi_portno); 655 req->actor_state = lp->lp_actor.lip_state; 656 657 req->partner_prio = ntohs(lp->lp_partner.lip_systemid.lsi_prio); 658 memcpy(&req->partner_mac, &lp->lp_partner.lip_systemid.lsi_mac, 659 ETHER_ADDR_LEN); 660 req->partner_key = ntohs(lp->lp_partner.lip_key); 661 req->partner_portprio = ntohs(lp->lp_partner.lip_portid.lpi_prio); 662 req->partner_portno = ntohs(lp->lp_partner.lip_portid.lpi_portno); 663 req->partner_state = lp->lp_partner.lip_state; 664 LACP_UNLOCK(lsc); 665 } 666 667 static void 668 lacp_disable_collecting(struct lacp_port *lp) 669 { 670 LACP_DPRINTF((lp, "collecting disabled\n")); 671 lp->lp_state &= ~LACP_STATE_COLLECTING; 672 } 673 674 static void 675 lacp_enable_collecting(struct lacp_port *lp) 676 { 677 LACP_DPRINTF((lp, "collecting enabled\n")); 678 lp->lp_state |= LACP_STATE_COLLECTING; 679 } 680 681 static void 682 lacp_disable_distributing(struct lacp_port *lp) 683 { 684 struct lacp_aggregator *la = lp->lp_aggregator; 685 struct lacp_softc *lsc = lp->lp_lsc; 686 struct lagg_softc *sc = lsc->lsc_softc; 687 char buf[LACP_LAGIDSTR_MAX+1]; 688 689 LACP_LOCK_ASSERT(lsc); 690 691 if (la == NULL || (lp->lp_state & LACP_STATE_DISTRIBUTING) == 0) { 692 return; 693 } 694 695 KASSERT(!TAILQ_EMPTY(&la->la_ports), ("no aggregator ports")); 696 KASSERT(la->la_nports > 0, ("nports invalid (%d)", la->la_nports)); 697 KASSERT(la->la_refcnt >= la->la_nports, ("aggregator refcnt invalid")); 698 699 LACP_DPRINTF((lp, "disable distributing on aggregator %s, " 700 "nports %d -> %d\n", 701 lacp_format_lagid_aggregator(la, buf, sizeof(buf)), 702 la->la_nports, la->la_nports - 1)); 703 704 TAILQ_REMOVE(&la->la_ports, lp, lp_dist_q); 705 la->la_nports--; 706 sc->sc_active = la->la_nports; 707 708 if (lsc->lsc_active_aggregator == la) { 709 lacp_suppress_distributing(lsc, la); 710 lacp_select_active_aggregator(lsc); 711 /* regenerate the port map, the active aggregator has changed */ 712 lacp_update_portmap(lsc); 713 } 714 715 lp->lp_state &= ~LACP_STATE_DISTRIBUTING; 716 
if_link_state_change(sc->sc_ifp, 717 sc->sc_active ? LINK_STATE_UP : LINK_STATE_DOWN); 718 } 719 720 static void 721 lacp_enable_distributing(struct lacp_port *lp) 722 { 723 struct lacp_aggregator *la = lp->lp_aggregator; 724 struct lacp_softc *lsc = lp->lp_lsc; 725 struct lagg_softc *sc = lsc->lsc_softc; 726 char buf[LACP_LAGIDSTR_MAX+1]; 727 728 LACP_LOCK_ASSERT(lsc); 729 730 if ((lp->lp_state & LACP_STATE_DISTRIBUTING) != 0) { 731 return; 732 } 733 734 LACP_DPRINTF((lp, "enable distributing on aggregator %s, " 735 "nports %d -> %d\n", 736 lacp_format_lagid_aggregator(la, buf, sizeof(buf)), 737 la->la_nports, la->la_nports + 1)); 738 739 KASSERT(la->la_refcnt > la->la_nports, ("aggregator refcnt invalid")); 740 TAILQ_INSERT_HEAD(&la->la_ports, lp, lp_dist_q); 741 la->la_nports++; 742 sc->sc_active = la->la_nports; 743 744 lp->lp_state |= LACP_STATE_DISTRIBUTING; 745 746 if (lsc->lsc_active_aggregator == la) { 747 lacp_suppress_distributing(lsc, la); 748 lacp_update_portmap(lsc); 749 } else 750 /* try to become the active aggregator */ 751 lacp_select_active_aggregator(lsc); 752 753 if_link_state_change(sc->sc_ifp, 754 sc->sc_active ? LINK_STATE_UP : LINK_STATE_DOWN); 755 } 756 757 static void 758 lacp_transit_expire(void *vp) 759 { 760 struct lacp_softc *lsc = vp; 761 762 LACP_LOCK_ASSERT(lsc); 763 764 CURVNET_SET(lsc->lsc_softc->sc_ifp->if_vnet); 765 LACP_TRACE(NULL); 766 CURVNET_RESTORE(); 767 768 lsc->lsc_suppress_distributing = FALSE; 769 } 770 771 void 772 lacp_attach(struct lagg_softc *sc) 773 { 774 struct lacp_softc *lsc; 775 776 lsc = malloc(sizeof(struct lacp_softc), M_DEVBUF, M_WAITOK | M_ZERO); 777 778 sc->sc_psc = lsc; 779 lsc->lsc_softc = sc; 780 781 lsc->lsc_hashkey = m_ether_tcpip_hash_init(); 782 lsc->lsc_active_aggregator = NULL; 783 lsc->lsc_strict_mode = VNET(lacp_default_strict_mode); 784 LACP_LOCK_INIT(lsc); 785 TAILQ_INIT(&lsc->lsc_aggregators); 786 LIST_INIT(&lsc->lsc_ports); 787 788 callout_init_mtx(&lsc->lsc_transit_callout, &lsc->lsc_mtx, 0); 789 callout_init_mtx(&lsc->lsc_callout, &lsc->lsc_mtx, 0); 790 791 /* if the lagg is already up then do the same */ 792 if (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) 793 lacp_init(sc); 794 } 795 796 void 797 lacp_detach(void *psc) 798 { 799 struct lacp_softc *lsc = (struct lacp_softc *)psc; 800 801 KASSERT(TAILQ_EMPTY(&lsc->lsc_aggregators), 802 ("aggregators still active")); 803 KASSERT(lsc->lsc_active_aggregator == NULL, 804 ("aggregator still attached")); 805 806 callout_drain(&lsc->lsc_transit_callout); 807 callout_drain(&lsc->lsc_callout); 808 809 LACP_LOCK_DESTROY(lsc); 810 free(lsc, M_DEVBUF); 811 } 812 813 void 814 lacp_init(struct lagg_softc *sc) 815 { 816 struct lacp_softc *lsc = LACP_SOFTC(sc); 817 818 LACP_LOCK(lsc); 819 callout_reset(&lsc->lsc_callout, hz, lacp_tick, lsc); 820 LACP_UNLOCK(lsc); 821 } 822 823 void 824 lacp_stop(struct lagg_softc *sc) 825 { 826 struct lacp_softc *lsc = LACP_SOFTC(sc); 827 828 LACP_LOCK(lsc); 829 callout_stop(&lsc->lsc_transit_callout); 830 callout_stop(&lsc->lsc_callout); 831 LACP_UNLOCK(lsc); 832 } 833 834 struct lagg_port * 835 lacp_select_tx_port_by_hash(struct lagg_softc *sc, uint32_t hash, 836 uint8_t numa_domain, int *err) 837 { 838 struct lacp_softc *lsc = LACP_SOFTC(sc); 839 struct lacp_portmap *pm; 840 struct lacp_port *lp; 841 struct lacp_port **map; 842 int count; 843 844 if (__predict_false(lsc->lsc_suppress_distributing)) { 845 LACP_DPRINTF((NULL, "%s: waiting transit\n", __func__)); 846 *err = ENOBUFS; 847 return (NULL); 848 } 849 850 pm = 
&lsc->lsc_pmap[lsc->lsc_activemap]; 851 if (pm->pm_count == 0) { 852 LACP_DPRINTF((NULL, "%s: no active aggregator\n", __func__)); 853 *err = ENETDOWN; 854 return (NULL); 855 } 856 857 #ifdef NUMA 858 if ((sc->sc_opts & LAGG_OPT_USE_NUMA) && 859 pm->pm_num_dom > 1 && numa_domain < MAXMEMDOM) { 860 count = pm->pm_numa[numa_domain].count; 861 if (count > 0) { 862 map = pm->pm_numa[numa_domain].map; 863 } else { 864 /* No ports on this domain; use global hash. */ 865 map = pm->pm_map; 866 count = pm->pm_count; 867 } 868 } else 869 #endif 870 { 871 map = pm->pm_map; 872 count = pm->pm_count; 873 } 874 875 hash %= count; 876 lp = map[hash]; 877 878 return (lp->lp_lagg); 879 } 880 881 struct lagg_port * 882 lacp_select_tx_port(struct lagg_softc *sc, struct mbuf *m, int *err) 883 { 884 struct lacp_softc *lsc = LACP_SOFTC(sc); 885 uint32_t hash; 886 uint8_t numa_domain; 887 888 if ((sc->sc_opts & LAGG_OPT_USE_FLOWID) && 889 M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) 890 hash = m->m_pkthdr.flowid >> sc->flowid_shift; 891 else 892 hash = m_ether_tcpip_hash(sc->sc_flags, m, lsc->lsc_hashkey); 893 894 numa_domain = m->m_pkthdr.numa_domain; 895 return (lacp_select_tx_port_by_hash(sc, hash, numa_domain, err)); 896 } 897 898 /* 899 * lacp_suppress_distributing: drop transmit packets for a while 900 * to preserve packet ordering. 901 */ 902 903 static void 904 lacp_suppress_distributing(struct lacp_softc *lsc, struct lacp_aggregator *la) 905 { 906 struct lacp_port *lp; 907 908 if (lsc->lsc_active_aggregator != la) { 909 return; 910 } 911 912 LACP_TRACE(NULL); 913 914 lsc->lsc_suppress_distributing = TRUE; 915 916 /* send a marker frame down each port to verify the queues are empty */ 917 LIST_FOREACH(lp, &lsc->lsc_ports, lp_next) { 918 lp->lp_flags |= LACP_PORT_MARK; 919 if (lacp_xmit_marker(lp) != 0) 920 lp->lp_flags &= ~LACP_PORT_MARK; 921 } 922 923 /* set a timeout for the marker frames */ 924 callout_reset(&lsc->lsc_transit_callout, 925 LACP_TRANSIT_DELAY * hz / 1000, lacp_transit_expire, lsc); 926 } 927 928 static int 929 lacp_compare_peerinfo(const struct lacp_peerinfo *a, 930 const struct lacp_peerinfo *b) 931 { 932 return (memcmp(a, b, offsetof(struct lacp_peerinfo, lip_state))); 933 } 934 935 static int 936 lacp_compare_systemid(const struct lacp_systemid *a, 937 const struct lacp_systemid *b) 938 { 939 return (memcmp(a, b, sizeof(*a))); 940 } 941 942 #if 0 /* unused */ 943 static int 944 lacp_compare_portid(const struct lacp_portid *a, 945 const struct lacp_portid *b) 946 { 947 return (memcmp(a, b, sizeof(*a))); 948 } 949 #endif 950 951 static uint64_t 952 lacp_aggregator_bandwidth(struct lacp_aggregator *la) 953 { 954 struct lacp_port *lp; 955 uint64_t speed; 956 957 lp = TAILQ_FIRST(&la->la_ports); 958 if (lp == NULL) { 959 return (0); 960 } 961 962 speed = ifmedia_baudrate(lp->lp_media); 963 speed *= la->la_nports; 964 if (speed == 0) { 965 LACP_DPRINTF((lp, "speed 0? media=0x%x nports=%d\n", 966 lp->lp_media, la->la_nports)); 967 } 968 969 return (speed); 970 } 971 972 /* 973 * lacp_select_active_aggregator: select an aggregator to be used to transmit 974 * packets from lagg(4) interface. 
975 */ 976 977 static void 978 lacp_select_active_aggregator(struct lacp_softc *lsc) 979 { 980 struct lacp_aggregator *la; 981 struct lacp_aggregator *best_la = NULL; 982 uint64_t best_speed = 0; 983 char buf[LACP_LAGIDSTR_MAX+1]; 984 985 LACP_TRACE(NULL); 986 987 TAILQ_FOREACH(la, &lsc->lsc_aggregators, la_q) { 988 uint64_t speed; 989 990 if (la->la_nports == 0) { 991 continue; 992 } 993 994 speed = lacp_aggregator_bandwidth(la); 995 LACP_DPRINTF((NULL, "%s, speed=%jd, nports=%d\n", 996 lacp_format_lagid_aggregator(la, buf, sizeof(buf)), 997 speed, la->la_nports)); 998 999 /* 1000 * This aggregator is chosen if the partner has a better 1001 * system priority or, the total aggregated speed is higher 1002 * or, it is already the chosen aggregator 1003 */ 1004 if ((best_la != NULL && LACP_SYS_PRI(la->la_partner) < 1005 LACP_SYS_PRI(best_la->la_partner)) || 1006 speed > best_speed || 1007 (speed == best_speed && 1008 la == lsc->lsc_active_aggregator)) { 1009 best_la = la; 1010 best_speed = speed; 1011 } 1012 } 1013 1014 KASSERT(best_la == NULL || best_la->la_nports > 0, 1015 ("invalid aggregator refcnt")); 1016 KASSERT(best_la == NULL || !TAILQ_EMPTY(&best_la->la_ports), 1017 ("invalid aggregator list")); 1018 1019 if (lsc->lsc_active_aggregator != best_la) { 1020 LACP_DPRINTF((NULL, "active aggregator changed\n")); 1021 LACP_DPRINTF((NULL, "old %s\n", 1022 lacp_format_lagid_aggregator(lsc->lsc_active_aggregator, 1023 buf, sizeof(buf)))); 1024 } else { 1025 LACP_DPRINTF((NULL, "active aggregator not changed\n")); 1026 } 1027 LACP_DPRINTF((NULL, "new %s\n", 1028 lacp_format_lagid_aggregator(best_la, buf, sizeof(buf)))); 1029 1030 if (lsc->lsc_active_aggregator != best_la) { 1031 lsc->lsc_active_aggregator = best_la; 1032 lacp_update_portmap(lsc); 1033 if (best_la) { 1034 lacp_suppress_distributing(lsc, best_la); 1035 } 1036 } 1037 } 1038 1039 /* 1040 * Updated the inactive portmap array with the new list of ports and 1041 * make it live. 1042 */ 1043 static void 1044 lacp_update_portmap(struct lacp_softc *lsc) 1045 { 1046 struct lagg_softc *sc = lsc->lsc_softc; 1047 struct lacp_aggregator *la; 1048 struct lacp_portmap *p; 1049 struct lacp_port *lp; 1050 uint64_t speed; 1051 u_int newmap; 1052 int i; 1053 #ifdef NUMA 1054 int count; 1055 uint8_t domain; 1056 #endif 1057 1058 newmap = lsc->lsc_activemap == 0 ? 
1 : 0; 1059 p = &lsc->lsc_pmap[newmap]; 1060 la = lsc->lsc_active_aggregator; 1061 speed = 0; 1062 bzero(p, sizeof(struct lacp_portmap)); 1063 1064 if (la != NULL && la->la_nports > 0) { 1065 p->pm_count = la->la_nports; 1066 i = 0; 1067 TAILQ_FOREACH(lp, &la->la_ports, lp_dist_q) { 1068 p->pm_map[i++] = lp; 1069 #ifdef NUMA 1070 domain = lp->lp_ifp->if_numa_domain; 1071 if (domain >= MAXMEMDOM) 1072 continue; 1073 count = p->pm_numa[domain].count; 1074 p->pm_numa[domain].map[count] = lp; 1075 p->pm_numa[domain].count++; 1076 #endif 1077 } 1078 KASSERT(i == p->pm_count, ("Invalid port count")); 1079 1080 #ifdef NUMA 1081 for (i = 0; i < MAXMEMDOM; i++) { 1082 if (p->pm_numa[i].count != 0) 1083 p->pm_num_dom++; 1084 } 1085 #endif 1086 speed = lacp_aggregator_bandwidth(la); 1087 } 1088 sc->sc_ifp->if_baudrate = speed; 1089 EVENTHANDLER_INVOKE(ifnet_event, sc->sc_ifp, 1090 IFNET_EVENT_UPDATE_BAUDRATE); 1091 1092 /* switch the active portmap over */ 1093 atomic_store_rel_int(&lsc->lsc_activemap, newmap); 1094 LACP_DPRINTF((NULL, "Set table %d with %d ports\n", 1095 lsc->lsc_activemap, 1096 lsc->lsc_pmap[lsc->lsc_activemap].pm_count)); 1097 } 1098 1099 static uint16_t 1100 lacp_compose_key(struct lacp_port *lp) 1101 { 1102 struct lagg_port *lgp = lp->lp_lagg; 1103 struct lagg_softc *sc = lgp->lp_softc; 1104 u_int media = lp->lp_media; 1105 uint16_t key; 1106 1107 if ((lp->lp_state & LACP_STATE_AGGREGATION) == 0) { 1108 /* 1109 * non-aggregatable links should have unique keys. 1110 * 1111 * XXX this isn't really unique as if_index is 16 bit. 1112 */ 1113 1114 /* bit 0..14: (some bits of) if_index of this port */ 1115 key = lp->lp_ifp->if_index; 1116 /* bit 15: 1 */ 1117 key |= 0x8000; 1118 } else { 1119 u_int subtype = IFM_SUBTYPE(media); 1120 1121 KASSERT(IFM_TYPE(media) == IFM_ETHER, ("invalid media type")); 1122 KASSERT((media & IFM_FDX) != 0, ("aggregating HDX interface")); 1123 1124 /* bit 0..4: IFM_SUBTYPE modulo speed */ 1125 switch (subtype) { 1126 case IFM_10_T: 1127 case IFM_10_2: 1128 case IFM_10_5: 1129 case IFM_10_STP: 1130 case IFM_10_FL: 1131 key = IFM_10_T; 1132 break; 1133 case IFM_100_TX: 1134 case IFM_100_FX: 1135 case IFM_100_T4: 1136 case IFM_100_VG: 1137 case IFM_100_T2: 1138 case IFM_100_T: 1139 case IFM_100_SGMII: 1140 case IFM_100_BX: 1141 key = IFM_100_TX; 1142 break; 1143 case IFM_1000_SX: 1144 case IFM_1000_LX: 1145 case IFM_1000_CX: 1146 case IFM_1000_T: 1147 case IFM_1000_KX: 1148 case IFM_1000_SGMII: 1149 case IFM_1000_CX_SGMII: 1150 case IFM_1000_BX: 1151 key = IFM_1000_SX; 1152 break; 1153 case IFM_10G_LR: 1154 case IFM_10G_SR: 1155 case IFM_10G_CX4: 1156 case IFM_10G_TWINAX: 1157 case IFM_10G_TWINAX_LONG: 1158 case IFM_10G_LRM: 1159 case IFM_10G_T: 1160 case IFM_10G_KX4: 1161 case IFM_10G_KR: 1162 case IFM_10G_CR1: 1163 case IFM_10G_ER: 1164 case IFM_10G_SFI: 1165 case IFM_10G_AOC: 1166 key = IFM_10G_LR; 1167 break; 1168 case IFM_20G_KR2: 1169 key = IFM_20G_KR2; 1170 break; 1171 case IFM_2500_KX: 1172 case IFM_2500_T: 1173 case IFM_2500_X: 1174 key = IFM_2500_KX; 1175 break; 1176 case IFM_5000_T: 1177 case IFM_5000_KR: 1178 case IFM_5000_KR_S: 1179 case IFM_5000_KR1: 1180 key = IFM_5000_T; 1181 break; 1182 case IFM_50G_PCIE: 1183 case IFM_50G_CR2: 1184 case IFM_50G_KR2: 1185 case IFM_50G_KR4: 1186 case IFM_50G_SR2: 1187 case IFM_50G_LR2: 1188 case IFM_50G_LAUI2_AC: 1189 case IFM_50G_LAUI2: 1190 case IFM_50G_AUI2_AC: 1191 case IFM_50G_AUI2: 1192 case IFM_50G_CP: 1193 case IFM_50G_SR: 1194 case IFM_50G_LR: 1195 case IFM_50G_FR: 1196 case IFM_50G_KR_PAM4: 1197 case 
IFM_50G_AUI1_AC: 1198 case IFM_50G_AUI1: 1199 key = IFM_50G_PCIE; 1200 break; 1201 case IFM_56G_R4: 1202 key = IFM_56G_R4; 1203 break; 1204 case IFM_25G_PCIE: 1205 case IFM_25G_CR: 1206 case IFM_25G_KR: 1207 case IFM_25G_SR: 1208 case IFM_25G_LR: 1209 case IFM_25G_ACC: 1210 case IFM_25G_AOC: 1211 case IFM_25G_T: 1212 case IFM_25G_CR_S: 1213 case IFM_25G_CR1: 1214 case IFM_25G_KR_S: 1215 case IFM_25G_AUI: 1216 case IFM_25G_KR1: 1217 key = IFM_25G_PCIE; 1218 break; 1219 case IFM_40G_CR4: 1220 case IFM_40G_SR4: 1221 case IFM_40G_LR4: 1222 case IFM_40G_LM4: 1223 case IFM_40G_XLPPI: 1224 case IFM_40G_KR4: 1225 case IFM_40G_XLAUI: 1226 case IFM_40G_XLAUI_AC: 1227 case IFM_40G_ER4: 1228 key = IFM_40G_CR4; 1229 break; 1230 case IFM_100G_CR4: 1231 case IFM_100G_SR4: 1232 case IFM_100G_KR4: 1233 case IFM_100G_LR4: 1234 case IFM_100G_CAUI4_AC: 1235 case IFM_100G_CAUI4: 1236 case IFM_100G_AUI4_AC: 1237 case IFM_100G_AUI4: 1238 case IFM_100G_CR_PAM4: 1239 case IFM_100G_KR_PAM4: 1240 case IFM_100G_CP2: 1241 case IFM_100G_SR2: 1242 case IFM_100G_DR: 1243 case IFM_100G_KR2_PAM4: 1244 case IFM_100G_CAUI2_AC: 1245 case IFM_100G_CAUI2: 1246 case IFM_100G_AUI2_AC: 1247 case IFM_100G_AUI2: 1248 key = IFM_100G_CR4; 1249 break; 1250 case IFM_200G_CR4_PAM4: 1251 case IFM_200G_SR4: 1252 case IFM_200G_FR4: 1253 case IFM_200G_LR4: 1254 case IFM_200G_DR4: 1255 case IFM_200G_KR4_PAM4: 1256 case IFM_200G_AUI4_AC: 1257 case IFM_200G_AUI4: 1258 case IFM_200G_AUI8_AC: 1259 case IFM_200G_AUI8: 1260 key = IFM_200G_CR4_PAM4; 1261 break; 1262 case IFM_400G_FR8: 1263 case IFM_400G_LR8: 1264 case IFM_400G_DR4: 1265 case IFM_400G_AUI8_AC: 1266 case IFM_400G_AUI8: 1267 case IFM_400G_SR8: 1268 case IFM_400G_CR8: 1269 key = IFM_400G_FR8; 1270 break; 1271 default: 1272 key = subtype; 1273 break; 1274 } 1275 /* bit 5..14: (some bits of) if_index of lagg device */ 1276 key |= 0x7fe0 & ((sc->sc_ifp->if_index) << 5); 1277 /* bit 15: 0 */ 1278 } 1279 return (htons(key)); 1280 } 1281 1282 static void 1283 lacp_aggregator_addref(struct lacp_softc *lsc, struct lacp_aggregator *la) 1284 { 1285 char buf[LACP_LAGIDSTR_MAX+1]; 1286 1287 LACP_DPRINTF((NULL, "%s: lagid=%s, refcnt %d -> %d\n", 1288 __func__, 1289 lacp_format_lagid(&la->la_actor, &la->la_partner, 1290 buf, sizeof(buf)), 1291 la->la_refcnt, la->la_refcnt + 1)); 1292 1293 KASSERT(la->la_refcnt > 0, ("refcount <= 0")); 1294 la->la_refcnt++; 1295 KASSERT(la->la_refcnt > la->la_nports, ("invalid refcount")); 1296 } 1297 1298 static void 1299 lacp_aggregator_delref(struct lacp_softc *lsc, struct lacp_aggregator *la) 1300 { 1301 char buf[LACP_LAGIDSTR_MAX+1]; 1302 1303 LACP_DPRINTF((NULL, "%s: lagid=%s, refcnt %d -> %d\n", 1304 __func__, 1305 lacp_format_lagid(&la->la_actor, &la->la_partner, 1306 buf, sizeof(buf)), 1307 la->la_refcnt, la->la_refcnt - 1)); 1308 1309 KASSERT(la->la_refcnt > la->la_nports, ("invalid refcnt")); 1310 la->la_refcnt--; 1311 if (la->la_refcnt > 0) { 1312 return; 1313 } 1314 1315 KASSERT(la->la_refcnt == 0, ("refcount not zero")); 1316 KASSERT(lsc->lsc_active_aggregator != la, ("aggregator active")); 1317 1318 TAILQ_REMOVE(&lsc->lsc_aggregators, la, la_q); 1319 1320 free(la, M_DEVBUF); 1321 } 1322 1323 /* 1324 * lacp_aggregator_get: allocate an aggregator. 
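 * Returns NULL if the M_NOWAIT allocation fails; the caller retries on a
 * later tick.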
1325 */ 1326 1327 static struct lacp_aggregator * 1328 lacp_aggregator_get(struct lacp_softc *lsc, struct lacp_port *lp) 1329 { 1330 struct lacp_aggregator *la; 1331 1332 la = malloc(sizeof(*la), M_DEVBUF, M_NOWAIT); 1333 if (la) { 1334 la->la_refcnt = 1; 1335 la->la_nports = 0; 1336 TAILQ_INIT(&la->la_ports); 1337 la->la_pending = 0; 1338 TAILQ_INSERT_TAIL(&lsc->lsc_aggregators, la, la_q); 1339 } 1340 1341 return (la); 1342 } 1343 1344 /* 1345 * lacp_fill_aggregator_id: setup a newly allocated aggregator from a port. 1346 */ 1347 1348 static void 1349 lacp_fill_aggregator_id(struct lacp_aggregator *la, const struct lacp_port *lp) 1350 { 1351 lacp_fill_aggregator_id_peer(&la->la_partner, &lp->lp_partner); 1352 lacp_fill_aggregator_id_peer(&la->la_actor, &lp->lp_actor); 1353 1354 la->la_actor.lip_state = lp->lp_state & LACP_STATE_AGGREGATION; 1355 } 1356 1357 static void 1358 lacp_fill_aggregator_id_peer(struct lacp_peerinfo *lpi_aggr, 1359 const struct lacp_peerinfo *lpi_port) 1360 { 1361 memset(lpi_aggr, 0, sizeof(*lpi_aggr)); 1362 lpi_aggr->lip_systemid = lpi_port->lip_systemid; 1363 lpi_aggr->lip_key = lpi_port->lip_key; 1364 } 1365 1366 /* 1367 * lacp_aggregator_is_compatible: check if a port can join to an aggregator. 1368 */ 1369 1370 static bool 1371 lacp_aggregator_is_compatible(const struct lacp_aggregator *la, 1372 const struct lacp_port *lp) 1373 { 1374 if (!(lp->lp_state & LACP_STATE_AGGREGATION) || 1375 !(lp->lp_partner.lip_state & LACP_STATE_AGGREGATION)) { 1376 return (false); 1377 } 1378 1379 if (!(la->la_actor.lip_state & LACP_STATE_AGGREGATION)) 1380 return (false); 1381 1382 if (!lacp_peerinfo_is_compatible(&la->la_partner, &lp->lp_partner)) 1383 return (false); 1384 1385 if (!lacp_peerinfo_is_compatible(&la->la_actor, &lp->lp_actor)) 1386 return (false); 1387 1388 return (true); 1389 } 1390 1391 static bool 1392 lacp_peerinfo_is_compatible(const struct lacp_peerinfo *a, 1393 const struct lacp_peerinfo *b) 1394 { 1395 if (memcmp(&a->lip_systemid, &b->lip_systemid, 1396 sizeof(a->lip_systemid)) != 0) { 1397 return (false); 1398 } 1399 1400 if (memcmp(&a->lip_key, &b->lip_key, sizeof(a->lip_key)) != 0) 1401 return (false); 1402 1403 return (true); 1404 } 1405 1406 static void 1407 lacp_port_enable(struct lacp_port *lp) 1408 { 1409 lp->lp_state |= LACP_STATE_AGGREGATION; 1410 } 1411 1412 static void 1413 lacp_port_disable(struct lacp_port *lp) 1414 { 1415 lacp_set_mux(lp, LACP_MUX_DETACHED); 1416 1417 lp->lp_state &= ~LACP_STATE_AGGREGATION; 1418 lp->lp_selected = LACP_UNSELECTED; 1419 lacp_sm_rx_record_default(lp); 1420 lp->lp_partner.lip_state &= ~LACP_STATE_AGGREGATION; 1421 lp->lp_state &= ~LACP_STATE_EXPIRED; 1422 } 1423 1424 /* 1425 * lacp_select: select an aggregator. create one if necessary. 1426 */ 1427 static void 1428 lacp_select(struct lacp_port *lp) 1429 { 1430 struct lacp_softc *lsc = lp->lp_lsc; 1431 struct lacp_aggregator *la; 1432 char buf[LACP_LAGIDSTR_MAX+1]; 1433 1434 if (lp->lp_aggregator) { 1435 return; 1436 } 1437 1438 /* If we haven't heard from our peer, skip this step. 
*/ 1439 if (lp->lp_state & LACP_STATE_DEFAULTED) 1440 return; 1441 1442 KASSERT(!LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE), 1443 ("timer_wait_while still active")); 1444 1445 LACP_DPRINTF((lp, "port lagid=%s\n", 1446 lacp_format_lagid(&lp->lp_actor, &lp->lp_partner, 1447 buf, sizeof(buf)))); 1448 1449 TAILQ_FOREACH(la, &lsc->lsc_aggregators, la_q) { 1450 if (lacp_aggregator_is_compatible(la, lp)) { 1451 break; 1452 } 1453 } 1454 1455 if (la == NULL) { 1456 la = lacp_aggregator_get(lsc, lp); 1457 if (la == NULL) { 1458 LACP_DPRINTF((lp, "aggregator creation failed\n")); 1459 1460 /* 1461 * will retry on the next tick. 1462 */ 1463 1464 return; 1465 } 1466 lacp_fill_aggregator_id(la, lp); 1467 LACP_DPRINTF((lp, "aggregator created\n")); 1468 } else { 1469 LACP_DPRINTF((lp, "compatible aggregator found\n")); 1470 if (la->la_refcnt == LACP_MAX_PORTS) 1471 return; 1472 lacp_aggregator_addref(lsc, la); 1473 } 1474 1475 LACP_DPRINTF((lp, "aggregator lagid=%s\n", 1476 lacp_format_lagid(&la->la_actor, &la->la_partner, 1477 buf, sizeof(buf)))); 1478 1479 lp->lp_aggregator = la; 1480 lp->lp_selected = LACP_SELECTED; 1481 } 1482 1483 /* 1484 * lacp_unselect: finish unselect/detach process. 1485 */ 1486 1487 static void 1488 lacp_unselect(struct lacp_port *lp) 1489 { 1490 struct lacp_softc *lsc = lp->lp_lsc; 1491 struct lacp_aggregator *la = lp->lp_aggregator; 1492 1493 KASSERT(!LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE), 1494 ("timer_wait_while still active")); 1495 1496 if (la == NULL) { 1497 return; 1498 } 1499 1500 lp->lp_aggregator = NULL; 1501 lacp_aggregator_delref(lsc, la); 1502 } 1503 1504 /* mux machine */ 1505 1506 static void 1507 lacp_sm_mux(struct lacp_port *lp) 1508 { 1509 struct lagg_port *lgp = lp->lp_lagg; 1510 struct lagg_softc *sc = lgp->lp_softc; 1511 enum lacp_mux_state new_state; 1512 boolean_t p_sync = 1513 (lp->lp_partner.lip_state & LACP_STATE_SYNC) != 0; 1514 boolean_t p_collecting = 1515 (lp->lp_partner.lip_state & LACP_STATE_COLLECTING) != 0; 1516 enum lacp_selected selected = lp->lp_selected; 1517 struct lacp_aggregator *la; 1518 1519 if (V_lacp_debug > 1) 1520 lacp_dprintf(lp, "%s: state= 0x%x, selected= 0x%x, " 1521 "p_sync= 0x%x, p_collecting= 0x%x\n", __func__, 1522 lp->lp_mux_state, selected, p_sync, p_collecting); 1523 1524 re_eval: 1525 la = lp->lp_aggregator; 1526 KASSERT(lp->lp_mux_state == LACP_MUX_DETACHED || la != NULL, 1527 ("MUX not detached")); 1528 new_state = lp->lp_mux_state; 1529 switch (lp->lp_mux_state) { 1530 case LACP_MUX_DETACHED: 1531 if (selected != LACP_UNSELECTED) { 1532 new_state = LACP_MUX_WAITING; 1533 } 1534 break; 1535 case LACP_MUX_WAITING: 1536 KASSERT(la->la_pending > 0 || 1537 !LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE), 1538 ("timer_wait_while still active")); 1539 if (selected == LACP_SELECTED && la->la_pending == 0) { 1540 new_state = LACP_MUX_ATTACHED; 1541 } else if (selected == LACP_UNSELECTED) { 1542 new_state = LACP_MUX_DETACHED; 1543 } 1544 break; 1545 case LACP_MUX_ATTACHED: 1546 if (selected == LACP_SELECTED && p_sync) { 1547 new_state = LACP_MUX_COLLECTING; 1548 } else if (selected != LACP_SELECTED) { 1549 new_state = LACP_MUX_DETACHED; 1550 } 1551 break; 1552 case LACP_MUX_COLLECTING: 1553 if (selected == LACP_SELECTED && p_sync && p_collecting) { 1554 new_state = LACP_MUX_DISTRIBUTING; 1555 } else if (selected != LACP_SELECTED || !p_sync) { 1556 new_state = LACP_MUX_ATTACHED; 1557 } 1558 break; 1559 case LACP_MUX_DISTRIBUTING: 1560 if (selected != LACP_SELECTED || !p_sync || !p_collecting) { 1561 new_state = 
LACP_MUX_COLLECTING; 1562 lacp_dprintf(lp, "Interface stopped DISTRIBUTING, possible flapping\n"); 1563 sc->sc_flapping++; 1564 } 1565 break; 1566 default: 1567 panic("%s: unknown state", __func__); 1568 } 1569 1570 if (lp->lp_mux_state == new_state) { 1571 return; 1572 } 1573 1574 lacp_set_mux(lp, new_state); 1575 goto re_eval; 1576 } 1577 1578 static void 1579 lacp_set_mux(struct lacp_port *lp, enum lacp_mux_state new_state) 1580 { 1581 struct lacp_aggregator *la = lp->lp_aggregator; 1582 1583 if (lp->lp_mux_state == new_state) { 1584 return; 1585 } 1586 1587 switch (new_state) { 1588 case LACP_MUX_DETACHED: 1589 lp->lp_state &= ~LACP_STATE_SYNC; 1590 lacp_disable_distributing(lp); 1591 lacp_disable_collecting(lp); 1592 lacp_sm_assert_ntt(lp); 1593 /* cancel timer */ 1594 if (LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE)) { 1595 KASSERT(la->la_pending > 0, 1596 ("timer_wait_while not active")); 1597 la->la_pending--; 1598 } 1599 LACP_TIMER_DISARM(lp, LACP_TIMER_WAIT_WHILE); 1600 lacp_unselect(lp); 1601 break; 1602 case LACP_MUX_WAITING: 1603 LACP_TIMER_ARM(lp, LACP_TIMER_WAIT_WHILE, 1604 LACP_AGGREGATE_WAIT_TIME); 1605 la->la_pending++; 1606 break; 1607 case LACP_MUX_ATTACHED: 1608 lp->lp_state |= LACP_STATE_SYNC; 1609 lacp_disable_collecting(lp); 1610 lacp_sm_assert_ntt(lp); 1611 break; 1612 case LACP_MUX_COLLECTING: 1613 lacp_enable_collecting(lp); 1614 lacp_disable_distributing(lp); 1615 lacp_sm_assert_ntt(lp); 1616 break; 1617 case LACP_MUX_DISTRIBUTING: 1618 lacp_enable_distributing(lp); 1619 break; 1620 default: 1621 panic("%s: unknown state", __func__); 1622 } 1623 1624 LACP_DPRINTF((lp, "mux_state %d -> %d\n", lp->lp_mux_state, new_state)); 1625 1626 lp->lp_mux_state = new_state; 1627 } 1628 1629 static void 1630 lacp_sm_mux_timer(struct lacp_port *lp) 1631 { 1632 struct lacp_aggregator *la = lp->lp_aggregator; 1633 char buf[LACP_LAGIDSTR_MAX+1]; 1634 1635 KASSERT(la->la_pending > 0, ("no pending event")); 1636 1637 LACP_DPRINTF((lp, "%s: aggregator %s, pending %d -> %d\n", __func__, 1638 lacp_format_lagid(&la->la_actor, &la->la_partner, 1639 buf, sizeof(buf)), 1640 la->la_pending, la->la_pending - 1)); 1641 1642 la->la_pending--; 1643 } 1644 1645 /* periodic transmit machine */ 1646 1647 static void 1648 lacp_sm_ptx_update_timeout(struct lacp_port *lp, uint8_t oldpstate) 1649 { 1650 if (LACP_STATE_EQ(oldpstate, lp->lp_partner.lip_state, 1651 LACP_STATE_TIMEOUT)) { 1652 return; 1653 } 1654 1655 LACP_DPRINTF((lp, "partner timeout changed\n")); 1656 1657 /* 1658 * FAST_PERIODIC -> SLOW_PERIODIC 1659 * or 1660 * SLOW_PERIODIC (-> PERIODIC_TX) -> FAST_PERIODIC 1661 * 1662 * let lacp_sm_ptx_tx_schedule to update timeout. 1663 */ 1664 1665 LACP_TIMER_DISARM(lp, LACP_TIMER_PERIODIC); 1666 1667 /* 1668 * if timeout has been shortened, assert NTT. 1669 */ 1670 1671 if ((lp->lp_partner.lip_state & LACP_STATE_TIMEOUT)) { 1672 lacp_sm_assert_ntt(lp); 1673 } 1674 } 1675 1676 static void 1677 lacp_sm_ptx_tx_schedule(struct lacp_port *lp) 1678 { 1679 int timeout; 1680 1681 if (!(lp->lp_state & LACP_STATE_ACTIVITY) && 1682 !(lp->lp_partner.lip_state & LACP_STATE_ACTIVITY)) { 1683 /* 1684 * NO_PERIODIC 1685 */ 1686 1687 LACP_TIMER_DISARM(lp, LACP_TIMER_PERIODIC); 1688 return; 1689 } 1690 1691 if (LACP_TIMER_ISARMED(lp, LACP_TIMER_PERIODIC)) { 1692 return; 1693 } 1694 1695 timeout = (lp->lp_partner.lip_state & LACP_STATE_TIMEOUT) ? 
1696 LACP_FAST_PERIODIC_TIME : LACP_SLOW_PERIODIC_TIME; 1697 1698 LACP_TIMER_ARM(lp, LACP_TIMER_PERIODIC, timeout); 1699 } 1700 1701 static void 1702 lacp_sm_ptx_timer(struct lacp_port *lp) 1703 { 1704 lacp_sm_assert_ntt(lp); 1705 } 1706 1707 static void 1708 lacp_sm_rx(struct lacp_port *lp, const struct lacpdu *du) 1709 { 1710 int timeout; 1711 1712 /* 1713 * check LACP_DISABLED first 1714 */ 1715 1716 if (!(lp->lp_state & LACP_STATE_AGGREGATION)) { 1717 return; 1718 } 1719 1720 /* 1721 * check loopback condition. 1722 */ 1723 1724 if (!lacp_compare_systemid(&du->ldu_actor.lip_systemid, 1725 &lp->lp_actor.lip_systemid)) { 1726 return; 1727 } 1728 1729 /* 1730 * EXPIRED, DEFAULTED, CURRENT -> CURRENT 1731 */ 1732 1733 microuptime(&lp->lp_last_lacpdu_rx); 1734 lacp_sm_rx_update_selected(lp, du); 1735 lacp_sm_rx_update_ntt(lp, du); 1736 lacp_sm_rx_record_pdu(lp, du); 1737 1738 timeout = (lp->lp_state & LACP_STATE_TIMEOUT) ? 1739 LACP_SHORT_TIMEOUT_TIME : LACP_LONG_TIMEOUT_TIME; 1740 LACP_TIMER_ARM(lp, LACP_TIMER_CURRENT_WHILE, timeout); 1741 1742 lp->lp_state &= ~LACP_STATE_EXPIRED; 1743 1744 /* 1745 * kick transmit machine without waiting the next tick. 1746 */ 1747 1748 lacp_sm_tx(lp); 1749 } 1750 1751 static void 1752 lacp_sm_rx_set_expired(struct lacp_port *lp) 1753 { 1754 lp->lp_partner.lip_state &= ~LACP_STATE_SYNC; 1755 lp->lp_partner.lip_state |= LACP_STATE_TIMEOUT; 1756 LACP_TIMER_ARM(lp, LACP_TIMER_CURRENT_WHILE, LACP_SHORT_TIMEOUT_TIME); 1757 lp->lp_state |= LACP_STATE_EXPIRED; 1758 } 1759 1760 static void 1761 lacp_sm_rx_timer(struct lacp_port *lp) 1762 { 1763 if ((lp->lp_state & LACP_STATE_EXPIRED) == 0) { 1764 /* CURRENT -> EXPIRED */ 1765 LACP_DPRINTF((lp, "%s: CURRENT -> EXPIRED\n", __func__)); 1766 lacp_sm_rx_set_expired(lp); 1767 } else { 1768 /* EXPIRED -> DEFAULTED */ 1769 LACP_DPRINTF((lp, "%s: EXPIRED -> DEFAULTED\n", __func__)); 1770 lacp_sm_rx_update_default_selected(lp); 1771 lacp_sm_rx_record_default(lp); 1772 lp->lp_state &= ~LACP_STATE_EXPIRED; 1773 } 1774 } 1775 1776 static void 1777 lacp_sm_rx_record_pdu(struct lacp_port *lp, const struct lacpdu *du) 1778 { 1779 boolean_t active; 1780 uint8_t oldpstate; 1781 char buf[LACP_STATESTR_MAX+1]; 1782 1783 LACP_TRACE(lp); 1784 1785 oldpstate = lp->lp_partner.lip_state; 1786 1787 active = (du->ldu_actor.lip_state & LACP_STATE_ACTIVITY) 1788 || ((lp->lp_state & LACP_STATE_ACTIVITY) && 1789 (du->ldu_partner.lip_state & LACP_STATE_ACTIVITY)); 1790 1791 lp->lp_partner = du->ldu_actor; 1792 if (active && 1793 ((LACP_STATE_EQ(lp->lp_state, du->ldu_partner.lip_state, 1794 LACP_STATE_AGGREGATION) && 1795 !lacp_compare_peerinfo(&lp->lp_actor, &du->ldu_partner)) 1796 || (du->ldu_partner.lip_state & LACP_STATE_AGGREGATION) == 0)) { 1797 /* 1798 * XXX Maintain legacy behavior of leaving the 1799 * LACP_STATE_SYNC bit unchanged from the partner's 1800 * advertisement if lsc_strict_mode is false. 1801 * TODO: We should re-examine the concept of the "strict mode" 1802 * to ensure it makes sense to maintain a non-strict mode. 
1803 */ 1804 if (lp->lp_lsc->lsc_strict_mode) 1805 lp->lp_partner.lip_state |= LACP_STATE_SYNC; 1806 } else { 1807 lp->lp_partner.lip_state &= ~LACP_STATE_SYNC; 1808 } 1809 1810 lp->lp_state &= ~LACP_STATE_DEFAULTED; 1811 1812 if (oldpstate != lp->lp_partner.lip_state) { 1813 LACP_DPRINTF((lp, "old pstate %s\n", 1814 lacp_format_state(oldpstate, buf, sizeof(buf)))); 1815 LACP_DPRINTF((lp, "new pstate %s\n", 1816 lacp_format_state(lp->lp_partner.lip_state, buf, 1817 sizeof(buf)))); 1818 } 1819 1820 lacp_sm_ptx_update_timeout(lp, oldpstate); 1821 } 1822 1823 static void 1824 lacp_sm_rx_update_ntt(struct lacp_port *lp, const struct lacpdu *du) 1825 { 1826 1827 LACP_TRACE(lp); 1828 1829 if (lacp_compare_peerinfo(&lp->lp_actor, &du->ldu_partner) || 1830 !LACP_STATE_EQ(lp->lp_state, du->ldu_partner.lip_state, 1831 LACP_STATE_ACTIVITY | LACP_STATE_SYNC | LACP_STATE_AGGREGATION)) { 1832 LACP_DPRINTF((lp, "%s: assert ntt\n", __func__)); 1833 lacp_sm_assert_ntt(lp); 1834 } 1835 } 1836 1837 static void 1838 lacp_sm_rx_record_default(struct lacp_port *lp) 1839 { 1840 uint8_t oldpstate; 1841 1842 LACP_TRACE(lp); 1843 1844 oldpstate = lp->lp_partner.lip_state; 1845 if (lp->lp_lsc->lsc_strict_mode) 1846 lp->lp_partner = lacp_partner_admin_strict; 1847 else 1848 lp->lp_partner = lacp_partner_admin_optimistic; 1849 lp->lp_state |= LACP_STATE_DEFAULTED; 1850 lacp_sm_ptx_update_timeout(lp, oldpstate); 1851 } 1852 1853 static void 1854 lacp_sm_rx_update_selected_from_peerinfo(struct lacp_port *lp, 1855 const struct lacp_peerinfo *info) 1856 { 1857 1858 LACP_TRACE(lp); 1859 1860 if (lacp_compare_peerinfo(&lp->lp_partner, info) || 1861 !LACP_STATE_EQ(lp->lp_partner.lip_state, info->lip_state, 1862 LACP_STATE_AGGREGATION)) { 1863 lp->lp_selected = LACP_UNSELECTED; 1864 /* mux machine will clean up lp->lp_aggregator */ 1865 } 1866 } 1867 1868 static void 1869 lacp_sm_rx_update_selected(struct lacp_port *lp, const struct lacpdu *du) 1870 { 1871 1872 LACP_TRACE(lp); 1873 1874 lacp_sm_rx_update_selected_from_peerinfo(lp, &du->ldu_actor); 1875 } 1876 1877 static void 1878 lacp_sm_rx_update_default_selected(struct lacp_port *lp) 1879 { 1880 1881 LACP_TRACE(lp); 1882 1883 if (lp->lp_lsc->lsc_strict_mode) 1884 lacp_sm_rx_update_selected_from_peerinfo(lp, 1885 &lacp_partner_admin_strict); 1886 else 1887 lacp_sm_rx_update_selected_from_peerinfo(lp, 1888 &lacp_partner_admin_optimistic); 1889 } 1890 1891 /* transmit machine */ 1892 1893 static void 1894 lacp_sm_tx(struct lacp_port *lp) 1895 { 1896 int error = 0; 1897 1898 if (!(lp->lp_state & LACP_STATE_AGGREGATION) 1899 #if 1 1900 || (!(lp->lp_state & LACP_STATE_ACTIVITY) 1901 && !(lp->lp_partner.lip_state & LACP_STATE_ACTIVITY)) 1902 #endif 1903 ) { 1904 lp->lp_flags &= ~LACP_PORT_NTT; 1905 } 1906 1907 if (!(lp->lp_flags & LACP_PORT_NTT)) { 1908 return; 1909 } 1910 1911 /* Rate limit to 3 PDUs per LACP_FAST_PERIODIC_TIME */ 1912 if (ppsratecheck(&lp->lp_last_lacpdu, &lp->lp_lacpdu_sent, 1913 (3 / LACP_FAST_PERIODIC_TIME)) == 0) { 1914 LACP_DPRINTF((lp, "rate limited pdu\n")); 1915 return; 1916 } 1917 1918 if (((1 << lp->lp_ifp->if_dunit) & lp->lp_lsc->lsc_debug.lsc_tx_test) == 0) { 1919 error = lacp_xmit_lacpdu(lp); 1920 } else { 1921 LACP_TPRINTF((lp, "Dropping TX PDU\n")); 1922 } 1923 1924 if (error == 0) { 1925 lp->lp_flags &= ~LACP_PORT_NTT; 1926 } else { 1927 LACP_DPRINTF((lp, "lacpdu transmit failure, error %d\n", 1928 error)); 1929 } 1930 } 1931 1932 static void 1933 lacp_sm_assert_ntt(struct lacp_port *lp) 1934 { 1935 1936 lp->lp_flags |= LACP_PORT_NTT; 1937 } 
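/*
 * lacp_run_timers: decrement each armed per-port timer and invoke its
 * handler when it reaches zero.  The CURRENT_WHILE timer only counts
 * down when at least a second has elapsed since the last received
 * LACPDU.
 */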
1938 1939 static void 1940 lacp_run_timers(struct lacp_port *lp) 1941 { 1942 int i; 1943 struct timeval time_diff; 1944 1945 for (i = 0; i < LACP_NTIMER; i++) { 1946 KASSERT(lp->lp_timer[i] >= 0, 1947 ("invalid timer value %d", lp->lp_timer[i])); 1948 if (lp->lp_timer[i] == 0) { 1949 continue; 1950 } else { 1951 if (i == LACP_TIMER_CURRENT_WHILE) { 1952 microuptime(&time_diff); 1953 timevalsub(&time_diff, &lp->lp_last_lacpdu_rx); 1954 if (time_diff.tv_sec) { 1955 /* At least one sec has elapsed since last LACP packet. */ 1956 --lp->lp_timer[i]; 1957 } 1958 } else { 1959 --lp->lp_timer[i]; 1960 } 1961 1962 if ((lp->lp_timer[i] <= 0) && (lacp_timer_funcs[i])) { 1963 (*lacp_timer_funcs[i])(lp); 1964 } 1965 } 1966 } 1967 } 1968 1969 static int 1970 lacp_marker_input(struct lacp_port *lp, struct mbuf *m) 1971 { 1972 struct lacp_softc *lsc = lp->lp_lsc; 1973 struct lagg_port *lgp = lp->lp_lagg; 1974 struct lacp_port *lp2; 1975 struct markerdu *mdu; 1976 int error = 0; 1977 int pending = 0; 1978 1979 if (m->m_pkthdr.len != sizeof(*mdu)) { 1980 goto bad; 1981 } 1982 1983 if ((m->m_flags & M_MCAST) == 0) { 1984 goto bad; 1985 } 1986 1987 if (m->m_len < sizeof(*mdu)) { 1988 m = m_pullup(m, sizeof(*mdu)); 1989 if (m == NULL) { 1990 return (ENOMEM); 1991 } 1992 } 1993 1994 mdu = mtod(m, struct markerdu *); 1995 1996 if (memcmp(&mdu->mdu_eh.ether_dhost, 1997 &ethermulticastaddr_slowprotocols, ETHER_ADDR_LEN)) { 1998 goto bad; 1999 } 2000 2001 if (mdu->mdu_sph.sph_version != 1) { 2002 goto bad; 2003 } 2004 2005 switch (mdu->mdu_tlv.tlv_type) { 2006 case MARKER_TYPE_INFO: 2007 if (tlv_check(mdu, sizeof(*mdu), &mdu->mdu_tlv, 2008 marker_info_tlv_template, TRUE)) { 2009 goto bad; 2010 } 2011 mdu->mdu_tlv.tlv_type = MARKER_TYPE_RESPONSE; 2012 memcpy(&mdu->mdu_eh.ether_dhost, 2013 &ethermulticastaddr_slowprotocols, ETHER_ADDR_LEN); 2014 memcpy(&mdu->mdu_eh.ether_shost, 2015 lgp->lp_lladdr, ETHER_ADDR_LEN); 2016 error = lagg_enqueue(lp->lp_ifp, m); 2017 break; 2018 2019 case MARKER_TYPE_RESPONSE: 2020 if (tlv_check(mdu, sizeof(*mdu), &mdu->mdu_tlv, 2021 marker_response_tlv_template, TRUE)) { 2022 goto bad; 2023 } 2024 LACP_DPRINTF((lp, "marker response, port=%u, sys=%6D, id=%u\n", 2025 ntohs(mdu->mdu_info.mi_rq_port), mdu->mdu_info.mi_rq_system, 2026 ":", ntohl(mdu->mdu_info.mi_rq_xid))); 2027 2028 /* Verify that it is the last marker we sent out */ 2029 if (memcmp(&mdu->mdu_info, &lp->lp_marker, 2030 sizeof(struct lacp_markerinfo))) 2031 goto bad; 2032 2033 LACP_LOCK(lsc); 2034 lp->lp_flags &= ~LACP_PORT_MARK; 2035 2036 if (lsc->lsc_suppress_distributing) { 2037 /* Check if any ports are waiting for a response */ 2038 LIST_FOREACH(lp2, &lsc->lsc_ports, lp_next) { 2039 if (lp2->lp_flags & LACP_PORT_MARK) { 2040 pending = 1; 2041 break; 2042 } 2043 } 2044 2045 if (pending == 0) { 2046 /* All interface queues are clear */ 2047 LACP_DPRINTF((NULL, "queue flush complete\n")); 2048 lsc->lsc_suppress_distributing = FALSE; 2049 } 2050 } 2051 LACP_UNLOCK(lsc); 2052 m_freem(m); 2053 break; 2054 2055 default: 2056 goto bad; 2057 } 2058 2059 return (error); 2060 2061 bad: 2062 LACP_DPRINTF((lp, "bad marker frame\n")); 2063 m_freem(m); 2064 return (EINVAL); 2065 } 2066 2067 static int 2068 tlv_check(const void *p, size_t size, const struct tlvhdr *tlv, 2069 const struct tlv_template *tmpl, boolean_t check_type) 2070 { 2071 while (/* CONSTCOND */ 1) { 2072 if ((const char *)tlv - (const char *)p + sizeof(*tlv) > size) { 2073 return (EINVAL); 2074 } 2075 if ((check_type && tlv->tlv_type != tmpl->tmpl_type) || 2076 tlv->tlv_length !=
tmpl->tmpl_length) { 2077 return (EINVAL); 2078 } 2079 if (tmpl->tmpl_type == 0) { 2080 break; 2081 } 2082 tlv = (const struct tlvhdr *) 2083 ((const char *)tlv + tlv->tlv_length); 2084 tmpl++; 2085 } 2086 2087 return (0); 2088 } 2089 2090 /* Debugging */ 2091 const char * 2092 lacp_format_mac(const uint8_t *mac, char *buf, size_t buflen) 2093 { 2094 snprintf(buf, buflen, "%02X-%02X-%02X-%02X-%02X-%02X", 2095 (int)mac[0], 2096 (int)mac[1], 2097 (int)mac[2], 2098 (int)mac[3], 2099 (int)mac[4], 2100 (int)mac[5]); 2101 2102 return (buf); 2103 } 2104 2105 const char * 2106 lacp_format_systemid(const struct lacp_systemid *sysid, 2107 char *buf, size_t buflen) 2108 { 2109 char macbuf[LACP_MACSTR_MAX+1]; 2110 2111 snprintf(buf, buflen, "%04X,%s", 2112 ntohs(sysid->lsi_prio), 2113 lacp_format_mac(sysid->lsi_mac, macbuf, sizeof(macbuf))); 2114 2115 return (buf); 2116 } 2117 2118 const char * 2119 lacp_format_portid(const struct lacp_portid *portid, char *buf, size_t buflen) 2120 { 2121 snprintf(buf, buflen, "%04X,%04X", 2122 ntohs(portid->lpi_prio), 2123 ntohs(portid->lpi_portno)); 2124 2125 return (buf); 2126 } 2127 2128 const char * 2129 lacp_format_partner(const struct lacp_peerinfo *peer, char *buf, size_t buflen) 2130 { 2131 char sysid[LACP_SYSTEMIDSTR_MAX+1]; 2132 char portid[LACP_PORTIDSTR_MAX+1]; 2133 2134 snprintf(buf, buflen, "(%s,%04X,%s)", 2135 lacp_format_systemid(&peer->lip_systemid, sysid, sizeof(sysid)), 2136 ntohs(peer->lip_key), 2137 lacp_format_portid(&peer->lip_portid, portid, sizeof(portid))); 2138 2139 return (buf); 2140 } 2141 2142 const char * 2143 lacp_format_lagid(const struct lacp_peerinfo *a, 2144 const struct lacp_peerinfo *b, char *buf, size_t buflen) 2145 { 2146 char astr[LACP_PARTNERSTR_MAX+1]; 2147 char bstr[LACP_PARTNERSTR_MAX+1]; 2148 2149 #if 0 2150 /* 2151 * there's a convention to display small numbered peer 2152 * in the left. 2153 */ 2154 2155 if (lacp_compare_peerinfo(a, b) > 0) { 2156 const struct lacp_peerinfo *t; 2157 2158 t = a; 2159 a = b; 2160 b = t; 2161 } 2162 #endif 2163 2164 snprintf(buf, buflen, "[%s,%s]", 2165 lacp_format_partner(a, astr, sizeof(astr)), 2166 lacp_format_partner(b, bstr, sizeof(bstr))); 2167 2168 return (buf); 2169 } 2170 2171 const char * 2172 lacp_format_lagid_aggregator(const struct lacp_aggregator *la, 2173 char *buf, size_t buflen) 2174 { 2175 if (la == NULL) { 2176 return ("(none)"); 2177 } 2178 2179 return (lacp_format_lagid(&la->la_actor, &la->la_partner, buf, buflen)); 2180 } 2181 2182 const char * 2183 lacp_format_state(uint8_t state, char *buf, size_t buflen) 2184 { 2185 snprintf(buf, buflen, "%b", state, LACP_STATE_BITS); 2186 return (buf); 2187 } 2188 2189 static void 2190 lacp_dump_lacpdu(const struct lacpdu *du) 2191 { 2192 char buf[LACP_PARTNERSTR_MAX+1]; 2193 char buf2[LACP_STATESTR_MAX+1]; 2194 2195 printf("actor=%s\n", 2196 lacp_format_partner(&du->ldu_actor, buf, sizeof(buf))); 2197 printf("actor.state=%s\n", 2198 lacp_format_state(du->ldu_actor.lip_state, buf2, sizeof(buf2))); 2199 printf("partner=%s\n", 2200 lacp_format_partner(&du->ldu_partner, buf, sizeof(buf))); 2201 printf("partner.state=%s\n", 2202 lacp_format_state(du->ldu_partner.lip_state, buf2, sizeof(buf2))); 2203 2204 printf("maxdelay=%d\n", ntohs(du->ldu_collector.lci_maxdelay)); 2205 } 2206 2207 static void 2208 lacp_dprintf(const struct lacp_port *lp, const char *fmt, ...) 2209 { 2210 va_list va; 2211 2212 if (lp) { 2213 printf("%s: ", lp->lp_ifp->if_xname); 2214 } 2215 2216 va_start(va, fmt); 2217 vprintf(fmt, va); 2218 va_end(va); 2219 } 2220