/* $NetBSD: ieee8023ad_lacp.c,v 1.3 2005/12/11 12:24:54 christos Exp $ */

/*-
 * Copyright (c)2005 YAMAMOTO Takashi,
 * Copyright (c)2008 Andrew Thompson <thompsa@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ratelimit.h"

#include <sys/param.h>
#include <sys/callout.h>
#include <sys/eventhandler.h>
#include <sys/mbuf.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h> /* hz */
#include <sys/socket.h> /* for net/if.h */
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <machine/stdarg.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/taskqueue.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/ethernet.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <net/if_lagg.h>
#include <net/ieee8023ad_lacp.h>

/*
 * actor system priority and port priority.
 * XXX should be configurable.
 */

#define	LACP_SYSTEM_PRIO	0x8000
#define	LACP_PORT_PRIO		0x8000
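
/*
 * Note: as with the other LACP priority fields, a numerically lower value
 * is the more preferred one (see the partner system priority comparison in
 * lacp_select_active_aggregator()); 0x8000 is just a mid-range default.
 */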

const uint8_t ethermulticastaddr_slowprotocols[ETHER_ADDR_LEN] =
    { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x02 };

static const struct tlv_template lacp_info_tlv_template[] = {
	{ LACP_TYPE_ACTORINFO,
	    sizeof(struct tlvhdr) + sizeof(struct lacp_peerinfo) },
	{ LACP_TYPE_PARTNERINFO,
	    sizeof(struct tlvhdr) + sizeof(struct lacp_peerinfo) },
	{ LACP_TYPE_COLLECTORINFO,
	    sizeof(struct tlvhdr) + sizeof(struct lacp_collectorinfo) },
	{ 0, 0 },
};

static const struct tlv_template marker_info_tlv_template[] = {
	{ MARKER_TYPE_INFO,
	    sizeof(struct tlvhdr) + sizeof(struct lacp_markerinfo) },
	{ 0, 0 },
};

static const struct tlv_template marker_response_tlv_template[] = {
	{ MARKER_TYPE_RESPONSE,
	    sizeof(struct tlvhdr) + sizeof(struct lacp_markerinfo) },
	{ 0, 0 },
};

typedef void (*lacp_timer_func_t)(struct lacp_port *);

static void	lacp_fill_actorinfo(struct lacp_port *, struct lacp_peerinfo *);
static void	lacp_fill_markerinfo(struct lacp_port *,
		    struct lacp_markerinfo *);

static uint64_t	lacp_aggregator_bandwidth(struct lacp_aggregator *);
static void	lacp_suppress_distributing(struct lacp_softc *,
		    struct lacp_aggregator *);
static void	lacp_transit_expire(void *);
static void	lacp_update_portmap(struct lacp_softc *);
static void	lacp_select_active_aggregator(struct lacp_softc *);
static uint16_t	lacp_compose_key(struct lacp_port *);
static int	tlv_check(const void *, size_t, const struct tlvhdr *,
		    const struct tlv_template *, boolean_t);
static void	lacp_tick(void *);

static void	lacp_fill_aggregator_id(struct lacp_aggregator *,
		    const struct lacp_port *);
static void	lacp_fill_aggregator_id_peer(struct lacp_peerinfo *,
		    const struct lacp_peerinfo *);
static int	lacp_aggregator_is_compatible(const struct lacp_aggregator *,
		    const struct lacp_port *);
static int	lacp_peerinfo_is_compatible(const struct lacp_peerinfo *,
		    const struct lacp_peerinfo *);

static struct lacp_aggregator *lacp_aggregator_get(struct lacp_softc *,
		    struct lacp_port *);
static void	lacp_aggregator_addref(struct lacp_softc *,
		    struct lacp_aggregator *);
static void	lacp_aggregator_delref(struct lacp_softc *,
		    struct lacp_aggregator *);

/* receive machine */

static int	lacp_pdu_input(struct lacp_port *, struct mbuf *);
static int	lacp_marker_input(struct lacp_port *, struct mbuf *);
static void	lacp_sm_rx(struct lacp_port *, const struct lacpdu *);
static void	lacp_sm_rx_timer(struct lacp_port *);
static void	lacp_sm_rx_set_expired(struct lacp_port *);
static void	lacp_sm_rx_update_ntt(struct lacp_port *,
		    const struct lacpdu *);
static void	lacp_sm_rx_record_pdu(struct lacp_port *,
		    const struct lacpdu *);
static void	lacp_sm_rx_update_selected(struct lacp_port *,
		    const struct lacpdu *);
static void	lacp_sm_rx_record_default(struct lacp_port *);
static void	lacp_sm_rx_update_default_selected(struct lacp_port *);
static void	lacp_sm_rx_update_selected_from_peerinfo(struct lacp_port *,
		    const struct lacp_peerinfo *);

/* mux machine */

static void	lacp_sm_mux(struct lacp_port *);
static void	lacp_set_mux(struct lacp_port *, enum lacp_mux_state);
static void	lacp_sm_mux_timer(struct lacp_port *);

/* periodic transmit machine */

static void	lacp_sm_ptx_update_timeout(struct lacp_port *, uint8_t);
static void	lacp_sm_ptx_tx_schedule(struct lacp_port *);
static void	lacp_sm_ptx_timer(struct lacp_port *);

/* transmit machine */

static void	lacp_sm_tx(struct lacp_port *);
static void	lacp_sm_assert_ntt(struct lacp_port *);

static void	lacp_run_timers(struct lacp_port *);
static int	lacp_compare_peerinfo(const struct lacp_peerinfo *,
		    const struct lacp_peerinfo *);
static int	lacp_compare_systemid(const struct lacp_systemid *,
		    const struct lacp_systemid *);
static void	lacp_port_enable(struct lacp_port *);
static void	lacp_port_disable(struct lacp_port *);
static void	lacp_select(struct lacp_port *);
static void	lacp_unselect(struct lacp_port *);
static void	lacp_disable_collecting(struct lacp_port *);
static void	lacp_enable_collecting(struct lacp_port *);
static void	lacp_disable_distributing(struct lacp_port *);
static void	lacp_enable_distributing(struct lacp_port *);
static int	lacp_xmit_lacpdu(struct lacp_port *);
static int	lacp_xmit_marker(struct lacp_port *);

/* Debugging */

static void	lacp_dump_lacpdu(const struct lacpdu *);
static const char *lacp_format_partner(const struct lacp_peerinfo *, char *,
		    size_t);
static const char *lacp_format_lagid(const struct lacp_peerinfo *,
		    const struct lacp_peerinfo *, char *, size_t);
static const char *lacp_format_lagid_aggregator(const struct lacp_aggregator *,
		    char *, size_t);
static const char *lacp_format_state(uint8_t, char *, size_t);
static const char *lacp_format_mac(const uint8_t *, char *, size_t);
static const char *lacp_format_systemid(const struct lacp_systemid *, char *,
		    size_t);
static const char *lacp_format_portid(const struct lacp_portid *, char *,
		    size_t);
static void	lacp_dprintf(const struct lacp_port *, const char *, ...)
		    __attribute__((__format__(__printf__, 2, 3)));

static VNET_DEFINE(int, lacp_debug);
#define	V_lacp_debug	VNET(lacp_debug)
SYSCTL_NODE(_net_link_lagg, OID_AUTO, lacp, CTLFLAG_RD, 0, "ieee802.3ad");
SYSCTL_INT(_net_link_lagg_lacp, OID_AUTO, debug, CTLFLAG_RWTUN | CTLFLAG_VNET,
    &VNET_NAME(lacp_debug), 0, "Enable LACP debug logging (1=debug, 2=trace)");

static VNET_DEFINE(int, lacp_default_strict_mode) = 1;
SYSCTL_INT(_net_link_lagg_lacp, OID_AUTO, default_strict_mode, CTLFLAG_RWTUN,
    &VNET_NAME(lacp_default_strict_mode), 0,
    "LACP strict protocol compliance default");

#define LACP_DPRINTF(a) if (V_lacp_debug & 0x01) { lacp_dprintf a ; }
#define LACP_TRACE(a) if (V_lacp_debug & 0x02) { lacp_dprintf(a,"%s\n",__func__); }
#define LACP_TPRINTF(a) if (V_lacp_debug & 0x04) { lacp_dprintf a ; }
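
/*
 * For reference, the value is a bit mask matching the macros above and can
 * be changed at run time, e.g.:
 *
 *	sysctl net.link.lagg.lacp.debug=1	(LACP_DPRINTF)
 *	sysctl net.link.lagg.lacp.debug=3	(... plus LACP_TRACE)
 *	sysctl net.link.lagg.lacp.debug=7	(... plus LACP_TPRINTF)
 */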

/*
 * partner administration variables.
 * XXX should be configurable.
 */

static const struct lacp_peerinfo lacp_partner_admin_optimistic = {
	.lip_systemid = { .lsi_prio = 0xffff },
	.lip_portid = { .lpi_prio = 0xffff },
	.lip_state = LACP_STATE_SYNC | LACP_STATE_AGGREGATION |
	    LACP_STATE_COLLECTING | LACP_STATE_DISTRIBUTING,
};

static const struct lacp_peerinfo lacp_partner_admin_strict = {
	.lip_systemid = { .lsi_prio = 0xffff },
	.lip_portid = { .lpi_prio = 0xffff },
	.lip_state = 0,
};

static const lacp_timer_func_t lacp_timer_funcs[LACP_NTIMER] = {
	[LACP_TIMER_CURRENT_WHILE] = lacp_sm_rx_timer,
	[LACP_TIMER_PERIODIC] = lacp_sm_ptx_timer,
	[LACP_TIMER_WAIT_WHILE] = lacp_sm_mux_timer,
};

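/*
 * lacp_input: receive hook called from lagg(4) for each frame seen on a
 * member port.  Slow-protocols LACP and marker PDUs are consumed here
 * (NULL is returned to the caller); any other frame is handed back
 * unchanged for normal input processing.
 */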
struct mbuf *
lacp_input(struct lagg_port *lgp, struct mbuf *m)
{
	struct lacp_port *lp = LACP_PORT(lgp);
	uint8_t subtype;

	if (m->m_pkthdr.len < sizeof(struct ether_header) + sizeof(subtype)) {
		m_freem(m);
		return (NULL);
	}

	m_copydata(m, sizeof(struct ether_header), sizeof(subtype), &subtype);
	switch (subtype) {
	case SLOWPROTOCOLS_SUBTYPE_LACP:
		lacp_pdu_input(lp, m);
		return (NULL);

	case SLOWPROTOCOLS_SUBTYPE_MARKER:
		lacp_marker_input(lp, m);
		return (NULL);
	}

	/* Not a subtype we are interested in */
	return (m);
}

/*
 * lacp_pdu_input: process lacpdu
 */
static int
lacp_pdu_input(struct lacp_port *lp, struct mbuf *m)
{
	struct lacp_softc *lsc = lp->lp_lsc;
	struct lacpdu *du;
	int error = 0;

	if (m->m_pkthdr.len != sizeof(*du)) {
		goto bad;
	}

	if ((m->m_flags & M_MCAST) == 0) {
		goto bad;
	}

	if (m->m_len < sizeof(*du)) {
		m = m_pullup(m, sizeof(*du));
		if (m == NULL) {
			return (ENOMEM);
		}
	}

	du = mtod(m, struct lacpdu *);

	if (memcmp(&du->ldu_eh.ether_dhost,
	    &ethermulticastaddr_slowprotocols, ETHER_ADDR_LEN)) {
		goto bad;
	}

	/*
	 * ignore the version for compatibility with
	 * future protocol revisions.
	 */
#if 0
	if (du->ldu_sph.sph_version != 1) {
		goto bad;
	}
#endif

	/*
	 * ignore tlv types for compatibility with
	 * future protocol revisions.
	 */
	if (tlv_check(du, sizeof(*du), &du->ldu_tlv_actor,
	    lacp_info_tlv_template, FALSE)) {
		goto bad;
	}

	if (V_lacp_debug > 0) {
		lacp_dprintf(lp, "lacpdu receive\n");
		lacp_dump_lacpdu(du);
	}

	if ((1 << lp->lp_ifp->if_dunit) & lp->lp_lsc->lsc_debug.lsc_rx_test) {
		LACP_TPRINTF((lp, "Dropping RX PDU\n"));
		goto bad;
	}

	LACP_LOCK(lsc);
	lacp_sm_rx(lp, du);
	LACP_UNLOCK(lsc);

	m_freem(m);
	return (error);

bad:
	m_freem(m);
	return (EINVAL);
}

static void
lacp_fill_actorinfo(struct lacp_port *lp, struct lacp_peerinfo *info)
{
	struct lagg_port *lgp = lp->lp_lagg;
	struct lagg_softc *sc = lgp->lp_softc;

	info->lip_systemid.lsi_prio = htons(LACP_SYSTEM_PRIO);
	memcpy(&info->lip_systemid.lsi_mac,
	    IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
	info->lip_portid.lpi_prio = htons(LACP_PORT_PRIO);
	info->lip_portid.lpi_portno = htons(lp->lp_ifp->if_index);
	info->lip_state = lp->lp_state;
}

static void
lacp_fill_markerinfo(struct lacp_port *lp, struct lacp_markerinfo *info)
{
	struct ifnet *ifp = lp->lp_ifp;

	/* Fill in the port index and system id (encoded as the MAC) */
	info->mi_rq_port = htons(ifp->if_index);
	memcpy(&info->mi_rq_system, lp->lp_systemid.lsi_mac, ETHER_ADDR_LEN);
	info->mi_rq_xid = htonl(0);
}

static int
lacp_xmit_lacpdu(struct lacp_port *lp)
{
	struct lagg_port *lgp = lp->lp_lagg;
	struct mbuf *m;
	struct lacpdu *du;
	int error;

	LACP_LOCK_ASSERT(lp->lp_lsc);

	m = m_gethdr(M_NOWAIT, MT_DATA);
	if (m == NULL) {
		return (ENOMEM);
	}
	m->m_len = m->m_pkthdr.len = sizeof(*du);

	du = mtod(m, struct lacpdu *);
	memset(du, 0, sizeof(*du));

	memcpy(&du->ldu_eh.ether_dhost, ethermulticastaddr_slowprotocols,
	    ETHER_ADDR_LEN);
	memcpy(&du->ldu_eh.ether_shost, lgp->lp_lladdr, ETHER_ADDR_LEN);
	du->ldu_eh.ether_type = htons(ETHERTYPE_SLOW);

	du->ldu_sph.sph_subtype = SLOWPROTOCOLS_SUBTYPE_LACP;
	du->ldu_sph.sph_version = 1;

	TLV_SET(&du->ldu_tlv_actor, LACP_TYPE_ACTORINFO, sizeof(du->ldu_actor));
	du->ldu_actor = lp->lp_actor;

	TLV_SET(&du->ldu_tlv_partner, LACP_TYPE_PARTNERINFO,
	    sizeof(du->ldu_partner));
	du->ldu_partner = lp->lp_partner;

	TLV_SET(&du->ldu_tlv_collector, LACP_TYPE_COLLECTORINFO,
	    sizeof(du->ldu_collector));
	du->ldu_collector.lci_maxdelay = 0;

	if (V_lacp_debug > 0) {
		lacp_dprintf(lp, "lacpdu transmit\n");
		lacp_dump_lacpdu(du);
	}

	m->m_flags |= M_MCAST;

	/*
	 * XXX should use higher priority queue.
	 * otherwise network congestion can break aggregation.
	 */

	error = lagg_enqueue(lp->lp_ifp, m);
	return (error);
}

static int
lacp_xmit_marker(struct lacp_port *lp)
{
	struct lagg_port *lgp = lp->lp_lagg;
	struct mbuf *m;
	struct markerdu *mdu;
	int error;

	LACP_LOCK_ASSERT(lp->lp_lsc);

	m = m_gethdr(M_NOWAIT, MT_DATA);
	if (m == NULL) {
		return (ENOMEM);
	}
	m->m_len = m->m_pkthdr.len = sizeof(*mdu);

	mdu = mtod(m, struct markerdu *);
	memset(mdu, 0, sizeof(*mdu));

	memcpy(&mdu->mdu_eh.ether_dhost, ethermulticastaddr_slowprotocols,
	    ETHER_ADDR_LEN);
	memcpy(&mdu->mdu_eh.ether_shost, lgp->lp_lladdr, ETHER_ADDR_LEN);
	mdu->mdu_eh.ether_type = htons(ETHERTYPE_SLOW);

	mdu->mdu_sph.sph_subtype = SLOWPROTOCOLS_SUBTYPE_MARKER;
	mdu->mdu_sph.sph_version = 1;

	/* Bump the transaction id and copy over the marker info */
	lp->lp_marker.mi_rq_xid = htonl(ntohl(lp->lp_marker.mi_rq_xid) + 1);
	TLV_SET(&mdu->mdu_tlv, MARKER_TYPE_INFO, sizeof(mdu->mdu_info));
	mdu->mdu_info = lp->lp_marker;

	LACP_DPRINTF((lp, "marker transmit, port=%u, sys=%6D, id=%u\n",
	    ntohs(mdu->mdu_info.mi_rq_port), mdu->mdu_info.mi_rq_system, ":",
	    ntohl(mdu->mdu_info.mi_rq_xid)));

	m->m_flags |= M_MCAST;
	error = lagg_enqueue(lp->lp_ifp, m);
	return (error);
}

void
lacp_linkstate(struct lagg_port *lgp)
{
	struct lacp_port *lp = LACP_PORT(lgp);
	struct lacp_softc *lsc = lp->lp_lsc;
	struct ifnet *ifp = lgp->lp_ifp;
	struct ifmediareq ifmr;
	int error = 0;
	u_int media;
	uint8_t old_state;
	uint16_t old_key;

	bzero((char *)&ifmr, sizeof(ifmr));
	error = (*ifp->if_ioctl)(ifp, SIOCGIFMEDIA, (caddr_t)&ifmr);
	if (error != 0)
		return;

	LACP_LOCK(lsc);
	media = ifmr.ifm_active;
	LACP_DPRINTF((lp, "media changed 0x%x -> 0x%x, ether = %d, fdx = %d, "
	    "link = %d\n", lp->lp_media, media, IFM_TYPE(media) == IFM_ETHER,
	    (media & IFM_FDX) != 0, ifp->if_link_state == LINK_STATE_UP));
	old_state = lp->lp_state;
	old_key = lp->lp_key;

	lp->lp_media = media;
	/*
	 * If the port is not an active full duplex Ethernet link then it can
	 * not be aggregated.
	 */
	if (IFM_TYPE(media) != IFM_ETHER || (media & IFM_FDX) == 0 ||
	    ifp->if_link_state != LINK_STATE_UP) {
		lacp_port_disable(lp);
	} else {
		lacp_port_enable(lp);
	}
	lp->lp_key = lacp_compose_key(lp);

	if (old_state != lp->lp_state || old_key != lp->lp_key) {
		LACP_DPRINTF((lp, "-> UNSELECTED\n"));
		lp->lp_selected = LACP_UNSELECTED;
	}
	LACP_UNLOCK(lsc);
}
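
/*
 * Note: any media or link change above that alters the port's state bits
 * or its admin key drops the port back to LACP_UNSELECTED, so the mux
 * machine detaches it and lacp_select() re-runs aggregator selection on
 * the next tick.
 */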

static void
lacp_tick(void *arg)
{
	struct lacp_softc *lsc = arg;
	struct lacp_port *lp;

	LIST_FOREACH(lp, &lsc->lsc_ports, lp_next) {
		if ((lp->lp_state & LACP_STATE_AGGREGATION) == 0)
			continue;

		CURVNET_SET(lp->lp_ifp->if_vnet);
		lacp_run_timers(lp);

		lacp_select(lp);
		lacp_sm_mux(lp);
		lacp_sm_tx(lp);
		lacp_sm_ptx_tx_schedule(lp);
		CURVNET_RESTORE();
	}
	callout_reset(&lsc->lsc_callout, hz, lacp_tick, lsc);
}
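
/*
 * lacp_tick() reschedules itself every hz ticks, so the receive, mux,
 * transmit and periodic-transmit machines (and all LACP timers run from
 * lacp_run_timers()) operate with one-second resolution.
 */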

int
lacp_port_create(struct lagg_port *lgp)
{
	struct lagg_softc *sc = lgp->lp_softc;
	struct lacp_softc *lsc = LACP_SOFTC(sc);
	struct lacp_port *lp;
	struct ifnet *ifp = lgp->lp_ifp;
	struct sockaddr_dl sdl;
	struct ifmultiaddr *rifma = NULL;
	int error;

	link_init_sdl(ifp, (struct sockaddr *)&sdl, IFT_ETHER);
	sdl.sdl_alen = ETHER_ADDR_LEN;

	bcopy(&ethermulticastaddr_slowprotocols,
	    LLADDR(&sdl), ETHER_ADDR_LEN);
	error = if_addmulti(ifp, (struct sockaddr *)&sdl, &rifma);
	if (error) {
		printf("%s: ADDMULTI failed on %s\n", __func__,
		    lgp->lp_ifp->if_xname);
		return (error);
	}

	lp = malloc(sizeof(struct lacp_port),
	    M_DEVBUF, M_NOWAIT|M_ZERO);
	if (lp == NULL)
		return (ENOMEM);

	LACP_LOCK(lsc);
	lgp->lp_psc = lp;
	lp->lp_ifp = ifp;
	lp->lp_lagg = lgp;
	lp->lp_lsc = lsc;
	lp->lp_ifma = rifma;

	LIST_INSERT_HEAD(&lsc->lsc_ports, lp, lp_next);

	lacp_fill_actorinfo(lp, &lp->lp_actor);
	lacp_fill_markerinfo(lp, &lp->lp_marker);
	lp->lp_state = LACP_STATE_ACTIVITY;
	lp->lp_aggregator = NULL;
	lacp_sm_rx_set_expired(lp);
	LACP_UNLOCK(lsc);
	lacp_linkstate(lgp);

	return (0);
}

void
lacp_port_destroy(struct lagg_port *lgp)
{
	struct lacp_port *lp = LACP_PORT(lgp);
	struct lacp_softc *lsc = lp->lp_lsc;
	int i;

	LACP_LOCK(lsc);
	for (i = 0; i < LACP_NTIMER; i++) {
		LACP_TIMER_DISARM(lp, i);
	}

	lacp_disable_collecting(lp);
	lacp_disable_distributing(lp);
	lacp_unselect(lp);

	LIST_REMOVE(lp, lp_next);
	LACP_UNLOCK(lsc);

	/* The address may have already been removed by if_purgemaddrs() */
	if (!lgp->lp_detaching)
		if_delmulti_ifma(lp->lp_ifma);

	free(lp, M_DEVBUF);
}

void
lacp_req(struct lagg_softc *sc, void *data)
{
	struct lacp_opreq *req = (struct lacp_opreq *)data;
	struct lacp_softc *lsc = LACP_SOFTC(sc);
	struct lacp_aggregator *la;

	bzero(req, sizeof(struct lacp_opreq));

	/*
	 * If the LACP softc is NULL, return with the opreq structure full of
	 * zeros.  It is normal for the softc to be NULL while the lagg is
	 * being destroyed.
	 */
	if (NULL == lsc)
		return;

	la = lsc->lsc_active_aggregator;
	LACP_LOCK(lsc);
	if (la != NULL) {
		req->actor_prio = ntohs(la->la_actor.lip_systemid.lsi_prio);
		memcpy(&req->actor_mac, &la->la_actor.lip_systemid.lsi_mac,
		    ETHER_ADDR_LEN);
		req->actor_key = ntohs(la->la_actor.lip_key);
		req->actor_portprio = ntohs(la->la_actor.lip_portid.lpi_prio);
		req->actor_portno = ntohs(la->la_actor.lip_portid.lpi_portno);
		req->actor_state = la->la_actor.lip_state;

		req->partner_prio = ntohs(la->la_partner.lip_systemid.lsi_prio);
		memcpy(&req->partner_mac, &la->la_partner.lip_systemid.lsi_mac,
		    ETHER_ADDR_LEN);
		req->partner_key = ntohs(la->la_partner.lip_key);
		req->partner_portprio = ntohs(la->la_partner.lip_portid.lpi_prio);
		req->partner_portno = ntohs(la->la_partner.lip_portid.lpi_portno);
		req->partner_state = la->la_partner.lip_state;
	}
	LACP_UNLOCK(lsc);
}

void
lacp_portreq(struct lagg_port *lgp, void *data)
{
	struct lacp_opreq *req = (struct lacp_opreq *)data;
	struct lacp_port *lp = LACP_PORT(lgp);
	struct lacp_softc *lsc = lp->lp_lsc;

	LACP_LOCK(lsc);
	req->actor_prio = ntohs(lp->lp_actor.lip_systemid.lsi_prio);
	memcpy(&req->actor_mac, &lp->lp_actor.lip_systemid.lsi_mac,
	    ETHER_ADDR_LEN);
	req->actor_key = ntohs(lp->lp_actor.lip_key);
	req->actor_portprio = ntohs(lp->lp_actor.lip_portid.lpi_prio);
	req->actor_portno = ntohs(lp->lp_actor.lip_portid.lpi_portno);
	req->actor_state = lp->lp_actor.lip_state;

	req->partner_prio = ntohs(lp->lp_partner.lip_systemid.lsi_prio);
	memcpy(&req->partner_mac, &lp->lp_partner.lip_systemid.lsi_mac,
	    ETHER_ADDR_LEN);
	req->partner_key = ntohs(lp->lp_partner.lip_key);
	req->partner_portprio = ntohs(lp->lp_partner.lip_portid.lpi_prio);
	req->partner_portno = ntohs(lp->lp_partner.lip_portid.lpi_portno);
	req->partner_state = lp->lp_partner.lip_state;
	LACP_UNLOCK(lsc);
}

static void
lacp_disable_collecting(struct lacp_port *lp)
{
	LACP_DPRINTF((lp, "collecting disabled\n"));
	lp->lp_state &= ~LACP_STATE_COLLECTING;
}

static void
lacp_enable_collecting(struct lacp_port *lp)
{
	LACP_DPRINTF((lp, "collecting enabled\n"));
	lp->lp_state |= LACP_STATE_COLLECTING;
}

static void
lacp_disable_distributing(struct lacp_port *lp)
{
	struct lacp_aggregator *la = lp->lp_aggregator;
	struct lacp_softc *lsc = lp->lp_lsc;
	struct lagg_softc *sc = lsc->lsc_softc;
	char buf[LACP_LAGIDSTR_MAX+1];

	LACP_LOCK_ASSERT(lsc);

	if (la == NULL || (lp->lp_state & LACP_STATE_DISTRIBUTING) == 0) {
		return;
	}

	KASSERT(!TAILQ_EMPTY(&la->la_ports), ("no aggregator ports"));
	KASSERT(la->la_nports > 0, ("nports invalid (%d)", la->la_nports));
	KASSERT(la->la_refcnt >= la->la_nports, ("aggregator refcnt invalid"));

	LACP_DPRINTF((lp, "disable distributing on aggregator %s, "
	    "nports %d -> %d\n",
	    lacp_format_lagid_aggregator(la, buf, sizeof(buf)),
	    la->la_nports, la->la_nports - 1));

	TAILQ_REMOVE(&la->la_ports, lp, lp_dist_q);
	la->la_nports--;
	sc->sc_active = la->la_nports;

	if (lsc->lsc_active_aggregator == la) {
		lacp_suppress_distributing(lsc, la);
		lacp_select_active_aggregator(lsc);
		/* regenerate the port map, the active aggregator has changed */
		lacp_update_portmap(lsc);
	}

	lp->lp_state &= ~LACP_STATE_DISTRIBUTING;
}

static void
lacp_enable_distributing(struct lacp_port *lp)
{
	struct lacp_aggregator *la = lp->lp_aggregator;
	struct lacp_softc *lsc = lp->lp_lsc;
	struct lagg_softc *sc = lsc->lsc_softc;
	char buf[LACP_LAGIDSTR_MAX+1];

	LACP_LOCK_ASSERT(lsc);

	if ((lp->lp_state & LACP_STATE_DISTRIBUTING) != 0) {
		return;
	}

	LACP_DPRINTF((lp, "enable distributing on aggregator %s, "
	    "nports %d -> %d\n",
	    lacp_format_lagid_aggregator(la, buf, sizeof(buf)),
	    la->la_nports, la->la_nports + 1));

	KASSERT(la->la_refcnt > la->la_nports, ("aggregator refcnt invalid"));
	TAILQ_INSERT_HEAD(&la->la_ports, lp, lp_dist_q);
	la->la_nports++;
	sc->sc_active = la->la_nports;

	lp->lp_state |= LACP_STATE_DISTRIBUTING;

	if (lsc->lsc_active_aggregator == la) {
		lacp_suppress_distributing(lsc, la);
		lacp_update_portmap(lsc);
	} else
		/* try to become the active aggregator */
		lacp_select_active_aggregator(lsc);
}

static void
lacp_transit_expire(void *vp)
{
	struct lacp_softc *lsc = vp;

	LACP_LOCK_ASSERT(lsc);

	CURVNET_SET(lsc->lsc_softc->sc_ifp->if_vnet);
	LACP_TRACE(NULL);
	CURVNET_RESTORE();

	lsc->lsc_suppress_distributing = FALSE;
}

void
lacp_attach(struct lagg_softc *sc)
{
	struct lacp_softc *lsc;

	lsc = malloc(sizeof(struct lacp_softc), M_DEVBUF, M_WAITOK | M_ZERO);

	sc->sc_psc = lsc;
	lsc->lsc_softc = sc;

	lsc->lsc_hashkey = m_ether_tcpip_hash_init();
	lsc->lsc_active_aggregator = NULL;
	lsc->lsc_strict_mode = VNET(lacp_default_strict_mode);
	LACP_LOCK_INIT(lsc);
	TAILQ_INIT(&lsc->lsc_aggregators);
	LIST_INIT(&lsc->lsc_ports);

	callout_init_mtx(&lsc->lsc_transit_callout, &lsc->lsc_mtx, 0);
	callout_init_mtx(&lsc->lsc_callout, &lsc->lsc_mtx, 0);

	/* if the lagg is already up then do the same */
	if (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING)
		lacp_init(sc);
}

void
lacp_detach(void *psc)
{
	struct lacp_softc *lsc = (struct lacp_softc *)psc;

	KASSERT(TAILQ_EMPTY(&lsc->lsc_aggregators),
	    ("aggregators still active"));
	KASSERT(lsc->lsc_active_aggregator == NULL,
	    ("aggregator still attached"));

	callout_drain(&lsc->lsc_transit_callout);
	callout_drain(&lsc->lsc_callout);

	LACP_LOCK_DESTROY(lsc);
	free(lsc, M_DEVBUF);
}

void
lacp_init(struct lagg_softc *sc)
{
	struct lacp_softc *lsc = LACP_SOFTC(sc);

	LACP_LOCK(lsc);
	callout_reset(&lsc->lsc_callout, hz, lacp_tick, lsc);
	LACP_UNLOCK(lsc);
}

void
lacp_stop(struct lagg_softc *sc)
{
	struct lacp_softc *lsc = LACP_SOFTC(sc);

	LACP_LOCK(lsc);
	callout_stop(&lsc->lsc_transit_callout);
	callout_stop(&lsc->lsc_callout);
	LACP_UNLOCK(lsc);
}

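/*
 * lacp_select_tx_port: pick the member port to transmit on, using either
 * the mbuf flowid or a header hash, taken modulo the number of ports in
 * the active portmap.  lacp_update_portmap() publishes the active map
 * index with atomic_store_rel_int(), so this hot path can read it without
 * taking the LACP lock.
 */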
struct lagg_port *
lacp_select_tx_port(struct lagg_softc *sc, struct mbuf *m)
{
	struct lacp_softc *lsc = LACP_SOFTC(sc);
	struct lacp_portmap *pm;
	struct lacp_port *lp;
	uint32_t hash;

	if (__predict_false(lsc->lsc_suppress_distributing)) {
		LACP_DPRINTF((NULL, "%s: waiting transit\n", __func__));
		return (NULL);
	}

	pm = &lsc->lsc_pmap[lsc->lsc_activemap];
	if (pm->pm_count == 0) {
		LACP_DPRINTF((NULL, "%s: no active aggregator\n", __func__));
		return (NULL);
	}

	if ((sc->sc_opts & LAGG_OPT_USE_FLOWID) &&
	    M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
		hash = m->m_pkthdr.flowid >> sc->flowid_shift;
	else
		hash = m_ether_tcpip_hash(sc->sc_flags, m, lsc->lsc_hashkey);
	hash %= pm->pm_count;
	lp = pm->pm_map[hash];

	KASSERT((lp->lp_state & LACP_STATE_DISTRIBUTING) != 0,
	    ("aggregated port is not distributing"));

	return (lp->lp_lagg);
}

#ifdef RATELIMIT
struct lagg_port *
lacp_select_tx_port_by_hash(struct lagg_softc *sc, uint32_t flowid)
{
	struct lacp_softc *lsc = LACP_SOFTC(sc);
	struct lacp_portmap *pm;
	struct lacp_port *lp;
	uint32_t hash;

	if (__predict_false(lsc->lsc_suppress_distributing)) {
		LACP_DPRINTF((NULL, "%s: waiting transit\n", __func__));
		return (NULL);
	}

	pm = &lsc->lsc_pmap[lsc->lsc_activemap];
	if (pm->pm_count == 0) {
		LACP_DPRINTF((NULL, "%s: no active aggregator\n", __func__));
		return (NULL);
	}

	hash = flowid >> sc->flowid_shift;
	hash %= pm->pm_count;
	lp = pm->pm_map[hash];

	return (lp->lp_lagg);
}
#endif

/*
 * lacp_suppress_distributing: drop transmit packets for a while
 * to preserve packet ordering.
 */

static void
lacp_suppress_distributing(struct lacp_softc *lsc, struct lacp_aggregator *la)
{
	struct lacp_port *lp;

	if (lsc->lsc_active_aggregator != la) {
		return;
	}

	LACP_TRACE(NULL);

	lsc->lsc_suppress_distributing = TRUE;

	/* send a marker frame down each port to verify the queues are empty */
	LIST_FOREACH(lp, &lsc->lsc_ports, lp_next) {
		lp->lp_flags |= LACP_PORT_MARK;
		lacp_xmit_marker(lp);
	}

	/* set a timeout for the marker frames */
	callout_reset(&lsc->lsc_transit_callout,
	    LACP_TRANSIT_DELAY * hz / 1000, lacp_transit_expire, lsc);
}

static int
lacp_compare_peerinfo(const struct lacp_peerinfo *a,
    const struct lacp_peerinfo *b)
{
	return (memcmp(a, b, offsetof(struct lacp_peerinfo, lip_state)));
}

static int
lacp_compare_systemid(const struct lacp_systemid *a,
    const struct lacp_systemid *b)
{
	return (memcmp(a, b, sizeof(*a)));
}

#if 0	/* unused */
static int
lacp_compare_portid(const struct lacp_portid *a,
    const struct lacp_portid *b)
{
	return (memcmp(a, b, sizeof(*a)));
}
#endif

static uint64_t
lacp_aggregator_bandwidth(struct lacp_aggregator *la)
{
	struct lacp_port *lp;
	uint64_t speed;

	lp = TAILQ_FIRST(&la->la_ports);
	if (lp == NULL) {
		return (0);
	}

	speed = ifmedia_baudrate(lp->lp_media);
	speed *= la->la_nports;
	if (speed == 0) {
		LACP_DPRINTF((lp, "speed 0? media=0x%x nports=%d\n",
		    lp->lp_media, la->la_nports));
	}

	return (speed);
}

/*
 * lacp_select_active_aggregator: select an aggregator to be used to transmit
 * packets from lagg(4) interface.
 */

static void
lacp_select_active_aggregator(struct lacp_softc *lsc)
{
	struct lacp_aggregator *la;
	struct lacp_aggregator *best_la = NULL;
	uint64_t best_speed = 0;
	char buf[LACP_LAGIDSTR_MAX+1];

	LACP_TRACE(NULL);

	TAILQ_FOREACH(la, &lsc->lsc_aggregators, la_q) {
		uint64_t speed;

		if (la->la_nports == 0) {
			continue;
		}

		speed = lacp_aggregator_bandwidth(la);
		LACP_DPRINTF((NULL, "%s, speed=%jd, nports=%d\n",
		    lacp_format_lagid_aggregator(la, buf, sizeof(buf)),
		    speed, la->la_nports));

		/*
		 * This aggregator is chosen if the partner has a better
		 * system priority, or the total aggregated speed is higher,
		 * or it is already the chosen aggregator.
		 */
		if ((best_la != NULL && LACP_SYS_PRI(la->la_partner) <
		    LACP_SYS_PRI(best_la->la_partner)) ||
		    speed > best_speed ||
		    (speed == best_speed &&
		    la == lsc->lsc_active_aggregator)) {
			best_la = la;
			best_speed = speed;
		}
	}

	KASSERT(best_la == NULL || best_la->la_nports > 0,
	    ("invalid aggregator refcnt"));
	KASSERT(best_la == NULL || !TAILQ_EMPTY(&best_la->la_ports),
	    ("invalid aggregator list"));

	if (lsc->lsc_active_aggregator != best_la) {
		LACP_DPRINTF((NULL, "active aggregator changed\n"));
		LACP_DPRINTF((NULL, "old %s\n",
		    lacp_format_lagid_aggregator(lsc->lsc_active_aggregator,
		    buf, sizeof(buf))));
	} else {
		LACP_DPRINTF((NULL, "active aggregator not changed\n"));
	}
	LACP_DPRINTF((NULL, "new %s\n",
	    lacp_format_lagid_aggregator(best_la, buf, sizeof(buf))));

	if (lsc->lsc_active_aggregator != best_la) {
		lsc->lsc_active_aggregator = best_la;
		lacp_update_portmap(lsc);
		if (best_la) {
			lacp_suppress_distributing(lsc, best_la);
		}
	}
}

/*
 * Update the inactive portmap array with the new list of ports and
 * make it live.
 */
static void
lacp_update_portmap(struct lacp_softc *lsc)
{
	struct lagg_softc *sc = lsc->lsc_softc;
	struct lacp_aggregator *la;
	struct lacp_portmap *p;
	struct lacp_port *lp;
	uint64_t speed;
	u_int newmap;
	int i;

	newmap = lsc->lsc_activemap == 0 ? 1 : 0;
	p = &lsc->lsc_pmap[newmap];
	la = lsc->lsc_active_aggregator;
	speed = 0;
	bzero(p, sizeof(struct lacp_portmap));

	if (la != NULL && la->la_nports > 0) {
		p->pm_count = la->la_nports;
		i = 0;
		TAILQ_FOREACH(lp, &la->la_ports, lp_dist_q)
			p->pm_map[i++] = lp;
		KASSERT(i == p->pm_count, ("Invalid port count"));
		speed = lacp_aggregator_bandwidth(la);
	}
	sc->sc_ifp->if_baudrate = speed;

	/* switch the active portmap over */
	atomic_store_rel_int(&lsc->lsc_activemap, newmap);
	LACP_DPRINTF((NULL, "Set table %d with %d ports\n",
	    lsc->lsc_activemap,
	    lsc->lsc_pmap[lsc->lsc_activemap].pm_count));
}

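/*
 * lacp_compose_key: derive the actor's admin key.  Only ports with equal
 * keys may share an aggregator, so the key folds in a rough speed class
 * plus part of the parent lagg's if_index; a port that cannot aggregate
 * gets a (mostly) unique key of its own with bit 15 set.
 */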
static uint16_t
lacp_compose_key(struct lacp_port *lp)
{
	struct lagg_port *lgp = lp->lp_lagg;
	struct lagg_softc *sc = lgp->lp_softc;
	u_int media = lp->lp_media;
	uint16_t key;

	if ((lp->lp_state & LACP_STATE_AGGREGATION) == 0) {

		/*
		 * non-aggregatable links should have unique keys.
		 *
		 * XXX this isn't really unique as if_index is 16 bit.
		 */

		/* bit 0..14: (some bits of) if_index of this port */
		key = lp->lp_ifp->if_index;
		/* bit 15: 1 */
		key |= 0x8000;
	} else {
		u_int subtype = IFM_SUBTYPE(media);

		KASSERT(IFM_TYPE(media) == IFM_ETHER, ("invalid media type"));
		KASSERT((media & IFM_FDX) != 0, ("aggregating HDX interface"));

		/* bit 0..4: IFM_SUBTYPE modulo speed */
		switch (subtype) {
		case IFM_10_T:
		case IFM_10_2:
		case IFM_10_5:
		case IFM_10_STP:
		case IFM_10_FL:
			key = IFM_10_T;
			break;
		case IFM_100_TX:
		case IFM_100_FX:
		case IFM_100_T4:
		case IFM_100_VG:
		case IFM_100_T2:
		case IFM_100_T:
			key = IFM_100_TX;
			break;
		case IFM_1000_SX:
		case IFM_1000_LX:
		case IFM_1000_CX:
		case IFM_1000_T:
		case IFM_1000_KX:
		case IFM_1000_SGMII:
		case IFM_1000_CX_SGMII:
			key = IFM_1000_SX;
			break;
		case IFM_10G_LR:
		case IFM_10G_SR:
		case IFM_10G_CX4:
		case IFM_10G_TWINAX:
		case IFM_10G_TWINAX_LONG:
		case IFM_10G_LRM:
		case IFM_10G_T:
		case IFM_10G_KX4:
		case IFM_10G_KR:
		case IFM_10G_CR1:
		case IFM_10G_ER:
		case IFM_10G_SFI:
		case IFM_10G_AOC:
			key = IFM_10G_LR;
			break;
		case IFM_20G_KR2:
			key = IFM_20G_KR2;
			break;
		case IFM_2500_KX:
		case IFM_2500_T:
			key = IFM_2500_KX;
			break;
		case IFM_5000_T:
			key = IFM_5000_T;
			break;
		case IFM_50G_PCIE:
		case IFM_50G_CR2:
		case IFM_50G_KR2:
			key = IFM_50G_PCIE;
			break;
		case IFM_56G_R4:
			key = IFM_56G_R4;
			break;
		case IFM_25G_PCIE:
		case IFM_25G_CR:
		case IFM_25G_KR:
		case IFM_25G_SR:
		case IFM_25G_LR:
		case IFM_25G_ACC:
		case IFM_25G_AOC:
			key = IFM_25G_PCIE;
			break;
		case IFM_40G_CR4:
		case IFM_40G_SR4:
		case IFM_40G_LR4:
		case IFM_40G_XLPPI:
		case IFM_40G_KR4:
			key = IFM_40G_CR4;
			break;
		case IFM_100G_CR4:
		case IFM_100G_SR4:
		case IFM_100G_KR4:
		case IFM_100G_LR4:
			key = IFM_100G_CR4;
			break;
		default:
			key = subtype;
			break;
		}
		/* bit 5..14: (some bits of) if_index of lagg device */
		key |= 0x7fe0 & ((sc->sc_ifp->if_index) << 5);
		/* bit 15: 0 */
	}
	return (htons(key));
}

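/*
 * Aggregator reference counting: la_refcnt counts the ports attached to
 * this aggregator (the creating port holds the initial reference taken in
 * lacp_aggregator_get(), later ports add one via lacp_aggregator_addref()),
 * and lacp_aggregator_delref() unlinks and frees the aggregator when the
 * last reference is dropped.  la_nports, by contrast, only counts ports
 * that are currently distributing.
 */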
static void
lacp_aggregator_addref(struct lacp_softc *lsc, struct lacp_aggregator *la)
{
	char buf[LACP_LAGIDSTR_MAX+1];

	LACP_DPRINTF((NULL, "%s: lagid=%s, refcnt %d -> %d\n",
	    __func__,
	    lacp_format_lagid(&la->la_actor, &la->la_partner,
	    buf, sizeof(buf)),
	    la->la_refcnt, la->la_refcnt + 1));

	KASSERT(la->la_refcnt > 0, ("refcount <= 0"));
	la->la_refcnt++;
	KASSERT(la->la_refcnt > la->la_nports, ("invalid refcount"));
}

static void
lacp_aggregator_delref(struct lacp_softc *lsc, struct lacp_aggregator *la)
{
	char buf[LACP_LAGIDSTR_MAX+1];

	LACP_DPRINTF((NULL, "%s: lagid=%s, refcnt %d -> %d\n",
	    __func__,
	    lacp_format_lagid(&la->la_actor, &la->la_partner,
	    buf, sizeof(buf)),
	    la->la_refcnt, la->la_refcnt - 1));

	KASSERT(la->la_refcnt > la->la_nports, ("invalid refcnt"));
	la->la_refcnt--;
	if (la->la_refcnt > 0) {
		return;
	}

	KASSERT(la->la_refcnt == 0, ("refcount not zero"));
	KASSERT(lsc->lsc_active_aggregator != la, ("aggregator active"));

	TAILQ_REMOVE(&lsc->lsc_aggregators, la, la_q);

	free(la, M_DEVBUF);
}

/*
 * lacp_aggregator_get: allocate an aggregator.
 */

static struct lacp_aggregator *
lacp_aggregator_get(struct lacp_softc *lsc, struct lacp_port *lp)
{
	struct lacp_aggregator *la;

	la = malloc(sizeof(*la), M_DEVBUF, M_NOWAIT);
	if (la) {
		la->la_refcnt = 1;
		la->la_nports = 0;
		TAILQ_INIT(&la->la_ports);
		la->la_pending = 0;
		TAILQ_INSERT_TAIL(&lsc->lsc_aggregators, la, la_q);
	}

	return (la);
}

/*
 * lacp_fill_aggregator_id: setup a newly allocated aggregator from a port.
 */

static void
lacp_fill_aggregator_id(struct lacp_aggregator *la, const struct lacp_port *lp)
{
	lacp_fill_aggregator_id_peer(&la->la_partner, &lp->lp_partner);
	lacp_fill_aggregator_id_peer(&la->la_actor, &lp->lp_actor);

	la->la_actor.lip_state = lp->lp_state & LACP_STATE_AGGREGATION;
}

static void
lacp_fill_aggregator_id_peer(struct lacp_peerinfo *lpi_aggr,
    const struct lacp_peerinfo *lpi_port)
{
	memset(lpi_aggr, 0, sizeof(*lpi_aggr));
	lpi_aggr->lip_systemid = lpi_port->lip_systemid;
	lpi_aggr->lip_key = lpi_port->lip_key;
}

/*
 * lacp_aggregator_is_compatible: check if a port can join to an aggregator.
 */

static int
lacp_aggregator_is_compatible(const struct lacp_aggregator *la,
    const struct lacp_port *lp)
{
	if (!(lp->lp_state & LACP_STATE_AGGREGATION) ||
	    !(lp->lp_partner.lip_state & LACP_STATE_AGGREGATION)) {
		return (0);
	}

	if (!(la->la_actor.lip_state & LACP_STATE_AGGREGATION)) {
		return (0);
	}

	if (!lacp_peerinfo_is_compatible(&la->la_partner, &lp->lp_partner)) {
		return (0);
	}

	if (!lacp_peerinfo_is_compatible(&la->la_actor, &lp->lp_actor)) {
		return (0);
	}

	return (1);
}

static int
lacp_peerinfo_is_compatible(const struct lacp_peerinfo *a,
    const struct lacp_peerinfo *b)
{
	if (memcmp(&a->lip_systemid, &b->lip_systemid,
	    sizeof(a->lip_systemid))) {
		return (0);
	}

	if (memcmp(&a->lip_key, &b->lip_key, sizeof(a->lip_key))) {
		return (0);
	}

	return (1);
}

static void
lacp_port_enable(struct lacp_port *lp)
{
	lp->lp_state |= LACP_STATE_AGGREGATION;
}

static void
lacp_port_disable(struct lacp_port *lp)
{
	lacp_set_mux(lp, LACP_MUX_DETACHED);

	lp->lp_state &= ~LACP_STATE_AGGREGATION;
	lp->lp_selected = LACP_UNSELECTED;
	lacp_sm_rx_record_default(lp);
	lp->lp_partner.lip_state &= ~LACP_STATE_AGGREGATION;
	lp->lp_state &= ~LACP_STATE_EXPIRED;
}

/*
 * lacp_select: select an aggregator.  create one if necessary.
 */
static void
lacp_select(struct lacp_port *lp)
{
	struct lacp_softc *lsc = lp->lp_lsc;
	struct lacp_aggregator *la;
	char buf[LACP_LAGIDSTR_MAX+1];

	if (lp->lp_aggregator) {
		return;
	}

	/* If we haven't heard from our peer, skip this step. */
	if (lp->lp_state & LACP_STATE_DEFAULTED)
		return;

	KASSERT(!LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE),
	    ("timer_wait_while still active"));

	LACP_DPRINTF((lp, "port lagid=%s\n",
	    lacp_format_lagid(&lp->lp_actor, &lp->lp_partner,
	    buf, sizeof(buf))));

	TAILQ_FOREACH(la, &lsc->lsc_aggregators, la_q) {
		if (lacp_aggregator_is_compatible(la, lp)) {
			break;
		}
	}

	if (la == NULL) {
		la = lacp_aggregator_get(lsc, lp);
		if (la == NULL) {
			LACP_DPRINTF((lp, "aggregator creation failed\n"));

			/*
			 * will retry on the next tick.
			 */

			return;
		}
		lacp_fill_aggregator_id(la, lp);
		LACP_DPRINTF((lp, "aggregator created\n"));
	} else {
		LACP_DPRINTF((lp, "compatible aggregator found\n"));
		if (la->la_refcnt == LACP_MAX_PORTS)
			return;
		lacp_aggregator_addref(lsc, la);
	}

	LACP_DPRINTF((lp, "aggregator lagid=%s\n",
	    lacp_format_lagid(&la->la_actor, &la->la_partner,
	    buf, sizeof(buf))));

	lp->lp_aggregator = la;
	lp->lp_selected = LACP_SELECTED;
}

/*
 * lacp_unselect: finish unselect/detach process.
 */

static void
lacp_unselect(struct lacp_port *lp)
{
	struct lacp_softc *lsc = lp->lp_lsc;
	struct lacp_aggregator *la = lp->lp_aggregator;

	KASSERT(!LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE),
	    ("timer_wait_while still active"));

	if (la == NULL) {
		return;
	}

	lp->lp_aggregator = NULL;
	lacp_aggregator_delref(lsc, la);
}

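/*
 * Mux machine overview: a port moves DETACHED -> WAITING -> ATTACHED ->
 * COLLECTING -> DISTRIBUTING as it becomes selected, the aggregate-wait
 * timer expires and the partner reports SYNC and then COLLECTING; losing
 * any of those conditions walks it back down.  Only DISTRIBUTING ports are
 * placed in the portmap used for transmit.
 */
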
/* mux machine */

static void
lacp_sm_mux(struct lacp_port *lp)
{
	struct lagg_port *lgp = lp->lp_lagg;
	struct lagg_softc *sc = lgp->lp_softc;
	enum lacp_mux_state new_state;
	boolean_t p_sync =
	    (lp->lp_partner.lip_state & LACP_STATE_SYNC) != 0;
	boolean_t p_collecting =
	    (lp->lp_partner.lip_state & LACP_STATE_COLLECTING) != 0;
	enum lacp_selected selected = lp->lp_selected;
	struct lacp_aggregator *la;

	if (V_lacp_debug > 1)
		lacp_dprintf(lp, "%s: state= 0x%x, selected= 0x%x, "
		    "p_sync= 0x%x, p_collecting= 0x%x\n", __func__,
		    lp->lp_mux_state, selected, p_sync, p_collecting);

re_eval:
	la = lp->lp_aggregator;
	KASSERT(lp->lp_mux_state == LACP_MUX_DETACHED || la != NULL,
	    ("MUX not detached"));
	new_state = lp->lp_mux_state;
	switch (lp->lp_mux_state) {
	case LACP_MUX_DETACHED:
		if (selected != LACP_UNSELECTED) {
			new_state = LACP_MUX_WAITING;
		}
		break;
	case LACP_MUX_WAITING:
		KASSERT(la->la_pending > 0 ||
		    !LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE),
		    ("timer_wait_while still active"));
		if (selected == LACP_SELECTED && la->la_pending == 0) {
			new_state = LACP_MUX_ATTACHED;
		} else if (selected == LACP_UNSELECTED) {
			new_state = LACP_MUX_DETACHED;
		}
		break;
	case LACP_MUX_ATTACHED:
		if (selected == LACP_SELECTED && p_sync) {
			new_state = LACP_MUX_COLLECTING;
		} else if (selected != LACP_SELECTED) {
			new_state = LACP_MUX_DETACHED;
		}
		break;
	case LACP_MUX_COLLECTING:
		if (selected == LACP_SELECTED && p_sync && p_collecting) {
			new_state = LACP_MUX_DISTRIBUTING;
		} else if (selected != LACP_SELECTED || !p_sync) {
			new_state = LACP_MUX_ATTACHED;
		}
		break;
	case LACP_MUX_DISTRIBUTING:
		if (selected != LACP_SELECTED || !p_sync || !p_collecting) {
			new_state = LACP_MUX_COLLECTING;
			lacp_dprintf(lp, "Interface stopped DISTRIBUTING, possible flapping\n");
			sc->sc_flapping++;
		}
		break;
	default:
		panic("%s: unknown state", __func__);
	}

	if (lp->lp_mux_state == new_state) {
		return;
	}

	lacp_set_mux(lp, new_state);
	goto re_eval;
}

static void
lacp_set_mux(struct lacp_port *lp, enum lacp_mux_state new_state)
{
	struct lacp_aggregator *la = lp->lp_aggregator;

	if (lp->lp_mux_state == new_state) {
		return;
	}

	switch (new_state) {
	case LACP_MUX_DETACHED:
		lp->lp_state &= ~LACP_STATE_SYNC;
		lacp_disable_distributing(lp);
		lacp_disable_collecting(lp);
		lacp_sm_assert_ntt(lp);
		/* cancel timer */
		if (LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE)) {
			KASSERT(la->la_pending > 0,
			    ("timer_wait_while not active"));
			la->la_pending--;
		}
		LACP_TIMER_DISARM(lp, LACP_TIMER_WAIT_WHILE);
		lacp_unselect(lp);
		break;
	case LACP_MUX_WAITING:
		LACP_TIMER_ARM(lp, LACP_TIMER_WAIT_WHILE,
		    LACP_AGGREGATE_WAIT_TIME);
		la->la_pending++;
		break;
	case LACP_MUX_ATTACHED:
		lp->lp_state |= LACP_STATE_SYNC;
		lacp_disable_collecting(lp);
		lacp_sm_assert_ntt(lp);
		break;
	case LACP_MUX_COLLECTING:
		lacp_enable_collecting(lp);
		lacp_disable_distributing(lp);
		lacp_sm_assert_ntt(lp);
		break;
	case LACP_MUX_DISTRIBUTING:
		lacp_enable_distributing(lp);
		break;
	default:
		panic("%s: unknown state", __func__);
	}

	LACP_DPRINTF((lp, "mux_state %d -> %d\n", lp->lp_mux_state, new_state));

	lp->lp_mux_state = new_state;
}

static void
lacp_sm_mux_timer(struct lacp_port *lp)
{
	struct lacp_aggregator *la = lp->lp_aggregator;
	char buf[LACP_LAGIDSTR_MAX+1];

	KASSERT(la->la_pending > 0, ("no pending event"));

	LACP_DPRINTF((lp, "%s: aggregator %s, pending %d -> %d\n", __func__,
	    lacp_format_lagid(&la->la_actor, &la->la_partner,
	    buf, sizeof(buf)),
	    la->la_pending, la->la_pending - 1));

	la->la_pending--;
}

/* periodic transmit machine */

static void
lacp_sm_ptx_update_timeout(struct lacp_port *lp, uint8_t oldpstate)
{
	if (LACP_STATE_EQ(oldpstate, lp->lp_partner.lip_state,
	    LACP_STATE_TIMEOUT)) {
		return;
	}

	LACP_DPRINTF((lp, "partner timeout changed\n"));

	/*
	 * FAST_PERIODIC -> SLOW_PERIODIC
	 * or
	 * SLOW_PERIODIC (-> PERIODIC_TX) -> FAST_PERIODIC
	 *
	 * let lacp_sm_ptx_tx_schedule update the timeout.
	 */

	LACP_TIMER_DISARM(lp, LACP_TIMER_PERIODIC);

	/*
	 * if timeout has been shortened, assert NTT.
	 */

	if ((lp->lp_partner.lip_state & LACP_STATE_TIMEOUT)) {
		lacp_sm_assert_ntt(lp);
	}
}

static void
lacp_sm_ptx_tx_schedule(struct lacp_port *lp)
{
	int timeout;

	if (!(lp->lp_state & LACP_STATE_ACTIVITY) &&
	    !(lp->lp_partner.lip_state & LACP_STATE_ACTIVITY)) {

		/*
		 * NO_PERIODIC
		 */

		LACP_TIMER_DISARM(lp, LACP_TIMER_PERIODIC);
		return;
	}

	if (LACP_TIMER_ISARMED(lp, LACP_TIMER_PERIODIC)) {
		return;
	}

	timeout = (lp->lp_partner.lip_state & LACP_STATE_TIMEOUT) ?
	    LACP_FAST_PERIODIC_TIME : LACP_SLOW_PERIODIC_TIME;

	LACP_TIMER_ARM(lp, LACP_TIMER_PERIODIC, timeout);
}

static void
lacp_sm_ptx_timer(struct lacp_port *lp)
{
	lacp_sm_assert_ntt(lp);
}

static void
lacp_sm_rx(struct lacp_port *lp, const struct lacpdu *du)
{
	int timeout;

	/*
	 * check LACP_DISABLED first
	 */

	if (!(lp->lp_state & LACP_STATE_AGGREGATION)) {
		return;
	}

	/*
	 * check loopback condition.
	 */

	if (!lacp_compare_systemid(&du->ldu_actor.lip_systemid,
	    &lp->lp_actor.lip_systemid)) {
		return;
	}

	/*
	 * EXPIRED, DEFAULTED, CURRENT -> CURRENT
	 */

	lacp_sm_rx_update_selected(lp, du);
	lacp_sm_rx_update_ntt(lp, du);
	lacp_sm_rx_record_pdu(lp, du);

	timeout = (lp->lp_state & LACP_STATE_TIMEOUT) ?
	    LACP_SHORT_TIMEOUT_TIME : LACP_LONG_TIMEOUT_TIME;
	LACP_TIMER_ARM(lp, LACP_TIMER_CURRENT_WHILE, timeout);

	lp->lp_state &= ~LACP_STATE_EXPIRED;

	/*
	 * kick transmit machine without waiting for the next tick.
	 */

	lacp_sm_tx(lp);
}

static void
lacp_sm_rx_set_expired(struct lacp_port *lp)
{
	lp->lp_partner.lip_state &= ~LACP_STATE_SYNC;
	lp->lp_partner.lip_state |= LACP_STATE_TIMEOUT;
	LACP_TIMER_ARM(lp, LACP_TIMER_CURRENT_WHILE, LACP_SHORT_TIMEOUT_TIME);
	lp->lp_state |= LACP_STATE_EXPIRED;
}

static void
lacp_sm_rx_timer(struct lacp_port *lp)
{
	if ((lp->lp_state & LACP_STATE_EXPIRED) == 0) {
		/* CURRENT -> EXPIRED */
		LACP_DPRINTF((lp, "%s: CURRENT -> EXPIRED\n", __func__));
		lacp_sm_rx_set_expired(lp);
	} else {
		/* EXPIRED -> DEFAULTED */
		LACP_DPRINTF((lp, "%s: EXPIRED -> DEFAULTED\n", __func__));
		lacp_sm_rx_update_default_selected(lp);
		lacp_sm_rx_record_default(lp);
		lp->lp_state &= ~LACP_STATE_EXPIRED;
	}
}

static void
lacp_sm_rx_record_pdu(struct lacp_port *lp, const struct lacpdu *du)
{
	boolean_t active;
	uint8_t oldpstate;
	char buf[LACP_STATESTR_MAX+1];

	LACP_TRACE(lp);

	oldpstate = lp->lp_partner.lip_state;

	active = (du->ldu_actor.lip_state & LACP_STATE_ACTIVITY)
	    || ((lp->lp_state & LACP_STATE_ACTIVITY) &&
	    (du->ldu_partner.lip_state & LACP_STATE_ACTIVITY));

	lp->lp_partner = du->ldu_actor;
	if (active &&
	    ((LACP_STATE_EQ(lp->lp_state, du->ldu_partner.lip_state,
	    LACP_STATE_AGGREGATION) &&
	    !lacp_compare_peerinfo(&lp->lp_actor, &du->ldu_partner))
	    || (du->ldu_partner.lip_state & LACP_STATE_AGGREGATION) == 0)) {
		/*
		 * XXX Maintain legacy behavior of leaving the
		 * LACP_STATE_SYNC bit unchanged from the partner's
		 * advertisement if lsc_strict_mode is false.
		 * TODO: We should re-examine the concept of the "strict mode"
		 * to ensure it makes sense to maintain a non-strict mode.
		 */
		if (lp->lp_lsc->lsc_strict_mode)
			lp->lp_partner.lip_state |= LACP_STATE_SYNC;
	} else {
		lp->lp_partner.lip_state &= ~LACP_STATE_SYNC;
	}

	lp->lp_state &= ~LACP_STATE_DEFAULTED;

	if (oldpstate != lp->lp_partner.lip_state) {
		LACP_DPRINTF((lp, "old pstate %s\n",
		    lacp_format_state(oldpstate, buf, sizeof(buf))));
		LACP_DPRINTF((lp, "new pstate %s\n",
		    lacp_format_state(lp->lp_partner.lip_state, buf,
		    sizeof(buf))));
	}

	lacp_sm_ptx_update_timeout(lp, oldpstate);
}

static void
lacp_sm_rx_update_ntt(struct lacp_port *lp, const struct lacpdu *du)
{

	LACP_TRACE(lp);

	if (lacp_compare_peerinfo(&lp->lp_actor, &du->ldu_partner) ||
	    !LACP_STATE_EQ(lp->lp_state, du->ldu_partner.lip_state,
	    LACP_STATE_ACTIVITY | LACP_STATE_SYNC | LACP_STATE_AGGREGATION)) {
		LACP_DPRINTF((lp, "%s: assert ntt\n", __func__));
		lacp_sm_assert_ntt(lp);
	}
}

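/*
 * lacp_sm_rx_record_default: fall back to the administratively configured
 * partner once the real partner has timed out.  The optimistic default
 * (non-strict mode) pretends a fully synced, collecting and distributing
 * partner so the port keeps carrying traffic on its own; the strict-mode
 * default advertises no state bits, keeping the port down until a real
 * LACP partner appears.
 */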
static void
lacp_sm_rx_record_default(struct lacp_port *lp)
{
	uint8_t oldpstate;

	LACP_TRACE(lp);

	oldpstate = lp->lp_partner.lip_state;
	if (lp->lp_lsc->lsc_strict_mode)
		lp->lp_partner = lacp_partner_admin_strict;
	else
		lp->lp_partner = lacp_partner_admin_optimistic;
	lp->lp_state |= LACP_STATE_DEFAULTED;
	lacp_sm_ptx_update_timeout(lp, oldpstate);
}

static void
lacp_sm_rx_update_selected_from_peerinfo(struct lacp_port *lp,
    const struct lacp_peerinfo *info)
{

	LACP_TRACE(lp);

	if (lacp_compare_peerinfo(&lp->lp_partner, info) ||
	    !LACP_STATE_EQ(lp->lp_partner.lip_state, info->lip_state,
	    LACP_STATE_AGGREGATION)) {
		lp->lp_selected = LACP_UNSELECTED;
		/* mux machine will clean up lp->lp_aggregator */
	}
}

static void
lacp_sm_rx_update_selected(struct lacp_port *lp, const struct lacpdu *du)
{

	LACP_TRACE(lp);

	lacp_sm_rx_update_selected_from_peerinfo(lp, &du->ldu_actor);
}

static void
lacp_sm_rx_update_default_selected(struct lacp_port *lp)
{

	LACP_TRACE(lp);

	if (lp->lp_lsc->lsc_strict_mode)
		lacp_sm_rx_update_selected_from_peerinfo(lp,
		    &lacp_partner_admin_strict);
	else
		lacp_sm_rx_update_selected_from_peerinfo(lp,
		    &lacp_partner_admin_optimistic);
}

/* transmit machine */

static void
lacp_sm_tx(struct lacp_port *lp)
{
	int error = 0;

	if (!(lp->lp_state & LACP_STATE_AGGREGATION)
#if 1
	    || (!(lp->lp_state & LACP_STATE_ACTIVITY)
	    && !(lp->lp_partner.lip_state & LACP_STATE_ACTIVITY))
#endif
	    ) {
		lp->lp_flags &= ~LACP_PORT_NTT;
	}

	if (!(lp->lp_flags & LACP_PORT_NTT)) {
		return;
	}

	/* Rate limit to 3 PDUs per LACP_FAST_PERIODIC_TIME */
	if (ppsratecheck(&lp->lp_last_lacpdu, &lp->lp_lacpdu_sent,
	    (3 / LACP_FAST_PERIODIC_TIME)) == 0) {
		LACP_DPRINTF((lp, "rate limited pdu\n"));
		return;
	}

	if (((1 << lp->lp_ifp->if_dunit) & lp->lp_lsc->lsc_debug.lsc_tx_test) == 0) {
		error = lacp_xmit_lacpdu(lp);
	} else {
		LACP_TPRINTF((lp, "Dropping TX PDU\n"));
	}

	if (error == 0) {
		lp->lp_flags &= ~LACP_PORT_NTT;
	} else {
		LACP_DPRINTF((lp, "lacpdu transmit failure, error %d\n",
		    error));
	}
}

static void
lacp_sm_assert_ntt(struct lacp_port *lp)
{

	lp->lp_flags |= LACP_PORT_NTT;
}

static void
lacp_run_timers(struct lacp_port *lp)
{
	int i;

	for (i = 0; i < LACP_NTIMER; i++) {
		KASSERT(lp->lp_timer[i] >= 0,
		    ("invalid timer value %d", lp->lp_timer[i]));
		if (lp->lp_timer[i] == 0) {
			continue;
		} else if (--lp->lp_timer[i] <= 0) {
			if (lacp_timer_funcs[i]) {
				(*lacp_timer_funcs[i])(lp);
			}
		}
	}
}

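/*
 * lacp_marker_input: handle the marker protocol.  Marker INFO requests
 * from the peer are echoed back as RESPONSEs; a RESPONSE is matched
 * against the last marker sent by lacp_xmit_marker(), and once every port
 * flagged LACP_PORT_MARK has answered, the transmit suppression set up by
 * lacp_suppress_distributing() is lifted early (otherwise the transit
 * callout clears it after LACP_TRANSIT_DELAY).
 */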
int
lacp_marker_input(struct lacp_port *lp, struct mbuf *m)
{
	struct lacp_softc *lsc = lp->lp_lsc;
	struct lagg_port *lgp = lp->lp_lagg;
	struct lacp_port *lp2;
	struct markerdu *mdu;
	int error = 0;
	int pending = 0;

	if (m->m_pkthdr.len != sizeof(*mdu)) {
		goto bad;
	}

	if ((m->m_flags & M_MCAST) == 0) {
		goto bad;
	}

	if (m->m_len < sizeof(*mdu)) {
		m = m_pullup(m, sizeof(*mdu));
		if (m == NULL) {
			return (ENOMEM);
		}
	}

	mdu = mtod(m, struct markerdu *);

	if (memcmp(&mdu->mdu_eh.ether_dhost,
	    &ethermulticastaddr_slowprotocols, ETHER_ADDR_LEN)) {
		goto bad;
	}

	if (mdu->mdu_sph.sph_version != 1) {
		goto bad;
	}

	switch (mdu->mdu_tlv.tlv_type) {
	case MARKER_TYPE_INFO:
		if (tlv_check(mdu, sizeof(*mdu), &mdu->mdu_tlv,
		    marker_info_tlv_template, TRUE)) {
			goto bad;
		}
		mdu->mdu_tlv.tlv_type = MARKER_TYPE_RESPONSE;
		memcpy(&mdu->mdu_eh.ether_dhost,
		    &ethermulticastaddr_slowprotocols, ETHER_ADDR_LEN);
		memcpy(&mdu->mdu_eh.ether_shost,
		    lgp->lp_lladdr, ETHER_ADDR_LEN);
		error = lagg_enqueue(lp->lp_ifp, m);
		break;

	case MARKER_TYPE_RESPONSE:
		if (tlv_check(mdu, sizeof(*mdu), &mdu->mdu_tlv,
		    marker_response_tlv_template, TRUE)) {
			goto bad;
		}
		LACP_DPRINTF((lp, "marker response, port=%u, sys=%6D, id=%u\n",
		    ntohs(mdu->mdu_info.mi_rq_port), mdu->mdu_info.mi_rq_system,
		    ":", ntohl(mdu->mdu_info.mi_rq_xid)));

		/* Verify that it is the last marker we sent out */
		if (memcmp(&mdu->mdu_info, &lp->lp_marker,
		    sizeof(struct lacp_markerinfo)))
			goto bad;

		LACP_LOCK(lsc);
		lp->lp_flags &= ~LACP_PORT_MARK;

		if (lsc->lsc_suppress_distributing) {
			/* Check if any ports are waiting for a response */
			LIST_FOREACH(lp2, &lsc->lsc_ports, lp_next) {
				if (lp2->lp_flags & LACP_PORT_MARK) {
					pending = 1;
					break;
				}
			}

			if (pending == 0) {
				/* All interface queues are clear */
				LACP_DPRINTF((NULL, "queue flush complete\n"));
				lsc->lsc_suppress_distributing = FALSE;
			}
		}
		LACP_UNLOCK(lsc);
		m_freem(m);
		break;

	default:
		goto bad;
	}

	return (error);

bad:
	LACP_DPRINTF((lp, "bad marker frame\n"));
	m_freem(m);
	return (EINVAL);
}

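/*
 * tlv_check: walk a TLV chain against a fixed template.  LACPDUs and
 * marker PDUs have a fixed layout, so each TLV must match the expected
 * length (and, when check_type is TRUE, the expected type) and must not
 * run past the end of the buffer; the walk stops at the all-zero
 * terminator entry.
 */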
static int
tlv_check(const void *p, size_t size, const struct tlvhdr *tlv,
    const struct tlv_template *tmpl, boolean_t check_type)
{
	while (/* CONSTCOND */ 1) {
		if ((const char *)tlv - (const char *)p + sizeof(*tlv) > size) {
			return (EINVAL);
		}
		if ((check_type && tlv->tlv_type != tmpl->tmpl_type) ||
		    tlv->tlv_length != tmpl->tmpl_length) {
			return (EINVAL);
		}
		if (tmpl->tmpl_type == 0) {
			break;
		}
		tlv = (const struct tlvhdr *)
		    ((const char *)tlv + tlv->tlv_length);
		tmpl++;
	}

	return (0);
}

/* Debugging */
const char *
lacp_format_mac(const uint8_t *mac, char *buf, size_t buflen)
{
	snprintf(buf, buflen, "%02X-%02X-%02X-%02X-%02X-%02X",
	    (int)mac[0],
	    (int)mac[1],
	    (int)mac[2],
	    (int)mac[3],
	    (int)mac[4],
	    (int)mac[5]);

	return (buf);
}

const char *
lacp_format_systemid(const struct lacp_systemid *sysid,
    char *buf, size_t buflen)
{
	char macbuf[LACP_MACSTR_MAX+1];

	snprintf(buf, buflen, "%04X,%s",
	    ntohs(sysid->lsi_prio),
	    lacp_format_mac(sysid->lsi_mac, macbuf, sizeof(macbuf)));

	return (buf);
}

const char *
lacp_format_portid(const struct lacp_portid *portid, char *buf, size_t buflen)
{
	snprintf(buf, buflen, "%04X,%04X",
	    ntohs(portid->lpi_prio),
	    ntohs(portid->lpi_portno));

	return (buf);
}

const char *
lacp_format_partner(const struct lacp_peerinfo *peer, char *buf, size_t buflen)
{
	char sysid[LACP_SYSTEMIDSTR_MAX+1];
	char portid[LACP_PORTIDSTR_MAX+1];

	snprintf(buf, buflen, "(%s,%04X,%s)",
	    lacp_format_systemid(&peer->lip_systemid, sysid, sizeof(sysid)),
	    ntohs(peer->lip_key),
	    lacp_format_portid(&peer->lip_portid, portid, sizeof(portid)));

	return (buf);
}

const char *
lacp_format_lagid(const struct lacp_peerinfo *a,
    const struct lacp_peerinfo *b, char *buf, size_t buflen)
{
	char astr[LACP_PARTNERSTR_MAX+1];
	char bstr[LACP_PARTNERSTR_MAX+1];

#if 0
	/*
	 * there's a convention to display the lower-numbered peer
	 * on the left.
	 */

	if (lacp_compare_peerinfo(a, b) > 0) {
		const struct lacp_peerinfo *t;

		t = a;
		a = b;
		b = t;
	}
#endif

	snprintf(buf, buflen, "[%s,%s]",
	    lacp_format_partner(a, astr, sizeof(astr)),
	    lacp_format_partner(b, bstr, sizeof(bstr)));

	return (buf);
}

const char *
lacp_format_lagid_aggregator(const struct lacp_aggregator *la,
    char *buf, size_t buflen)
{
	if (la == NULL) {
		return ("(none)");
	}

	return (lacp_format_lagid(&la->la_actor, &la->la_partner, buf, buflen));
}

const char *
lacp_format_state(uint8_t state, char *buf, size_t buflen)
{
	snprintf(buf, buflen, "%b", state, LACP_STATE_BITS);
	return (buf);
}

static void
lacp_dump_lacpdu(const struct lacpdu *du)
{
	char buf[LACP_PARTNERSTR_MAX+1];
	char buf2[LACP_STATESTR_MAX+1];

	printf("actor=%s\n",
	    lacp_format_partner(&du->ldu_actor, buf, sizeof(buf)));
	printf("actor.state=%s\n",
	    lacp_format_state(du->ldu_actor.lip_state, buf2, sizeof(buf2)));
	printf("partner=%s\n",
	    lacp_format_partner(&du->ldu_partner, buf, sizeof(buf)));
	printf("partner.state=%s\n",
	    lacp_format_state(du->ldu_partner.lip_state, buf2, sizeof(buf2)));

	printf("maxdelay=%d\n", ntohs(du->ldu_collector.lci_maxdelay));
}

static void
lacp_dprintf(const struct lacp_port *lp, const char *fmt, ...)
{
	va_list va;

	if (lp) {
		printf("%s: ", lp->lp_ifp->if_xname);
	}

	va_start(va, fmt);
	vprintf(fmt, va);
	va_end(va);
}