/* $NetBSD: ieee8023ad_lacp.c,v 1.3 2005/12/11 12:24:54 christos Exp $ */

/*-
 * Copyright (c)2005 YAMAMOTO Takashi,
 * Copyright (c)2008 Andrew Thompson <thompsa@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/callout.h>
#include <sys/eventhandler.h>
#include <sys/mbuf.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h> /* hz */
#include <sys/socket.h> /* for net/if.h */
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <machine/stdarg.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/taskqueue.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/ethernet.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <net/if_lagg.h>
#include <net/ieee8023ad_lacp.h>

/*
 * actor system priority and port priority.
 * XXX should be configurable.
 */

#define	LACP_SYSTEM_PRIO	0x8000
#define	LACP_PORT_PRIO		0x8000

const uint8_t ethermulticastaddr_slowprotocols[ETHER_ADDR_LEN] =
    { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x02 };

static const struct tlv_template lacp_info_tlv_template[] = {
	{ LACP_TYPE_ACTORINFO,
	    sizeof(struct tlvhdr) + sizeof(struct lacp_peerinfo) },
	{ LACP_TYPE_PARTNERINFO,
	    sizeof(struct tlvhdr) + sizeof(struct lacp_peerinfo) },
	{ LACP_TYPE_COLLECTORINFO,
	    sizeof(struct tlvhdr) + sizeof(struct lacp_collectorinfo) },
	{ 0, 0 },
};

static const struct tlv_template marker_info_tlv_template[] = {
	{ MARKER_TYPE_INFO,
	    sizeof(struct tlvhdr) + sizeof(struct lacp_markerinfo) },
	{ 0, 0 },
};

static const struct tlv_template marker_response_tlv_template[] = {
	{ MARKER_TYPE_RESPONSE,
	    sizeof(struct tlvhdr) + sizeof(struct lacp_markerinfo) },
	{ 0, 0 },
};

typedef void (*lacp_timer_func_t)(struct lacp_port *);

static void	lacp_fill_actorinfo(struct lacp_port *, struct lacp_peerinfo *);
static void	lacp_fill_markerinfo(struct lacp_port *,
		    struct lacp_markerinfo *);

static uint64_t	lacp_aggregator_bandwidth(struct lacp_aggregator *);
static void	lacp_suppress_distributing(struct lacp_softc *,
		    struct lacp_aggregator *);
static void	lacp_transit_expire(void *);
static void	lacp_update_portmap(struct lacp_softc *);
static void	lacp_select_active_aggregator(struct lacp_softc *);
static uint16_t	lacp_compose_key(struct lacp_port *);
static int	tlv_check(const void *, size_t, const struct tlvhdr *,
		    const struct tlv_template *, boolean_t);
static void	lacp_tick(void *);

static void	lacp_fill_aggregator_id(struct lacp_aggregator *,
		    const struct lacp_port *);
static void	lacp_fill_aggregator_id_peer(struct lacp_peerinfo *,
		    const struct lacp_peerinfo *);
static int	lacp_aggregator_is_compatible(const struct lacp_aggregator *,
		    const struct lacp_port *);
static int	lacp_peerinfo_is_compatible(const struct lacp_peerinfo *,
		    const struct lacp_peerinfo *);

static struct lacp_aggregator *lacp_aggregator_get(struct lacp_softc *,
		    struct lacp_port *);
static void	lacp_aggregator_addref(struct lacp_softc *,
		    struct lacp_aggregator *);
static void	lacp_aggregator_delref(struct lacp_softc *,
		    struct lacp_aggregator *);

/* receive machine */

static int	lacp_pdu_input(struct lacp_port *, struct mbuf *);
static int	lacp_marker_input(struct lacp_port *, struct mbuf *);
static void	lacp_sm_rx(struct lacp_port *, const struct lacpdu *);
static void	lacp_sm_rx_timer(struct lacp_port *);
static void	lacp_sm_rx_set_expired(struct lacp_port *);
static void	lacp_sm_rx_update_ntt(struct lacp_port *,
		    const struct lacpdu *);
static void	lacp_sm_rx_record_pdu(struct lacp_port *,
		    const struct lacpdu *);
static void	lacp_sm_rx_update_selected(struct lacp_port *,
		    const struct lacpdu *);
static void	lacp_sm_rx_record_default(struct lacp_port *);
static void	lacp_sm_rx_update_default_selected(struct lacp_port *);
static void	lacp_sm_rx_update_selected_from_peerinfo(struct lacp_port *,
		    const struct lacp_peerinfo *);

/* mux machine */

static void	lacp_sm_mux(struct lacp_port *);
static void	lacp_set_mux(struct lacp_port *, enum lacp_mux_state);
static void	lacp_sm_mux_timer(struct lacp_port *);

/* periodic transmit machine */

static void	lacp_sm_ptx_update_timeout(struct lacp_port *, uint8_t);
static void	lacp_sm_ptx_tx_schedule(struct lacp_port *);
static void	lacp_sm_ptx_timer(struct lacp_port *);

/* transmit machine */

static void	lacp_sm_tx(struct lacp_port *);
static void	lacp_sm_assert_ntt(struct lacp_port *);

static void	lacp_run_timers(struct lacp_port *);
static int	lacp_compare_peerinfo(const struct lacp_peerinfo *,
		    const struct lacp_peerinfo *);
static int	lacp_compare_systemid(const struct lacp_systemid *,
		    const struct lacp_systemid *);
static void	lacp_port_enable(struct lacp_port *);
static void	lacp_port_disable(struct lacp_port *);
static void	lacp_select(struct lacp_port *);
static void	lacp_unselect(struct lacp_port *);
static void	lacp_disable_collecting(struct lacp_port *);
static void	lacp_enable_collecting(struct lacp_port *);
static void	lacp_disable_distributing(struct lacp_port *);
static void	lacp_enable_distributing(struct lacp_port *);
static int	lacp_xmit_lacpdu(struct lacp_port *);
static int	lacp_xmit_marker(struct lacp_port *);

/* Debugging */

static void	lacp_dump_lacpdu(const struct lacpdu *);
static const char *lacp_format_partner(const struct lacp_peerinfo *, char *,
		    size_t);
static const char *lacp_format_lagid(const struct lacp_peerinfo *,
		    const struct lacp_peerinfo *, char *, size_t);
static const char *lacp_format_lagid_aggregator(const struct lacp_aggregator *,
		    char *, size_t);
static const char *lacp_format_state(uint8_t, char *, size_t);
static const char *lacp_format_mac(const uint8_t *, char *, size_t);
static const char *lacp_format_systemid(const struct lacp_systemid *, char *,
		    size_t);
static const char *lacp_format_portid(const struct lacp_portid *, char *,
		    size_t);
static void	lacp_dprintf(const struct lacp_port *, const char *, ...)
		    __attribute__((__format__(__printf__, 2, 3)));

static int lacp_debug = 0;
SYSCTL_NODE(_net_link_lagg, OID_AUTO, lacp, CTLFLAG_RD, 0, "ieee802.3ad");
SYSCTL_INT(_net_link_lagg_lacp, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_TUN,
    &lacp_debug, 0, "Enable LACP debug logging (1=debug, 2=trace)");
TUNABLE_INT("net.link.lagg.lacp.debug", &lacp_debug);

#define LACP_DPRINTF(a) if (lacp_debug & 0x01) { lacp_dprintf a ; }
#define LACP_TRACE(a) if (lacp_debug & 0x02) { lacp_dprintf(a,"%s\n",__func__); }
#define LACP_TPRINTF(a) if (lacp_debug & 0x04) { lacp_dprintf a ; }

/*
 * partner administration variables.
 * XXX should be configurable.
 */

static const struct lacp_peerinfo lacp_partner_admin_optimistic = {
	.lip_systemid = { .lsi_prio = 0xffff },
	.lip_portid = { .lpi_prio = 0xffff },
	.lip_state = LACP_STATE_SYNC | LACP_STATE_AGGREGATION |
	    LACP_STATE_COLLECTING | LACP_STATE_DISTRIBUTING,
};

static const struct lacp_peerinfo lacp_partner_admin_strict = {
	.lip_systemid = { .lsi_prio = 0xffff },
	.lip_portid = { .lpi_prio = 0xffff },
	.lip_state = 0,
};

static const lacp_timer_func_t lacp_timer_funcs[LACP_NTIMER] = {
	[LACP_TIMER_CURRENT_WHILE] = lacp_sm_rx_timer,
	[LACP_TIMER_PERIODIC] = lacp_sm_ptx_timer,
	[LACP_TIMER_WAIT_WHILE] = lacp_sm_mux_timer,
};

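/*
 * lacp_input: classify an incoming slow protocols frame by subtype.
 * LACPDUs and marker PDUs are consumed here; any other subtype is
 * returned to the caller untouched.
 */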
struct mbuf *
lacp_input(struct lagg_port *lgp, struct mbuf *m)
{
	struct lacp_port *lp = LACP_PORT(lgp);
	uint8_t subtype;

	if (m->m_pkthdr.len < sizeof(struct ether_header) + sizeof(subtype)) {
		m_freem(m);
		return (NULL);
	}

	m_copydata(m, sizeof(struct ether_header), sizeof(subtype), &subtype);
	switch (subtype) {
	case SLOWPROTOCOLS_SUBTYPE_LACP:
		lacp_pdu_input(lp, m);
		return (NULL);

	case SLOWPROTOCOLS_SUBTYPE_MARKER:
		lacp_marker_input(lp, m);
		return (NULL);
	}

	/* Not a subtype we are interested in */
	return (m);
}

/*
 * lacp_pdu_input: process lacpdu
 */
static int
lacp_pdu_input(struct lacp_port *lp, struct mbuf *m)
{
	struct lacp_softc *lsc = lp->lp_lsc;
	struct lacpdu *du;
	int error = 0;

	if (m->m_pkthdr.len != sizeof(*du)) {
		goto bad;
	}

	if ((m->m_flags & M_MCAST) == 0) {
		goto bad;
	}

	if (m->m_len < sizeof(*du)) {
		m = m_pullup(m, sizeof(*du));
		if (m == NULL) {
			return (ENOMEM);
		}
	}

	du = mtod(m, struct lacpdu *);

	if (memcmp(&du->ldu_eh.ether_dhost,
	    &ethermulticastaddr_slowprotocols, ETHER_ADDR_LEN)) {
		goto bad;
	}

	/*
	 * ignore the version for compatibility with
	 * the future protocol revisions.
	 */
#if 0
	if (du->ldu_sph.sph_version != 1) {
		goto bad;
	}
#endif

	/*
	 * ignore tlv types for compatibility with
	 * the future protocol revisions.
	 */
	if (tlv_check(du, sizeof(*du), &du->ldu_tlv_actor,
	    lacp_info_tlv_template, FALSE)) {
		goto bad;
	}

	if (lacp_debug > 0) {
		lacp_dprintf(lp, "lacpdu receive\n");
		lacp_dump_lacpdu(du);
	}

	if ((1 << lp->lp_ifp->if_dunit) & lp->lp_lsc->lsc_debug.lsc_rx_test) {
		LACP_TPRINTF((lp, "Dropping RX PDU\n"));
		goto bad;
	}

	LACP_LOCK(lsc);
	lacp_sm_rx(lp, du);
	LACP_UNLOCK(lsc);

	m_freem(m);
	return (error);

bad:
	m_freem(m);
	return (EINVAL);
}

static void
lacp_fill_actorinfo(struct lacp_port *lp, struct lacp_peerinfo *info)
{
	struct lagg_port *lgp = lp->lp_lagg;
	struct lagg_softc *sc = lgp->lp_softc;

	info->lip_systemid.lsi_prio = htons(LACP_SYSTEM_PRIO);
	memcpy(&info->lip_systemid.lsi_mac,
	    IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
	info->lip_portid.lpi_prio = htons(LACP_PORT_PRIO);
	info->lip_portid.lpi_portno = htons(lp->lp_ifp->if_index);
	info->lip_state = lp->lp_state;
}

static void
lacp_fill_markerinfo(struct lacp_port *lp, struct lacp_markerinfo *info)
{
	struct ifnet *ifp = lp->lp_ifp;

	/* Fill in the port index and system id (encoded as the MAC) */
	info->mi_rq_port = htons(ifp->if_index);
	memcpy(&info->mi_rq_system, lp->lp_systemid.lsi_mac, ETHER_ADDR_LEN);
	info->mi_rq_xid = htonl(0);
}

static int
lacp_xmit_lacpdu(struct lacp_port *lp)
{
	struct lagg_port *lgp = lp->lp_lagg;
	struct mbuf *m;
	struct lacpdu *du;
	int error;

	LACP_LOCK_ASSERT(lp->lp_lsc);

	m = m_gethdr(M_NOWAIT, MT_DATA);
	if (m == NULL) {
		return (ENOMEM);
	}
	m->m_len = m->m_pkthdr.len = sizeof(*du);

	du = mtod(m, struct lacpdu *);
	memset(du, 0, sizeof(*du));

	memcpy(&du->ldu_eh.ether_dhost, ethermulticastaddr_slowprotocols,
	    ETHER_ADDR_LEN);
	memcpy(&du->ldu_eh.ether_shost, lgp->lp_lladdr, ETHER_ADDR_LEN);
	du->ldu_eh.ether_type = htons(ETHERTYPE_SLOW);

	du->ldu_sph.sph_subtype = SLOWPROTOCOLS_SUBTYPE_LACP;
	du->ldu_sph.sph_version = 1;

	TLV_SET(&du->ldu_tlv_actor, LACP_TYPE_ACTORINFO, sizeof(du->ldu_actor));
	du->ldu_actor = lp->lp_actor;

	TLV_SET(&du->ldu_tlv_partner, LACP_TYPE_PARTNERINFO,
	    sizeof(du->ldu_partner));
	du->ldu_partner = lp->lp_partner;

	TLV_SET(&du->ldu_tlv_collector, LACP_TYPE_COLLECTORINFO,
	    sizeof(du->ldu_collector));
	du->ldu_collector.lci_maxdelay = 0;

	if (lacp_debug > 0) {
		lacp_dprintf(lp, "lacpdu transmit\n");
		lacp_dump_lacpdu(du);
	}

	m->m_flags |= M_MCAST;

	/*
	 * XXX should use higher priority queue.
	 * otherwise network congestion can break aggregation.
	 */

	error = lagg_enqueue(lp->lp_ifp, m);
	return (error);
}

static int
lacp_xmit_marker(struct lacp_port *lp)
{
	struct lagg_port *lgp = lp->lp_lagg;
	struct mbuf *m;
	struct markerdu *mdu;
	int error;

	LACP_LOCK_ASSERT(lp->lp_lsc);

	m = m_gethdr(M_NOWAIT, MT_DATA);
	if (m == NULL) {
		return (ENOMEM);
	}
	m->m_len = m->m_pkthdr.len = sizeof(*mdu);

	mdu = mtod(m, struct markerdu *);
	memset(mdu, 0, sizeof(*mdu));

	memcpy(&mdu->mdu_eh.ether_dhost, ethermulticastaddr_slowprotocols,
	    ETHER_ADDR_LEN);
	memcpy(&mdu->mdu_eh.ether_shost, lgp->lp_lladdr, ETHER_ADDR_LEN);
	mdu->mdu_eh.ether_type = htons(ETHERTYPE_SLOW);

	mdu->mdu_sph.sph_subtype = SLOWPROTOCOLS_SUBTYPE_MARKER;
	mdu->mdu_sph.sph_version = 1;

	/* Bump the transaction id and copy over the marker info */
	lp->lp_marker.mi_rq_xid = htonl(ntohl(lp->lp_marker.mi_rq_xid) + 1);
	TLV_SET(&mdu->mdu_tlv, MARKER_TYPE_INFO, sizeof(mdu->mdu_info));
	mdu->mdu_info = lp->lp_marker;

	LACP_DPRINTF((lp, "marker transmit, port=%u, sys=%6D, id=%u\n",
	    ntohs(mdu->mdu_info.mi_rq_port), mdu->mdu_info.mi_rq_system, ":",
	    ntohl(mdu->mdu_info.mi_rq_xid)));

	m->m_flags |= M_MCAST;
	error = lagg_enqueue(lp->lp_ifp, m);
	return (error);
}

void
lacp_linkstate(struct lagg_port *lgp)
{
	struct lacp_port *lp = LACP_PORT(lgp);
	struct lacp_softc *lsc = lp->lp_lsc;
	struct ifnet *ifp = lgp->lp_ifp;
	struct ifmediareq ifmr;
	int error = 0;
	u_int media;
	uint8_t old_state;
	uint16_t old_key;

	bzero((char *)&ifmr, sizeof(ifmr));
	error = (*ifp->if_ioctl)(ifp, SIOCGIFMEDIA, (caddr_t)&ifmr);
	if (error != 0)
		return;

	LACP_LOCK(lsc);
	media = ifmr.ifm_active;
	LACP_DPRINTF((lp, "media changed 0x%x -> 0x%x, ether = %d, fdx = %d, "
	    "link = %d\n", lp->lp_media, media, IFM_TYPE(media) == IFM_ETHER,
	    (media & IFM_FDX) != 0, ifp->if_link_state == LINK_STATE_UP));
	old_state = lp->lp_state;
	old_key = lp->lp_key;

	lp->lp_media = media;
	/*
	 * If the port is not an active full duplex Ethernet link then it can
	 * not be aggregated.
	 */
	if (IFM_TYPE(media) != IFM_ETHER || (media & IFM_FDX) == 0 ||
	    ifp->if_link_state != LINK_STATE_UP) {
		lacp_port_disable(lp);
	} else {
		lacp_port_enable(lp);
	}
	lp->lp_key = lacp_compose_key(lp);

	if (old_state != lp->lp_state || old_key != lp->lp_key) {
		LACP_DPRINTF((lp, "-> UNSELECTED\n"));
		lp->lp_selected = LACP_UNSELECTED;
	}
	LACP_UNLOCK(lsc);
}

static void
lacp_tick(void *arg)
{
	struct lacp_softc *lsc = arg;
	struct lacp_port *lp;

	LIST_FOREACH(lp, &lsc->lsc_ports, lp_next) {
		if ((lp->lp_state & LACP_STATE_AGGREGATION) == 0)
			continue;

		lacp_run_timers(lp);

		lacp_select(lp);
		lacp_sm_mux(lp);
		lacp_sm_tx(lp);
		lacp_sm_ptx_tx_schedule(lp);
	}
	callout_reset(&lsc->lsc_callout, hz, lacp_tick, lsc);
}

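/*
 * lacp_port_create: called when a port joins the lagg.  Join the slow
 * protocols multicast group, allocate the per-port LACP state and start
 * the port in the EXPIRED state.
 */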
int
lacp_port_create(struct lagg_port *lgp)
{
	struct lagg_softc *sc = lgp->lp_softc;
	struct lacp_softc *lsc = LACP_SOFTC(sc);
	struct lacp_port *lp;
	struct ifnet *ifp = lgp->lp_ifp;
	struct sockaddr_dl sdl;
	struct ifmultiaddr *rifma = NULL;
	int error;

	boolean_t active = TRUE; /* XXX should be configurable */
	boolean_t fast = FALSE; /* XXX should be configurable */

	link_init_sdl(ifp, (struct sockaddr *)&sdl, IFT_ETHER);
	sdl.sdl_alen = ETHER_ADDR_LEN;

	bcopy(&ethermulticastaddr_slowprotocols,
	    LLADDR(&sdl), ETHER_ADDR_LEN);
	error = if_addmulti(ifp, (struct sockaddr *)&sdl, &rifma);
	if (error) {
		printf("%s: ADDMULTI failed on %s\n", __func__, lgp->lp_ifname);
		return (error);
	}

	lp = malloc(sizeof(struct lacp_port),
	    M_DEVBUF, M_NOWAIT|M_ZERO);
	if (lp == NULL)
		return (ENOMEM);

	LACP_LOCK(lsc);
	lgp->lp_psc = (caddr_t)lp;
	lp->lp_ifp = ifp;
	lp->lp_lagg = lgp;
	lp->lp_lsc = lsc;
	lp->lp_ifma = rifma;

	LIST_INSERT_HEAD(&lsc->lsc_ports, lp, lp_next);

	lacp_fill_actorinfo(lp, &lp->lp_actor);
	lacp_fill_markerinfo(lp, &lp->lp_marker);
	lp->lp_state =
	    (active ? LACP_STATE_ACTIVITY : 0) |
	    (fast ? LACP_STATE_TIMEOUT : 0);
	lp->lp_aggregator = NULL;
	lacp_sm_rx_set_expired(lp);
	LACP_UNLOCK(lsc);
	lacp_linkstate(lgp);

	return (0);
}

void
lacp_port_destroy(struct lagg_port *lgp)
{
	struct lacp_port *lp = LACP_PORT(lgp);
	struct lacp_softc *lsc = lp->lp_lsc;
	int i;

	LACP_LOCK(lsc);
	for (i = 0; i < LACP_NTIMER; i++) {
		LACP_TIMER_DISARM(lp, i);
	}

	lacp_disable_collecting(lp);
	lacp_disable_distributing(lp);
	lacp_unselect(lp);

	/* The address may have already been removed by if_purgemaddrs() */
	if (!lgp->lp_detaching)
		if_delmulti_ifma(lp->lp_ifma);

	LIST_REMOVE(lp, lp_next);
	LACP_UNLOCK(lsc);
	free(lp, M_DEVBUF);
}

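/*
 * lacp_req/lacp_portreq: copy the actor and partner parameters of the
 * active aggregator (or of an individual port) into a struct lacp_opreq
 * for the lagg(4) ioctl interface.
 */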
void
lacp_req(struct lagg_softc *sc, caddr_t data)
{
	struct lacp_opreq *req = (struct lacp_opreq *)data;
	struct lacp_softc *lsc = LACP_SOFTC(sc);
	struct lacp_aggregator *la = lsc->lsc_active_aggregator;

	LACP_LOCK(lsc);
	bzero(req, sizeof(struct lacp_opreq));
	if (la != NULL) {
		req->actor_prio = ntohs(la->la_actor.lip_systemid.lsi_prio);
		memcpy(&req->actor_mac, &la->la_actor.lip_systemid.lsi_mac,
		    ETHER_ADDR_LEN);
		req->actor_key = ntohs(la->la_actor.lip_key);
		req->actor_portprio = ntohs(la->la_actor.lip_portid.lpi_prio);
		req->actor_portno = ntohs(la->la_actor.lip_portid.lpi_portno);
		req->actor_state = la->la_actor.lip_state;

		req->partner_prio = ntohs(la->la_partner.lip_systemid.lsi_prio);
		memcpy(&req->partner_mac, &la->la_partner.lip_systemid.lsi_mac,
		    ETHER_ADDR_LEN);
		req->partner_key = ntohs(la->la_partner.lip_key);
		req->partner_portprio = ntohs(la->la_partner.lip_portid.lpi_prio);
		req->partner_portno = ntohs(la->la_partner.lip_portid.lpi_portno);
		req->partner_state = la->la_partner.lip_state;
	}
	LACP_UNLOCK(lsc);
}

void
lacp_portreq(struct lagg_port *lgp, caddr_t data)
{
	struct lacp_opreq *req = (struct lacp_opreq *)data;
	struct lacp_port *lp = LACP_PORT(lgp);
	struct lacp_softc *lsc = lp->lp_lsc;

	LACP_LOCK(lsc);
	req->actor_prio = ntohs(lp->lp_actor.lip_systemid.lsi_prio);
	memcpy(&req->actor_mac, &lp->lp_actor.lip_systemid.lsi_mac,
	    ETHER_ADDR_LEN);
	req->actor_key = ntohs(lp->lp_actor.lip_key);
	req->actor_portprio = ntohs(lp->lp_actor.lip_portid.lpi_prio);
	req->actor_portno = ntohs(lp->lp_actor.lip_portid.lpi_portno);
	req->actor_state = lp->lp_actor.lip_state;

	req->partner_prio = ntohs(lp->lp_partner.lip_systemid.lsi_prio);
	memcpy(&req->partner_mac, &lp->lp_partner.lip_systemid.lsi_mac,
	    ETHER_ADDR_LEN);
	req->partner_key = ntohs(lp->lp_partner.lip_key);
	req->partner_portprio = ntohs(lp->lp_partner.lip_portid.lpi_prio);
	req->partner_portno = ntohs(lp->lp_partner.lip_portid.lpi_portno);
	req->partner_state = lp->lp_partner.lip_state;
	LACP_UNLOCK(lsc);
}

static void
lacp_disable_collecting(struct lacp_port *lp)
{
	LACP_DPRINTF((lp, "collecting disabled\n"));
	lp->lp_state &= ~LACP_STATE_COLLECTING;
}

static void
lacp_enable_collecting(struct lacp_port *lp)
{
	LACP_DPRINTF((lp, "collecting enabled\n"));
	lp->lp_state |= LACP_STATE_COLLECTING;
}

static void
lacp_disable_distributing(struct lacp_port *lp)
{
	struct lacp_aggregator *la = lp->lp_aggregator;
	struct lacp_softc *lsc = lp->lp_lsc;
	struct lagg_softc *sc = lsc->lsc_softc;
	char buf[LACP_LAGIDSTR_MAX+1];

	LACP_LOCK_ASSERT(lsc);

	if (la == NULL || (lp->lp_state & LACP_STATE_DISTRIBUTING) == 0) {
		return;
	}

	KASSERT(!TAILQ_EMPTY(&la->la_ports), ("no aggregator ports"));
	KASSERT(la->la_nports > 0, ("nports invalid (%d)", la->la_nports));
	KASSERT(la->la_refcnt >= la->la_nports, ("aggregator refcnt invalid"));

	LACP_DPRINTF((lp, "disable distributing on aggregator %s, "
	    "nports %d -> %d\n",
	    lacp_format_lagid_aggregator(la, buf, sizeof(buf)),
	    la->la_nports, la->la_nports - 1));

	TAILQ_REMOVE(&la->la_ports, lp, lp_dist_q);
	la->la_nports--;
	sc->sc_active = la->la_nports;

	if (lsc->lsc_active_aggregator == la) {
		lacp_suppress_distributing(lsc, la);
		lacp_select_active_aggregator(lsc);
		/* regenerate the port map, the active aggregator has changed */
		lacp_update_portmap(lsc);
	}

	lp->lp_state &= ~LACP_STATE_DISTRIBUTING;
}

static void
lacp_enable_distributing(struct lacp_port *lp)
{
	struct lacp_aggregator *la = lp->lp_aggregator;
	struct lacp_softc *lsc = lp->lp_lsc;
	struct lagg_softc *sc = lsc->lsc_softc;
	char buf[LACP_LAGIDSTR_MAX+1];

	LACP_LOCK_ASSERT(lsc);

	if ((lp->lp_state & LACP_STATE_DISTRIBUTING) != 0) {
		return;
	}

	LACP_DPRINTF((lp, "enable distributing on aggregator %s, "
	    "nports %d -> %d\n",
	    lacp_format_lagid_aggregator(la, buf, sizeof(buf)),
	    la->la_nports, la->la_nports + 1));

	KASSERT(la->la_refcnt > la->la_nports, ("aggregator refcnt invalid"));
	TAILQ_INSERT_HEAD(&la->la_ports, lp, lp_dist_q);
	la->la_nports++;
	sc->sc_active = la->la_nports;

	lp->lp_state |= LACP_STATE_DISTRIBUTING;

	if (lsc->lsc_active_aggregator == la) {
		lacp_suppress_distributing(lsc, la);
		lacp_update_portmap(lsc);
	} else
		/* try to become the active aggregator */
		lacp_select_active_aggregator(lsc);
}

static void
lacp_transit_expire(void *vp)
{
	struct lacp_softc *lsc = vp;

	LACP_LOCK_ASSERT(lsc);

	LACP_TRACE(NULL);

	lsc->lsc_suppress_distributing = FALSE;
}

static void
lacp_attach_sysctl(struct lacp_softc *lsc, struct sysctl_oid *p_oid)
{
	struct lagg_softc *sc = lsc->lsc_softc;

	SYSCTL_ADD_UINT(&sc->ctx, SYSCTL_CHILDREN(p_oid), OID_AUTO,
	    "lacp_strict_mode",
	    CTLFLAG_RW,
	    &lsc->lsc_strict_mode,
	    lsc->lsc_strict_mode,
	    "Enable LACP strict mode");
}

static void
lacp_attach_sysctl_debug(struct lacp_softc *lsc, struct sysctl_oid *p_oid)
{
	struct lagg_softc *sc = lsc->lsc_softc;
	struct sysctl_oid *oid;

	/* Create a child of the parent lagg interface */
	oid = SYSCTL_ADD_NODE(&sc->ctx, SYSCTL_CHILDREN(p_oid),
	    OID_AUTO, "debug", CTLFLAG_RD, NULL, "DEBUG");

	SYSCTL_ADD_UINT(&sc->ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
	    "rx_test",
	    CTLFLAG_RW,
	    &lsc->lsc_debug.lsc_rx_test,
	    lsc->lsc_debug.lsc_rx_test,
	    "Bitmap of if_dunit entries to drop RX frames for");
	SYSCTL_ADD_UINT(&sc->ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
	    "tx_test",
	    CTLFLAG_RW,
	    &lsc->lsc_debug.lsc_tx_test,
	    lsc->lsc_debug.lsc_tx_test,
	    "Bitmap of if_dunit entries to drop TX frames for");
}

int
lacp_attach(struct lagg_softc *sc)
{
	struct lacp_softc *lsc;
	struct sysctl_oid *oid;

	lsc = malloc(sizeof(struct lacp_softc),
	    M_DEVBUF, M_NOWAIT|M_ZERO);
	if (lsc == NULL)
		return (ENOMEM);

	sc->sc_psc = (caddr_t)lsc;
	lsc->lsc_softc = sc;

	lsc->lsc_hashkey = arc4random();
	lsc->lsc_active_aggregator = NULL;
	lsc->lsc_strict_mode = 1;
	LACP_LOCK_INIT(lsc);
	TAILQ_INIT(&lsc->lsc_aggregators);
	LIST_INIT(&lsc->lsc_ports);

	/* Create a child of the parent lagg interface */
	oid = SYSCTL_ADD_NODE(&sc->ctx, SYSCTL_CHILDREN(sc->sc_oid),
	    OID_AUTO, "lacp", CTLFLAG_RD, NULL, "LACP");

	/* Attach sysctl nodes */
	lacp_attach_sysctl(lsc, oid);
	lacp_attach_sysctl_debug(lsc, oid);

	callout_init_mtx(&lsc->lsc_transit_callout, &lsc->lsc_mtx, 0);
	callout_init_mtx(&lsc->lsc_callout, &lsc->lsc_mtx, 0);

	/* if the lagg is already up then do the same */
	if (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING)
		lacp_init(sc);

	return (0);
}

int
lacp_detach(struct lagg_softc *sc)
{
	struct lacp_softc *lsc = LACP_SOFTC(sc);

	KASSERT(TAILQ_EMPTY(&lsc->lsc_aggregators),
	    ("aggregators still active"));
	KASSERT(lsc->lsc_active_aggregator == NULL,
	    ("aggregator still attached"));

	sc->sc_psc = NULL;
	callout_drain(&lsc->lsc_transit_callout);
	callout_drain(&lsc->lsc_callout);

	LACP_LOCK_DESTROY(lsc);
	free(lsc, M_DEVBUF);
	return (0);
}

void
lacp_init(struct lagg_softc *sc)
{
	struct lacp_softc *lsc = LACP_SOFTC(sc);

	LACP_LOCK(lsc);
	callout_reset(&lsc->lsc_callout, hz, lacp_tick, lsc);
	LACP_UNLOCK(lsc);
}

void
lacp_stop(struct lagg_softc *sc)
{
	struct lacp_softc *lsc = LACP_SOFTC(sc);

	LACP_LOCK(lsc);
	callout_stop(&lsc->lsc_transit_callout);
	callout_stop(&lsc->lsc_callout);
	LACP_UNLOCK(lsc);
}

struct lagg_port *
lacp_select_tx_port(struct lagg_softc *sc, struct mbuf *m)
{
	struct lacp_softc *lsc = LACP_SOFTC(sc);
	struct lacp_portmap *pm;
	struct lacp_port *lp;
	uint32_t hash;

	if (__predict_false(lsc->lsc_suppress_distributing)) {
		LACP_DPRINTF((NULL, "%s: waiting transit\n", __func__));
		return (NULL);
	}

	pm = &lsc->lsc_pmap[lsc->lsc_activemap];
	if (pm->pm_count == 0) {
		LACP_DPRINTF((NULL, "%s: no active aggregator\n", __func__));
		return (NULL);
	}

	if (sc->use_flowid && (m->m_flags & M_FLOWID))
		hash = m->m_pkthdr.flowid >> sc->flowid_shift;
	else
		hash = lagg_hashmbuf(sc, m, lsc->lsc_hashkey);
	hash %= pm->pm_count;
	lp = pm->pm_map[hash];

	KASSERT((lp->lp_state & LACP_STATE_DISTRIBUTING) != 0,
	    ("aggregated port is not distributing"));

	return (lp->lp_lagg);
}

/*
 * lacp_suppress_distributing: drop transmit packets for a while
 * to preserve packet ordering.
 */

static void
lacp_suppress_distributing(struct lacp_softc *lsc, struct lacp_aggregator *la)
{
	struct lacp_port *lp;

	if (lsc->lsc_active_aggregator != la) {
		return;
	}

	LACP_TRACE(NULL);

	lsc->lsc_suppress_distributing = TRUE;

	/* send a marker frame down each port to verify the queues are empty */
	LIST_FOREACH(lp, &lsc->lsc_ports, lp_next) {
		lp->lp_flags |= LACP_PORT_MARK;
		lacp_xmit_marker(lp);
	}

	/* set a timeout for the marker frames */
	callout_reset(&lsc->lsc_transit_callout,
	    LACP_TRANSIT_DELAY * hz / 1000, lacp_transit_expire, lsc);
}

static int
lacp_compare_peerinfo(const struct lacp_peerinfo *a,
    const struct lacp_peerinfo *b)
{
	return (memcmp(a, b, offsetof(struct lacp_peerinfo, lip_state)));
}

static int
lacp_compare_systemid(const struct lacp_systemid *a,
    const struct lacp_systemid *b)
{
	return (memcmp(a, b, sizeof(*a)));
}

#if 0	/* unused */
static int
lacp_compare_portid(const struct lacp_portid *a,
    const struct lacp_portid *b)
{
	return (memcmp(a, b, sizeof(*a)));
}
#endif

static uint64_t
lacp_aggregator_bandwidth(struct lacp_aggregator *la)
{
	struct lacp_port *lp;
	uint64_t speed;

	lp = TAILQ_FIRST(&la->la_ports);
	if (lp == NULL) {
		return (0);
	}

	speed = ifmedia_baudrate(lp->lp_media);
	speed *= la->la_nports;
	if (speed == 0) {
		LACP_DPRINTF((lp, "speed 0? media=0x%x nports=%d\n",
		    lp->lp_media, la->la_nports));
	}

	return (speed);
}

/*
 * lacp_select_active_aggregator: select an aggregator to be used to transmit
 * packets from the lagg(4) interface.
 */

static void
lacp_select_active_aggregator(struct lacp_softc *lsc)
{
	struct lacp_aggregator *la;
	struct lacp_aggregator *best_la = NULL;
	uint64_t best_speed = 0;
	char buf[LACP_LAGIDSTR_MAX+1];

	LACP_TRACE(NULL);

	TAILQ_FOREACH(la, &lsc->lsc_aggregators, la_q) {
		uint64_t speed;

		if (la->la_nports == 0) {
			continue;
		}

		speed = lacp_aggregator_bandwidth(la);
		LACP_DPRINTF((NULL, "%s, speed=%jd, nports=%d\n",
		    lacp_format_lagid_aggregator(la, buf, sizeof(buf)),
		    speed, la->la_nports));

		/*
		 * This aggregator is chosen if
		 *   the partner has a better system priority
		 *   or, the total aggregated speed is higher
		 *   or, it is already the chosen aggregator
		 */
		if ((best_la != NULL && LACP_SYS_PRI(la->la_partner) <
		    LACP_SYS_PRI(best_la->la_partner)) ||
		    speed > best_speed ||
		    (speed == best_speed &&
		    la == lsc->lsc_active_aggregator)) {
			best_la = la;
			best_speed = speed;
		}
	}

	KASSERT(best_la == NULL || best_la->la_nports > 0,
	    ("invalid aggregator refcnt"));
	KASSERT(best_la == NULL || !TAILQ_EMPTY(&best_la->la_ports),
	    ("invalid aggregator list"));

	if (lsc->lsc_active_aggregator != best_la) {
		LACP_DPRINTF((NULL, "active aggregator changed\n"));
		LACP_DPRINTF((NULL, "old %s\n",
		    lacp_format_lagid_aggregator(lsc->lsc_active_aggregator,
		    buf, sizeof(buf))));
	} else {
		LACP_DPRINTF((NULL, "active aggregator not changed\n"));
	}
	LACP_DPRINTF((NULL, "new %s\n",
	    lacp_format_lagid_aggregator(best_la, buf, sizeof(buf))));

	if (lsc->lsc_active_aggregator != best_la) {
		lsc->lsc_active_aggregator = best_la;
		lacp_update_portmap(lsc);
		if (best_la) {
			lacp_suppress_distributing(lsc, best_la);
		}
	}
}

/*
 * Update the inactive portmap array with the new list of ports and
 * make it live.
 */
static void
lacp_update_portmap(struct lacp_softc *lsc)
{
	struct lagg_softc *sc = lsc->lsc_softc;
	struct lacp_aggregator *la;
	struct lacp_portmap *p;
	struct lacp_port *lp;
	uint64_t speed;
	u_int newmap;
	int i;

	newmap = lsc->lsc_activemap == 0 ? 1 : 0;
	p = &lsc->lsc_pmap[newmap];
	la = lsc->lsc_active_aggregator;
	speed = 0;
	bzero(p, sizeof(struct lacp_portmap));

	if (la != NULL && la->la_nports > 0) {
		p->pm_count = la->la_nports;
		i = 0;
		TAILQ_FOREACH(lp, &la->la_ports, lp_dist_q)
			p->pm_map[i++] = lp;
		KASSERT(i == p->pm_count, ("Invalid port count"));
		speed = lacp_aggregator_bandwidth(la);
	}
	sc->sc_ifp->if_baudrate = speed;

	/* switch the active portmap over */
	atomic_store_rel_int(&lsc->lsc_activemap, newmap);
	LACP_DPRINTF((NULL, "Set table %d with %d ports\n",
	    lsc->lsc_activemap,
	    lsc->lsc_pmap[lsc->lsc_activemap].pm_count));
}

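/*
 * lacp_compose_key: derive the operational key for a port.  Aggregatable
 * ports of the same media speed on the same lagg share a key; ports that
 * cannot aggregate get a unique key based on their own if_index.
 */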
static uint16_t
lacp_compose_key(struct lacp_port *lp)
{
	struct lagg_port *lgp = lp->lp_lagg;
	struct lagg_softc *sc = lgp->lp_softc;
	u_int media = lp->lp_media;
	uint16_t key;

	if ((lp->lp_state & LACP_STATE_AGGREGATION) == 0) {

		/*
		 * non-aggregatable links should have unique keys.
		 *
		 * XXX this isn't really unique as if_index is 16 bit.
		 */

		/* bit 0..14: (some bits of) if_index of this port */
		key = lp->lp_ifp->if_index;
		/* bit 15: 1 */
		key |= 0x8000;
	} else {
		u_int subtype = IFM_SUBTYPE(media);

		KASSERT(IFM_TYPE(media) == IFM_ETHER, ("invalid media type"));
		KASSERT((media & IFM_FDX) != 0, ("aggregating HDX interface"));

		/* bit 0..4: IFM_SUBTYPE modulo speed */
		switch (subtype) {
		case IFM_10_T:
		case IFM_10_2:
		case IFM_10_5:
		case IFM_10_STP:
		case IFM_10_FL:
			key = IFM_10_T;
			break;
		case IFM_100_TX:
		case IFM_100_FX:
		case IFM_100_T4:
		case IFM_100_VG:
		case IFM_100_T2:
			key = IFM_100_TX;
			break;
		case IFM_1000_SX:
		case IFM_1000_LX:
		case IFM_1000_CX:
		case IFM_1000_T:
			key = IFM_1000_SX;
			break;
		case IFM_10G_LR:
		case IFM_10G_SR:
		case IFM_10G_CX4:
		case IFM_10G_TWINAX:
		case IFM_10G_TWINAX_LONG:
		case IFM_10G_LRM:
		case IFM_10G_T:
			key = IFM_10G_LR;
			break;
		case IFM_40G_CR4:
		case IFM_40G_SR4:
		case IFM_40G_LR4:
			key = IFM_40G_CR4;
			break;
		default:
			key = subtype;
		}
		/* bit 5..14: (some bits of) if_index of lagg device */
		key |= 0x7fe0 & ((sc->sc_ifp->if_index) << 5);
		/* bit 15: 0 */
	}
	return (htons(key));
}

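/*
 * Aggregators are reference counted: lacp_aggregator_get() returns a new
 * aggregator holding one reference, each additional selected port takes
 * one via lacp_aggregator_addref(), and the aggregator is freed when the
 * last reference is dropped in lacp_aggregator_delref().
 */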
static void
lacp_aggregator_addref(struct lacp_softc *lsc, struct lacp_aggregator *la)
{
	char buf[LACP_LAGIDSTR_MAX+1];

	LACP_DPRINTF((NULL, "%s: lagid=%s, refcnt %d -> %d\n",
	    __func__,
	    lacp_format_lagid(&la->la_actor, &la->la_partner,
	    buf, sizeof(buf)),
	    la->la_refcnt, la->la_refcnt + 1));

	KASSERT(la->la_refcnt > 0, ("refcount <= 0"));
	la->la_refcnt++;
	KASSERT(la->la_refcnt > la->la_nports, ("invalid refcount"));
}

static void
lacp_aggregator_delref(struct lacp_softc *lsc, struct lacp_aggregator *la)
{
	char buf[LACP_LAGIDSTR_MAX+1];

	LACP_DPRINTF((NULL, "%s: lagid=%s, refcnt %d -> %d\n",
	    __func__,
	    lacp_format_lagid(&la->la_actor, &la->la_partner,
	    buf, sizeof(buf)),
	    la->la_refcnt, la->la_refcnt - 1));

	KASSERT(la->la_refcnt > la->la_nports, ("invalid refcnt"));
	la->la_refcnt--;
	if (la->la_refcnt > 0) {
		return;
	}

	KASSERT(la->la_refcnt == 0, ("refcount not zero"));
	KASSERT(lsc->lsc_active_aggregator != la, ("aggregator active"));

	TAILQ_REMOVE(&lsc->lsc_aggregators, la, la_q);

	free(la, M_DEVBUF);
}

/*
 * lacp_aggregator_get: allocate an aggregator.
 */

static struct lacp_aggregator *
lacp_aggregator_get(struct lacp_softc *lsc, struct lacp_port *lp)
{
	struct lacp_aggregator *la;

	la = malloc(sizeof(*la), M_DEVBUF, M_NOWAIT);
	if (la) {
		la->la_refcnt = 1;
		la->la_nports = 0;
		TAILQ_INIT(&la->la_ports);
		la->la_pending = 0;
		TAILQ_INSERT_TAIL(&lsc->lsc_aggregators, la, la_q);
	}

	return (la);
}

/*
 * lacp_fill_aggregator_id: setup a newly allocated aggregator from a port.
 */

static void
lacp_fill_aggregator_id(struct lacp_aggregator *la, const struct lacp_port *lp)
{
	lacp_fill_aggregator_id_peer(&la->la_partner, &lp->lp_partner);
	lacp_fill_aggregator_id_peer(&la->la_actor, &lp->lp_actor);

	la->la_actor.lip_state = lp->lp_state & LACP_STATE_AGGREGATION;
}

static void
lacp_fill_aggregator_id_peer(struct lacp_peerinfo *lpi_aggr,
    const struct lacp_peerinfo *lpi_port)
{
	memset(lpi_aggr, 0, sizeof(*lpi_aggr));
	lpi_aggr->lip_systemid = lpi_port->lip_systemid;
	lpi_aggr->lip_key = lpi_port->lip_key;
}

/*
 * lacp_aggregator_is_compatible: check if a port can join an aggregator.
 */

static int
lacp_aggregator_is_compatible(const struct lacp_aggregator *la,
    const struct lacp_port *lp)
{
	if (!(lp->lp_state & LACP_STATE_AGGREGATION) ||
	    !(lp->lp_partner.lip_state & LACP_STATE_AGGREGATION)) {
		return (0);
	}

	if (!(la->la_actor.lip_state & LACP_STATE_AGGREGATION)) {
		return (0);
	}

	if (!lacp_peerinfo_is_compatible(&la->la_partner, &lp->lp_partner)) {
		return (0);
	}

	if (!lacp_peerinfo_is_compatible(&la->la_actor, &lp->lp_actor)) {
		return (0);
	}

	return (1);
}

static int
lacp_peerinfo_is_compatible(const struct lacp_peerinfo *a,
    const struct lacp_peerinfo *b)
{
	if (memcmp(&a->lip_systemid, &b->lip_systemid,
	    sizeof(a->lip_systemid))) {
		return (0);
	}

	if (memcmp(&a->lip_key, &b->lip_key, sizeof(a->lip_key))) {
		return (0);
	}

	return (1);
}

static void
lacp_port_enable(struct lacp_port *lp)
{
	lp->lp_state |= LACP_STATE_AGGREGATION;
}

static void
lacp_port_disable(struct lacp_port *lp)
{
	lacp_set_mux(lp, LACP_MUX_DETACHED);

	lp->lp_state &= ~LACP_STATE_AGGREGATION;
	lp->lp_selected = LACP_UNSELECTED;
	lacp_sm_rx_record_default(lp);
	lp->lp_partner.lip_state &= ~LACP_STATE_AGGREGATION;
	lp->lp_state &= ~LACP_STATE_EXPIRED;
}

/*
 * lacp_select: select an aggregator.  create one if necessary.
 */
static void
lacp_select(struct lacp_port *lp)
{
	struct lacp_softc *lsc = lp->lp_lsc;
	struct lacp_aggregator *la;
	char buf[LACP_LAGIDSTR_MAX+1];

	if (lp->lp_aggregator) {
		return;
	}

	KASSERT(!LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE),
	    ("timer_wait_while still active"));

	LACP_DPRINTF((lp, "port lagid=%s\n",
	    lacp_format_lagid(&lp->lp_actor, &lp->lp_partner,
	    buf, sizeof(buf))));

	TAILQ_FOREACH(la, &lsc->lsc_aggregators, la_q) {
		if (lacp_aggregator_is_compatible(la, lp)) {
			break;
		}
	}

	if (la == NULL) {
		la = lacp_aggregator_get(lsc, lp);
		if (la == NULL) {
			LACP_DPRINTF((lp, "aggregator creation failed\n"));

			/*
			 * will retry on the next tick.
			 */

			return;
		}
		lacp_fill_aggregator_id(la, lp);
		LACP_DPRINTF((lp, "aggregator created\n"));
	} else {
		LACP_DPRINTF((lp, "compatible aggregator found\n"));
		if (la->la_refcnt == LACP_MAX_PORTS)
			return;
		lacp_aggregator_addref(lsc, la);
	}

	LACP_DPRINTF((lp, "aggregator lagid=%s\n",
	    lacp_format_lagid(&la->la_actor, &la->la_partner,
	    buf, sizeof(buf))));

	lp->lp_aggregator = la;
	lp->lp_selected = LACP_SELECTED;
}

/*
 * lacp_unselect: finish unselect/detach process.
 */

static void
lacp_unselect(struct lacp_port *lp)
{
	struct lacp_softc *lsc = lp->lp_lsc;
	struct lacp_aggregator *la = lp->lp_aggregator;

	KASSERT(!LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE),
	    ("timer_wait_while still active"));

	if (la == NULL) {
		return;
	}

	lp->lp_aggregator = NULL;
	lacp_aggregator_delref(lsc, la);
}

/* mux machine */

static void
lacp_sm_mux(struct lacp_port *lp)
{
	struct lagg_port *lgp = lp->lp_lagg;
	struct lagg_softc *sc = lgp->lp_softc;
	enum lacp_mux_state new_state;
	boolean_t p_sync =
	    (lp->lp_partner.lip_state & LACP_STATE_SYNC) != 0;
	boolean_t p_collecting =
	    (lp->lp_partner.lip_state & LACP_STATE_COLLECTING) != 0;
	enum lacp_selected selected = lp->lp_selected;
	struct lacp_aggregator *la;

	if (lacp_debug > 1)
		lacp_dprintf(lp, "%s: state= 0x%x, selected= 0x%x, "
		    "p_sync= 0x%x, p_collecting= 0x%x\n", __func__,
		    lp->lp_mux_state, selected, p_sync, p_collecting);

re_eval:
	la = lp->lp_aggregator;
	KASSERT(lp->lp_mux_state == LACP_MUX_DETACHED || la != NULL,
	    ("MUX not detached"));
	new_state = lp->lp_mux_state;
	switch (lp->lp_mux_state) {
	case LACP_MUX_DETACHED:
		if (selected != LACP_UNSELECTED) {
			new_state = LACP_MUX_WAITING;
		}
		break;
	case LACP_MUX_WAITING:
		KASSERT(la->la_pending > 0 ||
		    !LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE),
		    ("timer_wait_while still active"));
		if (selected == LACP_SELECTED && la->la_pending == 0) {
			new_state = LACP_MUX_ATTACHED;
		} else if (selected == LACP_UNSELECTED) {
			new_state = LACP_MUX_DETACHED;
		}
		break;
	case LACP_MUX_ATTACHED:
		if (selected == LACP_SELECTED && p_sync) {
			new_state = LACP_MUX_COLLECTING;
		} else if (selected != LACP_SELECTED) {
			new_state = LACP_MUX_DETACHED;
		}
		break;
	case LACP_MUX_COLLECTING:
		if (selected == LACP_SELECTED && p_sync && p_collecting) {
			new_state = LACP_MUX_DISTRIBUTING;
		} else if (selected != LACP_SELECTED || !p_sync) {
			new_state = LACP_MUX_ATTACHED;
		}
		break;
	case LACP_MUX_DISTRIBUTING:
		if (selected != LACP_SELECTED || !p_sync || !p_collecting) {
			new_state = LACP_MUX_COLLECTING;
			lacp_dprintf(lp, "Interface stopped DISTRIBUTING, possible flapping\n");
			sc->sc_flapping++;
		}
		break;
	default:
		panic("%s: unknown state", __func__);
	}

	if (lp->lp_mux_state == new_state) {
		return;
	}

	lacp_set_mux(lp, new_state);
	goto re_eval;
}

static void
lacp_set_mux(struct lacp_port *lp, enum lacp_mux_state new_state)
{
	struct lacp_aggregator *la = lp->lp_aggregator;

	if (lp->lp_mux_state == new_state) {
		return;
	}

	switch (new_state) {
	case LACP_MUX_DETACHED:
		lp->lp_state &= ~LACP_STATE_SYNC;
		lacp_disable_distributing(lp);
		lacp_disable_collecting(lp);
		lacp_sm_assert_ntt(lp);
		/* cancel timer */
		if (LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE)) {
			KASSERT(la->la_pending > 0,
			    ("timer_wait_while not active"));
			la->la_pending--;
		}
		LACP_TIMER_DISARM(lp, LACP_TIMER_WAIT_WHILE);
		lacp_unselect(lp);
		break;
	case LACP_MUX_WAITING:
		LACP_TIMER_ARM(lp, LACP_TIMER_WAIT_WHILE,
		    LACP_AGGREGATE_WAIT_TIME);
		la->la_pending++;
		break;
	case LACP_MUX_ATTACHED:
		lp->lp_state |= LACP_STATE_SYNC;
		lacp_disable_collecting(lp);
		lacp_sm_assert_ntt(lp);
		break;
	case LACP_MUX_COLLECTING:
		lacp_enable_collecting(lp);
		lacp_disable_distributing(lp);
		lacp_sm_assert_ntt(lp);
		break;
	case LACP_MUX_DISTRIBUTING:
		lacp_enable_distributing(lp);
		break;
	default:
		panic("%s: unknown state", __func__);
	}

	LACP_DPRINTF((lp, "mux_state %d -> %d\n", lp->lp_mux_state, new_state));

	lp->lp_mux_state = new_state;
}

static void
lacp_sm_mux_timer(struct lacp_port *lp)
{
	struct lacp_aggregator *la = lp->lp_aggregator;
	char buf[LACP_LAGIDSTR_MAX+1];

	KASSERT(la->la_pending > 0, ("no pending event"));

	LACP_DPRINTF((lp, "%s: aggregator %s, pending %d -> %d\n", __func__,
	    lacp_format_lagid(&la->la_actor, &la->la_partner,
	    buf, sizeof(buf)),
	    la->la_pending, la->la_pending - 1));

	la->la_pending--;
}

/* periodic transmit machine */

static void
lacp_sm_ptx_update_timeout(struct lacp_port *lp, uint8_t oldpstate)
{
	if (LACP_STATE_EQ(oldpstate, lp->lp_partner.lip_state,
	    LACP_STATE_TIMEOUT)) {
		return;
	}

	LACP_DPRINTF((lp, "partner timeout changed\n"));

	/*
	 * FAST_PERIODIC -> SLOW_PERIODIC
	 * or
	 * SLOW_PERIODIC (-> PERIODIC_TX) -> FAST_PERIODIC
	 *
	 * let lacp_sm_ptx_tx_schedule update the timeout.
	 */

	LACP_TIMER_DISARM(lp, LACP_TIMER_PERIODIC);

	/*
	 * if timeout has been shortened, assert NTT.
	 */

	if ((lp->lp_partner.lip_state & LACP_STATE_TIMEOUT)) {
		lacp_sm_assert_ntt(lp);
	}
}

static void
lacp_sm_ptx_tx_schedule(struct lacp_port *lp)
{
	int timeout;

	if (!(lp->lp_state & LACP_STATE_ACTIVITY) &&
	    !(lp->lp_partner.lip_state & LACP_STATE_ACTIVITY)) {

		/*
		 * NO_PERIODIC
		 */

		LACP_TIMER_DISARM(lp, LACP_TIMER_PERIODIC);
		return;
	}

	if (LACP_TIMER_ISARMED(lp, LACP_TIMER_PERIODIC)) {
		return;
	}

	timeout = (lp->lp_partner.lip_state & LACP_STATE_TIMEOUT) ?
	    LACP_FAST_PERIODIC_TIME : LACP_SLOW_PERIODIC_TIME;

	LACP_TIMER_ARM(lp, LACP_TIMER_PERIODIC, timeout);
}

static void
lacp_sm_ptx_timer(struct lacp_port *lp)
{
	lacp_sm_assert_ntt(lp);
}

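/*
 * lacp_sm_rx: receive machine.  Runs for every LACPDU that passed
 * validation in lacp_pdu_input(): records the partner's information,
 * restarts the current_while timer and kicks the transmit machine.
 */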
static void
lacp_sm_rx(struct lacp_port *lp, const struct lacpdu *du)
{
	int timeout;

	/*
	 * check LACP_DISABLED first
	 */

	if (!(lp->lp_state & LACP_STATE_AGGREGATION)) {
		return;
	}

	/*
	 * check loopback condition.
	 */

	if (!lacp_compare_systemid(&du->ldu_actor.lip_systemid,
	    &lp->lp_actor.lip_systemid)) {
		return;
	}

	/*
	 * EXPIRED, DEFAULTED, CURRENT -> CURRENT
	 */

	lacp_sm_rx_update_selected(lp, du);
	lacp_sm_rx_update_ntt(lp, du);
	lacp_sm_rx_record_pdu(lp, du);

	timeout = (lp->lp_state & LACP_STATE_TIMEOUT) ?
	    LACP_SHORT_TIMEOUT_TIME : LACP_LONG_TIMEOUT_TIME;
	LACP_TIMER_ARM(lp, LACP_TIMER_CURRENT_WHILE, timeout);

	lp->lp_state &= ~LACP_STATE_EXPIRED;

	/*
	 * kick the transmit machine without waiting for the next tick.
	 */

	lacp_sm_tx(lp);
}

static void
lacp_sm_rx_set_expired(struct lacp_port *lp)
{
	lp->lp_partner.lip_state &= ~LACP_STATE_SYNC;
	lp->lp_partner.lip_state |= LACP_STATE_TIMEOUT;
	LACP_TIMER_ARM(lp, LACP_TIMER_CURRENT_WHILE, LACP_SHORT_TIMEOUT_TIME);
	lp->lp_state |= LACP_STATE_EXPIRED;
}

static void
lacp_sm_rx_timer(struct lacp_port *lp)
{
	if ((lp->lp_state & LACP_STATE_EXPIRED) == 0) {
		/* CURRENT -> EXPIRED */
		LACP_DPRINTF((lp, "%s: CURRENT -> EXPIRED\n", __func__));
		lacp_sm_rx_set_expired(lp);
	} else {
		/* EXPIRED -> DEFAULTED */
		LACP_DPRINTF((lp, "%s: EXPIRED -> DEFAULTED\n", __func__));
		lacp_sm_rx_update_default_selected(lp);
		lacp_sm_rx_record_default(lp);
		lp->lp_state &= ~LACP_STATE_EXPIRED;
	}
}

static void
lacp_sm_rx_record_pdu(struct lacp_port *lp, const struct lacpdu *du)
{
	boolean_t active;
	uint8_t oldpstate;
	char buf[LACP_STATESTR_MAX+1];

	LACP_TRACE(lp);

	oldpstate = lp->lp_partner.lip_state;

	active = (du->ldu_actor.lip_state & LACP_STATE_ACTIVITY)
	    || ((lp->lp_state & LACP_STATE_ACTIVITY) &&
	    (du->ldu_partner.lip_state & LACP_STATE_ACTIVITY));

	lp->lp_partner = du->ldu_actor;
	if (active &&
	    ((LACP_STATE_EQ(lp->lp_state, du->ldu_partner.lip_state,
	    LACP_STATE_AGGREGATION) &&
	    !lacp_compare_peerinfo(&lp->lp_actor, &du->ldu_partner))
	    || (du->ldu_partner.lip_state & LACP_STATE_AGGREGATION) == 0)) {
		/* XXX nothing? */
	} else {
		lp->lp_partner.lip_state &= ~LACP_STATE_SYNC;
	}

	lp->lp_state &= ~LACP_STATE_DEFAULTED;

	if (oldpstate != lp->lp_partner.lip_state) {
		LACP_DPRINTF((lp, "old pstate %s\n",
		    lacp_format_state(oldpstate, buf, sizeof(buf))));
		LACP_DPRINTF((lp, "new pstate %s\n",
		    lacp_format_state(lp->lp_partner.lip_state, buf,
		    sizeof(buf))));
	}

	/* XXX Hack, still need to implement 5.4.9 para 2,3,4 */
	if (lp->lp_lsc->lsc_strict_mode)
		lp->lp_partner.lip_state |= LACP_STATE_SYNC;

	lacp_sm_ptx_update_timeout(lp, oldpstate);
}

static void
lacp_sm_rx_update_ntt(struct lacp_port *lp, const struct lacpdu *du)
{

	LACP_TRACE(lp);

	if (lacp_compare_peerinfo(&lp->lp_actor, &du->ldu_partner) ||
	    !LACP_STATE_EQ(lp->lp_state, du->ldu_partner.lip_state,
	    LACP_STATE_ACTIVITY | LACP_STATE_SYNC | LACP_STATE_AGGREGATION)) {
		LACP_DPRINTF((lp, "%s: assert ntt\n", __func__));
		lacp_sm_assert_ntt(lp);
	}
}

static void
lacp_sm_rx_record_default(struct lacp_port *lp)
{
	uint8_t oldpstate;

	LACP_TRACE(lp);

	oldpstate = lp->lp_partner.lip_state;
	if (lp->lp_lsc->lsc_strict_mode)
		lp->lp_partner = lacp_partner_admin_strict;
	else
		lp->lp_partner = lacp_partner_admin_optimistic;
	lp->lp_state |= LACP_STATE_DEFAULTED;
	lacp_sm_ptx_update_timeout(lp, oldpstate);
}

static void
lacp_sm_rx_update_selected_from_peerinfo(struct lacp_port *lp,
    const struct lacp_peerinfo *info)
{

	LACP_TRACE(lp);

	if (lacp_compare_peerinfo(&lp->lp_partner, info) ||
	    !LACP_STATE_EQ(lp->lp_partner.lip_state, info->lip_state,
	    LACP_STATE_AGGREGATION)) {
		lp->lp_selected = LACP_UNSELECTED;
		/* mux machine will clean up lp->lp_aggregator */
	}
}

static void
lacp_sm_rx_update_selected(struct lacp_port *lp, const struct lacpdu *du)
{

	LACP_TRACE(lp);

	lacp_sm_rx_update_selected_from_peerinfo(lp, &du->ldu_actor);
}

static void
lacp_sm_rx_update_default_selected(struct lacp_port *lp)
{

	LACP_TRACE(lp);

	if (lp->lp_lsc->lsc_strict_mode)
		lacp_sm_rx_update_selected_from_peerinfo(lp,
		    &lacp_partner_admin_strict);
	else
		lacp_sm_rx_update_selected_from_peerinfo(lp,
		    &lacp_partner_admin_optimistic);
}

/* transmit machine */

static void
lacp_sm_tx(struct lacp_port *lp)
{
	int error = 0;

	if (!(lp->lp_state & LACP_STATE_AGGREGATION)
#if 1
	    || (!(lp->lp_state & LACP_STATE_ACTIVITY)
	    && !(lp->lp_partner.lip_state & LACP_STATE_ACTIVITY))
#endif
	    ) {
		lp->lp_flags &= ~LACP_PORT_NTT;
	}

	if (!(lp->lp_flags & LACP_PORT_NTT)) {
		return;
	}

	/* Rate limit to 3 PDUs per LACP_FAST_PERIODIC_TIME */
	if (ppsratecheck(&lp->lp_last_lacpdu, &lp->lp_lacpdu_sent,
	    (3 / LACP_FAST_PERIODIC_TIME)) == 0) {
		LACP_DPRINTF((lp, "rate limited pdu\n"));
		return;
	}

	if (((1 << lp->lp_ifp->if_dunit) & lp->lp_lsc->lsc_debug.lsc_tx_test) == 0) {
		error = lacp_xmit_lacpdu(lp);
	} else {
		LACP_TPRINTF((lp, "Dropping TX PDU\n"));
	}

	if (error == 0) {
		lp->lp_flags &= ~LACP_PORT_NTT;
	} else {
		LACP_DPRINTF((lp, "lacpdu transmit failure, error %d\n",
		    error));
	}
}

static void
lacp_sm_assert_ntt(struct lacp_port *lp)
{

	lp->lp_flags |= LACP_PORT_NTT;
}

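/*
 * lacp_run_timers: decrement the per-port timers once per tick and call
 * the corresponding state machine handler when one of them expires.
 */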
static void
lacp_run_timers(struct lacp_port *lp)
{
	int i;

	for (i = 0; i < LACP_NTIMER; i++) {
		KASSERT(lp->lp_timer[i] >= 0,
		    ("invalid timer value %d", lp->lp_timer[i]));
		if (lp->lp_timer[i] == 0) {
			continue;
		} else if (--lp->lp_timer[i] <= 0) {
			if (lacp_timer_funcs[i]) {
				(*lacp_timer_funcs[i])(lp);
			}
		}
	}
}

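/*
 * lacp_marker_input: process a marker PDU.  Marker requests are echoed
 * back as responses; a matching response clears the port's MARK flag,
 * and once every port has answered, distribution is re-enabled.
 */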
int
lacp_marker_input(struct lacp_port *lp, struct mbuf *m)
{
	struct lacp_softc *lsc = lp->lp_lsc;
	struct lagg_port *lgp = lp->lp_lagg;
	struct lacp_port *lp2;
	struct markerdu *mdu;
	int error = 0;
	int pending = 0;

	if (m->m_pkthdr.len != sizeof(*mdu)) {
		goto bad;
	}

	if ((m->m_flags & M_MCAST) == 0) {
		goto bad;
	}

	if (m->m_len < sizeof(*mdu)) {
		m = m_pullup(m, sizeof(*mdu));
		if (m == NULL) {
			return (ENOMEM);
		}
	}

	mdu = mtod(m, struct markerdu *);

	if (memcmp(&mdu->mdu_eh.ether_dhost,
	    &ethermulticastaddr_slowprotocols, ETHER_ADDR_LEN)) {
		goto bad;
	}

	if (mdu->mdu_sph.sph_version != 1) {
		goto bad;
	}

	switch (mdu->mdu_tlv.tlv_type) {
	case MARKER_TYPE_INFO:
		if (tlv_check(mdu, sizeof(*mdu), &mdu->mdu_tlv,
		    marker_info_tlv_template, TRUE)) {
			goto bad;
		}
		mdu->mdu_tlv.tlv_type = MARKER_TYPE_RESPONSE;
		memcpy(&mdu->mdu_eh.ether_dhost,
		    &ethermulticastaddr_slowprotocols, ETHER_ADDR_LEN);
		memcpy(&mdu->mdu_eh.ether_shost,
		    lgp->lp_lladdr, ETHER_ADDR_LEN);
		error = lagg_enqueue(lp->lp_ifp, m);
		break;

	case MARKER_TYPE_RESPONSE:
		if (tlv_check(mdu, sizeof(*mdu), &mdu->mdu_tlv,
		    marker_response_tlv_template, TRUE)) {
			goto bad;
		}
		LACP_DPRINTF((lp, "marker response, port=%u, sys=%6D, id=%u\n",
		    ntohs(mdu->mdu_info.mi_rq_port), mdu->mdu_info.mi_rq_system,
		    ":", ntohl(mdu->mdu_info.mi_rq_xid)));

		/* Verify that it is the last marker we sent out */
		if (memcmp(&mdu->mdu_info, &lp->lp_marker,
		    sizeof(struct lacp_markerinfo)))
			goto bad;

		LACP_LOCK(lsc);
		lp->lp_flags &= ~LACP_PORT_MARK;

		if (lsc->lsc_suppress_distributing) {
			/* Check if any ports are waiting for a response */
			LIST_FOREACH(lp2, &lsc->lsc_ports, lp_next) {
				if (lp2->lp_flags & LACP_PORT_MARK) {
					pending = 1;
					break;
				}
			}

			if (pending == 0) {
				/* All interface queues are clear */
				LACP_DPRINTF((NULL, "queue flush complete\n"));
				lsc->lsc_suppress_distributing = FALSE;
			}
		}
		LACP_UNLOCK(lsc);
		m_freem(m);
		break;

	default:
		goto bad;
	}

	return (error);

bad:
	LACP_DPRINTF((lp, "bad marker frame\n"));
	m_freem(m);
	return (EINVAL);
}

static int
tlv_check(const void *p, size_t size, const struct tlvhdr *tlv,
    const struct tlv_template *tmpl, boolean_t check_type)
{
	while (/* CONSTCOND */ 1) {
		if ((const char *)tlv - (const char *)p + sizeof(*tlv) > size) {
			return (EINVAL);
		}
		if ((check_type && tlv->tlv_type != tmpl->tmpl_type) ||
		    tlv->tlv_length != tmpl->tmpl_length) {
			return (EINVAL);
		}
		if (tmpl->tmpl_type == 0) {
			break;
		}
		tlv = (const struct tlvhdr *)
		    ((const char *)tlv + tlv->tlv_length);
		tmpl++;
	}

	return (0);
}

/* Debugging */
const char *
lacp_format_mac(const uint8_t *mac, char *buf, size_t buflen)
{
	snprintf(buf, buflen, "%02X-%02X-%02X-%02X-%02X-%02X",
	    (int)mac[0],
	    (int)mac[1],
	    (int)mac[2],
	    (int)mac[3],
	    (int)mac[4],
	    (int)mac[5]);

	return (buf);
}

const char *
lacp_format_systemid(const struct lacp_systemid *sysid,
    char *buf, size_t buflen)
{
	char macbuf[LACP_MACSTR_MAX+1];

	snprintf(buf, buflen, "%04X,%s",
	    ntohs(sysid->lsi_prio),
	    lacp_format_mac(sysid->lsi_mac, macbuf, sizeof(macbuf)));

	return (buf);
}

const char *
lacp_format_portid(const struct lacp_portid *portid, char *buf, size_t buflen)
{
	snprintf(buf, buflen, "%04X,%04X",
	    ntohs(portid->lpi_prio),
	    ntohs(portid->lpi_portno));

	return (buf);
}

const char *
lacp_format_partner(const struct lacp_peerinfo *peer, char *buf, size_t buflen)
{
	char sysid[LACP_SYSTEMIDSTR_MAX+1];
	char portid[LACP_PORTIDSTR_MAX+1];

	snprintf(buf, buflen, "(%s,%04X,%s)",
	    lacp_format_systemid(&peer->lip_systemid, sysid, sizeof(sysid)),
	    ntohs(peer->lip_key),
	    lacp_format_portid(&peer->lip_portid, portid, sizeof(portid)));

	return (buf);
}

const char *
lacp_format_lagid(const struct lacp_peerinfo *a,
    const struct lacp_peerinfo *b, char *buf, size_t buflen)
{
	char astr[LACP_PARTNERSTR_MAX+1];
	char bstr[LACP_PARTNERSTR_MAX+1];

#if 0
	/*
	 * there's a convention to display small numbered peer
	 * in the left.
	 */

	if (lacp_compare_peerinfo(a, b) > 0) {
		const struct lacp_peerinfo *t;

		t = a;
		a = b;
		b = t;
	}
#endif

	snprintf(buf, buflen, "[%s,%s]",
	    lacp_format_partner(a, astr, sizeof(astr)),
	    lacp_format_partner(b, bstr, sizeof(bstr)));

	return (buf);
}

const char *
lacp_format_lagid_aggregator(const struct lacp_aggregator *la,
    char *buf, size_t buflen)
{
	if (la == NULL) {
		return ("(none)");
	}

	return (lacp_format_lagid(&la->la_actor, &la->la_partner, buf, buflen));
}

const char *
lacp_format_state(uint8_t state, char *buf, size_t buflen)
{
	snprintf(buf, buflen, "%b", state, LACP_STATE_BITS);
	return (buf);
}

static void
lacp_dump_lacpdu(const struct lacpdu *du)
{
	char buf[LACP_PARTNERSTR_MAX+1];
	char buf2[LACP_STATESTR_MAX+1];

	printf("actor=%s\n",
	    lacp_format_partner(&du->ldu_actor, buf, sizeof(buf)));
	printf("actor.state=%s\n",
	    lacp_format_state(du->ldu_actor.lip_state, buf2, sizeof(buf2)));
	printf("partner=%s\n",
	    lacp_format_partner(&du->ldu_partner, buf, sizeof(buf)));
	printf("partner.state=%s\n",
	    lacp_format_state(du->ldu_partner.lip_state, buf2, sizeof(buf2)));

	printf("maxdelay=%d\n", ntohs(du->ldu_collector.lci_maxdelay));
}

static void
lacp_dprintf(const struct lacp_port *lp, const char *fmt, ...)
{
	va_list va;

	if (lp) {
		printf("%s: ", lp->lp_ifp->if_xname);
	}

	va_start(va, fmt);
	vprintf(fmt, va);
	va_end(va);
}