/*	$OpenBSD: if_trunk.c,v 1.30 2007/01/31 06:20:19 reyk Exp $	*/

/*
 * Copyright (c) 2005, 2006 Reyk Floeter <reyk@openbsd.org>
 * Copyright (c) 2007 Andrew Thompson <thompsa@FreeBSD.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/module.h>
#include <sys/priv.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/hash.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/taskqueue.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_clone.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_llc.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#endif

#ifdef INET6
#include <netinet/ip6.h>
#endif

#include <net/if_vlan_var.h>
#include <net/if_lagg.h>
#include <net/ieee8023ad_lacp.h>
/* Special flags we should propagate to the lagg ports. */
static struct {
	int flag;
	int (*func)(struct ifnet *, int);
} lagg_pflags[] = {
	{IFF_PROMISC, ifpromisc},
	{IFF_ALLMULTI, if_allmulti},
	{0, NULL}
};

SLIST_HEAD(__trhead, lagg_softc) lagg_list;	/* list of laggs */
static struct mtx	lagg_list_mtx;
eventhandler_tag	lagg_detach_cookie = NULL;

static int	lagg_clone_create(struct if_clone *, int, caddr_t);
static void	lagg_clone_destroy(struct ifnet *);
static void	lagg_lladdr(struct lagg_softc *, uint8_t *);
static void	lagg_capabilities(struct lagg_softc *);
static void	lagg_port_lladdr(struct lagg_port *, uint8_t *);
static void	lagg_port_setlladdr(void *, int);
static int	lagg_port_create(struct lagg_softc *, struct ifnet *);
static int	lagg_port_destroy(struct lagg_port *, int);
static struct mbuf *lagg_input(struct ifnet *, struct mbuf *);
static void	lagg_linkstate(struct lagg_softc *);
static void	lagg_port_state(struct ifnet *, int);
static int	lagg_port_ioctl(struct ifnet *, u_long, caddr_t);
static int	lagg_port_output(struct ifnet *, struct mbuf *,
		    struct sockaddr *, struct rtentry *);
static void	lagg_port_ifdetach(void *arg __unused, struct ifnet *);
static int	lagg_port_checkstacking(struct lagg_softc *);
static void	lagg_port2req(struct lagg_port *, struct lagg_reqport *);
static void	lagg_init(void *);
static void	lagg_stop(struct lagg_softc *);
static int	lagg_ioctl(struct ifnet *, u_long, caddr_t);
static int	lagg_ether_setmulti(struct lagg_softc *);
static int	lagg_ether_cmdmulti(struct lagg_port *, int);
static int	lagg_setflag(struct lagg_port *, int, int,
		    int (*func)(struct ifnet *, int));
static int	lagg_setflags(struct lagg_port *, int status);
static void	lagg_start(struct ifnet *);
static int	lagg_media_change(struct ifnet *);
static void	lagg_media_status(struct ifnet *, struct ifmediareq *);
static struct lagg_port *lagg_link_active(struct lagg_softc *,
		    struct lagg_port *);
static const void *lagg_gethdr(struct mbuf *, u_int, u_int, void *);

IFC_SIMPLE_DECLARE(lagg, 0);

/* Simple round robin */
static int	lagg_rr_attach(struct lagg_softc *);
static int	lagg_rr_detach(struct lagg_softc *);
static int	lagg_rr_start(struct lagg_softc *, struct mbuf *);
static struct mbuf *lagg_rr_input(struct lagg_softc *, struct lagg_port *,
		    struct mbuf *);

/* Active failover */
static int	lagg_fail_attach(struct lagg_softc *);
static int	lagg_fail_detach(struct lagg_softc *);
static int	lagg_fail_start(struct lagg_softc *, struct mbuf *);
static struct mbuf *lagg_fail_input(struct lagg_softc *, struct lagg_port *,
		    struct mbuf *);

/* Load balancing */
static int	lagg_lb_attach(struct lagg_softc *);
static int	lagg_lb_detach(struct lagg_softc *);
static int	lagg_lb_port_create(struct lagg_port *);
static void	lagg_lb_port_destroy(struct lagg_port *);
static int	lagg_lb_start(struct lagg_softc *, struct mbuf *);
static struct mbuf *lagg_lb_input(struct lagg_softc *, struct lagg_port *,
		    struct mbuf *);
static int	lagg_lb_porttable(struct lagg_softc *, struct lagg_port *);

/* 802.3ad LACP */
static int	lagg_lacp_attach(struct lagg_softc *);
static int	lagg_lacp_detach(struct lagg_softc *);
static int	lagg_lacp_start(struct lagg_softc *, struct mbuf *);
static struct mbuf *lagg_lacp_input(struct lagg_softc *, struct lagg_port *,
		    struct mbuf *);
static void	lagg_lacp_lladdr(struct lagg_softc *);

/* lagg protocol table */
static const struct {
	int			ti_proto;
	int			(*ti_attach)(struct lagg_softc *);
} lagg_protos[] = {
	{ LAGG_PROTO_ROUNDROBIN,	lagg_rr_attach },
	{ LAGG_PROTO_FAILOVER,		lagg_fail_attach },
	{ LAGG_PROTO_LOADBALANCE,	lagg_lb_attach },
	{ LAGG_PROTO_ETHERCHANNEL,	lagg_lb_attach },
	{ LAGG_PROTO_LACP,		lagg_lacp_attach },
	{ LAGG_PROTO_NONE,		NULL }
};
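/*
 * The lagg_protos[] table above is the protocol dispatch point.  When a
 * new protocol is selected via SIOCSLAGG, lagg_ioctl() scans the table
 * and calls the matching ti_attach hook, which fills in the softc's
 * sc_start/sc_input/sc_detach (and related) function pointers; e.g.,
 * roughly:
 *
 *	if (lagg_protos[i].ti_proto == ra->ra_proto)
 *		error = lagg_protos[i].ti_attach(sc);
 *
 * Adding a new aggregation protocol thus only needs an attach routine
 * and a table entry.
 */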
static int
lagg_modevent(module_t mod, int type, void *data)
{

	switch (type) {
	case MOD_LOAD:
		mtx_init(&lagg_list_mtx, "if_lagg list", NULL, MTX_DEF);
		SLIST_INIT(&lagg_list);
		if_clone_attach(&lagg_cloner);
		lagg_input_p = lagg_input;
		lagg_linkstate_p = lagg_port_state;
		lagg_detach_cookie = EVENTHANDLER_REGISTER(
		    ifnet_departure_event, lagg_port_ifdetach, NULL,
		    EVENTHANDLER_PRI_ANY);
		break;
	case MOD_UNLOAD:
		EVENTHANDLER_DEREGISTER(ifnet_departure_event,
		    lagg_detach_cookie);
		if_clone_detach(&lagg_cloner);
		lagg_input_p = NULL;
		lagg_linkstate_p = NULL;
		mtx_destroy(&lagg_list_mtx);
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (0);
}

static moduledata_t lagg_mod = {
	"if_lagg",
	lagg_modevent,
	0
};

DECLARE_MODULE(if_lagg, lagg_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);

static int
lagg_clone_create(struct if_clone *ifc, int unit, caddr_t params)
{
	struct lagg_softc *sc;
	struct ifnet *ifp;
	int i, error = 0;
	static const u_char eaddr[6];	/* 00:00:00:00:00:00 */

	sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK|M_ZERO);
	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		free(sc, M_DEVBUF);
		return (ENOSPC);
	}

	sc->sc_proto = LAGG_PROTO_NONE;
	for (i = 0; lagg_protos[i].ti_proto != LAGG_PROTO_NONE; i++) {
		if (lagg_protos[i].ti_proto == LAGG_PROTO_DEFAULT) {
			sc->sc_proto = lagg_protos[i].ti_proto;
			if ((error = lagg_protos[i].ti_attach(sc)) != 0) {
				if_free_type(ifp, IFT_ETHER);
				free(sc, M_DEVBUF);
				return (error);
			}
			break;
		}
	}
	LAGG_LOCK_INIT(sc);
	SLIST_INIT(&sc->sc_ports);
	TASK_INIT(&sc->sc_lladdr_task, 0, lagg_port_setlladdr, sc);

	/* Initialise pseudo media types */
	ifmedia_init(&sc->sc_media, 0, lagg_media_change,
	    lagg_media_status);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);

	if_initname(ifp, ifc->ifc_name, unit);
	ifp->if_type = IFT_ETHER;
	ifp->if_softc = sc;
	ifp->if_start = lagg_start;
	ifp->if_init = lagg_init;
	ifp->if_ioctl = lagg_ioctl;
	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;

	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach as an ordinary ethernet device, children will be attached
	 * as special device IFT_IEEE8023ADLAG.
	 */
	ether_ifattach(ifp, eaddr);

	/* Insert into the global list of laggs */
	mtx_lock(&lagg_list_mtx);
	SLIST_INSERT_HEAD(&lagg_list, sc, sc_entries);
	mtx_unlock(&lagg_list_mtx);

	return (0);
}
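/*
 * Illustrative usage (not part of the driver): a lagg interface is
 * created and configured from userland with ifconfig(8), along the
 * lines of:
 *
 *	ifconfig lagg0 create
 *	ifconfig lagg0 laggproto lacp laggport em0 laggport em1 up
 *
 * where em0/em1 stand in for any two Ethernet ports.  "create" enters
 * lagg_clone_create() above; "laggproto" and "laggport" map to the
 * SIOCSLAGG and SIOCSLAGGPORT ioctls handled in lagg_ioctl() below.
 */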
static void
lagg_clone_destroy(struct ifnet *ifp)
{
	struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
	struct lagg_port *lp;

	LAGG_WLOCK(sc);

	lagg_stop(sc);
	ifp->if_flags &= ~IFF_UP;

	/* Shutdown and remove lagg ports */
	while ((lp = SLIST_FIRST(&sc->sc_ports)) != NULL)
		lagg_port_destroy(lp, 1);
	/* Unhook the aggregation protocol */
	(*sc->sc_detach)(sc);

	LAGG_WUNLOCK(sc);

	ifmedia_removeall(&sc->sc_media);
	ether_ifdetach(ifp);
	if_free_type(ifp, IFT_ETHER);

	mtx_lock(&lagg_list_mtx);
	SLIST_REMOVE(&lagg_list, sc, lagg_softc, sc_entries);
	mtx_unlock(&lagg_list_mtx);

	taskqueue_drain(taskqueue_swi, &sc->sc_lladdr_task);
	LAGG_LOCK_DESTROY(sc);
	free(sc, M_DEVBUF);
}

static void
lagg_lladdr(struct lagg_softc *sc, uint8_t *lladdr)
{
	struct ifnet *ifp = sc->sc_ifp;

	if (memcmp(lladdr, IF_LLADDR(ifp), ETHER_ADDR_LEN) == 0)
		return;

	bcopy(lladdr, IF_LLADDR(ifp), ETHER_ADDR_LEN);
	/* Let the protocol know the MAC has changed */
	if (sc->sc_lladdr != NULL)
		(*sc->sc_lladdr)(sc);
}

static void
lagg_capabilities(struct lagg_softc *sc)
{
	struct lagg_port *lp;
	int cap = ~0, ena = ~0;
	u_long hwa = ~0UL;

	LAGG_WLOCK_ASSERT(sc);

	/* Get capabilities from the lagg ports */
	SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
		cap &= lp->lp_ifp->if_capabilities;
		ena &= lp->lp_ifp->if_capenable;
		hwa &= lp->lp_ifp->if_hwassist;
	}
	cap = (cap == ~0 ? 0 : cap);
	ena = (ena == ~0 ? 0 : ena);
	hwa = (hwa == ~0 ? 0 : hwa);

	if (sc->sc_ifp->if_capabilities != cap ||
	    sc->sc_ifp->if_capenable != ena ||
	    sc->sc_ifp->if_hwassist != hwa) {
		sc->sc_ifp->if_capabilities = cap;
		sc->sc_ifp->if_capenable = ena;
		sc->sc_ifp->if_hwassist = hwa;
		getmicrotime(&sc->sc_ifp->if_lastchange);

		if (sc->sc_ifflags & IFF_DEBUG)
			if_printf(sc->sc_ifp,
			    "capabilities 0x%08x enabled 0x%08x\n", cap, ena);
	}
}

static void
lagg_port_lladdr(struct lagg_port *lp, uint8_t *lladdr)
{
	struct lagg_softc *sc = lp->lp_softc;
	struct ifnet *ifp = lp->lp_ifp;
	struct lagg_llq *llq;
	int pending = 0;

	LAGG_WLOCK_ASSERT(sc);

	if (lp->lp_detaching ||
	    memcmp(lladdr, IF_LLADDR(ifp), ETHER_ADDR_LEN) == 0)
		return;

	/* Check to make sure it's not already queued to be changed */
	SLIST_FOREACH(llq, &sc->sc_llq_head, llq_entries) {
		if (llq->llq_ifp == ifp) {
			pending = 1;
			break;
		}
	}

	if (!pending) {
		llq = malloc(sizeof(struct lagg_llq), M_DEVBUF, M_NOWAIT);
		if (llq == NULL)	/* XXX what to do */
			return;
	}

	/* Update the lladdr even if pending, it may have changed */
	llq->llq_ifp = ifp;
	bcopy(lladdr, llq->llq_lladdr, ETHER_ADDR_LEN);

	if (!pending)
		SLIST_INSERT_HEAD(&sc->sc_llq_head, llq, llq_entries);

	taskqueue_enqueue(taskqueue_swi, &sc->sc_lladdr_task);
}
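/*
 * Why the detour above: if_setlladdr() re-enters the member driver's
 * ioctl path, which typically takes driver locks; doing that with the
 * lagg lock held could produce a lock-order reversal.  Queueing the
 * change lets lagg_port_setlladdr() below apply it from taskqueue_swi
 * without any lagg lock held.
 */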
/*
 * Set the interface MAC address from a taskqueue to avoid a LOR.
 */
static void
lagg_port_setlladdr(void *arg, int pending)
{
	struct lagg_softc *sc = (struct lagg_softc *)arg;
	struct lagg_llq *llq, *head;
	struct ifnet *ifp;
	int error;

	/* Grab a local reference of the queue and remove it from the softc */
	LAGG_WLOCK(sc);
	head = SLIST_FIRST(&sc->sc_llq_head);
	SLIST_FIRST(&sc->sc_llq_head) = NULL;
	LAGG_WUNLOCK(sc);

	/*
	 * Traverse the queue and set the lladdr on each ifp. It is safe to do
	 * unlocked as we have the only reference to it.
	 */
	for (llq = head; llq != NULL; llq = head) {
		ifp = llq->llq_ifp;

		/* Set the link layer address */
		error = if_setlladdr(ifp, llq->llq_lladdr, ETHER_ADDR_LEN);
		if (error)
			printf("%s: setlladdr failed on %s\n", __func__,
			    ifp->if_xname);

		head = SLIST_NEXT(llq, llq_entries);
		free(llq, M_DEVBUF);
	}
}

static int
lagg_port_create(struct lagg_softc *sc, struct ifnet *ifp)
{
	struct lagg_softc *sc_ptr;
	struct lagg_port *lp;
	int error = 0;

	LAGG_WLOCK_ASSERT(sc);

	/* Limit the maximal number of lagg ports */
	if (sc->sc_count >= LAGG_MAX_PORTS)
		return (ENOSPC);

	/* New lagg port has to be in an idle state */
	if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
		return (EBUSY);

	/* Check if port has already been associated to a lagg */
	if (ifp->if_lagg != NULL)
		return (EBUSY);

	/* XXX Disallow non-ethernet interfaces (this should really accept
	 * any IEEE 802 type) */
	if (ifp->if_type != IFT_ETHER)
		return (EPROTONOSUPPORT);

	/* Allow the first Ethernet member to define the MTU */
	if (SLIST_EMPTY(&sc->sc_ports))
		sc->sc_ifp->if_mtu = ifp->if_mtu;
	else if (sc->sc_ifp->if_mtu != ifp->if_mtu) {
		if_printf(sc->sc_ifp, "invalid MTU for %s\n",
		    ifp->if_xname);
		return (EINVAL);
	}

	if ((lp = malloc(sizeof(struct lagg_port),
	    M_DEVBUF, M_NOWAIT|M_ZERO)) == NULL)
		return (ENOMEM);

	/* Check if port is a stacked lagg */
	mtx_lock(&lagg_list_mtx);
	SLIST_FOREACH(sc_ptr, &lagg_list, sc_entries) {
		if (ifp == sc_ptr->sc_ifp) {
			mtx_unlock(&lagg_list_mtx);
			free(lp, M_DEVBUF);
			return (EINVAL);
			/* XXX disable stacking for the moment, it's untested
			lp->lp_flags |= LAGG_PORT_STACK;
			if (lagg_port_checkstacking(sc_ptr) >=
			    LAGG_MAX_STACKING) {
				mtx_unlock(&lagg_list_mtx);
				free(lp, M_DEVBUF);
				return (E2BIG);
			}
			*/
		}
	}
	mtx_unlock(&lagg_list_mtx);

	/* Change the interface type */
	lp->lp_iftype = ifp->if_type;
	ifp->if_type = IFT_IEEE8023ADLAG;
	ifp->if_lagg = lp;
	lp->lp_ioctl = ifp->if_ioctl;
	ifp->if_ioctl = lagg_port_ioctl;
	lp->lp_output = ifp->if_output;
	ifp->if_output = lagg_port_output;

	lp->lp_ifp = ifp;
	lp->lp_softc = sc;

	/* Save port link layer address */
	bcopy(IF_LLADDR(ifp), lp->lp_lladdr, ETHER_ADDR_LEN);

	if (SLIST_EMPTY(&sc->sc_ports)) {
		sc->sc_primary = lp;
		lagg_lladdr(sc, IF_LLADDR(ifp));
	} else {
		/* Update link layer address for this port */
		lagg_port_lladdr(lp, IF_LLADDR(sc->sc_ifp));
	}

	/* Insert into the list of ports */
	SLIST_INSERT_HEAD(&sc->sc_ports, lp, lp_entries);
	sc->sc_count++;

	/* Update lagg capabilities */
	lagg_capabilities(sc);
	lagg_linkstate(sc);

	/* Add multicast addresses and interface flags to this port */
	lagg_ether_cmdmulti(lp, 1);
	lagg_setflags(lp, 1);

	if (sc->sc_port_create != NULL)
		error = (*sc->sc_port_create)(lp);
	if (error) {
		/* remove the port again, without calling sc_port_destroy */
		lagg_port_destroy(lp, 0);
		return (error);
	}

	return (error);
}
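/*
 * After a successful lagg_port_create() the member port is fully
 * intercepted: its if_type becomes IFT_IEEE8023ADLAG, if_lagg points
 * back at the lagg_port, and its if_ioctl/if_output vectors are
 * redirected to lagg_port_ioctl()/lagg_port_output().  The originals
 * are saved in lp_ioctl/lp_output so lagg_port_destroy() can restore
 * them when the port leaves.
 */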
static int
lagg_port_checkstacking(struct lagg_softc *sc)
{
	struct lagg_softc *sc_ptr;
	struct lagg_port *lp;
	int m = 0;

	LAGG_WLOCK_ASSERT(sc);

	SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
		if (lp->lp_flags & LAGG_PORT_STACK) {
			sc_ptr = (struct lagg_softc *)lp->lp_ifp->if_softc;
			m = MAX(m, lagg_port_checkstacking(sc_ptr));
		}
	}

	return (m + 1);
}

static int
lagg_port_destroy(struct lagg_port *lp, int runpd)
{
	struct lagg_softc *sc = lp->lp_softc;
	struct lagg_port *lp_ptr;
	struct lagg_llq *llq;
	struct ifnet *ifp = lp->lp_ifp;

	LAGG_WLOCK_ASSERT(sc);

	if (runpd && sc->sc_port_destroy != NULL)
		(*sc->sc_port_destroy)(lp);

	/*
	 * Remove multicast addresses and interface flags from this port and
	 * reset the MAC address, skip if the interface is being detached.
	 */
	if (!lp->lp_detaching) {
		lagg_ether_cmdmulti(lp, 0);
		lagg_setflags(lp, 0);
		lagg_port_lladdr(lp, lp->lp_lladdr);
	}

	/* Restore interface */
	ifp->if_type = lp->lp_iftype;
	ifp->if_ioctl = lp->lp_ioctl;
	ifp->if_output = lp->lp_output;
	ifp->if_lagg = NULL;

	/* Finally, remove the port from the lagg */
	SLIST_REMOVE(&sc->sc_ports, lp, lagg_port, lp_entries);
	sc->sc_count--;

	/* Update the primary interface */
	if (lp == sc->sc_primary) {
		uint8_t lladdr[ETHER_ADDR_LEN];

		if ((lp_ptr = SLIST_FIRST(&sc->sc_ports)) == NULL) {
			bzero(&lladdr, ETHER_ADDR_LEN);
		} else {
			bcopy(lp_ptr->lp_lladdr,
			    lladdr, ETHER_ADDR_LEN);
		}
		lagg_lladdr(sc, lladdr);
		sc->sc_primary = lp_ptr;

		/* Update link layer address for each port */
		SLIST_FOREACH(lp_ptr, &sc->sc_ports, lp_entries)
			lagg_port_lladdr(lp_ptr, lladdr);
	}

	/* Remove any pending lladdr changes from the queue */
	if (lp->lp_detaching) {
		SLIST_FOREACH(llq, &sc->sc_llq_head, llq_entries) {
			if (llq->llq_ifp == ifp) {
				SLIST_REMOVE(&sc->sc_llq_head, llq, lagg_llq,
				    llq_entries);
				free(llq, M_DEVBUF);
				break;	/* Only appears once */
			}
		}
	}

	if (lp->lp_ifflags)
		if_printf(ifp, "%s: lp_ifflags unclean\n", __func__);

	free(lp, M_DEVBUF);

	/* Update lagg capabilities */
	lagg_capabilities(sc);
	lagg_linkstate(sc);

	return (0);
}

static int
lagg_port_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct lagg_reqport *rp = (struct lagg_reqport *)data;
	struct lagg_softc *sc;
	struct lagg_port *lp = NULL;
	int error = 0;

	/* Should be checked by the caller */
	if (ifp->if_type != IFT_IEEE8023ADLAG ||
	    (lp = ifp->if_lagg) == NULL || (sc = lp->lp_softc) == NULL)
		goto fallback;

	switch (cmd) {
	case SIOCGLAGGPORT:
		if (rp->rp_portname[0] == '\0' ||
		    ifunit(rp->rp_portname) != ifp) {
			error = EINVAL;
			break;
		}

		LAGG_RLOCK(sc);
		if ((lp = ifp->if_lagg) == NULL || lp->lp_softc != sc) {
			error = ENOENT;
			LAGG_RUNLOCK(sc);
			break;
		}

		lagg_port2req(lp, rp);
		LAGG_RUNLOCK(sc);
		break;

	case SIOCSIFCAP:
		if (lp->lp_ioctl == NULL) {
			error = EINVAL;
			break;
		}
		error = (*lp->lp_ioctl)(ifp, cmd, data);
		if (error)
			break;

		/* Update lagg interface capabilities */
		LAGG_WLOCK(sc);
		lagg_capabilities(sc);
		LAGG_WUNLOCK(sc);
		break;

	case SIOCSIFMTU:
		/* Do not allow the MTU to be changed once joined */
		error = EINVAL;
		break;

	default:
		goto fallback;
	}

	return (error);

fallback:
	/* lp may still be NULL here if the checks above failed */
	if (lp != NULL && lp->lp_ioctl != NULL)
		return ((*lp->lp_ioctl)(ifp, cmd, data));

	return (EINVAL);
}
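/*
 * lagg_port_output() below is the output vector installed on every
 * member port.  Only frame types needed to bring the physical link
 * itself up are passed through directly -- in practice EAPOL/802.1X
 * authentication frames, which must travel on the individual port.
 * All other traffic has to be sent via the lagg interface and is
 * dropped here.
 */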
static int
lagg_port_output(struct ifnet *ifp, struct mbuf *m,
	struct sockaddr *dst, struct rtentry *rt0)
{
	struct lagg_port *lp = ifp->if_lagg;
	struct ether_header *eh;
	short type = 0;

	switch (dst->sa_family) {
	case pseudo_AF_HDRCMPLT:
	case AF_UNSPEC:
		eh = (struct ether_header *)dst->sa_data;
		type = eh->ether_type;
		break;
	}

	/*
	 * Only allow ethernet types required to initiate or maintain the link,
	 * aggregated frames take a different path.
	 */
	switch (ntohs(type)) {
	case ETHERTYPE_PAE:	/* EAPOL PAE/802.1x */
		return ((*lp->lp_output)(ifp, m, dst, rt0));
	}

	/* drop any other frames */
	m_freem(m);
	return (EBUSY);
}

static void
lagg_port_ifdetach(void *arg __unused, struct ifnet *ifp)
{
	struct lagg_port *lp;
	struct lagg_softc *sc;

	if ((lp = ifp->if_lagg) == NULL)
		return;

	sc = lp->lp_softc;

	LAGG_WLOCK(sc);
	lp->lp_detaching = 1;
	lagg_port_destroy(lp, 1);
	LAGG_WUNLOCK(sc);
}

static void
lagg_port2req(struct lagg_port *lp, struct lagg_reqport *rp)
{
	struct lagg_softc *sc = lp->lp_softc;

	strlcpy(rp->rp_ifname, sc->sc_ifname, sizeof(rp->rp_ifname));
	strlcpy(rp->rp_portname, lp->lp_ifp->if_xname, sizeof(rp->rp_portname));
	rp->rp_prio = lp->lp_prio;
	rp->rp_flags = lp->lp_flags;
	if (sc->sc_portreq != NULL)
		(*sc->sc_portreq)(lp, (caddr_t)&rp->rp_psc);

	/* Add protocol specific flags */
	switch (sc->sc_proto) {
	case LAGG_PROTO_FAILOVER:
		if (lp == sc->sc_primary)
			rp->rp_flags |= LAGG_PORT_MASTER;
		if (lp == lagg_link_active(sc, sc->sc_primary))
			rp->rp_flags |= LAGG_PORT_ACTIVE;
		break;

	case LAGG_PROTO_ROUNDROBIN:
	case LAGG_PROTO_LOADBALANCE:
	case LAGG_PROTO_ETHERCHANNEL:
		if (LAGG_PORTACTIVE(lp))
			rp->rp_flags |= LAGG_PORT_ACTIVE;
		break;

	case LAGG_PROTO_LACP:
		/* LACP has a different definition of active */
		if (lacp_isactive(lp))
			rp->rp_flags |= LAGG_PORT_ACTIVE;
		if (lacp_iscollecting(lp))
			rp->rp_flags |= LAGG_PORT_COLLECTING;
		if (lacp_isdistributing(lp))
			rp->rp_flags |= LAGG_PORT_DISTRIBUTING;
		break;
	}
}

static void
lagg_init(void *xsc)
{
	struct lagg_softc *sc = (struct lagg_softc *)xsc;
	struct lagg_port *lp;
	struct ifnet *ifp = sc->sc_ifp;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	LAGG_WLOCK(sc);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	/* Update the port lladdrs */
	SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		lagg_port_lladdr(lp, IF_LLADDR(ifp));

	if (sc->sc_init != NULL)
		(*sc->sc_init)(sc);

	LAGG_WUNLOCK(sc);
}

static void
lagg_stop(struct lagg_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	LAGG_WLOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;

	if (sc->sc_stop != NULL)
		(*sc->sc_stop)(sc);
}
static int
lagg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
	struct lagg_reqall *ra = (struct lagg_reqall *)data;
	struct lagg_reqport *rp = (struct lagg_reqport *)data, rpbuf;
	struct ifreq *ifr = (struct ifreq *)data;
	struct lagg_port *lp;
	struct ifnet *tpif;
	struct thread *td = curthread;
	char *buf, *outbuf;
	int count, buflen, len, error = 0;

	bzero(&rpbuf, sizeof(rpbuf));

	switch (cmd) {
	case SIOCGLAGG:
		LAGG_RLOCK(sc);
		count = 0;
		SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
			count++;
		buflen = count * sizeof(struct lagg_reqport);
		LAGG_RUNLOCK(sc);

		outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO);

		LAGG_RLOCK(sc);
		ra->ra_proto = sc->sc_proto;
		if (sc->sc_req != NULL)
			(*sc->sc_req)(sc, (caddr_t)&ra->ra_psc);

		count = 0;
		buf = outbuf;
		len = min(ra->ra_size, buflen);
		SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
			if (len < sizeof(rpbuf))
				break;

			lagg_port2req(lp, &rpbuf);
			memcpy(buf, &rpbuf, sizeof(rpbuf));
			count++;
			buf += sizeof(rpbuf);
			len -= sizeof(rpbuf);
		}
		LAGG_RUNLOCK(sc);
		ra->ra_ports = count;
		ra->ra_size = count * sizeof(rpbuf);
		error = copyout(outbuf, ra->ra_port, ra->ra_size);
		free(outbuf, M_TEMP);
		break;
	case SIOCSLAGG:
		error = priv_check(td, PRIV_NET_LAGG);
		if (error)
			break;
		if (ra->ra_proto >= LAGG_PROTO_MAX) {
			error = EPROTONOSUPPORT;
			break;
		}
		if (sc->sc_proto != LAGG_PROTO_NONE) {
			LAGG_WLOCK(sc);
			error = sc->sc_detach(sc);
			/* Reset protocol and pointers */
			sc->sc_proto = LAGG_PROTO_NONE;
			sc->sc_detach = NULL;
			sc->sc_start = NULL;
			sc->sc_input = NULL;
			sc->sc_port_create = NULL;
			sc->sc_port_destroy = NULL;
			sc->sc_linkstate = NULL;
			sc->sc_init = NULL;
			sc->sc_stop = NULL;
			sc->sc_lladdr = NULL;
			sc->sc_req = NULL;
			sc->sc_portreq = NULL;
			LAGG_WUNLOCK(sc);
		}
		if (error != 0)
			break;
		for (int i = 0; i < (sizeof(lagg_protos) /
		    sizeof(lagg_protos[0])); i++) {
			if (lagg_protos[i].ti_proto == ra->ra_proto) {
				if (sc->sc_ifflags & IFF_DEBUG)
					printf("%s: using proto %u\n",
					    sc->sc_ifname,
					    lagg_protos[i].ti_proto);
				LAGG_WLOCK(sc);
				sc->sc_proto = lagg_protos[i].ti_proto;
				if (sc->sc_proto != LAGG_PROTO_NONE)
					error = lagg_protos[i].ti_attach(sc);
				LAGG_WUNLOCK(sc);
				return (error);
			}
		}
		error = EPROTONOSUPPORT;
		break;
	case SIOCGLAGGPORT:
		if (rp->rp_portname[0] == '\0' ||
		    (tpif = ifunit(rp->rp_portname)) == NULL) {
			error = EINVAL;
			break;
		}

		LAGG_RLOCK(sc);
		if ((lp = (struct lagg_port *)tpif->if_lagg) == NULL ||
		    lp->lp_softc != sc) {
			error = ENOENT;
			LAGG_RUNLOCK(sc);
			break;
		}

		lagg_port2req(lp, rp);
		LAGG_RUNLOCK(sc);
		break;
	case SIOCSLAGGPORT:
		error = priv_check(td, PRIV_NET_LAGG);
		if (error)
			break;
		if (rp->rp_portname[0] == '\0' ||
		    (tpif = ifunit(rp->rp_portname)) == NULL) {
			error = EINVAL;
			break;
		}
		LAGG_WLOCK(sc);
		error = lagg_port_create(sc, tpif);
		LAGG_WUNLOCK(sc);
		break;
	case SIOCSLAGGDELPORT:
		error = priv_check(td, PRIV_NET_LAGG);
		if (error)
			break;
		if (rp->rp_portname[0] == '\0' ||
		    (tpif = ifunit(rp->rp_portname)) == NULL) {
			error = EINVAL;
			break;
		}

		LAGG_WLOCK(sc);
		if ((lp = (struct lagg_port *)tpif->if_lagg) == NULL ||
		    lp->lp_softc != sc) {
			error = ENOENT;
			LAGG_WUNLOCK(sc);
			break;
		}

		error = lagg_port_destroy(lp, 1);
		LAGG_WUNLOCK(sc);
		break;
	case SIOCSIFFLAGS:
		/* Set flags on ports too */
		LAGG_WLOCK(sc);
		SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
			lagg_setflags(lp, 1);
		}
		LAGG_WUNLOCK(sc);

		if (!(ifp->if_flags & IFF_UP) &&
		    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
			/*
			 * If interface is marked down and it is running,
			 * then stop and disable it.
			 */
			LAGG_WLOCK(sc);
			lagg_stop(sc);
			LAGG_WUNLOCK(sc);
		} else if ((ifp->if_flags & IFF_UP) &&
		    !(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			(*ifp->if_init)(sc);
		}
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		LAGG_WLOCK(sc);
		error = lagg_ether_setmulti(sc);
		LAGG_WUNLOCK(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	case SIOCSIFCAP:
	case SIOCSIFMTU:
		/* Do not allow the MTU or caps to be directly changed */
		error = EINVAL;
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}

static int
lagg_ether_setmulti(struct lagg_softc *sc)
{
	struct lagg_port *lp;

	LAGG_WLOCK_ASSERT(sc);

	SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
		/* First, remove any existing filter entries. */
		lagg_ether_cmdmulti(lp, 0);
		/* copy all addresses from the lagg interface to the port */
		lagg_ether_cmdmulti(lp, 1);
	}
	return (0);
}

static int
lagg_ether_cmdmulti(struct lagg_port *lp, int set)
{
	struct lagg_softc *sc = lp->lp_softc;
	struct ifnet *ifp = lp->lp_ifp;
	struct ifnet *scifp = sc->sc_ifp;
	struct lagg_mc *mc;
	struct ifmultiaddr *ifma, *rifma = NULL;
	struct sockaddr_dl sdl;
	int error;

	LAGG_WLOCK_ASSERT(sc);

	bzero((char *)&sdl, sizeof(sdl));
	sdl.sdl_len = sizeof(sdl);
	sdl.sdl_family = AF_LINK;
	sdl.sdl_type = IFT_ETHER;
	sdl.sdl_alen = ETHER_ADDR_LEN;
	sdl.sdl_index = ifp->if_index;

	if (set) {
		TAILQ_FOREACH(ifma, &scifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
			    LLADDR(&sdl), ETHER_ADDR_LEN);

			error = if_addmulti(ifp, (struct sockaddr *)&sdl, &rifma);
			if (error)
				return (error);
			mc = malloc(sizeof(struct lagg_mc), M_DEVBUF, M_NOWAIT);
			if (mc == NULL)
				return (ENOMEM);
			mc->mc_ifma = rifma;
			SLIST_INSERT_HEAD(&lp->lp_mc_head, mc, mc_entries);
		}
	} else {
		while ((mc = SLIST_FIRST(&lp->lp_mc_head)) != NULL) {
			SLIST_REMOVE(&lp->lp_mc_head, mc, lagg_mc, mc_entries);
			if_delmulti_ifma(mc->mc_ifma);
			free(mc, M_DEVBUF);
		}
	}
	return (0);
}
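/*
 * Multicast state is mirrored rather than shared: for each AF_LINK
 * address on the lagg interface, lagg_ether_cmdmulti() takes a
 * reference on the member port with if_addmulti() and records it in
 * lp_mc_head, so the references can be released one by one when the
 * port leaves or the filter is rebuilt by lagg_ether_setmulti().
 */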
/* Handle a ref counted flag that should be set on the lagg port as well */
static int
lagg_setflag(struct lagg_port *lp, int flag, int status,
	     int (*func)(struct ifnet *, int))
{
	struct lagg_softc *sc = lp->lp_softc;
	struct ifnet *scifp = sc->sc_ifp;
	struct ifnet *ifp = lp->lp_ifp;
	int error;

	LAGG_WLOCK_ASSERT(sc);

	status = status ? (scifp->if_flags & flag) : 0;
	/* Now "status" contains the flag value or 0 */

	/*
	 * See if the port's recorded status differs from what we want it
	 * to be.  If it does, flip it.  We record the port's status in
	 * lp_ifflags so that we won't clear a flag on the port that we
	 * haven't set.  In fact, we don't clear or set port flags
	 * directly; we acquire or release references to them.  That's
	 * why we can be sure that the recorded flags are still in
	 * accord with the actual port flags.
	 */
	if (status != (lp->lp_ifflags & flag)) {
		error = (*func)(ifp, status);
		if (error)
			return (error);
		lp->lp_ifflags &= ~flag;
		lp->lp_ifflags |= status;
	}
	return (0);
}

/*
 * Handle IFF_* flags that require certain changes on the lagg port.
 * If "status" is true, update the port flags respective to the lagg;
 * if "status" is false, forcibly clear the flags set on the port.
 */
static int
lagg_setflags(struct lagg_port *lp, int status)
{
	int error, i;

	for (i = 0; lagg_pflags[i].flag; i++) {
		error = lagg_setflag(lp, lagg_pflags[i].flag,
		    status, lagg_pflags[i].func);
		if (error)
			return (error);
	}
	return (0);
}
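/*
 * Taken together, lagg_pflags[], lagg_setflag() and lagg_setflags()
 * keep member flags in step with the lagg interface.  For example,
 * marking lagg0 promiscuous ends up invoking, for every port, roughly:
 *
 *	lagg_setflag(lp, IFF_PROMISC, 1, ifpromisc);
 *
 * which takes a promiscuous-mode reference on the port; clearing the
 * flag, or detaching the port, releases that reference again.
 */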
static void
lagg_start(struct ifnet *ifp)
{
	struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
	struct mbuf *m;
	int error = 0;

	LAGG_RLOCK(sc);
	/* We need a Tx algorithm and at least one port */
	if (sc->sc_proto == LAGG_PROTO_NONE || sc->sc_count == 0) {
		IF_DRAIN(&ifp->if_snd);
		LAGG_RUNLOCK(sc);
		return;
	}

	for (;; error = 0) {
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		ETHER_BPF_MTAP(ifp, m);

		error = (*sc->sc_start)(sc, m);
		if (error == 0)
			ifp->if_opackets++;
		else
			ifp->if_oerrors++;
	}
	LAGG_RUNLOCK(sc);
}

static struct mbuf *
lagg_input(struct ifnet *ifp, struct mbuf *m)
{
	struct lagg_port *lp = ifp->if_lagg;
	struct lagg_softc *sc = lp->lp_softc;
	struct ifnet *scifp = sc->sc_ifp;

	if ((scifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
	    (lp->lp_flags & LAGG_PORT_DISABLED) ||
	    sc->sc_proto == LAGG_PROTO_NONE) {
		m_freem(m);
		return (NULL);
	}

	LAGG_RLOCK(sc);
	ETHER_BPF_MTAP(scifp, m);

	m = (*sc->sc_input)(sc, lp, m);

	if (m != NULL) {
		scifp->if_ipackets++;
		scifp->if_ibytes += m->m_pkthdr.len;

		if (scifp->if_flags & IFF_MONITOR) {
			m_freem(m);
			m = NULL;
		}
	}

	LAGG_RUNLOCK(sc);
	return (m);
}

static int
lagg_media_change(struct ifnet *ifp)
{
	struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;

	if (sc->sc_ifflags & IFF_DEBUG)
		printf("%s\n", __func__);

	/* Ignore */
	return (0);
}

static void
lagg_media_status(struct ifnet *ifp, struct ifmediareq *imr)
{
	struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
	struct lagg_port *lp;

	imr->ifm_status = IFM_AVALID;
	imr->ifm_active = IFM_ETHER | IFM_AUTO;

	LAGG_RLOCK(sc);
	SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
		if (LAGG_PORTACTIVE(lp))
			imr->ifm_status |= IFM_ACTIVE;
	}
	LAGG_RUNLOCK(sc);
}

static void
lagg_linkstate(struct lagg_softc *sc)
{
	struct lagg_port *lp;
	int new_link = LINK_STATE_DOWN;
	uint64_t speed;

	/* Our link is considered up if at least one of our ports is active */
	SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
		if (lp->lp_link_state == LINK_STATE_UP) {
			new_link = LINK_STATE_UP;
			break;
		}
	}
	if_link_state_change(sc->sc_ifp, new_link);

	/* Update if_baudrate to reflect the max possible speed */
	switch (sc->sc_proto) {
	case LAGG_PROTO_FAILOVER:
		sc->sc_ifp->if_baudrate = sc->sc_primary != NULL ?
		    sc->sc_primary->lp_ifp->if_baudrate : 0;
		break;
	case LAGG_PROTO_ROUNDROBIN:
	case LAGG_PROTO_LOADBALANCE:
	case LAGG_PROTO_ETHERCHANNEL:
		speed = 0;
		SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
			speed += lp->lp_ifp->if_baudrate;
		sc->sc_ifp->if_baudrate = speed;
		break;
	case LAGG_PROTO_LACP:
		/* LACP updates if_baudrate itself */
		break;
	}
}
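/*
 * lagg_link_active() below is the common fallback used by the
 * round-robin, failover and load-balance transmit paths: given a
 * preferred port, it returns that port if its link is up, else the
 * next port in the list, else the first active port found by a full
 * scan, and NULL only when every link is down.
 */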
static void
lagg_port_state(struct ifnet *ifp, int state)
{
	struct lagg_port *lp = (struct lagg_port *)ifp->if_lagg;
	struct lagg_softc *sc = NULL;

	if (lp != NULL)
		sc = lp->lp_softc;
	if (sc == NULL)
		return;

	LAGG_WLOCK(sc);
	lagg_linkstate(sc);
	if (sc->sc_linkstate != NULL)
		(*sc->sc_linkstate)(lp);
	LAGG_WUNLOCK(sc);
}

struct lagg_port *
lagg_link_active(struct lagg_softc *sc, struct lagg_port *lp)
{
	struct lagg_port *lp_next, *rval = NULL;
	// int new_link = LINK_STATE_DOWN;

	LAGG_RLOCK_ASSERT(sc);
	/*
	 * Search for a port which reports an active link state.
	 */

	if (lp == NULL)
		goto search;
	if (LAGG_PORTACTIVE(lp)) {
		rval = lp;
		goto found;
	}
	if ((lp_next = SLIST_NEXT(lp, lp_entries)) != NULL &&
	    LAGG_PORTACTIVE(lp_next)) {
		rval = lp_next;
		goto found;
	}

search:
	SLIST_FOREACH(lp_next, &sc->sc_ports, lp_entries) {
		if (LAGG_PORTACTIVE(lp_next)) {
			rval = lp_next;
			goto found;
		}
	}

found:
	if (rval != NULL) {
		/*
		 * The IEEE 802.1D standard assumes that a lagg with
		 * multiple ports is always full duplex. This is valid
		 * for load sharing laggs and if at least two links
		 * are active. Unfortunately, checking the latter would
		 * be too expensive at this point.
		 XXX
		if ((sc->sc_capabilities & IFCAP_LAGG_FULLDUPLEX) &&
		    (sc->sc_count > 1))
			new_link = LINK_STATE_FULL_DUPLEX;
		else
			new_link = rval->lp_link_state;
		 */
	}

	return (rval);
}

static const void *
lagg_gethdr(struct mbuf *m, u_int off, u_int len, void *buf)
{
	if (m->m_pkthdr.len < (off + len)) {
		return (NULL);
	} else if (m->m_len < (off + len)) {
		m_copydata(m, off, len, buf);
		return (buf);
	}
	return (mtod(m, char *) + off);
}

uint32_t
lagg_hashmbuf(struct mbuf *m, uint32_t key)
{
	uint16_t etype;
	uint32_t p = 0;
	int off;
	struct ether_header *eh;
	struct ether_vlan_header vlanbuf;
	const struct ether_vlan_header *vlan;
#ifdef INET
	const struct ip *ip;
	struct ip ipbuf;
#endif
#ifdef INET6
	const struct ip6_hdr *ip6;
	struct ip6_hdr ip6buf;
	uint32_t flow;
#endif

	off = sizeof(*eh);
	if (m->m_len < off)
		goto out;
	eh = mtod(m, struct ether_header *);
	etype = ntohs(eh->ether_type);
	p = hash32_buf(&eh->ether_shost, ETHER_ADDR_LEN, key);
	p = hash32_buf(&eh->ether_dhost, ETHER_ADDR_LEN, p);

	/* Special handling for encapsulating VLAN frames */
	if (m->m_flags & M_VLANTAG) {
		p = hash32_buf(&m->m_pkthdr.ether_vtag,
		    sizeof(m->m_pkthdr.ether_vtag), p);
	} else if (etype == ETHERTYPE_VLAN) {
		vlan = lagg_gethdr(m, off, sizeof(*vlan), &vlanbuf);
		if (vlan == NULL)
			goto out;

		p = hash32_buf(&vlan->evl_tag, sizeof(vlan->evl_tag), p);
		etype = ntohs(vlan->evl_proto);
		off += sizeof(*vlan) - sizeof(*eh);
	}

	switch (etype) {
#ifdef INET
	case ETHERTYPE_IP:
		ip = lagg_gethdr(m, off, sizeof(*ip), &ipbuf);
		if (ip == NULL)
			goto out;

		p = hash32_buf(&ip->ip_src, sizeof(struct in_addr), p);
		p = hash32_buf(&ip->ip_dst, sizeof(struct in_addr), p);
		break;
#endif
#ifdef INET6
	case ETHERTYPE_IPV6:
		ip6 = lagg_gethdr(m, off, sizeof(*ip6), &ip6buf);
		if (ip6 == NULL)
			goto out;

		p = hash32_buf(&ip6->ip6_src, sizeof(struct in6_addr), p);
		p = hash32_buf(&ip6->ip6_dst, sizeof(struct in6_addr), p);
		flow = ip6->ip6_flow & IPV6_FLOWLABEL_MASK;
		p = hash32_buf(&flow, sizeof(flow), p);	/* IPv6 flow label */
		break;
#endif
	}
out:
	return (p);
}
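/*
 * lagg_hashmbuf() folds the Ethernet source/destination, any VLAN tag,
 * and the IPv4 or IPv6 addresses (plus the IPv6 flow label) into one
 * 32-bit value.  The load-balance path then reduces it to a port
 * index, as in lagg_lb_start() below, roughly:
 *
 *	p = lagg_hashmbuf(m, lb->lb_key) % sc->sc_count;
 *	lp = lb->lb_ports[p];
 *
 * so packets of a single flow stick to one port while distinct flows
 * spread across the members.
 */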
int
lagg_enqueue(struct ifnet *ifp, struct mbuf *m)
{

	return (ifp->if_transmit)(ifp, m);
}

/*
 * Simple round robin aggregation
 */

static int
lagg_rr_attach(struct lagg_softc *sc)
{
	sc->sc_detach = lagg_rr_detach;
	sc->sc_start = lagg_rr_start;
	sc->sc_input = lagg_rr_input;
	sc->sc_port_create = NULL;
	sc->sc_capabilities = IFCAP_LAGG_FULLDUPLEX;
	sc->sc_seq = 0;

	return (0);
}

static int
lagg_rr_detach(struct lagg_softc *sc)
{
	return (0);
}

static int
lagg_rr_start(struct lagg_softc *sc, struct mbuf *m)
{
	struct lagg_port *lp;
	uint32_t p;

	p = atomic_fetchadd_32(&sc->sc_seq, 1);
	p %= sc->sc_count;
	lp = SLIST_FIRST(&sc->sc_ports);
	while (p--)
		lp = SLIST_NEXT(lp, lp_entries);

	/*
	 * Check the port's link state. This will return the next active
	 * port if the link is down or the port is NULL.
	 */
	if ((lp = lagg_link_active(sc, lp)) == NULL) {
		m_freem(m);
		return (ENOENT);
	}

	/* Send mbuf */
	return (lagg_enqueue(lp->lp_ifp, m));
}

static struct mbuf *
lagg_rr_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
{
	struct ifnet *ifp = sc->sc_ifp;

	/* Just pass in the packet to our lagg device */
	m->m_pkthdr.rcvif = ifp;

	return (m);
}

/*
 * Active failover
 */

static int
lagg_fail_attach(struct lagg_softc *sc)
{
	sc->sc_detach = lagg_fail_detach;
	sc->sc_start = lagg_fail_start;
	sc->sc_input = lagg_fail_input;
	sc->sc_port_create = NULL;
	sc->sc_port_destroy = NULL;

	return (0);
}

static int
lagg_fail_detach(struct lagg_softc *sc)
{
	return (0);
}

static int
lagg_fail_start(struct lagg_softc *sc, struct mbuf *m)
{
	struct lagg_port *lp;

	/* Use the master port if active or the next available port */
	if ((lp = lagg_link_active(sc, sc->sc_primary)) == NULL) {
		m_freem(m);
		return (ENOENT);
	}

	/* Send mbuf */
	return (lagg_enqueue(lp->lp_ifp, m));
}

static struct mbuf *
lagg_fail_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct lagg_port *tmp_tp;

	if (lp == sc->sc_primary) {
		m->m_pkthdr.rcvif = ifp;
		return (m);
	}

	if (!LAGG_PORTACTIVE(sc->sc_primary)) {
		tmp_tp = lagg_link_active(sc, sc->sc_primary);
		/*
		 * If tmp_tp is NULL, we've received a packet when all
		 * our links are down. Weird, but process it anyway.
		 */
		if ((tmp_tp == NULL || tmp_tp == lp)) {
			m->m_pkthdr.rcvif = ifp;
			return (m);
		}
	}

	m_freem(m);
	return (NULL);
}

/*
 * Load balancing
 */

static int
lagg_lb_attach(struct lagg_softc *sc)
{
	struct lagg_port *lp;
	struct lagg_lb *lb;

	if ((lb = (struct lagg_lb *)malloc(sizeof(struct lagg_lb),
	    M_DEVBUF, M_NOWAIT|M_ZERO)) == NULL)
		return (ENOMEM);

	sc->sc_detach = lagg_lb_detach;
	sc->sc_start = lagg_lb_start;
	sc->sc_input = lagg_lb_input;
	sc->sc_port_create = lagg_lb_port_create;
	sc->sc_port_destroy = lagg_lb_port_destroy;
	sc->sc_capabilities = IFCAP_LAGG_FULLDUPLEX;

	lb->lb_key = arc4random();
	sc->sc_psc = (caddr_t)lb;

	SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		lagg_lb_port_create(lp);

	return (0);
}

static int
lagg_lb_detach(struct lagg_softc *sc)
{
	struct lagg_lb *lb = (struct lagg_lb *)sc->sc_psc;
	if (lb != NULL)
		free(lb, M_DEVBUF);
	return (0);
}

static int
lagg_lb_porttable(struct lagg_softc *sc, struct lagg_port *lp)
{
	struct lagg_lb *lb = (struct lagg_lb *)sc->sc_psc;
	struct lagg_port *lp_next;
	int i = 0;

	bzero(&lb->lb_ports, sizeof(lb->lb_ports));
	SLIST_FOREACH(lp_next, &sc->sc_ports, lp_entries) {
		if (lp_next == lp)
			continue;
		if (i >= LAGG_MAX_PORTS)
			return (EINVAL);
		if (sc->sc_ifflags & IFF_DEBUG)
			printf("%s: port %s at index %d\n",
			    sc->sc_ifname, lp_next->lp_ifname, i);
		lb->lb_ports[i++] = lp_next;
	}

	return (0);
}
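/*
 * The lb_ports[] table is rebuilt on every membership change:
 * lagg_lb_port_create() passes lp == NULL so every port is indexed,
 * while lagg_lb_port_destroy() passes the departing port so it is
 * skipped.  Keeping the mapping in a flat array makes the per-packet
 * lookup in lagg_lb_start() a single index operation.
 */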
static int
lagg_lb_port_create(struct lagg_port *lp)
{
	struct lagg_softc *sc = lp->lp_softc;
	return (lagg_lb_porttable(sc, NULL));
}

static void
lagg_lb_port_destroy(struct lagg_port *lp)
{
	struct lagg_softc *sc = lp->lp_softc;
	lagg_lb_porttable(sc, lp);
}

static int
lagg_lb_start(struct lagg_softc *sc, struct mbuf *m)
{
	struct lagg_lb *lb = (struct lagg_lb *)sc->sc_psc;
	struct lagg_port *lp = NULL;
	uint32_t p = 0;

	p = lagg_hashmbuf(m, lb->lb_key);
	p %= sc->sc_count;
	lp = lb->lb_ports[p];

	/*
	 * Check the port's link state. This will return the next active
	 * port if the link is down or the port is NULL.
	 */
	if ((lp = lagg_link_active(sc, lp)) == NULL) {
		m_freem(m);
		return (ENOENT);
	}

	/* Send mbuf */
	return (lagg_enqueue(lp->lp_ifp, m));
}

static struct mbuf *
lagg_lb_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
{
	struct ifnet *ifp = sc->sc_ifp;

	/* Just pass in the packet to our lagg device */
	m->m_pkthdr.rcvif = ifp;

	return (m);
}

/*
 * 802.3ad LACP
 */

static int
lagg_lacp_attach(struct lagg_softc *sc)
{
	struct lagg_port *lp;
	int error;

	sc->sc_detach = lagg_lacp_detach;
	sc->sc_port_create = lacp_port_create;
	sc->sc_port_destroy = lacp_port_destroy;
	sc->sc_linkstate = lacp_linkstate;
	sc->sc_start = lagg_lacp_start;
	sc->sc_input = lagg_lacp_input;
	sc->sc_init = lacp_init;
	sc->sc_stop = lacp_stop;
	sc->sc_lladdr = lagg_lacp_lladdr;
	sc->sc_req = lacp_req;
	sc->sc_portreq = lacp_portreq;

	error = lacp_attach(sc);
	if (error)
		return (error);

	SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		lacp_port_create(lp);

	return (error);
}

static int
lagg_lacp_detach(struct lagg_softc *sc)
{
	struct lagg_port *lp;
	int error;

	SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		lacp_port_destroy(lp);

	/* unlocking is safe here */
	LAGG_WUNLOCK(sc);
	error = lacp_detach(sc);
	LAGG_WLOCK(sc);

	return (error);
}

static void
lagg_lacp_lladdr(struct lagg_softc *sc)
{
	struct lagg_port *lp;

	/* purge all the lacp ports */
	SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		lacp_port_destroy(lp);

	/* add them back in */
	SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		lacp_port_create(lp);
}

static int
lagg_lacp_start(struct lagg_softc *sc, struct mbuf *m)
{
	struct lagg_port *lp;

	lp = lacp_select_tx_port(sc, m);
	if (lp == NULL) {
		m_freem(m);
		return (EBUSY);
	}

	/* Send mbuf */
	return (lagg_enqueue(lp->lp_ifp, m));
}
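/*
 * On receive, LACP control traffic (ETHERTYPE_SLOW) is diverted to
 * lacp_input() before normal processing, and data frames are only
 * delivered if the port is both collecting and part of the active
 * aggregator -- LACP's definition of a usable link.  This is what
 * lagg_lacp_input() below implements.
 */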
static struct mbuf *
lagg_lacp_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ether_header *eh;
	u_short etype;

	eh = mtod(m, struct ether_header *);
	etype = ntohs(eh->ether_type);

	/* Tap off LACP control messages */
	if (etype == ETHERTYPE_SLOW) {
		m = lacp_input(lp, m);
		if (m == NULL)
			return (NULL);
	}

	/*
	 * If the port is not collecting or not in the active aggregator then
	 * free and return.
	 */
	if (lacp_iscollecting(lp) == 0 || lacp_isactive(lp) == 0) {
		m_freem(m);
		return (NULL);
	}

	m->m_pkthdr.rcvif = ifp;
	return (m);
}