/*-
 * Copyright 1998 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * if_vlan.c - pseudo-device driver for IEEE 802.1Q virtual LANs.
 * Might be extended some day to also handle IEEE 802.1p priority
 * tagging.  This is sort of sneaky in the implementation, since
 * we need to pretend to be enough of an Ethernet implementation
 * to make arp work.  The way we do this is by telling everyone
 * that we are an Ethernet, and then catch the packets that
 * ether_output() left on our output queue when it calls
 * if_start(), rewrite them for use by the real outgoing interface,
 * and ask it to send them.
 */
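
/*
 * Usage example (illustrative only; the interface and unit names below
 * are hypothetical): a vlan interface is normally created and bound to
 * a parent with something like
 *
 *	ifconfig vlan0 create vlan 5 vlandev em0
 *
 * or, using the <parent>.<tag> shorthand recognized by the cloner below,
 *
 *	ifconfig em0.5 create
 *
 * The parent and tag may also be supplied in a struct vlanreq, either as
 * a parameter block with the clone request or later through the
 * SIOCSETVLAN ioctl handled in vlan_ioctl().
 */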

#include "opt_vlan.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rwlock.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vimage.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_clone.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#define	VLANNAME	"vlan"
#define	VLAN_DEF_HWIDTH	4
#define	VLAN_IFFLAGS	(IFF_BROADCAST | IFF_MULTICAST)

#define	UP_AND_RUNNING(ifp) \
    ((ifp)->if_flags & IFF_UP && (ifp)->if_drv_flags & IFF_DRV_RUNNING)

LIST_HEAD(ifvlanhead, ifvlan);

struct ifvlantrunk {
	struct ifnet	*parent;	/* parent interface of this trunk */
	struct rwlock	rw;
#ifdef VLAN_ARRAY
#define	VLAN_ARRAY_SIZE	(EVL_VLID_MASK + 1)
	struct ifvlan	*vlans[VLAN_ARRAY_SIZE]; /* static table */
#else
	struct ifvlanhead *hash;	/* dynamic hash-list table */
	uint16_t	hmask;
	uint16_t	hwidth;
#endif
	int		refcnt;
};

struct vlan_mc_entry {
	struct ether_addr		mc_addr;
	SLIST_ENTRY(vlan_mc_entry)	mc_entries;
};

struct ifvlan {
	struct ifvlantrunk *ifv_trunk;
	struct ifnet *ifv_ifp;
#define	TRUNK(ifv)	((ifv)->ifv_trunk)
#define	PARENT(ifv)	((ifv)->ifv_trunk->parent)
	int	ifv_pflags;	/* special flags we have set on parent */
	struct ifv_linkmib {
		int	ifvm_encaplen;	/* encapsulation length */
		int	ifvm_mtufudge;	/* MTU fudged by this much */
		int	ifvm_mintu;	/* min transmission unit */
		uint16_t ifvm_proto;	/* encapsulation ethertype */
		uint16_t ifvm_tag;	/* tag to apply on packets leaving if */
	}	ifv_mib;
	SLIST_HEAD(, vlan_mc_entry) vlan_mc_listhead;
#ifndef VLAN_ARRAY
	LIST_ENTRY(ifvlan) ifv_list;
#endif
};
#define	ifv_proto	ifv_mib.ifvm_proto
#define	ifv_tag		ifv_mib.ifvm_tag
#define	ifv_encaplen	ifv_mib.ifvm_encaplen
#define	ifv_mtufudge	ifv_mib.ifvm_mtufudge
#define	ifv_mintu	ifv_mib.ifvm_mintu

/* Special flags we should propagate to parent. */
static struct {
	int flag;
	int (*func)(struct ifnet *, int);
} vlan_pflags[] = {
	{IFF_PROMISC, ifpromisc},
	{IFF_ALLMULTI, if_allmulti},
	{0, NULL}
};

SYSCTL_DECL(_net_link);
SYSCTL_NODE(_net_link, IFT_L2VLAN, vlan, CTLFLAG_RW, 0, "IEEE 802.1Q VLAN");
SYSCTL_NODE(_net_link_vlan, PF_LINK, link, CTLFLAG_RW, 0, "for consistency");

static int soft_pad = 0;
SYSCTL_INT(_net_link_vlan, OID_AUTO, soft_pad, CTLFLAG_RW, &soft_pad, 0,
    "pad short frames before tagging");

static MALLOC_DEFINE(M_VLAN, VLANNAME, "802.1Q Virtual LAN Interface");

static eventhandler_tag ifdetach_tag;

/*
 * We have a global mutex that is used to serialize configuration
 * changes and isn't used in normal packet delivery.
 *
 * We also have a per-trunk rwlock that is locked shared on packet
 * processing and exclusive when configuration is changed.
 *
 * The VLAN_ARRAY option substitutes the dynamic hash with a static
 * array with 4096 entries.  In theory this can give a boost in
 * processing, however in practice it does not.  Probably this is
 * because the array is too big to fit into CPU cache.
 */
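
/*
 * Data layout, for reference: each parent interface carrying at least
 * one vlan has a struct ifvlantrunk hung off its ifnet
 * (ifp->if_vlantrunk).  The trunk holds either a 4096-entry array or a
 * hash table of struct ifvlan keyed by the 802.1Q tag, and each ifvlan
 * points back at its trunk (ifv_trunk) and at its own ifnet (ifv_ifp).
 */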

static struct mtx ifv_mtx;
#define	VLAN_LOCK_INIT()	mtx_init(&ifv_mtx, "vlan_global", NULL, MTX_DEF)
#define	VLAN_LOCK_DESTROY()	mtx_destroy(&ifv_mtx)
#define	VLAN_LOCK_ASSERT()	mtx_assert(&ifv_mtx, MA_OWNED)
#define	VLAN_LOCK()		mtx_lock(&ifv_mtx)
#define	VLAN_UNLOCK()		mtx_unlock(&ifv_mtx)
#define	TRUNK_LOCK_INIT(trunk)	rw_init(&(trunk)->rw, VLANNAME)
#define	TRUNK_LOCK_DESTROY(trunk) rw_destroy(&(trunk)->rw)
#define	TRUNK_LOCK(trunk)	rw_wlock(&(trunk)->rw)
#define	TRUNK_UNLOCK(trunk)	rw_wunlock(&(trunk)->rw)
#define	TRUNK_LOCK_ASSERT(trunk) rw_assert(&(trunk)->rw, RA_WLOCKED)
#define	TRUNK_RLOCK(trunk)	rw_rlock(&(trunk)->rw)
#define	TRUNK_RUNLOCK(trunk)	rw_runlock(&(trunk)->rw)
#define	TRUNK_LOCK_RASSERT(trunk) rw_assert(&(trunk)->rw, RA_RLOCKED)

#ifndef VLAN_ARRAY
static void vlan_inithash(struct ifvlantrunk *trunk);
static void vlan_freehash(struct ifvlantrunk *trunk);
static int vlan_inshash(struct ifvlantrunk *trunk, struct ifvlan *ifv);
static int vlan_remhash(struct ifvlantrunk *trunk, struct ifvlan *ifv);
static void vlan_growhash(struct ifvlantrunk *trunk, int howmuch);
static __inline struct ifvlan * vlan_gethash(struct ifvlantrunk *trunk,
	uint16_t tag);
#endif
static void trunk_destroy(struct ifvlantrunk *trunk);

static void vlan_start(struct ifnet *ifp);
static void vlan_init(void *foo);
static void vlan_input(struct ifnet *ifp, struct mbuf *m);
static int vlan_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr);
static int vlan_setflag(struct ifnet *ifp, int flag, int status,
    int (*func)(struct ifnet *, int));
static int vlan_setflags(struct ifnet *ifp, int status);
static int vlan_setmulti(struct ifnet *ifp);
static int vlan_unconfig(struct ifnet *ifp);
static int vlan_unconfig_locked(struct ifnet *ifp);
static int vlan_config(struct ifvlan *ifv, struct ifnet *p, uint16_t tag);
static void vlan_link_state(struct ifnet *ifp, int link);
static void vlan_capabilities(struct ifvlan *ifv);
static void vlan_trunk_capabilities(struct ifnet *ifp);

static struct ifnet *vlan_clone_match_ethertag(struct if_clone *,
    const char *, int *);
static int vlan_clone_match(struct if_clone *, const char *);
static int vlan_clone_create(struct if_clone *, char *, size_t, caddr_t);
static int vlan_clone_destroy(struct if_clone *, struct ifnet *);

static void vlan_ifdetach(void *arg, struct ifnet *ifp);

static struct if_clone vlan_cloner = IFC_CLONE_INITIALIZER(VLANNAME, NULL,
    IF_MAXUNIT, NULL, vlan_clone_match, vlan_clone_create, vlan_clone_destroy);

#ifndef VLAN_ARRAY
#define	HASH(n, m)	((((n) >> 8) ^ ((n) >> 4) ^ (n)) & (m))

static void
vlan_inithash(struct ifvlantrunk *trunk)
{
	int i, n;

	/*
	 * The trunk must not be locked here since we call malloc(M_WAITOK).
	 * That is OK since this function is called before the trunk struct
	 * gets hooked up and becomes visible to other threads.
	 */

	KASSERT(trunk->hwidth == 0 && trunk->hash == NULL,
	    ("%s: hash already initialized", __func__));

	trunk->hwidth = VLAN_DEF_HWIDTH;
	n = 1 << trunk->hwidth;
	trunk->hmask = n - 1;
	trunk->hash = malloc(sizeof(struct ifvlanhead) * n, M_VLAN, M_WAITOK);
	for (i = 0; i < n; i++)
		LIST_INIT(&trunk->hash[i]);
}

static void
vlan_freehash(struct ifvlantrunk *trunk)
{
#ifdef INVARIANTS
	int i;

	KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__));
	for (i = 0; i < (1 << trunk->hwidth); i++)
		KASSERT(LIST_EMPTY(&trunk->hash[i]),
		    ("%s: hash table not empty", __func__));
#endif
	free(trunk->hash, M_VLAN);
	trunk->hash = NULL;
	trunk->hwidth = trunk->hmask = 0;
}

static int
vlan_inshash(struct ifvlantrunk *trunk, struct ifvlan *ifv)
{
	int i, b;
	struct ifvlan *ifv2;

	TRUNK_LOCK_ASSERT(trunk);
	KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__));

	b = 1 << trunk->hwidth;
	i = HASH(ifv->ifv_tag, trunk->hmask);
	LIST_FOREACH(ifv2, &trunk->hash[i], ifv_list)
		if (ifv->ifv_tag == ifv2->ifv_tag)
			return (EEXIST);

	/*
	 * Grow the hash when the number of vlans exceeds half of the number of
	 * hash buckets squared. This will make the average linked-list length
	 * buckets/2.
	 */
	if (trunk->refcnt > (b * b) / 2) {
		vlan_growhash(trunk, 1);
		i = HASH(ifv->ifv_tag, trunk->hmask);
	}
	LIST_INSERT_HEAD(&trunk->hash[i], ifv, ifv_list);
	trunk->refcnt++;

	return (0);
}

static int
vlan_remhash(struct ifvlantrunk *trunk, struct ifvlan *ifv)
{
	int i, b;
	struct ifvlan *ifv2;

	TRUNK_LOCK_ASSERT(trunk);
	KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__));

	b = 1 << trunk->hwidth;
	i = HASH(ifv->ifv_tag, trunk->hmask);
	LIST_FOREACH(ifv2, &trunk->hash[i], ifv_list)
		if (ifv2 == ifv) {
			trunk->refcnt--;
			LIST_REMOVE(ifv2, ifv_list);
			if (trunk->refcnt < (b * b) / 2)
				vlan_growhash(trunk, -1);
			return (0);
		}

	panic("%s: vlan not found\n", __func__);
	return (ENOENT); /*NOTREACHED*/
}
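
/*
 * A worked example of the sizing policy used in vlan_inshash() above:
 * with the default 4-bit hwidth there are 16 buckets, so the table is
 * first grown once more than 16 * 16 / 2 = 128 vlans are hashed, i.e.,
 * once the chains average 8 entries; vlan_growhash() below never
 * shrinks the table beneath VLAN_DEF_HWIDTH.
 */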

/*
 * Grow the hash larger or smaller if memory permits.
 */
static void
vlan_growhash(struct ifvlantrunk *trunk, int howmuch)
{
	struct ifvlan *ifv;
	struct ifvlanhead *hash2;
	int hwidth2, i, j, n, n2;

	TRUNK_LOCK_ASSERT(trunk);
	KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__));

	if (howmuch == 0) {
		/* Harmless yet obvious coding error */
		printf("%s: howmuch is 0\n", __func__);
		return;
	}

	hwidth2 = trunk->hwidth + howmuch;
	n = 1 << trunk->hwidth;
	n2 = 1 << hwidth2;
	/* Do not shrink the table below the default */
	if (hwidth2 < VLAN_DEF_HWIDTH)
		return;

	/* M_NOWAIT because we're called with trunk mutex held */
	hash2 = malloc(sizeof(struct ifvlanhead) * n2, M_VLAN, M_NOWAIT);
	if (hash2 == NULL) {
		printf("%s: out of memory -- hash size not changed\n",
		    __func__);
		return;		/* We can live with the old hash table */
	}
	for (j = 0; j < n2; j++)
		LIST_INIT(&hash2[j]);
	for (i = 0; i < n; i++)
		while ((ifv = LIST_FIRST(&trunk->hash[i])) != NULL) {
			LIST_REMOVE(ifv, ifv_list);
			j = HASH(ifv->ifv_tag, n2 - 1);
			LIST_INSERT_HEAD(&hash2[j], ifv, ifv_list);
		}
	free(trunk->hash, M_VLAN);
	trunk->hash = hash2;
	trunk->hwidth = hwidth2;
	trunk->hmask = n2 - 1;

	if (bootverbose)
		if_printf(trunk->parent,
		    "VLAN hash table resized from %d to %d buckets\n", n, n2);
}

static __inline struct ifvlan *
vlan_gethash(struct ifvlantrunk *trunk, uint16_t tag)
{
	struct ifvlan *ifv;

	TRUNK_LOCK_RASSERT(trunk);

	LIST_FOREACH(ifv, &trunk->hash[HASH(tag, trunk->hmask)], ifv_list)
		if (ifv->ifv_tag == tag)
			return (ifv);
	return (NULL);
}

#if 0
/* Debugging code to view the hashtables. */
static void
vlan_dumphash(struct ifvlantrunk *trunk)
{
	int i;
	struct ifvlan *ifv;

	for (i = 0; i < (1 << trunk->hwidth); i++) {
		printf("%d: ", i);
		LIST_FOREACH(ifv, &trunk->hash[i], ifv_list)
			printf("%s ", ifv->ifv_ifp->if_xname);
		printf("\n");
	}
}
#endif /* 0 */
#endif /* !VLAN_ARRAY */

static void
trunk_destroy(struct ifvlantrunk *trunk)
{
	VLAN_LOCK_ASSERT();

	TRUNK_LOCK(trunk);
#ifndef VLAN_ARRAY
	vlan_freehash(trunk);
#endif
	trunk->parent->if_vlantrunk = NULL;
	TRUNK_UNLOCK(trunk);
	TRUNK_LOCK_DESTROY(trunk);
	free(trunk, M_VLAN);
}

/*
 * Program our multicast filter.  What we're actually doing is
 * programming the multicast filter of the parent.  This has the
 * side effect of causing the parent interface to receive multicast
 * traffic that it doesn't really want, which ends up being discarded
 * later by the upper protocol layers.  Unfortunately, there's no way
 * to avoid this: there really is only one physical interface.
 *
 * XXX: There is a possible race here if more than one thread is
 * modifying the multicast state of the vlan interface at the same time.
 */
static int
vlan_setmulti(struct ifnet *ifp)
{
	struct ifnet *ifp_p;
	struct ifmultiaddr *ifma, *rifma = NULL;
	struct ifvlan *sc;
	struct vlan_mc_entry *mc;
	struct sockaddr_dl sdl;
	int error;

	/*VLAN_LOCK_ASSERT();*/

	/* Find the parent. */
	sc = ifp->if_softc;
	ifp_p = PARENT(sc);

	bzero((char *)&sdl, sizeof(sdl));
	sdl.sdl_len = sizeof(sdl);
	sdl.sdl_family = AF_LINK;
	sdl.sdl_index = ifp_p->if_index;
	sdl.sdl_type = IFT_ETHER;
	sdl.sdl_alen = ETHER_ADDR_LEN;

	/* First, remove any existing filter entries. */
	while ((mc = SLIST_FIRST(&sc->vlan_mc_listhead)) != NULL) {
		bcopy((char *)&mc->mc_addr, LLADDR(&sdl), ETHER_ADDR_LEN);
		error = if_delmulti(ifp_p, (struct sockaddr *)&sdl);
		if (error)
			return (error);
		SLIST_REMOVE_HEAD(&sc->vlan_mc_listhead, mc_entries);
		free(mc, M_VLAN);
	}

	/* Now program new ones. */
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		mc = malloc(sizeof(struct vlan_mc_entry), M_VLAN, M_NOWAIT);
		if (mc == NULL)
			return (ENOMEM);
		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    (char *)&mc->mc_addr, ETHER_ADDR_LEN);
		SLIST_INSERT_HEAD(&sc->vlan_mc_listhead, mc, mc_entries);
		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    LLADDR(&sdl), ETHER_ADDR_LEN);
		error = if_addmulti(ifp_p, (struct sockaddr *)&sdl, &rifma);
		if (error)
			return (error);
	}

	return (0);
}

/*
 * A handler for network interface departure events.
 * Track departure of trunks here so that we don't access invalid
 * pointers or whatever if a trunk is ripped from under us, e.g.,
 * by ejecting its hot-plug card.
 */
static void
vlan_ifdetach(void *arg __unused, struct ifnet *ifp)
{
	struct ifvlan *ifv;
	int i;

	/*
	 * Check if it's a trunk interface first of all
	 * to avoid needless locking.
	 */
	if (ifp->if_vlantrunk == NULL)
		return;

	VLAN_LOCK();
	/*
	 * OK, it's a trunk.  Loop over and detach all vlans on it.
	 * Check the trunk pointer after each vlan_unconfig() as it will
	 * free it and set it to NULL after the last vlan was detached.
	 */
#ifdef VLAN_ARRAY
	for (i = 0; i < VLAN_ARRAY_SIZE; i++)
		if ((ifv = ifp->if_vlantrunk->vlans[i])) {
			vlan_unconfig_locked(ifv->ifv_ifp);
			if (ifp->if_vlantrunk == NULL)
				break;
		}
#else /* VLAN_ARRAY */
restart:
	for (i = 0; i < (1 << ifp->if_vlantrunk->hwidth); i++)
		if ((ifv = LIST_FIRST(&ifp->if_vlantrunk->hash[i]))) {
			vlan_unconfig_locked(ifv->ifv_ifp);
			if (ifp->if_vlantrunk)
				goto restart;	/* trunk->hwidth can change */
			else
				break;
		}
#endif /* VLAN_ARRAY */
	/* Trunk should have been destroyed in vlan_unconfig(). */
	KASSERT(ifp->if_vlantrunk == NULL, ("%s: purge failed", __func__));
	VLAN_UNLOCK();
}

/*
 * VLAN support can be loaded as a module.  The only place in the
 * system that's intimately aware of this is ether_input.  We hook
 * into this code through vlan_input_p which is defined there and
 * set here.  No one else in the system should be aware of this so
 * we use an explicit reference here.
 */
extern void (*vlan_input_p)(struct ifnet *, struct mbuf *);

/* For if_link_state_change() eyes only... */
extern void (*vlan_link_state_p)(struct ifnet *, int);

static int
vlan_modevent(module_t mod, int type, void *data)
{

	switch (type) {
	case MOD_LOAD:
		ifdetach_tag = EVENTHANDLER_REGISTER(ifnet_departure_event,
		    vlan_ifdetach, NULL, EVENTHANDLER_PRI_ANY);
		if (ifdetach_tag == NULL)
			return (ENOMEM);
		VLAN_LOCK_INIT();
		vlan_input_p = vlan_input;
		vlan_link_state_p = vlan_link_state;
		vlan_trunk_cap_p = vlan_trunk_capabilities;
		if_clone_attach(&vlan_cloner);
		if (bootverbose)
			printf("vlan: initialized, using "
#ifdef VLAN_ARRAY
			    "full-size arrays"
#else
			    "hash tables with chaining"
#endif
			    "\n");
		break;
	case MOD_UNLOAD:
		if_clone_detach(&vlan_cloner);
		EVENTHANDLER_DEREGISTER(ifnet_departure_event, ifdetach_tag);
		vlan_input_p = NULL;
		vlan_link_state_p = NULL;
		vlan_trunk_cap_p = NULL;
		VLAN_LOCK_DESTROY();
		if (bootverbose)
			printf("vlan: unloaded\n");
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (0);
}

static moduledata_t vlan_mod = {
	"if_vlan",
	vlan_modevent,
	0
};

DECLARE_MODULE(if_vlan, vlan_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
MODULE_VERSION(if_vlan, 3);
MODULE_DEPEND(if_vlan, miibus, 1, 1, 1);

static struct ifnet *
vlan_clone_match_ethertag(struct if_clone *ifc, const char *name, int *tag)
{
	const char *cp;
	struct ifnet *ifp;
	int t = 0;

	/* Check for <etherif>.<vlan> style interface names. */
	IFNET_RLOCK();
	TAILQ_FOREACH(ifp, &V_ifnet, if_link) {
		if (ifp->if_type != IFT_ETHER)
			continue;
		if (strncmp(ifp->if_xname, name, strlen(ifp->if_xname)) != 0)
			continue;
		cp = name + strlen(ifp->if_xname);
		if (*cp != '.')
			continue;
		for(; *cp != '\0'; cp++) {
			if (*cp < '0' || *cp > '9')
				continue;
			t = (t * 10) + (*cp - '0');
		}
		if (tag != NULL)
			*tag = t;
		break;
	}
	IFNET_RUNLOCK();

	return (ifp);
}

static int
vlan_clone_match(struct if_clone *ifc, const char *name)
{
	const char *cp;

	if (vlan_clone_match_ethertag(ifc, name, NULL) != NULL)
		return (1);

	if (strncmp(VLANNAME, name, strlen(VLANNAME)) != 0)
		return (0);
	for (cp = name + 4; *cp != '\0'; cp++) {
		if (*cp < '0' || *cp > '9')
			return (0);
	}

	return (1);
}

static int
vlan_clone_create(struct if_clone *ifc, char *name, size_t len, caddr_t params)
{
	char *dp;
	int wildcard;
	int unit;
	int error;
	int tag;
	int ethertag;
	struct ifvlan *ifv;
	struct ifnet *ifp;
	struct ifnet *p;
	struct vlanreq vlr;
	static const u_char eaddr[ETHER_ADDR_LEN];	/* 00:00:00:00:00:00 */

	/*
	 * There are 3 (ugh) ways to specify the cloned device:
	 * o pass a parameter block with the clone request.
	 * o specify parameters in the text of the clone device name
	 * o specify no parameters and get an unattached device that
	 *   must be configured separately.
	 * The first technique is preferred; the latter two are
	 * supported for backwards compatibility.
	 */
	if (params) {
		error = copyin(params, &vlr, sizeof(vlr));
		if (error)
			return error;
		p = ifunit(vlr.vlr_parent);
		if (p == NULL)
			return ENXIO;
		/*
		 * Don't let the caller set up a VLAN tag with
		 * anything except VLID bits.
		 */
		if (vlr.vlr_tag & ~EVL_VLID_MASK)
			return (EINVAL);
		error = ifc_name2unit(name, &unit);
		if (error != 0)
			return (error);

		ethertag = 1;
		tag = vlr.vlr_tag;
		wildcard = (unit < 0);
	} else if ((p = vlan_clone_match_ethertag(ifc, name, &tag)) != NULL) {
		ethertag = 1;
		unit = -1;
		wildcard = 0;

		/*
		 * Don't let the caller set up a VLAN tag with
		 * anything except VLID bits.
		 */
		if (tag & ~EVL_VLID_MASK)
			return (EINVAL);
	} else {
		ethertag = 0;

		error = ifc_name2unit(name, &unit);
		if (error != 0)
			return (error);

		wildcard = (unit < 0);
	}

	error = ifc_alloc_unit(ifc, &unit);
	if (error != 0)
		return (error);

	/* In the wildcard case, we need to update the name. */
	if (wildcard) {
		for (dp = name; *dp != '\0'; dp++);
		if (snprintf(dp, len - (dp-name), "%d", unit) >
		    len - (dp-name) - 1) {
			panic("%s: interface name too long", __func__);
		}
	}

	ifv = malloc(sizeof(struct ifvlan), M_VLAN, M_WAITOK | M_ZERO);
	ifp = ifv->ifv_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		ifc_free_unit(ifc, unit);
		free(ifv, M_VLAN);
		return (ENOSPC);
	}
	SLIST_INIT(&ifv->vlan_mc_listhead);

	ifp->if_softc = ifv;
	/*
	 * Set the name manually rather than using if_initname because
	 * we don't conform to the default naming convention for interfaces.
	 */
	strlcpy(ifp->if_xname, name, IFNAMSIZ);
	ifp->if_dname = ifc->ifc_name;
	ifp->if_dunit = unit;
	/* NB: flags are not set here */
	ifp->if_linkmib = &ifv->ifv_mib;
	ifp->if_linkmiblen = sizeof(ifv->ifv_mib);
	/* NB: mtu is not set here */

	ifp->if_init = vlan_init;
	ifp->if_start = vlan_start;
	ifp->if_ioctl = vlan_ioctl;
	ifp->if_snd.ifq_maxlen = ifqmaxlen;
	ifp->if_flags = VLAN_IFFLAGS;
	ether_ifattach(ifp, eaddr);
	/* Now undo some of the damage... */
	ifp->if_baudrate = 0;
	ifp->if_type = IFT_L2VLAN;
	ifp->if_hdrlen = ETHER_VLAN_ENCAP_LEN;

	if (ethertag) {
		error = vlan_config(ifv, p, tag);
		if (error != 0) {
			/*
			 * Since we've partially failed, we need to back
			 * out all the way, otherwise userland could get
			 * confused.  Thus, we destroy the interface.
			 */
			ether_ifdetach(ifp);
			vlan_unconfig(ifp);
			if_free_type(ifp, IFT_ETHER);
			free(ifv, M_VLAN);

			return (error);
		}

		/* Update flags on the parent, if necessary. */
		vlan_setflags(ifp, 1);
	}

	return (0);
}

static int
vlan_clone_destroy(struct if_clone *ifc, struct ifnet *ifp)
{
	struct ifvlan *ifv = ifp->if_softc;
	int unit = ifp->if_dunit;

	ether_ifdetach(ifp);	/* first, remove it from system-wide lists */
	vlan_unconfig(ifp);	/* now it can be unconfigured and freed */
	if_free_type(ifp, IFT_ETHER);
	free(ifv, M_VLAN);
	ifc_free_unit(ifc, unit);

	return (0);
}

/*
 * The ifp->if_init entry point for vlan(4) is a no-op.
 */
static void
vlan_init(void *foo __unused)
{
}

/*
 * The if_start method for the vlan(4) interface.  It doesn't
 * raise the IFF_DRV_OACTIVE flag, since it is called only from
 * the IFQ_HANDOFF() macro in ether_output_frame().  If the
 * interface queue were full and vlan_start() were not called,
 * the queue would never get emptied and the interface would
 * stall forever.
 */
static void
vlan_start(struct ifnet *ifp)
{
	struct ifvlan *ifv;
	struct ifnet *p;
	struct mbuf *m;
	int error;

	ifv = ifp->if_softc;
	p = PARENT(ifv);

	for (;;) {
		IF_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;
		BPF_MTAP(ifp, m);

		/*
		 * Do not run parent's if_start() if the parent is not up,
		 * or parent's driver will cause a system crash.
		 */
		if (!UP_AND_RUNNING(p)) {
			m_freem(m);
			ifp->if_collisions++;
			continue;
		}

		/*
		 * Pad the frame to the minimum size allowed if told to.
		 * This option is in accord with IEEE Std 802.1Q, 2003 Ed.,
		 * paragraph C.4.4.3.b.  It can help to work around buggy
		 * bridges that violate paragraph C.4.4.3.a from the same
		 * document, i.e., fail to pad short frames after untagging.
		 * E.g., a tagged frame 66 bytes long (incl. FCS) is OK, but
		 * untagging it will produce a 62-byte frame, which is a runt
		 * and requires padding.  There are VLAN-enabled network
		 * devices that just discard such runts instead or mishandle
		 * them somehow.
		 */
		if (soft_pad) {
			static char pad[8];	/* just zeros */
			int n;

			for (n = ETHERMIN + ETHER_HDR_LEN - m->m_pkthdr.len;
			     n > 0; n -= sizeof(pad))
				if (!m_append(m, min(n, sizeof(pad)), pad))
					break;

			if (n > 0) {
				if_printf(ifp, "cannot pad short frame\n");
				ifp->if_oerrors++;
				m_freem(m);
				continue;
			}
		}

		/*
		 * If underlying interface can do VLAN tag insertion itself,
		 * just pass the packet along.  However, we need some way to
		 * tell the interface where the packet came from so that it
		 * knows how to find the VLAN tag to use, so we attach a
		 * packet tag that holds it.
		 */
		if (p->if_capenable & IFCAP_VLAN_HWTAGGING) {
			m->m_pkthdr.ether_vtag = ifv->ifv_tag;
			m->m_flags |= M_VLANTAG;
		} else {
			m = ether_vlanencap(m, ifv->ifv_tag);
			if (m == NULL) {
				if_printf(ifp,
				    "unable to prepend VLAN header\n");
				ifp->if_oerrors++;
				continue;
			}
		}

		/*
		 * Send it, precisely as ether_output() would have.
		 * We are already running at splimp.
		 */
		IFQ_HANDOFF(p, m, error);
		if (!error)
			ifp->if_opackets++;
		else
			ifp->if_oerrors++;
	}
}

static void
vlan_input(struct ifnet *ifp, struct mbuf *m)
{
	struct ifvlantrunk *trunk = ifp->if_vlantrunk;
	struct ifvlan *ifv;
	uint16_t tag;

	KASSERT(trunk != NULL, ("%s: no trunk", __func__));

	if (m->m_flags & M_VLANTAG) {
		/*
		 * Packet is tagged, but m contains a normal
		 * Ethernet frame; the tag is stored out-of-band.
		 */
		tag = EVL_VLANOFTAG(m->m_pkthdr.ether_vtag);
		m->m_flags &= ~M_VLANTAG;
	} else {
		struct ether_vlan_header *evl;

		/*
		 * Packet is tagged in-band as specified by 802.1q.
		 */
		switch (ifp->if_type) {
		case IFT_ETHER:
			if (m->m_len < sizeof(*evl) &&
			    (m = m_pullup(m, sizeof(*evl))) == NULL) {
				if_printf(ifp, "cannot pullup VLAN header\n");
				return;
			}
			evl = mtod(m, struct ether_vlan_header *);
			tag = EVL_VLANOFTAG(ntohs(evl->evl_tag));

			/*
			 * Remove the 802.1q header by copying the Ethernet
			 * addresses over it and adjusting the beginning of
			 * the data in the mbuf.  The encapsulated Ethernet
			 * type field is already in place.
			 */
			bcopy((char *)evl, (char *)evl + ETHER_VLAN_ENCAP_LEN,
			    ETHER_HDR_LEN - ETHER_TYPE_LEN);
			m_adj(m, ETHER_VLAN_ENCAP_LEN);
			break;

		default:
#ifdef INVARIANTS
			panic("%s: %s has unsupported if_type %u",
			    __func__, ifp->if_xname, ifp->if_type);
#endif
			m_freem(m);
			ifp->if_noproto++;
			return;
		}
	}

	TRUNK_RLOCK(trunk);
#ifdef VLAN_ARRAY
	ifv = trunk->vlans[tag];
#else
	ifv = vlan_gethash(trunk, tag);
#endif
	if (ifv == NULL || !UP_AND_RUNNING(ifv->ifv_ifp)) {
		TRUNK_RUNLOCK(trunk);
		m_freem(m);
		ifp->if_noproto++;
		return;
	}
	TRUNK_RUNLOCK(trunk);

	m->m_pkthdr.rcvif = ifv->ifv_ifp;
	ifv->ifv_ifp->if_ipackets++;

	/* Pass it back through the parent's input routine. */
	(*ifp->if_input)(ifv->ifv_ifp, m);
}

static int
vlan_config(struct ifvlan *ifv, struct ifnet *p, uint16_t tag)
{
	struct ifvlantrunk *trunk;
	struct ifnet *ifp;
	int error = 0;

	/* VID numbers 0x0 and 0xFFF are reserved */
	if (tag == 0 || tag == 0xFFF)
		return (EINVAL);
	if (p->if_type != IFT_ETHER)
		return (EPROTONOSUPPORT);
	if ((p->if_flags & VLAN_IFFLAGS) != VLAN_IFFLAGS)
		return (EPROTONOSUPPORT);
	if (ifv->ifv_trunk)
		return (EBUSY);

	if (p->if_vlantrunk == NULL) {
		trunk = malloc(sizeof(struct ifvlantrunk),
		    M_VLAN, M_WAITOK | M_ZERO);
#ifndef VLAN_ARRAY
		vlan_inithash(trunk);
#endif
		VLAN_LOCK();
		if (p->if_vlantrunk != NULL) {
			/* A race that is very unlikely to be hit. */
#ifndef VLAN_ARRAY
			vlan_freehash(trunk);
#endif
			free(trunk, M_VLAN);
			goto exists;
		}
		TRUNK_LOCK_INIT(trunk);
		TRUNK_LOCK(trunk);
		p->if_vlantrunk = trunk;
		trunk->parent = p;
	} else {
		VLAN_LOCK();
exists:
		trunk = p->if_vlantrunk;
		TRUNK_LOCK(trunk);
	}

	ifv->ifv_tag = tag;	/* must set this before vlan_inshash() */
#ifdef VLAN_ARRAY
	if (trunk->vlans[tag] != NULL) {
		error = EEXIST;
		goto done;
	}
	trunk->vlans[tag] = ifv;
	trunk->refcnt++;
#else
	error = vlan_inshash(trunk, ifv);
	if (error)
		goto done;
#endif
	ifv->ifv_proto = ETHERTYPE_VLAN;
	ifv->ifv_encaplen = ETHER_VLAN_ENCAP_LEN;
	ifv->ifv_mintu = ETHERMIN;
	ifv->ifv_pflags = 0;

	/*
	 * If the parent supports the VLAN_MTU capability,
	 * i.e. can Tx/Rx larger than ETHER_MAX_LEN frames,
	 * use it.
	 */
	if (p->if_capenable & IFCAP_VLAN_MTU) {
		/*
		 * No need to fudge the MTU since the parent can
		 * handle extended frames.
		 */
		ifv->ifv_mtufudge = 0;
	} else {
		/*
		 * Fudge the MTU by the encapsulation size.  This
		 * makes us incompatible with strictly compliant
		 * 802.1Q implementations, but allows us to use
		 * the feature with other NetBSD implementations,
		 * which might still be useful.
		 */
		ifv->ifv_mtufudge = ifv->ifv_encaplen;
	}

	ifv->ifv_trunk = trunk;
	ifp = ifv->ifv_ifp;
	ifp->if_mtu = p->if_mtu - ifv->ifv_mtufudge;
	ifp->if_baudrate = p->if_baudrate;
	/*
	 * Copy only a selected subset of flags from the parent.
	 * Other flags are none of our business.
	 */
#define	VLAN_COPY_FLAGS	(IFF_SIMPLEX)
	ifp->if_flags &= ~VLAN_COPY_FLAGS;
	ifp->if_flags |= p->if_flags & VLAN_COPY_FLAGS;
#undef VLAN_COPY_FLAGS

	ifp->if_link_state = p->if_link_state;

	vlan_capabilities(ifv);

	/*
	 * Set up our ``Ethernet address'' to reflect the underlying
	 * physical interface's.
	 */
	bcopy(IF_LLADDR(p), IF_LLADDR(ifp), ETHER_ADDR_LEN);

	/*
	 * Configure multicast addresses that may already be
	 * joined on the vlan device.
	 */
	(void)vlan_setmulti(ifp); /* XXX: VLAN lock held */

	/* We are ready for operation now. */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
done:
	TRUNK_UNLOCK(trunk);
	if (error == 0)
		EVENTHANDLER_INVOKE(vlan_config, p, ifv->ifv_tag);
	VLAN_UNLOCK();

	return (error);
}

static int
vlan_unconfig(struct ifnet *ifp)
{
	int ret;

	VLAN_LOCK();
	ret = vlan_unconfig_locked(ifp);
	VLAN_UNLOCK();
	return (ret);
}

static int
vlan_unconfig_locked(struct ifnet *ifp)
{
	struct ifvlantrunk *trunk;
	struct vlan_mc_entry *mc;
	struct ifvlan *ifv;
	struct ifnet *parent;
	int error;

	VLAN_LOCK_ASSERT();

	ifv = ifp->if_softc;
	trunk = ifv->ifv_trunk;
	parent = PARENT(ifv);

	if (trunk) {
		struct sockaddr_dl sdl;
		struct ifnet *p = trunk->parent;

		TRUNK_LOCK(trunk);

		/*
		 * Since the interface is being unconfigured, we need to
		 * empty the list of multicast groups that we may have joined
		 * while we were alive from the parent's list.
		 */
		bzero((char *)&sdl, sizeof(sdl));
		sdl.sdl_len = sizeof(sdl);
		sdl.sdl_family = AF_LINK;
		sdl.sdl_index = p->if_index;
		sdl.sdl_type = IFT_ETHER;
		sdl.sdl_alen = ETHER_ADDR_LEN;

		while ((mc = SLIST_FIRST(&ifv->vlan_mc_listhead)) != NULL) {
			bcopy((char *)&mc->mc_addr, LLADDR(&sdl),
			    ETHER_ADDR_LEN);
			error = if_delmulti(p, (struct sockaddr *)&sdl);
			if (error)
				return (error);
			SLIST_REMOVE_HEAD(&ifv->vlan_mc_listhead, mc_entries);
			free(mc, M_VLAN);
		}

		vlan_setflags(ifp, 0); /* clear special flags on parent */
#ifdef VLAN_ARRAY
		trunk->vlans[ifv->ifv_tag] = NULL;
		trunk->refcnt--;
#else
		vlan_remhash(trunk, ifv);
#endif
		ifv->ifv_trunk = NULL;

		/*
		 * Check if we were the last.
		 */
		if (trunk->refcnt == 0) {
			trunk->parent->if_vlantrunk = NULL;
			/*
			 * XXXGL: If some ithread has already entered
			 * vlan_input() and is now blocked on the trunk
			 * lock, then it should preempt us right after
			 * unlock and finish its work.  Then we will acquire
			 * lock again in trunk_destroy().
			 */
			TRUNK_UNLOCK(trunk);
			trunk_destroy(trunk);
		} else
			TRUNK_UNLOCK(trunk);
	}

	/* Disconnect from parent. */
	if (ifv->ifv_pflags)
		if_printf(ifp, "%s: ifv_pflags unclean\n", __func__);
	ifp->if_mtu = ETHERMTU;
	ifp->if_link_state = LINK_STATE_UNKNOWN;
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;

	EVENTHANDLER_INVOKE(vlan_unconfig, parent, ifv->ifv_tag);

	return (0);
}

/* Handle a reference-counted flag that should be set on the parent as well */
static int
vlan_setflag(struct ifnet *ifp, int flag, int status,
    int (*func)(struct ifnet *, int))
{
	struct ifvlan *ifv;
	int error;

	/* XXX VLAN_LOCK_ASSERT(); */

	ifv = ifp->if_softc;
	status = status ? (ifp->if_flags & flag) : 0;
	/* Now "status" contains the flag value or 0 */

	/*
	 * See if recorded parent's status is different from what
	 * we want it to be.  If it is, flip it.  We record parent's
	 * status in ifv_pflags so that we won't clear a parent's
	 * flag that we haven't set.  In fact, we don't clear or set
	 * parent's flags directly, but get or release references to
	 * them.  That's why we can be sure that recorded flags still
	 * are in accord with actual parent's flags.
	 */
	if (status != (ifv->ifv_pflags & flag)) {
		error = (*func)(PARENT(ifv), status);
		if (error)
			return (error);
		ifv->ifv_pflags &= ~flag;
		ifv->ifv_pflags |= status;
	}
	return (0);
}

/*
 * Handle IFF_* flags that require certain changes on the parent:
 * if "status" is true, update parent's flags respective to our if_flags;
 * if "status" is false, forcibly clear the flags set on the parent.
 */
static int
vlan_setflags(struct ifnet *ifp, int status)
{
	int error, i;

	for (i = 0; vlan_pflags[i].flag; i++) {
		error = vlan_setflag(ifp, vlan_pflags[i].flag,
		    status, vlan_pflags[i].func);
		if (error)
			return (error);
	}
	return (0);
}

/* Inform all vlans that their parent has changed link state */
static void
vlan_link_state(struct ifnet *ifp, int link)
{
	struct ifvlantrunk *trunk = ifp->if_vlantrunk;
	struct ifvlan *ifv;
	int i;

	TRUNK_LOCK(trunk);
#ifdef VLAN_ARRAY
	for (i = 0; i < VLAN_ARRAY_SIZE; i++)
		if (trunk->vlans[i] != NULL) {
			ifv = trunk->vlans[i];
#else
	for (i = 0; i < (1 << trunk->hwidth); i++)
		LIST_FOREACH(ifv, &trunk->hash[i], ifv_list) {
#endif
			ifv->ifv_ifp->if_baudrate = trunk->parent->if_baudrate;
			if_link_state_change(ifv->ifv_ifp,
			    trunk->parent->if_link_state);
		}
	TRUNK_UNLOCK(trunk);
}

static void
vlan_capabilities(struct ifvlan *ifv)
{
	struct ifnet *p = PARENT(ifv);
	struct ifnet *ifp = ifv->ifv_ifp;

	TRUNK_LOCK_ASSERT(TRUNK(ifv));

	/*
	 * If the parent interface can do checksum offloading
	 * on VLANs, then propagate its hardware-assisted
	 * checksumming flags.  Also assert that checksum
	 * offloading requires hardware VLAN tagging.
	 */
	if (p->if_capabilities & IFCAP_VLAN_HWCSUM)
		ifp->if_capabilities = p->if_capabilities & IFCAP_HWCSUM;

	if (p->if_capenable & IFCAP_VLAN_HWCSUM &&
	    p->if_capenable & IFCAP_VLAN_HWTAGGING) {
		ifp->if_capenable = p->if_capenable & IFCAP_HWCSUM;
		ifp->if_hwassist = p->if_hwassist;
	} else {
		ifp->if_capenable = 0;
		ifp->if_hwassist = 0;
	}
}

static void
vlan_trunk_capabilities(struct ifnet *ifp)
{
	struct ifvlantrunk *trunk = ifp->if_vlantrunk;
	struct ifvlan *ifv;
	int i;

	TRUNK_LOCK(trunk);
#ifdef VLAN_ARRAY
	for (i = 0; i < VLAN_ARRAY_SIZE; i++)
		if (trunk->vlans[i] != NULL) {
			ifv = trunk->vlans[i];
#else
	for (i = 0; i < (1 << trunk->hwidth); i++) {
		LIST_FOREACH(ifv, &trunk->hash[i], ifv_list)
#endif
			vlan_capabilities(ifv);
	}
	TRUNK_UNLOCK(trunk);
}

static int
vlan_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct ifaddr *ifa;
	struct ifnet *p;
	struct ifreq *ifr;
	struct ifvlan *ifv;
	struct vlanreq vlr;
	int error = 0;

	ifr = (struct ifreq *)data;
	ifa = (struct ifaddr *)data;
	ifv = ifp->if_softc;

	switch (cmd) {
	case SIOCGIFMEDIA:
		VLAN_LOCK();
		if (TRUNK(ifv) != NULL) {
			error = (*PARENT(ifv)->if_ioctl)(PARENT(ifv),
			    SIOCGIFMEDIA, data);
			VLAN_UNLOCK();
			/* Limit the result to the parent's current config. */
			if (error == 0) {
				struct ifmediareq *ifmr;

				ifmr = (struct ifmediareq *)data;
				if (ifmr->ifm_count >= 1 && ifmr->ifm_ulist) {
					ifmr->ifm_count = 1;
					error = copyout(&ifmr->ifm_current,
					    ifmr->ifm_ulist,
					    sizeof(int));
				}
			}
		} else {
			VLAN_UNLOCK();
			error = EINVAL;
		}
		break;

	case SIOCSIFMEDIA:
		error = EINVAL;
		break;

	case SIOCSIFMTU:
		/*
		 * Set the interface MTU.
		 */
		VLAN_LOCK();
		if (TRUNK(ifv) != NULL) {
			if (ifr->ifr_mtu >
			    (PARENT(ifv)->if_mtu - ifv->ifv_mtufudge) ||
			    ifr->ifr_mtu <
			    (ifv->ifv_mintu - ifv->ifv_mtufudge))
				error = EINVAL;
			else
				ifp->if_mtu = ifr->ifr_mtu;
		} else
			error = EINVAL;
		VLAN_UNLOCK();
		break;

	case SIOCSETVLAN:
		error = copyin(ifr->ifr_data, &vlr, sizeof(vlr));
		if (error)
			break;
		if (vlr.vlr_parent[0] == '\0') {
			vlan_unconfig(ifp);
			break;
		}
		p = ifunit(vlr.vlr_parent);
		if (p == 0) {
			error = ENOENT;
			break;
		}
		/*
		 * Don't let the caller set up a VLAN tag with
		 * anything except VLID bits.
		 */
		if (vlr.vlr_tag & ~EVL_VLID_MASK) {
			error = EINVAL;
			break;
		}
		error = vlan_config(ifv, p, vlr.vlr_tag);
		if (error)
			break;

		/* Update flags on the parent, if necessary. */
		vlan_setflags(ifp, 1);
		break;

	case SIOCGETVLAN:
		bzero(&vlr, sizeof(vlr));
		VLAN_LOCK();
		if (TRUNK(ifv) != NULL) {
			strlcpy(vlr.vlr_parent, PARENT(ifv)->if_xname,
			    sizeof(vlr.vlr_parent));
			vlr.vlr_tag = ifv->ifv_tag;
		}
		VLAN_UNLOCK();
		error = copyout(&vlr, ifr->ifr_data, sizeof(vlr));
		break;

	case SIOCSIFFLAGS:
		/*
		 * We should propagate selected flags to the parent,
		 * e.g., promiscuous mode.
		 */
		if (TRUNK(ifv) != NULL)
			error = vlan_setflags(ifp, 1);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/*
		 * If we don't have a parent, just remember the membership for
		 * when we do.
		 */
		if (TRUNK(ifv) != NULL)
			error = vlan_setmulti(ifp);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
	}

	return (error);
}