/*-
 * Copyright 1998 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * if_vlan.c - pseudo-device driver for IEEE 802.1Q virtual LANs.
 * Might be extended some day to also handle IEEE 802.1p priority
 * tagging.  This is sort of sneaky in the implementation, since
 * we need to pretend to be enough of an Ethernet implementation
 * to make arp work.  The way we do this is by telling everyone
 * that we are an Ethernet, and then catch the packets that
 * ether_output() left on our output queue when it calls
 * if_start(), rewrite them for use by the real outgoing interface,
 * and ask it to send them.
 */
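
/*
 * For reference (illustrative note, not original to this file): an 802.1Q
 * tagged frame carries a 4-byte tag (ETHER_VLAN_ENCAP_LEN) inserted after
 * the source MAC address:
 *
 *   dst MAC (6) | src MAC (6) | TPID 0x8100 (2) |
 *   TCI: priority (3 bits), CFI (1 bit), VID (12 bits) (2) |
 *   original ethertype (2) | payload ...
 *
 * ether_vlanencap() builds this encapsulation on output when the parent
 * cannot insert the tag in hardware, and vlan_input() strips it again on
 * the way back up the stack.
 */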

#include "opt_vlan.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rwlock.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_clone.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <net/vnet.h>

#define	VLANNAME	"vlan"
#define	VLAN_DEF_HWIDTH	4
#define	VLAN_IFFLAGS	(IFF_BROADCAST | IFF_MULTICAST)

#define	UP_AND_RUNNING(ifp) \
    ((ifp)->if_flags & IFF_UP && (ifp)->if_drv_flags & IFF_DRV_RUNNING)

LIST_HEAD(ifvlanhead, ifvlan);

struct ifvlantrunk {
	struct	ifnet *parent;		/* parent interface of this trunk */
	struct	rwlock rw;
#ifdef VLAN_ARRAY
#define	VLAN_ARRAY_SIZE	(EVL_VLID_MASK + 1)
	struct	ifvlan *vlans[VLAN_ARRAY_SIZE]; /* static table */
#else
	struct	ifvlanhead *hash;	/* dynamic hash-list table */
	uint16_t	hmask;
	uint16_t	hwidth;
#endif
	int		refcnt;
};

struct vlan_mc_entry {
	struct ether_addr		mc_addr;
	SLIST_ENTRY(vlan_mc_entry)	mc_entries;
};

struct ifvlan {
	struct	ifvlantrunk *ifv_trunk;
	struct	ifnet *ifv_ifp;
#define	TRUNK(ifv)	((ifv)->ifv_trunk)
#define	PARENT(ifv)	((ifv)->ifv_trunk->parent)
	int	ifv_pflags;	/* special flags we have set on parent */
	struct	ifv_linkmib {
		int	ifvm_encaplen;	/* encapsulation length */
		int	ifvm_mtufudge;	/* MTU fudged by this much */
		int	ifvm_mintu;	/* min transmission unit */
		uint16_t ifvm_proto;	/* encapsulation ethertype */
		uint16_t ifvm_tag;	/* tag to apply on packets leaving if */
	}	ifv_mib;
	SLIST_HEAD(, vlan_mc_entry) vlan_mc_listhead;
#ifndef VLAN_ARRAY
	LIST_ENTRY(ifvlan) ifv_list;
#endif
};
#define	ifv_proto	ifv_mib.ifvm_proto
#define	ifv_tag		ifv_mib.ifvm_tag
#define	ifv_encaplen	ifv_mib.ifvm_encaplen
#define	ifv_mtufudge	ifv_mib.ifvm_mtufudge
#define	ifv_mintu	ifv_mib.ifvm_mintu

/* Special flags we should propagate to parent. */
static struct {
	int flag;
	int (*func)(struct ifnet *, int);
} vlan_pflags[] = {
	{IFF_PROMISC, ifpromisc},
	{IFF_ALLMULTI, if_allmulti},
	{0, NULL}
};

SYSCTL_DECL(_net_link);
SYSCTL_NODE(_net_link, IFT_L2VLAN, vlan, CTLFLAG_RW, 0, "IEEE 802.1Q VLAN");
SYSCTL_NODE(_net_link_vlan, PF_LINK, link, CTLFLAG_RW, 0, "for consistency");

static int soft_pad = 0;
SYSCTL_INT(_net_link_vlan, OID_AUTO, soft_pad, CTLFLAG_RW, &soft_pad, 0,
    "pad short frames before tagging");

static MALLOC_DEFINE(M_VLAN, VLANNAME, "802.1Q Virtual LAN Interface");

static eventhandler_tag ifdetach_tag;

/*
 * We have a global mutex that is used to serialize configuration
 * changes and isn't used in normal packet delivery.
 *
 * We also have a per-trunk rwlock that is locked shared during packet
 * processing and exclusively when the configuration is changed.
 *
 * The VLAN_ARRAY option substitutes a static, 4096-entry array for the
 * dynamic hash.  In theory this could give a boost in processing, but
 * in practice it does not, probably because the array is too big to
 * fit into the CPU cache.
 */
static struct mtx ifv_mtx;
#define	VLAN_LOCK_INIT()	mtx_init(&ifv_mtx, "vlan_global", NULL, MTX_DEF)
#define	VLAN_LOCK_DESTROY()	mtx_destroy(&ifv_mtx)
#define	VLAN_LOCK_ASSERT()	mtx_assert(&ifv_mtx, MA_OWNED)
#define	VLAN_LOCK()		mtx_lock(&ifv_mtx)
#define	VLAN_UNLOCK()		mtx_unlock(&ifv_mtx)
#define	TRUNK_LOCK_INIT(trunk)	rw_init(&(trunk)->rw, VLANNAME)
#define	TRUNK_LOCK_DESTROY(trunk) rw_destroy(&(trunk)->rw)
#define	TRUNK_LOCK(trunk)	rw_wlock(&(trunk)->rw)
#define	TRUNK_UNLOCK(trunk)	rw_wunlock(&(trunk)->rw)
#define	TRUNK_LOCK_ASSERT(trunk) rw_assert(&(trunk)->rw, RA_WLOCKED)
#define	TRUNK_RLOCK(trunk)	rw_rlock(&(trunk)->rw)
#define	TRUNK_RUNLOCK(trunk)	rw_runlock(&(trunk)->rw)
#define	TRUNK_LOCK_RASSERT(trunk) rw_assert(&(trunk)->rw, RA_RLOCKED)

#ifndef VLAN_ARRAY
static	void vlan_inithash(struct ifvlantrunk *trunk);
static	void vlan_freehash(struct ifvlantrunk *trunk);
static	int vlan_inshash(struct ifvlantrunk *trunk, struct ifvlan *ifv);
static	int vlan_remhash(struct ifvlantrunk *trunk, struct ifvlan *ifv);
static	void vlan_growhash(struct ifvlantrunk *trunk, int howmuch);
static __inline struct ifvlan * vlan_gethash(struct ifvlantrunk *trunk,
	uint16_t tag);
#endif
static	void trunk_destroy(struct ifvlantrunk *trunk);

static	void vlan_start(struct ifnet *ifp);
static	void vlan_init(void *foo);
static	void vlan_input(struct ifnet *ifp, struct mbuf *m);
static	int vlan_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr);
static	int vlan_setflag(struct ifnet *ifp, int flag, int status,
    int (*func)(struct ifnet *, int));
static	int vlan_setflags(struct ifnet *ifp, int status);
static	int vlan_setmulti(struct ifnet *ifp);
static	int vlan_unconfig(struct ifnet *ifp);
static	int vlan_unconfig_locked(struct ifnet *ifp);
static	int vlan_config(struct ifvlan *ifv, struct ifnet *p, uint16_t tag);
static	void vlan_link_state(struct ifnet *ifp, int link);
static	void vlan_capabilities(struct ifvlan *ifv);
static	void vlan_trunk_capabilities(struct ifnet *ifp);

static	struct ifnet *vlan_clone_match_ethertag(struct if_clone *,
    const char *, int *);
static	int vlan_clone_match(struct if_clone *, const char *);
static	int vlan_clone_create(struct if_clone *, char *, size_t, caddr_t);
static	int vlan_clone_destroy(struct if_clone *, struct ifnet *);

static	void vlan_ifdetach(void *arg, struct ifnet *ifp);

static	struct if_clone vlan_cloner = IFC_CLONE_INITIALIZER(VLANNAME, NULL,
    IF_MAXUNIT, NULL, vlan_clone_match, vlan_clone_create, vlan_clone_destroy);

#ifndef VLAN_ARRAY
#define	HASH(n, m)	((((n) >> 8) ^ ((n) >> 4) ^ (n)) & (m))

static void
vlan_inithash(struct ifvlantrunk *trunk)
{
	int i, n;

	/*
	 * The trunk must not be locked here since we call malloc(M_WAITOK).
	 * That is OK, as this function is called before the trunk struct
	 * gets hooked up and becomes visible to other threads.
	 */

	KASSERT(trunk->hwidth == 0 && trunk->hash == NULL,
	    ("%s: hash already initialized", __func__));

	trunk->hwidth = VLAN_DEF_HWIDTH;
	n = 1 << trunk->hwidth;
	trunk->hmask = n - 1;
	trunk->hash = malloc(sizeof(struct ifvlanhead) * n, M_VLAN, M_WAITOK);
	for (i = 0; i < n; i++)
		LIST_INIT(&trunk->hash[i]);
}

static void
vlan_freehash(struct ifvlantrunk *trunk)
{
#ifdef INVARIANTS
	int i;

	KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__));
	for (i = 0; i < (1 << trunk->hwidth); i++)
		KASSERT(LIST_EMPTY(&trunk->hash[i]),
		    ("%s: hash table not empty", __func__));
#endif
	free(trunk->hash, M_VLAN);
	trunk->hash = NULL;
	trunk->hwidth = trunk->hmask = 0;
}

static int
vlan_inshash(struct ifvlantrunk *trunk, struct ifvlan *ifv)
{
	int i, b;
	struct ifvlan *ifv2;

	TRUNK_LOCK_ASSERT(trunk);
	KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__));

	b = 1 << trunk->hwidth;
	i = HASH(ifv->ifv_tag, trunk->hmask);
	LIST_FOREACH(ifv2, &trunk->hash[i], ifv_list)
		if (ifv->ifv_tag == ifv2->ifv_tag)
			return (EEXIST);

	/*
	 * Grow the hash when the number of vlans exceeds half of the number of
	 * hash buckets squared. This will make the average linked-list length
	 * buckets/2.
	 */
	if (trunk->refcnt > (b * b) / 2) {
		vlan_growhash(trunk, 1);
		i = HASH(ifv->ifv_tag, trunk->hmask);
	}
	LIST_INSERT_HEAD(&trunk->hash[i], ifv, ifv_list);
	trunk->refcnt++;

	return (0);
}

static int
vlan_remhash(struct ifvlantrunk *trunk, struct ifvlan *ifv)
{
	int i, b;
	struct ifvlan *ifv2;

	TRUNK_LOCK_ASSERT(trunk);
	KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__));

	b = 1 << trunk->hwidth;
	i = HASH(ifv->ifv_tag, trunk->hmask);
	LIST_FOREACH(ifv2, &trunk->hash[i], ifv_list)
		if (ifv2 == ifv) {
			trunk->refcnt--;
			LIST_REMOVE(ifv2, ifv_list);
			if (trunk->refcnt < (b * b) / 2)
				vlan_growhash(trunk, -1);
			return (0);
		}

	panic("%s: vlan not found\n", __func__);
	return (ENOENT); /*NOTREACHED*/
}

/*
 * Grow the hash larger or smaller if memory permits.
 */
static void
vlan_growhash(struct ifvlantrunk *trunk, int howmuch)
{
	struct ifvlan *ifv;
	struct ifvlanhead *hash2;
	int hwidth2, i, j, n, n2;

	TRUNK_LOCK_ASSERT(trunk);
	KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__));

	if (howmuch == 0) {
		/* Harmless yet obvious coding error */
		printf("%s: howmuch is 0\n", __func__);
		return;
	}

	hwidth2 = trunk->hwidth + howmuch;
	n = 1 << trunk->hwidth;
	n2 = 1 << hwidth2;
	/* Do not shrink the table below the default */
	if (hwidth2 < VLAN_DEF_HWIDTH)
		return;

	/* M_NOWAIT because we're called with the trunk lock held */
	hash2 = malloc(sizeof(struct ifvlanhead) * n2, M_VLAN, M_NOWAIT);
	if (hash2 == NULL) {
		printf("%s: out of memory -- hash size not changed\n",
		    __func__);
		return;		/* We can live with the old hash table */
	}
	for (j = 0; j < n2; j++)
		LIST_INIT(&hash2[j]);
	for (i = 0; i < n; i++)
		while ((ifv = LIST_FIRST(&trunk->hash[i])) != NULL) {
			LIST_REMOVE(ifv, ifv_list);
			j = HASH(ifv->ifv_tag, n2 - 1);
			LIST_INSERT_HEAD(&hash2[j], ifv, ifv_list);
		}
	free(trunk->hash, M_VLAN);
	trunk->hash = hash2;
	trunk->hwidth = hwidth2;
	trunk->hmask = n2 - 1;

	if (bootverbose)
		if_printf(trunk->parent,
		    "VLAN hash table resized from %d to %d buckets\n", n, n2);
}

static __inline struct ifvlan *
vlan_gethash(struct ifvlantrunk *trunk, uint16_t tag)
{
	struct ifvlan *ifv;

	TRUNK_LOCK_RASSERT(trunk);

	LIST_FOREACH(ifv, &trunk->hash[HASH(tag, trunk->hmask)], ifv_list)
		if (ifv->ifv_tag == tag)
			return (ifv);
	return (NULL);
}

#if 0
/* Debugging code to view the hashtables. */
static void
vlan_dumphash(struct ifvlantrunk *trunk)
{
	int i;
	struct ifvlan *ifv;

	for (i = 0; i < (1 << trunk->hwidth); i++) {
		printf("%d: ", i);
		LIST_FOREACH(ifv, &trunk->hash[i], ifv_list)
			printf("%s ", ifv->ifv_ifp->if_xname);
		printf("\n");
	}
}
#endif /* 0 */
#endif /* !VLAN_ARRAY */

static void
trunk_destroy(struct ifvlantrunk *trunk)
{
	VLAN_LOCK_ASSERT();

	TRUNK_LOCK(trunk);
#ifndef VLAN_ARRAY
	vlan_freehash(trunk);
#endif
	trunk->parent->if_vlantrunk = NULL;
	TRUNK_UNLOCK(trunk);
	TRUNK_LOCK_DESTROY(trunk);
	free(trunk, M_VLAN);
}

/*
 * Program our multicast filter. What we're actually doing is
 * programming the multicast filter of the parent. This has the
 * side effect of causing the parent interface to receive multicast
 * traffic that it doesn't really want, which ends up being discarded
 * later by the upper protocol layers. Unfortunately, there's no way
 * to avoid this: there really is only one physical interface.
 *
 * XXX: There is a possible race here if more than one thread is
 * modifying the multicast state of the vlan interface at the same time.
 */
static int
vlan_setmulti(struct ifnet *ifp)
{
	struct ifnet *ifp_p;
	struct ifmultiaddr *ifma, *rifma = NULL;
	struct ifvlan *sc;
	struct vlan_mc_entry *mc;
	struct sockaddr_dl sdl;
	int error;

	/*VLAN_LOCK_ASSERT();*/

	/* Find the parent. */
	sc = ifp->if_softc;
	ifp_p = PARENT(sc);

	CURVNET_SET_QUIET(ifp_p->if_vnet);

	bzero((char *)&sdl, sizeof(sdl));
	sdl.sdl_len = sizeof(sdl);
	sdl.sdl_family = AF_LINK;
	sdl.sdl_index = ifp_p->if_index;
	sdl.sdl_type = IFT_ETHER;
	sdl.sdl_alen = ETHER_ADDR_LEN;

	/* First, remove any existing filter entries. */
	while ((mc = SLIST_FIRST(&sc->vlan_mc_listhead)) != NULL) {
		bcopy((char *)&mc->mc_addr, LLADDR(&sdl), ETHER_ADDR_LEN);
		error = if_delmulti(ifp_p, (struct sockaddr *)&sdl);
		if (error)
			return (error);
		SLIST_REMOVE_HEAD(&sc->vlan_mc_listhead, mc_entries);
		free(mc, M_VLAN);
	}

	/* Now program new ones. */
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		mc = malloc(sizeof(struct vlan_mc_entry), M_VLAN, M_NOWAIT);
		if (mc == NULL)
			return (ENOMEM);
		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    (char *)&mc->mc_addr, ETHER_ADDR_LEN);
		SLIST_INSERT_HEAD(&sc->vlan_mc_listhead, mc, mc_entries);
		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    LLADDR(&sdl), ETHER_ADDR_LEN);
		error = if_addmulti(ifp_p, (struct sockaddr *)&sdl, &rifma);
		if (error)
			return (error);
	}

	CURVNET_RESTORE();
	return (0);
}

/*
 * A handler for network interface departure events.
 * Track departure of trunks here so that we don't access invalid
 * pointers or whatever if a trunk is ripped from under us, e.g.,
 * by ejecting its hot-plug card.
 */
static void
vlan_ifdetach(void *arg __unused, struct ifnet *ifp)
{
	struct ifvlan *ifv;
	int i;

	/*
	 * Check if it's a trunk interface first of all
	 * to avoid needless locking.
	 */
	if (ifp->if_vlantrunk == NULL)
		return;

	VLAN_LOCK();
	/*
	 * OK, it's a trunk.  Loop over and detach all vlan's on it.
	 * Check trunk pointer after each vlan_unconfig() as it will
	 * free it and set to NULL after the last vlan was detached.
	 */
#ifdef VLAN_ARRAY
	for (i = 0; i < VLAN_ARRAY_SIZE; i++)
		if ((ifv = ifp->if_vlantrunk->vlans[i])) {
			vlan_unconfig_locked(ifv->ifv_ifp);
			if (ifp->if_vlantrunk == NULL)
				break;
		}
#else /* VLAN_ARRAY */
restart:
	for (i = 0; i < (1 << ifp->if_vlantrunk->hwidth); i++)
		if ((ifv = LIST_FIRST(&ifp->if_vlantrunk->hash[i]))) {
			vlan_unconfig_locked(ifv->ifv_ifp);
			if (ifp->if_vlantrunk)
				goto restart;	/* trunk->hwidth can change */
			else
				break;
		}
#endif /* VLAN_ARRAY */
	/* Trunk should have been destroyed in vlan_unconfig(). */
	KASSERT(ifp->if_vlantrunk == NULL, ("%s: purge failed", __func__));
	VLAN_UNLOCK();
}

/*
 * VLAN support can be loaded as a module.  The only place in the
 * system that's intimately aware of this is ether_input.  We hook
 * into this code through vlan_input_p which is defined there and
 * set here.  No one else in the system should be aware of this so
 * we use an explicit reference here.
 */
extern	void (*vlan_input_p)(struct ifnet *, struct mbuf *);

/* For if_link_state_change() eyes only... */
extern	void (*vlan_link_state_p)(struct ifnet *, int);

static int
vlan_modevent(module_t mod, int type, void *data)
{

	switch (type) {
	case MOD_LOAD:
		ifdetach_tag = EVENTHANDLER_REGISTER(ifnet_departure_event,
		    vlan_ifdetach, NULL, EVENTHANDLER_PRI_ANY);
		if (ifdetach_tag == NULL)
			return (ENOMEM);
		VLAN_LOCK_INIT();
		vlan_input_p = vlan_input;
		vlan_link_state_p = vlan_link_state;
		vlan_trunk_cap_p = vlan_trunk_capabilities;
		if_clone_attach(&vlan_cloner);
		if (bootverbose)
			printf("vlan: initialized, using "
#ifdef VLAN_ARRAY
			    "full-size arrays"
#else
			    "hash tables with chaining"
#endif

			    "\n");
		break;
	case MOD_UNLOAD:
		if_clone_detach(&vlan_cloner);
		EVENTHANDLER_DEREGISTER(ifnet_departure_event, ifdetach_tag);
		vlan_input_p = NULL;
		vlan_link_state_p = NULL;
		vlan_trunk_cap_p = NULL;
		VLAN_LOCK_DESTROY();
		if (bootverbose)
			printf("vlan: unloaded\n");
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (0);
}

static moduledata_t vlan_mod = {
	"if_vlan",
	vlan_modevent,
	0
};

DECLARE_MODULE(if_vlan, vlan_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
MODULE_VERSION(if_vlan, 3);

static struct ifnet *
vlan_clone_match_ethertag(struct if_clone *ifc, const char *name, int *tag)
{
	const char *cp;
	struct ifnet *ifp;
	int t = 0;

	/* Check for <etherif>.<vlan> style interface names. */
	IFNET_RLOCK();
	TAILQ_FOREACH(ifp, &V_ifnet, if_link) {
		if (ifp->if_type != IFT_ETHER)
			continue;
		if (strncmp(ifp->if_xname, name, strlen(ifp->if_xname)) != 0)
			continue;
		cp = name + strlen(ifp->if_xname);
		if (*cp != '.')
			continue;
		for (; *cp != '\0'; cp++) {
			if (*cp < '0' || *cp > '9')
				continue;
			t = (t * 10) + (*cp - '0');
		}
		if (tag != NULL)
			*tag = t;
		break;
	}
	IFNET_RUNLOCK();

	return (ifp);
}

static int
vlan_clone_match(struct if_clone *ifc, const char *name)
{
	const char *cp;

	if (vlan_clone_match_ethertag(ifc, name, NULL) != NULL)
		return (1);

	if (strncmp(VLANNAME, name, strlen(VLANNAME)) != 0)
		return (0);
	for (cp = name + 4; *cp != '\0'; cp++) {
		if (*cp < '0' || *cp > '9')
			return (0);
	}

	return (1);
}

static int
vlan_clone_create(struct if_clone *ifc, char *name, size_t len, caddr_t params)
{
	char *dp;
	int wildcard;
	int unit;
	int error;
	int tag;
	int ethertag;
	struct ifvlan *ifv;
	struct ifnet *ifp;
	struct ifnet *p;
	struct vlanreq vlr;
	static const u_char eaddr[ETHER_ADDR_LEN];	/* 00:00:00:00:00:00 */

	/*
	 * There are 3 (ugh) ways to specify the cloned device:
	 * o pass a parameter block with the clone request.
	 * o specify parameters in the text of the clone device name
	 * o specify no parameters and get an unattached device that
	 *   must be configured separately.
	 * The first technique is preferred; the latter two are
	 * supported for backwards compatibility.
	 */
	if (params) {
		error = copyin(params, &vlr, sizeof(vlr));
		if (error)
			return error;
		p = ifunit(vlr.vlr_parent);
		if (p == NULL)
			return ENXIO;
		/*
		 * Don't let the caller set up a VLAN tag with
		 * anything except VLID bits.
		 */
		if (vlr.vlr_tag & ~EVL_VLID_MASK)
			return (EINVAL);
		error = ifc_name2unit(name, &unit);
		if (error != 0)
			return (error);

		ethertag = 1;
		tag = vlr.vlr_tag;
		wildcard = (unit < 0);
	} else if ((p = vlan_clone_match_ethertag(ifc, name, &tag)) != NULL) {
		ethertag = 1;
		unit = -1;
		wildcard = 0;

		/*
		 * Don't let the caller set up a VLAN tag with
		 * anything except VLID bits.
		 */
		if (tag & ~EVL_VLID_MASK)
			return (EINVAL);
	} else {
		ethertag = 0;

		error = ifc_name2unit(name, &unit);
		if (error != 0)
			return (error);

		wildcard = (unit < 0);
	}

	error = ifc_alloc_unit(ifc, &unit);
	if (error != 0)
		return (error);

	/* In the wildcard case, we need to update the name. */
	if (wildcard) {
		for (dp = name; *dp != '\0'; dp++);
		if (snprintf(dp, len - (dp-name), "%d", unit) >
		    len - (dp-name) - 1) {
			panic("%s: interface name too long", __func__);
		}
	}

	ifv = malloc(sizeof(struct ifvlan), M_VLAN, M_WAITOK | M_ZERO);
	ifp = ifv->ifv_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		ifc_free_unit(ifc, unit);
		free(ifv, M_VLAN);
		return (ENOSPC);
	}
	SLIST_INIT(&ifv->vlan_mc_listhead);

	ifp->if_softc = ifv;
	/*
	 * Set the name manually rather than using if_initname because
	 * we don't conform to the default naming convention for interfaces.
	 */
	strlcpy(ifp->if_xname, name, IFNAMSIZ);
	ifp->if_dname = ifc->ifc_name;
	ifp->if_dunit = unit;
	/* NB: flags are not set here */
	ifp->if_linkmib = &ifv->ifv_mib;
	ifp->if_linkmiblen = sizeof(ifv->ifv_mib);
	/* NB: mtu is not set here */

	ifp->if_init = vlan_init;
	ifp->if_start = vlan_start;
	ifp->if_ioctl = vlan_ioctl;
	ifp->if_snd.ifq_maxlen = ifqmaxlen;
	ifp->if_flags = VLAN_IFFLAGS;
	ether_ifattach(ifp, eaddr);
	/* Now undo some of the damage... */
	ifp->if_baudrate = 0;
	ifp->if_type = IFT_L2VLAN;
	ifp->if_hdrlen = ETHER_VLAN_ENCAP_LEN;

	if (ethertag) {
		error = vlan_config(ifv, p, tag);
		if (error != 0) {
			/*
			 * Since we've partially failed, we need to back
			 * out all the way, otherwise userland could get
			 * confused.  Thus, we destroy the interface.
			 */
			ether_ifdetach(ifp);
			vlan_unconfig(ifp);
			if_free_type(ifp, IFT_ETHER);
			ifc_free_unit(ifc, unit);
			free(ifv, M_VLAN);

			return (error);
		}

		/* Update flags on the parent, if necessary. */
		vlan_setflags(ifp, 1);
	}

	return (0);
}

static int
vlan_clone_destroy(struct if_clone *ifc, struct ifnet *ifp)
{
	struct ifvlan *ifv = ifp->if_softc;
	int unit = ifp->if_dunit;

	ether_ifdetach(ifp);	/* first, remove it from system-wide lists */
	vlan_unconfig(ifp);	/* now it can be unconfigured and freed */
	if_free_type(ifp, IFT_ETHER);
	free(ifv, M_VLAN);
	ifc_free_unit(ifc, unit);

	return (0);
}

/*
 * The ifp->if_init entry point for vlan(4) is a no-op.
 */
static void
vlan_init(void *foo __unused)
{
}

/*
 * The if_start method for the vlan(4) interface.  It doesn't
 * raise the IFF_DRV_OACTIVE flag, since it is called only from
 * the IFQ_HANDOFF() macro in ether_output_frame().  If the
 * interface queue were full and vlan_start() were not called,
 * the queue would never get emptied and the interface would
 * stall forever.
 */
static void
vlan_start(struct ifnet *ifp)
{
	struct ifvlan *ifv;
	struct ifnet *p;
	struct mbuf *m;
	int error;

	ifv = ifp->if_softc;
	p = PARENT(ifv);

	for (;;) {
		IF_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;
		BPF_MTAP(ifp, m);

		/*
		 * Do not run parent's if_start() if the parent is not up,
		 * or parent's driver will cause a system crash.
		 */
		if (!UP_AND_RUNNING(p)) {
			m_freem(m);
			ifp->if_collisions++;
			continue;
		}

		/*
		 * Pad the frame to the minimum size allowed if told to.
		 * This option is in accord with IEEE Std 802.1Q, 2003 Ed.,
		 * paragraph C.4.4.3.b.  It can help to work around buggy
		 * bridges that violate paragraph C.4.4.3.a from the same
		 * document, i.e., fail to pad short frames after untagging.
		 * E.g., a tagged frame 66 bytes long (incl. FCS) is OK, but
		 * untagging it will produce a 62-byte frame, which is a runt
		 * and requires padding.  There are VLAN-enabled network
		 * devices that just discard such runts instead or mishandle
		 * them somehow.
		 */
		if (soft_pad) {
			static char pad[8];	/* just zeros */
			int n;

			for (n = ETHERMIN + ETHER_HDR_LEN - m->m_pkthdr.len;
			     n > 0; n -= sizeof(pad))
				if (!m_append(m, min(n, sizeof(pad)), pad))
					break;

			if (n > 0) {
				if_printf(ifp, "cannot pad short frame\n");
				ifp->if_oerrors++;
				m_freem(m);
				continue;
			}
		}

		/*
		 * If underlying interface can do VLAN tag insertion itself,
		 * just pass the packet along. However, we need some way to
		 * tell the interface where the packet came from so that it
		 * knows how to find the VLAN tag to use, so we attach a
		 * packet tag that holds it.
		 */
		if (p->if_capenable & IFCAP_VLAN_HWTAGGING) {
			m->m_pkthdr.ether_vtag = ifv->ifv_tag;
			m->m_flags |= M_VLANTAG;
		} else {
			m = ether_vlanencap(m, ifv->ifv_tag);
			if (m == NULL) {
				if_printf(ifp,
				    "unable to prepend VLAN header\n");
				ifp->if_oerrors++;
				continue;
			}
		}

		/*
		 * Send it, precisely as ether_output() would have.
		 * We are already running at splimp.
		 */
		error = (p->if_transmit)(p, m);
		if (!error)
			ifp->if_opackets++;
		else
			ifp->if_oerrors++;
	}
}

static void
vlan_input(struct ifnet *ifp, struct mbuf *m)
{
	struct ifvlantrunk *trunk = ifp->if_vlantrunk;
	struct ifvlan *ifv;
	uint16_t tag;

	KASSERT(trunk != NULL, ("%s: no trunk", __func__));

	if (m->m_flags & M_VLANTAG) {
		/*
		 * Packet is tagged, but m contains a normal
		 * Ethernet frame; the tag is stored out-of-band.
		 */
		tag = EVL_VLANOFTAG(m->m_pkthdr.ether_vtag);
		m->m_flags &= ~M_VLANTAG;
	} else {
		struct ether_vlan_header *evl;

		/*
		 * Packet is tagged in-band as specified by 802.1q.
		 */
		switch (ifp->if_type) {
		case IFT_ETHER:
			if (m->m_len < sizeof(*evl) &&
			    (m = m_pullup(m, sizeof(*evl))) == NULL) {
				if_printf(ifp, "cannot pullup VLAN header\n");
				return;
			}
			evl = mtod(m, struct ether_vlan_header *);
			tag = EVL_VLANOFTAG(ntohs(evl->evl_tag));

			/*
			 * Remove the 802.1q header by copying the Ethernet
			 * addresses over it and adjusting the beginning of
			 * the data in the mbuf.  The encapsulated Ethernet
			 * type field is already in place.
			 */
			bcopy((char *)evl, (char *)evl + ETHER_VLAN_ENCAP_LEN,
			    ETHER_HDR_LEN - ETHER_TYPE_LEN);
			m_adj(m, ETHER_VLAN_ENCAP_LEN);
			break;

		default:
#ifdef INVARIANTS
			panic("%s: %s has unsupported if_type %u",
			    __func__, ifp->if_xname, ifp->if_type);
#endif
			m_freem(m);
			ifp->if_noproto++;
			return;
		}
	}

	TRUNK_RLOCK(trunk);
#ifdef VLAN_ARRAY
	ifv = trunk->vlans[tag];
#else
	ifv = vlan_gethash(trunk, tag);
#endif
	if (ifv == NULL || !UP_AND_RUNNING(ifv->ifv_ifp)) {
		TRUNK_RUNLOCK(trunk);
		m_freem(m);
		ifp->if_noproto++;
		return;
	}
	TRUNK_RUNLOCK(trunk);

	m->m_pkthdr.rcvif = ifv->ifv_ifp;
	ifv->ifv_ifp->if_ipackets++;

	/* Pass it back through the parent's input routine. */
	(*ifp->if_input)(ifv->ifv_ifp, m);
}

static int
vlan_config(struct ifvlan *ifv, struct ifnet *p, uint16_t tag)
{
	struct ifvlantrunk *trunk;
	struct ifnet *ifp;
	int error = 0;

	/* VID numbers 0x0 and 0xFFF are reserved */
	if (tag == 0 || tag == 0xFFF)
		return (EINVAL);
	if (p->if_type != IFT_ETHER)
		return (EPROTONOSUPPORT);
	if ((p->if_flags & VLAN_IFFLAGS) != VLAN_IFFLAGS)
		return (EPROTONOSUPPORT);
	if (ifv->ifv_trunk)
		return (EBUSY);

	if (p->if_vlantrunk == NULL) {
		trunk = malloc(sizeof(struct ifvlantrunk),
		    M_VLAN, M_WAITOK | M_ZERO);
#ifndef VLAN_ARRAY
		vlan_inithash(trunk);
#endif
		VLAN_LOCK();
		if (p->if_vlantrunk != NULL) {
			/* A race that is very unlikely to be hit. */
#ifndef VLAN_ARRAY
			vlan_freehash(trunk);
#endif
			free(trunk, M_VLAN);
			goto exists;
		}
		TRUNK_LOCK_INIT(trunk);
		TRUNK_LOCK(trunk);
		p->if_vlantrunk = trunk;
		trunk->parent = p;
	} else {
		VLAN_LOCK();
exists:
		trunk = p->if_vlantrunk;
		TRUNK_LOCK(trunk);
	}

	ifv->ifv_tag = tag;	/* must set this before vlan_inshash() */
#ifdef VLAN_ARRAY
	if (trunk->vlans[tag] != NULL) {
		error = EEXIST;
		goto done;
	}
	trunk->vlans[tag] = ifv;
	trunk->refcnt++;
#else
	error = vlan_inshash(trunk, ifv);
	if (error)
		goto done;
#endif
	ifv->ifv_proto = ETHERTYPE_VLAN;
	ifv->ifv_encaplen = ETHER_VLAN_ENCAP_LEN;
	ifv->ifv_mintu = ETHERMIN;
	ifv->ifv_pflags = 0;

	/*
	 * If the parent supports the VLAN_MTU capability,
	 * i.e. can Tx/Rx larger than ETHER_MAX_LEN frames,
	 * use it.
	 */
	if (p->if_capenable & IFCAP_VLAN_MTU) {
		/*
		 * No need to fudge the MTU since the parent can
		 * handle extended frames.
		 */
		ifv->ifv_mtufudge = 0;
	} else {
		/*
		 * Fudge the MTU by the encapsulation size.  This
		 * makes us incompatible with strictly compliant
		 * 802.1Q implementations, but allows us to use
		 * the feature with other NetBSD implementations,
		 * which might still be useful.
		 */
		ifv->ifv_mtufudge = ifv->ifv_encaplen;
	}

	ifv->ifv_trunk = trunk;
	ifp = ifv->ifv_ifp;
	ifp->if_mtu = p->if_mtu - ifv->ifv_mtufudge;
	ifp->if_baudrate = p->if_baudrate;
	/*
	 * Copy only a selected subset of flags from the parent.
	 * Other flags are none of our business.
	 */
#define	VLAN_COPY_FLAGS (IFF_SIMPLEX)
	ifp->if_flags &= ~VLAN_COPY_FLAGS;
	ifp->if_flags |= p->if_flags & VLAN_COPY_FLAGS;
#undef VLAN_COPY_FLAGS

	ifp->if_link_state = p->if_link_state;

	vlan_capabilities(ifv);

	/*
	 * Set up our ``Ethernet address'' to reflect the underlying
	 * physical interface's.
	 */
	bcopy(IF_LLADDR(p), IF_LLADDR(ifp), ETHER_ADDR_LEN);

	/*
	 * Configure multicast addresses that may already be
	 * joined on the vlan device.
	 */
	(void)vlan_setmulti(ifp); /* XXX: VLAN lock held */

	/* We are ready for operation now. */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
done:
	TRUNK_UNLOCK(trunk);
	if (error == 0)
		EVENTHANDLER_INVOKE(vlan_config, p, ifv->ifv_tag);
	VLAN_UNLOCK();

	return (error);
}

static int
vlan_unconfig(struct ifnet *ifp)
{
	int ret;

	VLAN_LOCK();
	ret = vlan_unconfig_locked(ifp);
	VLAN_UNLOCK();
	return (ret);
}

static int
vlan_unconfig_locked(struct ifnet *ifp)
{
	struct ifvlantrunk *trunk;
	struct vlan_mc_entry *mc;
	struct ifvlan *ifv;
	struct ifnet *parent;
	int error;

	VLAN_LOCK_ASSERT();

	ifv = ifp->if_softc;
	trunk = ifv->ifv_trunk;
	parent = NULL;

	if (trunk != NULL) {
		struct sockaddr_dl sdl;

		TRUNK_LOCK(trunk);
		parent = trunk->parent;

		/*
		 * Since the interface is being unconfigured, we need to
		 * empty the list of multicast groups that we may have joined
		 * while we were alive from the parent's list.
		 */
		bzero((char *)&sdl, sizeof(sdl));
		sdl.sdl_len = sizeof(sdl);
		sdl.sdl_family = AF_LINK;
		sdl.sdl_index = parent->if_index;
		sdl.sdl_type = IFT_ETHER;
		sdl.sdl_alen = ETHER_ADDR_LEN;

		while ((mc = SLIST_FIRST(&ifv->vlan_mc_listhead)) != NULL) {
			bcopy((char *)&mc->mc_addr, LLADDR(&sdl),
			    ETHER_ADDR_LEN);
			error = if_delmulti(parent, (struct sockaddr *)&sdl);
			if (error)
				return (error);
			SLIST_REMOVE_HEAD(&ifv->vlan_mc_listhead, mc_entries);
			free(mc, M_VLAN);
		}

		vlan_setflags(ifp, 0); /* clear special flags on parent */
#ifdef VLAN_ARRAY
		trunk->vlans[ifv->ifv_tag] = NULL;
		trunk->refcnt--;
#else
		vlan_remhash(trunk, ifv);
#endif
		ifv->ifv_trunk = NULL;

		/*
		 * Check if we were the last.
		 */
		if (trunk->refcnt == 0) {
			trunk->parent->if_vlantrunk = NULL;
			/*
			 * XXXGL: If some ithread has already entered
			 * vlan_input() and is now blocked on the trunk
			 * lock, then it should preempt us right after
			 * unlock and finish its work.  Then we will acquire
			 * lock again in trunk_destroy().
			 */
			TRUNK_UNLOCK(trunk);
			trunk_destroy(trunk);
		} else
			TRUNK_UNLOCK(trunk);
	}

	/* Disconnect from parent. */
	if (ifv->ifv_pflags)
		if_printf(ifp, "%s: ifv_pflags unclean\n", __func__);
	ifp->if_mtu = ETHERMTU;
	ifp->if_link_state = LINK_STATE_UNKNOWN;
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;

	/*
	 * Only dispatch an event if the vlan was
	 * attached, otherwise there is nothing
	 * to clean up anyway.
	 */
	if (parent != NULL)
		EVENTHANDLER_INVOKE(vlan_unconfig, parent, ifv->ifv_tag);

	return (0);
}

/* Handle a reference counted flag that should be set on the parent as well */
static int
vlan_setflag(struct ifnet *ifp, int flag, int status,
    int (*func)(struct ifnet *, int))
{
	struct ifvlan *ifv;
	int error;

	/* XXX VLAN_LOCK_ASSERT(); */

	ifv = ifp->if_softc;
	status = status ? (ifp->if_flags & flag) : 0;
	/* Now "status" contains the flag value or 0 */

	/*
	 * See if the recorded parent's status is different from what
	 * we want it to be.  If it is, flip it.  We record the parent's
	 * status in ifv_pflags so that we won't clear a parent's flag
	 * that we haven't set.  In fact, we don't clear or set the parent's
	 * flags directly, but get or release references to them.
	 * That's why we can be sure that recorded flags still are
	 * in accord with the actual parent's flags.
	 */
	if (status != (ifv->ifv_pflags & flag)) {
		error = (*func)(PARENT(ifv), status);
		if (error)
			return (error);
		ifv->ifv_pflags &= ~flag;
		ifv->ifv_pflags |= status;
	}
	return (0);
}

/*
 * Handle IFF_* flags that require certain changes on the parent:
 * if "status" is true, update the parent's flags with respect to our if_flags;
 * if "status" is false, forcibly clear the flags set on the parent.
 */
static int
vlan_setflags(struct ifnet *ifp, int status)
{
	int error, i;

	for (i = 0; vlan_pflags[i].flag; i++) {
		error = vlan_setflag(ifp, vlan_pflags[i].flag,
		    status, vlan_pflags[i].func);
		if (error)
			return (error);
	}
	return (0);
}

/* Inform all vlans that their parent has changed link state */
static void
vlan_link_state(struct ifnet *ifp, int link)
{
	struct ifvlantrunk *trunk = ifp->if_vlantrunk;
	struct ifvlan *ifv;
	int i;

	TRUNK_LOCK(trunk);
#ifdef VLAN_ARRAY
	for (i = 0; i < VLAN_ARRAY_SIZE; i++)
		if (trunk->vlans[i] != NULL) {
			ifv = trunk->vlans[i];
#else
	for (i = 0; i < (1 << trunk->hwidth); i++)
		LIST_FOREACH(ifv, &trunk->hash[i], ifv_list) {
#endif
			ifv->ifv_ifp->if_baudrate = trunk->parent->if_baudrate;
			if_link_state_change(ifv->ifv_ifp,
			    trunk->parent->if_link_state);
		}
	TRUNK_UNLOCK(trunk);
}

static void
vlan_capabilities(struct ifvlan *ifv)
{
	struct ifnet *p = PARENT(ifv);
	struct ifnet *ifp = ifv->ifv_ifp;

	TRUNK_LOCK_ASSERT(TRUNK(ifv));

	/*
	 * If the parent interface can do checksum offloading
	 * on VLANs, then propagate its hardware-assisted
	 * checksumming flags.  Also assert that checksum
	 * offloading requires hardware VLAN tagging.
	 */
	if (p->if_capabilities & IFCAP_VLAN_HWCSUM)
		ifp->if_capabilities = p->if_capabilities & IFCAP_HWCSUM;

	if (p->if_capenable & IFCAP_VLAN_HWCSUM &&
	    p->if_capenable & IFCAP_VLAN_HWTAGGING) {
		ifp->if_capenable = p->if_capenable & IFCAP_HWCSUM;
		ifp->if_hwassist = p->if_hwassist;
	} else {
		ifp->if_capenable = 0;
		ifp->if_hwassist = 0;
	}
}

static void
vlan_trunk_capabilities(struct ifnet *ifp)
{
	struct ifvlantrunk *trunk = ifp->if_vlantrunk;
	struct ifvlan *ifv;
	int i;

	TRUNK_LOCK(trunk);
#ifdef VLAN_ARRAY
	for (i = 0; i < VLAN_ARRAY_SIZE; i++)
		if (trunk->vlans[i] != NULL) {
			ifv = trunk->vlans[i];
#else
	for (i = 0; i < (1 << trunk->hwidth); i++) {
		LIST_FOREACH(ifv, &trunk->hash[i], ifv_list)
#endif
			vlan_capabilities(ifv);
	}
	TRUNK_UNLOCK(trunk);
}

static int
vlan_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct ifnet *p;
	struct ifreq *ifr;
	struct ifvlan *ifv;
	struct vlanreq vlr;
	int error = 0;

	ifr = (struct ifreq *)data;
	ifv = ifp->if_softc;

	switch (cmd) {
	case SIOCGIFMEDIA:
		VLAN_LOCK();
		if (TRUNK(ifv) != NULL) {
			error = (*PARENT(ifv)->if_ioctl)(PARENT(ifv),
			    SIOCGIFMEDIA, data);
			VLAN_UNLOCK();
			/* Limit the result to the parent's current config. */
			if (error == 0) {
				struct ifmediareq *ifmr;

				ifmr = (struct ifmediareq *)data;
				if (ifmr->ifm_count >= 1 && ifmr->ifm_ulist) {
					ifmr->ifm_count = 1;
					error = copyout(&ifmr->ifm_current,
					    ifmr->ifm_ulist,
					    sizeof(int));
				}
			}
		} else {
			VLAN_UNLOCK();
			error = EINVAL;
		}
		break;

	case SIOCSIFMEDIA:
		error = EINVAL;
		break;

	case SIOCSIFMTU:
		/*
		 * Set the interface MTU.
		 */
		VLAN_LOCK();
		if (TRUNK(ifv) != NULL) {
			if (ifr->ifr_mtu >
			    (PARENT(ifv)->if_mtu - ifv->ifv_mtufudge) ||
			    ifr->ifr_mtu <
			    (ifv->ifv_mintu - ifv->ifv_mtufudge))
				error = EINVAL;
			else
				ifp->if_mtu = ifr->ifr_mtu;
		} else
			error = EINVAL;
		VLAN_UNLOCK();
		break;

	case SIOCSETVLAN:
		error = copyin(ifr->ifr_data, &vlr, sizeof(vlr));
		if (error)
			break;
		if (vlr.vlr_parent[0] == '\0') {
			vlan_unconfig(ifp);
			break;
		}
		p = ifunit(vlr.vlr_parent);
		if (p == 0) {
			error = ENOENT;
			break;
		}
		/*
		 * Don't let the caller set up a VLAN tag with
		 * anything except VLID bits.
		 */
		if (vlr.vlr_tag & ~EVL_VLID_MASK) {
			error = EINVAL;
			break;
		}
		error = vlan_config(ifv, p, vlr.vlr_tag);
		if (error)
			break;

		/* Update flags on the parent, if necessary. */
		vlan_setflags(ifp, 1);
		break;

	case SIOCGETVLAN:
		bzero(&vlr, sizeof(vlr));
		VLAN_LOCK();
		if (TRUNK(ifv) != NULL) {
			strlcpy(vlr.vlr_parent, PARENT(ifv)->if_xname,
			    sizeof(vlr.vlr_parent));
			vlr.vlr_tag = ifv->ifv_tag;
		}
		VLAN_UNLOCK();
		error = copyout(&vlr, ifr->ifr_data, sizeof(vlr));
		break;

	case SIOCSIFFLAGS:
		/*
		 * We should propagate selected flags to the parent,
		 * e.g., promiscuous mode.
		 */
		if (TRUNK(ifv) != NULL)
			error = vlan_setflags(ifp, 1);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/*
		 * If we don't have a parent, just remember the membership for
		 * when we do.
		 */
		if (TRUNK(ifv) != NULL)
			error = vlan_setmulti(ifp);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
	}

	return (error);
}
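
/*
 * Illustrative usage sketch (not part of the driver): the three creation
 * methods accepted by vlan_clone_create() are typically reached from
 * userland along these lines, with "em0" standing in for any Ethernet
 * parent interface:
 *
 *   ifconfig vlan5 create vlan 5 vlandev em0   # explicit parameters
 *   ifconfig em0.5 create                      # parameters encoded in the name
 *   ifconfig vlan5 create                      # bare device, then
 *   ifconfig vlan5 vlan 5 vlandev em0          #   configured via SIOCSETVLAN
 */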