/*-
 * Copyright 1998 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * if_vlan.c - pseudo-device driver for IEEE 802.1Q virtual LANs.
 * Might be extended some day to also handle IEEE 802.1p priority
 * tagging.  This is sort of sneaky in the implementation, since
 * we need to pretend to be enough of an Ethernet implementation
 * to make arp work.  The way we do this is by telling everyone
 * that we are an Ethernet, and then catch the packets that
 * ether_output() left on our output queue when it calls
 * if_start(), rewrite them for use by the real outgoing interface,
 * and ask it to send them.
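 *
 * For example (a usage sketch, not part of this file): a vlan
 * interface is normally created and bound to its parent and tag from
 * userland with ifconfig(8), e.g.
 *
 *      ifconfig vlan0 create vlan 42 vlandev em0
 *
 * where "em0" and tag 42 are placeholders; the same configuration can
 * also be done programmatically through the SIOCSETVLAN ioctl handled
 * below.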
 */

#include "opt_vlan.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rwlock.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vimage.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_clone.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#define VLANNAME        "vlan"
#define VLAN_DEF_HWIDTH 4
#define VLAN_IFFLAGS    (IFF_BROADCAST | IFF_MULTICAST)

#define UP_AND_RUNNING(ifp) \
    ((ifp)->if_flags & IFF_UP && (ifp)->if_drv_flags & IFF_DRV_RUNNING)

LIST_HEAD(ifvlanhead, ifvlan);

struct ifvlantrunk {
        struct ifnet    *parent;        /* parent interface of this trunk */
        struct rwlock   rw;
#ifdef VLAN_ARRAY
#define VLAN_ARRAY_SIZE (EVL_VLID_MASK + 1)
        struct ifvlan   *vlans[VLAN_ARRAY_SIZE]; /* static table */
#else
        struct ifvlanhead *hash;        /* dynamic hash-list table */
        uint16_t        hmask;
        uint16_t        hwidth;
#endif
        int             refcnt;
};

struct vlan_mc_entry {
        struct ether_addr               mc_addr;
        SLIST_ENTRY(vlan_mc_entry)      mc_entries;
};

struct ifvlan {
        struct ifvlantrunk *ifv_trunk;
        struct ifnet *ifv_ifp;
#define TRUNK(ifv)      ((ifv)->ifv_trunk)
#define PARENT(ifv)     ((ifv)->ifv_trunk->parent)
        int     ifv_pflags;     /* special flags we have set on parent */
        struct ifv_linkmib {
                int     ifvm_encaplen;  /* encapsulation length */
                int     ifvm_mtufudge;  /* MTU fudged by this much */
                int     ifvm_mintu;     /* min transmission unit */
                uint16_t ifvm_proto;    /* encapsulation ethertype */
                uint16_t ifvm_tag;      /* tag to apply on packets leaving if */
        }       ifv_mib;
        SLIST_HEAD(, vlan_mc_entry) vlan_mc_listhead;
#ifndef VLAN_ARRAY
        LIST_ENTRY(ifvlan) ifv_list;
#endif
};
#define ifv_proto       ifv_mib.ifvm_proto
#define ifv_tag         ifv_mib.ifvm_tag
#define ifv_encaplen    ifv_mib.ifvm_encaplen
#define ifv_mtufudge    ifv_mib.ifvm_mtufudge
#define ifv_mintu       ifv_mib.ifvm_mintu

/* Special flags we should propagate to parent. */
static struct {
        int flag;
        int (*func)(struct ifnet *, int);
} vlan_pflags[] = {
        {IFF_PROMISC, ifpromisc},
        {IFF_ALLMULTI, if_allmulti},
        {0, NULL}
};

SYSCTL_DECL(_net_link);
SYSCTL_NODE(_net_link, IFT_L2VLAN, vlan, CTLFLAG_RW, 0, "IEEE 802.1Q VLAN");
SYSCTL_NODE(_net_link_vlan, PF_LINK, link, CTLFLAG_RW, 0, "for consistency");

static int soft_pad = 0;
SYSCTL_INT(_net_link_vlan, OID_AUTO, soft_pad, CTLFLAG_RW, &soft_pad, 0,
    "pad short frames before tagging");

static MALLOC_DEFINE(M_VLAN, VLANNAME, "802.1Q Virtual LAN Interface");

static eventhandler_tag ifdetach_tag;

/*
 * We have a global mutex that is used to serialize configuration
 * changes; it is not used in normal packet delivery.
 *
 * We also have a per-trunk rwlock that is locked shared during packet
 * processing and exclusive when the configuration is changed.
 *
 * The VLAN_ARRAY option substitutes a static array with 4096 entries
 * for the dynamic hash.  In theory this can give a boost in
 * processing, but in practice it does not, probably because the array
 * is too big to fit into the CPU cache.
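 *
 * The lock order, as the code below uses it, is the global mutex
 * first, then the trunk rwlock (see, e.g., vlan_config() and
 * vlan_unconfig_locked()).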
 */
static struct mtx ifv_mtx;
#define VLAN_LOCK_INIT()        mtx_init(&ifv_mtx, "vlan_global", NULL, MTX_DEF)
#define VLAN_LOCK_DESTROY()     mtx_destroy(&ifv_mtx)
#define VLAN_LOCK_ASSERT()      mtx_assert(&ifv_mtx, MA_OWNED)
#define VLAN_LOCK()             mtx_lock(&ifv_mtx)
#define VLAN_UNLOCK()           mtx_unlock(&ifv_mtx)
#define TRUNK_LOCK_INIT(trunk)  rw_init(&(trunk)->rw, VLANNAME)
#define TRUNK_LOCK_DESTROY(trunk)       rw_destroy(&(trunk)->rw)
#define TRUNK_LOCK(trunk)       rw_wlock(&(trunk)->rw)
#define TRUNK_UNLOCK(trunk)     rw_wunlock(&(trunk)->rw)
#define TRUNK_LOCK_ASSERT(trunk)        rw_assert(&(trunk)->rw, RA_WLOCKED)
#define TRUNK_RLOCK(trunk)      rw_rlock(&(trunk)->rw)
#define TRUNK_RUNLOCK(trunk)    rw_runlock(&(trunk)->rw)
#define TRUNK_LOCK_RASSERT(trunk)       rw_assert(&(trunk)->rw, RA_RLOCKED)

#ifndef VLAN_ARRAY
static void     vlan_inithash(struct ifvlantrunk *trunk);
static void     vlan_freehash(struct ifvlantrunk *trunk);
static int      vlan_inshash(struct ifvlantrunk *trunk, struct ifvlan *ifv);
static int      vlan_remhash(struct ifvlantrunk *trunk, struct ifvlan *ifv);
static void     vlan_growhash(struct ifvlantrunk *trunk, int howmuch);
static __inline struct ifvlan * vlan_gethash(struct ifvlantrunk *trunk,
        uint16_t tag);
#endif
static void     trunk_destroy(struct ifvlantrunk *trunk);

static void     vlan_start(struct ifnet *ifp);
static void     vlan_init(void *foo);
static void     vlan_input(struct ifnet *ifp, struct mbuf *m);
static int      vlan_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr);
static int      vlan_setflag(struct ifnet *ifp, int flag, int status,
        int (*func)(struct ifnet *, int));
static int      vlan_setflags(struct ifnet *ifp, int status);
static int      vlan_setmulti(struct ifnet *ifp);
static int      vlan_unconfig(struct ifnet *ifp);
static int      vlan_unconfig_locked(struct ifnet *ifp);
static int      vlan_config(struct ifvlan *ifv, struct ifnet *p, uint16_t tag);
static void     vlan_link_state(struct ifnet *ifp, int link);
static void     vlan_capabilities(struct ifvlan *ifv);
static void     vlan_trunk_capabilities(struct ifnet *ifp);

static struct ifnet *vlan_clone_match_ethertag(struct if_clone *,
    const char *, int *);
static int      vlan_clone_match(struct if_clone *, const char *);
static int      vlan_clone_create(struct if_clone *, char *, size_t, caddr_t);
static int      vlan_clone_destroy(struct if_clone *, struct ifnet *);

static void     vlan_ifdetach(void *arg, struct ifnet *ifp);

static struct if_clone vlan_cloner = IFC_CLONE_INITIALIZER(VLANNAME, NULL,
    IF_MAXUNIT, NULL, vlan_clone_match, vlan_clone_create, vlan_clone_destroy);

#ifndef VLAN_ARRAY
#define HASH(n, m)      ((((n) >> 8) ^ ((n) >> 4) ^ (n)) & (m))

static void
vlan_inithash(struct ifvlantrunk *trunk)
{
        int i, n;

        /*
         * The trunk must not be locked here since we call malloc(M_WAITOK).
         * That is OK because this function is called before the trunk
         * struct gets hooked up and becomes visible to other threads.
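         *
         * With VLAN_DEF_HWIDTH of 4 this allocates 1 << 4 = 16 buckets;
         * vlan_inshash() grows the table later through vlan_growhash()
         * as vlans are added.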
         */

        KASSERT(trunk->hwidth == 0 && trunk->hash == NULL,
            ("%s: hash already initialized", __func__));

        trunk->hwidth = VLAN_DEF_HWIDTH;
        n = 1 << trunk->hwidth;
        trunk->hmask = n - 1;
        trunk->hash = malloc(sizeof(struct ifvlanhead) * n, M_VLAN, M_WAITOK);
        for (i = 0; i < n; i++)
                LIST_INIT(&trunk->hash[i]);
}

static void
vlan_freehash(struct ifvlantrunk *trunk)
{
#ifdef INVARIANTS
        int i;

        KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__));
        for (i = 0; i < (1 << trunk->hwidth); i++)
                KASSERT(LIST_EMPTY(&trunk->hash[i]),
                    ("%s: hash table not empty", __func__));
#endif
        free(trunk->hash, M_VLAN);
        trunk->hash = NULL;
        trunk->hwidth = trunk->hmask = 0;
}

static int
vlan_inshash(struct ifvlantrunk *trunk, struct ifvlan *ifv)
{
        int i, b;
        struct ifvlan *ifv2;

        TRUNK_LOCK_ASSERT(trunk);
        KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__));

        b = 1 << trunk->hwidth;
        i = HASH(ifv->ifv_tag, trunk->hmask);
        LIST_FOREACH(ifv2, &trunk->hash[i], ifv_list)
                if (ifv->ifv_tag == ifv2->ifv_tag)
                        return (EEXIST);

        /*
         * Grow the hash when the number of vlans exceeds half of the
         * square of the number of hash buckets.  This bounds the average
         * linked-list length at roughly buckets/2.
         */
        if (trunk->refcnt > (b * b) / 2) {
                vlan_growhash(trunk, 1);
                i = HASH(ifv->ifv_tag, trunk->hmask);
        }
        LIST_INSERT_HEAD(&trunk->hash[i], ifv, ifv_list);
        trunk->refcnt++;

        return (0);
}

static int
vlan_remhash(struct ifvlantrunk *trunk, struct ifvlan *ifv)
{
        int i, b;
        struct ifvlan *ifv2;

        TRUNK_LOCK_ASSERT(trunk);
        KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__));

        b = 1 << trunk->hwidth;
        i = HASH(ifv->ifv_tag, trunk->hmask);
        LIST_FOREACH(ifv2, &trunk->hash[i], ifv_list)
                if (ifv2 == ifv) {
                        trunk->refcnt--;
                        LIST_REMOVE(ifv2, ifv_list);
                        if (trunk->refcnt < (b * b) / 2)
                                vlan_growhash(trunk, -1);
                        return (0);
                }

        panic("%s: vlan not found\n", __func__);
        return (ENOENT); /*NOTREACHED*/
}

/*
 * Grow or shrink the hash, as memory permits.
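 *
 * Rehashing redistributes entries with the same HASH() macro under the
 * new mask.  As a worked example, with the default mask of 0xf, VLAN
 * tag 100 (0x064) lands in bucket
 * ((0x064 >> 8) ^ (0x064 >> 4) ^ 0x064) & 0xf = (0x00 ^ 0x06 ^ 0x64) & 0xf = 2.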
 */
static void
vlan_growhash(struct ifvlantrunk *trunk, int howmuch)
{
        struct ifvlan *ifv;
        struct ifvlanhead *hash2;
        int hwidth2, i, j, n, n2;

        TRUNK_LOCK_ASSERT(trunk);
        KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__));

        if (howmuch == 0) {
                /* Harmless yet obvious coding error */
                printf("%s: howmuch is 0\n", __func__);
                return;
        }

        hwidth2 = trunk->hwidth + howmuch;
        n = 1 << trunk->hwidth;
        n2 = 1 << hwidth2;
        /* Do not shrink the table below the default */
        if (hwidth2 < VLAN_DEF_HWIDTH)
                return;

        /* M_NOWAIT because we're called with the trunk lock held */
        hash2 = malloc(sizeof(struct ifvlanhead) * n2, M_VLAN, M_NOWAIT);
        if (hash2 == NULL) {
                printf("%s: out of memory -- hash size not changed\n",
                    __func__);
                return;         /* We can live with the old hash table */
        }
        for (j = 0; j < n2; j++)
                LIST_INIT(&hash2[j]);
        for (i = 0; i < n; i++)
                while ((ifv = LIST_FIRST(&trunk->hash[i])) != NULL) {
                        LIST_REMOVE(ifv, ifv_list);
                        j = HASH(ifv->ifv_tag, n2 - 1);
                        LIST_INSERT_HEAD(&hash2[j], ifv, ifv_list);
                }
        free(trunk->hash, M_VLAN);
        trunk->hash = hash2;
        trunk->hwidth = hwidth2;
        trunk->hmask = n2 - 1;

        if (bootverbose)
                if_printf(trunk->parent,
                    "VLAN hash table resized from %d to %d buckets\n", n, n2);
}

static __inline struct ifvlan *
vlan_gethash(struct ifvlantrunk *trunk, uint16_t tag)
{
        struct ifvlan *ifv;

        TRUNK_LOCK_RASSERT(trunk);

        LIST_FOREACH(ifv, &trunk->hash[HASH(tag, trunk->hmask)], ifv_list)
                if (ifv->ifv_tag == tag)
                        return (ifv);
        return (NULL);
}

#if 0
/* Debugging code to view the hash tables. */
static void
vlan_dumphash(struct ifvlantrunk *trunk)
{
        int i;
        struct ifvlan *ifv;

        for (i = 0; i < (1 << trunk->hwidth); i++) {
                printf("%d: ", i);
                LIST_FOREACH(ifv, &trunk->hash[i], ifv_list)
                        printf("%s ", ifv->ifv_ifp->if_xname);
                printf("\n");
        }
}
#endif /* 0 */
#endif /* !VLAN_ARRAY */

static void
trunk_destroy(struct ifvlantrunk *trunk)
{
        VLAN_LOCK_ASSERT();

        TRUNK_LOCK(trunk);
#ifndef VLAN_ARRAY
        vlan_freehash(trunk);
#endif
        trunk->parent->if_vlantrunk = NULL;
        TRUNK_UNLOCK(trunk);
        TRUNK_LOCK_DESTROY(trunk);
        free(trunk, M_VLAN);
}

/*
 * Program our multicast filter.  What we're actually doing is
 * programming the multicast filter of the parent.  This has the
 * side effect of causing the parent interface to receive multicast
 * traffic that it doesn't really want, which ends up being discarded
 * later by the upper protocol layers.  Unfortunately, there's no way
 * to avoid this: there really is only one physical interface.
 *
 * XXX: There is a possible race here if more than one thread is
 * modifying the multicast state of the vlan interface at the same time.
 */
static int
vlan_setmulti(struct ifnet *ifp)
{
        struct ifnet *ifp_p;
        struct ifmultiaddr *ifma, *rifma = NULL;
        struct ifvlan *sc;
        struct vlan_mc_entry *mc;
        struct sockaddr_dl sdl;
        int error;

        /*VLAN_LOCK_ASSERT();*/

        /* Find the parent. */
        sc = ifp->if_softc;
        ifp_p = PARENT(sc);

        CURVNET_SET_QUIET(ifp_p->if_vnet);

        bzero((char *)&sdl, sizeof(sdl));
        sdl.sdl_len = sizeof(sdl);
        sdl.sdl_family = AF_LINK;
        sdl.sdl_index = ifp_p->if_index;
        sdl.sdl_type = IFT_ETHER;
        sdl.sdl_alen = ETHER_ADDR_LEN;

        /* First, remove any existing filter entries. */
        while ((mc = SLIST_FIRST(&sc->vlan_mc_listhead)) != NULL) {
                bcopy((char *)&mc->mc_addr, LLADDR(&sdl), ETHER_ADDR_LEN);
                error = if_delmulti(ifp_p, (struct sockaddr *)&sdl);
                if (error) {
                        /* Do not leak the vnet context on error. */
                        CURVNET_RESTORE();
                        return (error);
                }
                SLIST_REMOVE_HEAD(&sc->vlan_mc_listhead, mc_entries);
                free(mc, M_VLAN);
        }

        /* Now program new ones. */
        TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                if (ifma->ifma_addr->sa_family != AF_LINK)
                        continue;
                mc = malloc(sizeof(struct vlan_mc_entry), M_VLAN, M_NOWAIT);
                if (mc == NULL) {
                        CURVNET_RESTORE();
                        return (ENOMEM);
                }
                bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
                    (char *)&mc->mc_addr, ETHER_ADDR_LEN);
                SLIST_INSERT_HEAD(&sc->vlan_mc_listhead, mc, mc_entries);
                bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
                    LLADDR(&sdl), ETHER_ADDR_LEN);
                error = if_addmulti(ifp_p, (struct sockaddr *)&sdl, &rifma);
                if (error) {
                        CURVNET_RESTORE();
                        return (error);
                }
        }

        CURVNET_RESTORE();
        return (0);
}

/*
 * A handler for network interface departure events.
 * Track departure of trunks here so that we don't access invalid
 * pointers or whatever if a trunk is ripped from under us, e.g.,
 * by ejecting its hot-plug card.
 */
static void
vlan_ifdetach(void *arg __unused, struct ifnet *ifp)
{
        struct ifvlan *ifv;
        int i;

        /*
         * Check if it's a trunk interface first of all
         * to avoid needless locking.
         */
        if (ifp->if_vlantrunk == NULL)
                return;

        VLAN_LOCK();
        /*
         * OK, it's a trunk.  Loop over and detach all vlans on it.
         * Check the trunk pointer after each vlan_unconfig() as it will
         * free it and set it to NULL after the last vlan was detached.
         */
#ifdef VLAN_ARRAY
        for (i = 0; i < VLAN_ARRAY_SIZE; i++)
                if ((ifv = ifp->if_vlantrunk->vlans[i])) {
                        vlan_unconfig_locked(ifv->ifv_ifp);
                        if (ifp->if_vlantrunk == NULL)
                                break;
                }
#else /* VLAN_ARRAY */
restart:
        for (i = 0; i < (1 << ifp->if_vlantrunk->hwidth); i++)
                if ((ifv = LIST_FIRST(&ifp->if_vlantrunk->hash[i]))) {
                        vlan_unconfig_locked(ifv->ifv_ifp);
                        if (ifp->if_vlantrunk)
                                goto restart;   /* trunk->hwidth can change */
                        else
                                break;
                }
#endif /* VLAN_ARRAY */
        /* Trunk should have been destroyed in vlan_unconfig(). */
        KASSERT(ifp->if_vlantrunk == NULL, ("%s: purge failed", __func__));
        VLAN_UNLOCK();
}

/*
 * VLAN support can be loaded as a module.  The only place in the
 * system that's intimately aware of this is ether_input.  We hook
 * into this code through vlan_input_p which is defined there and
 * set here.  No one else in the system should be aware of this so
 * we use an explicit reference here.
 */
extern void (*vlan_input_p)(struct ifnet *, struct mbuf *);

/* For if_link_state_change() eyes only...
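 * This hook is likewise assigned in vlan_modevent() below; the base
 * system calls through it whenever a parent (trunk) interface changes
 * link state, so that member vlans can mirror the change.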
 */
extern void (*vlan_link_state_p)(struct ifnet *, int);

static int
vlan_modevent(module_t mod, int type, void *data)
{

        switch (type) {
        case MOD_LOAD:
                ifdetach_tag = EVENTHANDLER_REGISTER(ifnet_departure_event,
                    vlan_ifdetach, NULL, EVENTHANDLER_PRI_ANY);
                if (ifdetach_tag == NULL)
                        return (ENOMEM);
                VLAN_LOCK_INIT();
                vlan_input_p = vlan_input;
                vlan_link_state_p = vlan_link_state;
                vlan_trunk_cap_p = vlan_trunk_capabilities;
                if_clone_attach(&vlan_cloner);
                if (bootverbose)
                        printf("vlan: initialized, using "
#ifdef VLAN_ARRAY
                            "full-size arrays"
#else
                            "hash tables with chaining"
#endif

                            "\n");
                break;
        case MOD_UNLOAD:
                if_clone_detach(&vlan_cloner);
                EVENTHANDLER_DEREGISTER(ifnet_departure_event, ifdetach_tag);
                vlan_input_p = NULL;
                vlan_link_state_p = NULL;
                vlan_trunk_cap_p = NULL;
                VLAN_LOCK_DESTROY();
                if (bootverbose)
                        printf("vlan: unloaded\n");
                break;
        default:
                return (EOPNOTSUPP);
        }
        return (0);
}

static moduledata_t vlan_mod = {
        "if_vlan",
        vlan_modevent,
        0
};

DECLARE_MODULE(if_vlan, vlan_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
MODULE_VERSION(if_vlan, 3);
MODULE_DEPEND(if_vlan, miibus, 1, 1, 1);

static struct ifnet *
vlan_clone_match_ethertag(struct if_clone *ifc, const char *name, int *tag)
{
        INIT_VNET_NET(curvnet);
        const char *cp;
        struct ifnet *ifp;
        int t = 0;

        /* Check for <etherif>.<vlan> style interface names. */
        IFNET_RLOCK();
        TAILQ_FOREACH(ifp, &V_ifnet, if_link) {
                if (ifp->if_type != IFT_ETHER)
                        continue;
                if (strncmp(ifp->if_xname, name, strlen(ifp->if_xname)) != 0)
                        continue;
                cp = name + strlen(ifp->if_xname);
                if (*cp != '.')
                        continue;
                for (; *cp != '\0'; cp++) {
                        if (*cp < '0' || *cp > '9')
                                continue;
                        t = (t * 10) + (*cp - '0');
                }
                if (tag != NULL)
                        *tag = t;
                break;
        }
        IFNET_RUNLOCK();

        return (ifp);
}

static int
vlan_clone_match(struct if_clone *ifc, const char *name)
{
        const char *cp;

        if (vlan_clone_match_ethertag(ifc, name, NULL) != NULL)
                return (1);

        if (strncmp(VLANNAME, name, strlen(VLANNAME)) != 0)
                return (0);
        for (cp = name + 4; *cp != '\0'; cp++) {
                if (*cp < '0' || *cp > '9')
                        return (0);
        }

        return (1);
}

static int
vlan_clone_create(struct if_clone *ifc, char *name, size_t len, caddr_t params)
{
        char *dp;
        int wildcard;
        int unit;
        int error;
        int tag;
        int ethertag;
        struct ifvlan *ifv;
        struct ifnet *ifp;
        struct ifnet *p;
        struct vlanreq vlr;
        static const u_char eaddr[ETHER_ADDR_LEN];      /* 00:00:00:00:00:00 */

        /*
         * There are 3 (ugh) ways to specify the cloned device:
         * o pass a parameter block with the clone request.
         * o specify parameters in the text of the clone device name
         * o specify no parameters and get an unattached device that
         *   must be configured separately.
         * The first technique is preferred; the latter two are
         * supported for backwards compatibility.
         */
        if (params) {
                error = copyin(params, &vlr, sizeof(vlr));
                if (error)
                        return (error);
                p = ifunit(vlr.vlr_parent);
                if (p == NULL)
                        return (ENXIO);
                /*
                 * Don't let the caller set up a VLAN tag with
                 * anything except VLID bits.
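                 * (EVL_VLID_MASK is 0x0FFF, so anything that does not
                 * fit in the low 12 bits -- e.g. vlr_tag = 0x1005 -- is
                 * rejected here with EINVAL; tags 0 and 0xFFF are
                 * additionally rejected later by vlan_config().)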
                 */
                if (vlr.vlr_tag & ~EVL_VLID_MASK)
                        return (EINVAL);
                error = ifc_name2unit(name, &unit);
                if (error != 0)
                        return (error);

                ethertag = 1;
                tag = vlr.vlr_tag;
                wildcard = (unit < 0);
        } else if ((p = vlan_clone_match_ethertag(ifc, name, &tag)) != NULL) {
                ethertag = 1;
                unit = -1;
                wildcard = 0;

                /*
                 * Don't let the caller set up a VLAN tag with
                 * anything except VLID bits.
                 */
                if (tag & ~EVL_VLID_MASK)
                        return (EINVAL);
        } else {
                ethertag = 0;

                error = ifc_name2unit(name, &unit);
                if (error != 0)
                        return (error);

                wildcard = (unit < 0);
        }

        error = ifc_alloc_unit(ifc, &unit);
        if (error != 0)
                return (error);

        /* In the wildcard case, we need to update the name. */
        if (wildcard) {
                for (dp = name; *dp != '\0'; dp++);
                if (snprintf(dp, len - (dp-name), "%d", unit) >
                    len - (dp-name) - 1) {
                        panic("%s: interface name too long", __func__);
                }
        }

        ifv = malloc(sizeof(struct ifvlan), M_VLAN, M_WAITOK | M_ZERO);
        ifp = ifv->ifv_ifp = if_alloc(IFT_ETHER);
        if (ifp == NULL) {
                ifc_free_unit(ifc, unit);
                free(ifv, M_VLAN);
                return (ENOSPC);
        }
        SLIST_INIT(&ifv->vlan_mc_listhead);

        ifp->if_softc = ifv;
        /*
         * Set the name manually rather than using if_initname because
         * we don't conform to the default naming convention for interfaces.
         */
        strlcpy(ifp->if_xname, name, IFNAMSIZ);
        ifp->if_dname = ifc->ifc_name;
        ifp->if_dunit = unit;
        /* NB: flags are not set here */
        ifp->if_linkmib = &ifv->ifv_mib;
        ifp->if_linkmiblen = sizeof(ifv->ifv_mib);
        /* NB: mtu is not set here */

        ifp->if_init = vlan_init;
        ifp->if_start = vlan_start;
        ifp->if_ioctl = vlan_ioctl;
        ifp->if_snd.ifq_maxlen = ifqmaxlen;
        ifp->if_flags = VLAN_IFFLAGS;
        ether_ifattach(ifp, eaddr);
        /* Now undo some of the damage... */
        ifp->if_baudrate = 0;
        ifp->if_type = IFT_L2VLAN;
        ifp->if_hdrlen = ETHER_VLAN_ENCAP_LEN;

        if (ethertag) {
                error = vlan_config(ifv, p, tag);
                if (error != 0) {
                        /*
                         * Since we've partially failed, we need to back
                         * out all the way, otherwise userland could get
                         * confused.  Thus, we destroy the interface.
                         */
                        ether_ifdetach(ifp);
                        vlan_unconfig(ifp);
                        if_free_type(ifp, IFT_ETHER);
                        free(ifv, M_VLAN);

                        return (error);
                }

                /* Update flags on the parent, if necessary. */
                vlan_setflags(ifp, 1);
        }

        return (0);
}

static int
vlan_clone_destroy(struct if_clone *ifc, struct ifnet *ifp)
{
        struct ifvlan *ifv = ifp->if_softc;
        int unit = ifp->if_dunit;

        ether_ifdetach(ifp);    /* first, remove it from system-wide lists */
        vlan_unconfig(ifp);     /* now it can be unconfigured and freed */
        if_free_type(ifp, IFT_ETHER);
        free(ifv, M_VLAN);
        ifc_free_unit(ifc, unit);

        return (0);
}

/*
 * The ifp->if_init entry point for vlan(4) is a no-op.
 */
static void
vlan_init(void *foo __unused)
{
}

/*
 * The if_start method for the vlan(4) interface.  It doesn't raise
 * the IFF_DRV_OACTIVE flag, since it is called only from the
 * IFQ_HANDOFF() macro in ether_output_frame().  If it did, and the
 * interface queue filled up, vlan_start() would not be called again,
 * the queue would never be emptied and the interface would stall
 * forever.
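 *
 * (As a consequence, each call below simply drains if_snd completely:
 * every queued frame is either handed to the parent via IFQ_HANDOFF()
 * or dropped.)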
 */
static void
vlan_start(struct ifnet *ifp)
{
        struct ifvlan *ifv;
        struct ifnet *p;
        struct mbuf *m;
        int error;

        ifv = ifp->if_softc;
        p = PARENT(ifv);

        for (;;) {
                IF_DEQUEUE(&ifp->if_snd, m);
                if (m == NULL)
                        break;
                BPF_MTAP(ifp, m);

                /*
                 * Do not run parent's if_start() if the parent is not up,
                 * or parent's driver will cause a system crash.
                 */
                if (!UP_AND_RUNNING(p)) {
                        m_freem(m);
                        ifp->if_collisions++;
                        continue;
                }

                /*
                 * Pad the frame to the minimum size allowed if told to.
                 * This option is in accord with IEEE Std 802.1Q, 2003 Ed.,
                 * paragraph C.4.4.3.b.  It can help to work around buggy
                 * bridges that violate paragraph C.4.4.3.a from the same
                 * document, i.e., fail to pad short frames after untagging.
                 * E.g., a tagged frame 66 bytes long (incl. FCS) is OK, but
                 * untagging it will produce a 62-byte frame, which is a runt
                 * and requires padding.  There are VLAN-enabled network
                 * devices that just discard such runts instead or mishandle
                 * them somehow.
                 */
                if (soft_pad) {
                        static char pad[8];     /* just zeros */
                        int n;

                        for (n = ETHERMIN + ETHER_HDR_LEN - m->m_pkthdr.len;
                             n > 0; n -= sizeof(pad))
                                if (!m_append(m, min(n, sizeof(pad)), pad))
                                        break;

                        if (n > 0) {
                                if_printf(ifp, "cannot pad short frame\n");
                                ifp->if_oerrors++;
                                m_freem(m);
                                continue;
                        }
                }

                /*
                 * If underlying interface can do VLAN tag insertion itself,
                 * just pass the packet along.  However, we need some way to
                 * tell the interface where the packet came from so that it
                 * knows how to find the VLAN tag to use, so we attach a
                 * packet tag that holds it.
                 */
                if (p->if_capenable & IFCAP_VLAN_HWTAGGING) {
                        m->m_pkthdr.ether_vtag = ifv->ifv_tag;
                        m->m_flags |= M_VLANTAG;
                } else {
                        m = ether_vlanencap(m, ifv->ifv_tag);
                        if (m == NULL) {
                                if_printf(ifp,
                                    "unable to prepend VLAN header\n");
                                ifp->if_oerrors++;
                                continue;
                        }
                }

                /*
                 * Send it, precisely as ether_output() would have.
                 * We are already running at splimp.
                 */
                IFQ_HANDOFF(p, m, error);
                if (!error)
                        ifp->if_opackets++;
                else
                        ifp->if_oerrors++;
        }
}

static void
vlan_input(struct ifnet *ifp, struct mbuf *m)
{
        struct ifvlantrunk *trunk = ifp->if_vlantrunk;
        struct ifvlan *ifv;
        uint16_t tag;

        KASSERT(trunk != NULL, ("%s: no trunk", __func__));

        if (m->m_flags & M_VLANTAG) {
                /*
                 * Packet is tagged, but m contains a normal
                 * Ethernet frame; the tag is stored out-of-band.
                 */
                tag = EVL_VLANOFTAG(m->m_pkthdr.ether_vtag);
                m->m_flags &= ~M_VLANTAG;
        } else {
                struct ether_vlan_header *evl;

                /*
                 * Packet is tagged in-band as specified by 802.1q.
                 */
                switch (ifp->if_type) {
                case IFT_ETHER:
                        if (m->m_len < sizeof(*evl) &&
                            (m = m_pullup(m, sizeof(*evl))) == NULL) {
                                if_printf(ifp, "cannot pullup VLAN header\n");
                                return;
                        }
                        evl = mtod(m, struct ether_vlan_header *);
                        tag = EVL_VLANOFTAG(ntohs(evl->evl_tag));

                        /*
                         * Remove the 802.1q header by copying the Ethernet
                         * addresses over it and adjusting the beginning of
                         * the data in the mbuf.  The encapsulated Ethernet
                         * type field is already in place.
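                         *
                         * For example, a frame that arrives as
                         *   | dst(6) | src(6) | 0x8100(2) | TCI(2) | type(2) | ...
                         * leaves this block as
                         *   | dst(6) | src(6) | type(2) | ...
                         * The bcopy() below copies the 12 address bytes
                         * 4 bytes deeper into the mbuf, on top of the
                         * 802.1Q tag, and m_adj() then trims the
                         * now-duplicated 4 bytes from the front.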
                         */
                        bcopy((char *)evl, (char *)evl + ETHER_VLAN_ENCAP_LEN,
                            ETHER_HDR_LEN - ETHER_TYPE_LEN);
                        m_adj(m, ETHER_VLAN_ENCAP_LEN);
                        break;

                default:
#ifdef INVARIANTS
                        panic("%s: %s has unsupported if_type %u",
                            __func__, ifp->if_xname, ifp->if_type);
#endif
                        m_freem(m);
                        ifp->if_noproto++;
                        return;
                }
        }

        TRUNK_RLOCK(trunk);
#ifdef VLAN_ARRAY
        ifv = trunk->vlans[tag];
#else
        ifv = vlan_gethash(trunk, tag);
#endif
        if (ifv == NULL || !UP_AND_RUNNING(ifv->ifv_ifp)) {
                TRUNK_RUNLOCK(trunk);
                m_freem(m);
                ifp->if_noproto++;
                return;
        }
        TRUNK_RUNLOCK(trunk);

        m->m_pkthdr.rcvif = ifv->ifv_ifp;
        ifv->ifv_ifp->if_ipackets++;

        /* Pass it back through the parent's input routine. */
        (*ifp->if_input)(ifv->ifv_ifp, m);
}

static int
vlan_config(struct ifvlan *ifv, struct ifnet *p, uint16_t tag)
{
        struct ifvlantrunk *trunk;
        struct ifnet *ifp;
        int error = 0;

        /* VID numbers 0x0 and 0xFFF are reserved */
        if (tag == 0 || tag == 0xFFF)
                return (EINVAL);
        if (p->if_type != IFT_ETHER)
                return (EPROTONOSUPPORT);
        if ((p->if_flags & VLAN_IFFLAGS) != VLAN_IFFLAGS)
                return (EPROTONOSUPPORT);
        if (ifv->ifv_trunk)
                return (EBUSY);

        if (p->if_vlantrunk == NULL) {
                trunk = malloc(sizeof(struct ifvlantrunk),
                    M_VLAN, M_WAITOK | M_ZERO);
#ifndef VLAN_ARRAY
                vlan_inithash(trunk);
#endif
                VLAN_LOCK();
                if (p->if_vlantrunk != NULL) {
                        /* A race that is very unlikely to be hit. */
#ifndef VLAN_ARRAY
                        vlan_freehash(trunk);
#endif
                        free(trunk, M_VLAN);
                        goto exists;
                }
                TRUNK_LOCK_INIT(trunk);
                TRUNK_LOCK(trunk);
                p->if_vlantrunk = trunk;
                trunk->parent = p;
        } else {
                VLAN_LOCK();
exists:
                trunk = p->if_vlantrunk;
                TRUNK_LOCK(trunk);
        }

        ifv->ifv_tag = tag;     /* must set this before vlan_inshash() */
#ifdef VLAN_ARRAY
        if (trunk->vlans[tag] != NULL) {
                error = EEXIST;
                goto done;
        }
        trunk->vlans[tag] = ifv;
        trunk->refcnt++;
#else
        error = vlan_inshash(trunk, ifv);
        if (error)
                goto done;
#endif
        ifv->ifv_proto = ETHERTYPE_VLAN;
        ifv->ifv_encaplen = ETHER_VLAN_ENCAP_LEN;
        ifv->ifv_mintu = ETHERMIN;
        ifv->ifv_pflags = 0;

        /*
         * If the parent supports the VLAN_MTU capability,
         * i.e. can Tx/Rx larger than ETHER_MAX_LEN frames,
         * use it.
         */
        if (p->if_capenable & IFCAP_VLAN_MTU) {
                /*
                 * No need to fudge the MTU since the parent can
                 * handle extended frames.
                 */
                ifv->ifv_mtufudge = 0;
        } else {
                /*
                 * Fudge the MTU by the encapsulation size.  This
                 * makes us incompatible with strictly compliant
                 * 802.1Q implementations, but allows us to use
                 * the feature with other NetBSD implementations,
                 * which might still be useful.
                 */
                ifv->ifv_mtufudge = ifv->ifv_encaplen;
        }

        ifv->ifv_trunk = trunk;
        ifp = ifv->ifv_ifp;
        ifp->if_mtu = p->if_mtu - ifv->ifv_mtufudge;
        ifp->if_baudrate = p->if_baudrate;
        /*
         * Copy only a selected subset of flags from the parent.
         * Other flags are none of our business.
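         * (For reference: because of the MTU fudge just above, a parent
         * with a 1500-byte MTU and no IFCAP_VLAN_MTU capability yields a
         * 1496-byte vlan MTU.)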
         */
#define VLAN_COPY_FLAGS (IFF_SIMPLEX)
        ifp->if_flags &= ~VLAN_COPY_FLAGS;
        ifp->if_flags |= p->if_flags & VLAN_COPY_FLAGS;
#undef VLAN_COPY_FLAGS

        ifp->if_link_state = p->if_link_state;

        vlan_capabilities(ifv);

        /*
         * Set up our ``Ethernet address'' to reflect the underlying
         * physical interface's.
         */
        bcopy(IF_LLADDR(p), IF_LLADDR(ifp), ETHER_ADDR_LEN);

        /*
         * Configure multicast addresses that may already be
         * joined on the vlan device.
         */
        (void)vlan_setmulti(ifp); /* XXX: VLAN lock held */

        /* We are ready for operation now. */
        ifp->if_drv_flags |= IFF_DRV_RUNNING;
done:
        TRUNK_UNLOCK(trunk);
        if (error == 0)
                EVENTHANDLER_INVOKE(vlan_config, p, ifv->ifv_tag);
        VLAN_UNLOCK();

        return (error);
}

static int
vlan_unconfig(struct ifnet *ifp)
{
        int ret;

        VLAN_LOCK();
        ret = vlan_unconfig_locked(ifp);
        VLAN_UNLOCK();
        return (ret);
}

static int
vlan_unconfig_locked(struct ifnet *ifp)
{
        struct ifvlantrunk *trunk;
        struct vlan_mc_entry *mc;
        struct ifvlan *ifv;
        struct ifnet *parent;
        int error;

        VLAN_LOCK_ASSERT();

        ifv = ifp->if_softc;
        trunk = ifv->ifv_trunk;
        parent = NULL;

        if (trunk != NULL) {
                struct sockaddr_dl sdl;

                TRUNK_LOCK(trunk);
                parent = trunk->parent;

                /*
                 * Since the interface is being unconfigured, we need to
                 * empty the list of multicast groups that we may have joined
                 * while we were alive from the parent's list.
                 */
                bzero((char *)&sdl, sizeof(sdl));
                sdl.sdl_len = sizeof(sdl);
                sdl.sdl_family = AF_LINK;
                sdl.sdl_index = parent->if_index;
                sdl.sdl_type = IFT_ETHER;
                sdl.sdl_alen = ETHER_ADDR_LEN;

                while ((mc = SLIST_FIRST(&ifv->vlan_mc_listhead)) != NULL) {
                        bcopy((char *)&mc->mc_addr, LLADDR(&sdl),
                            ETHER_ADDR_LEN);
                        error = if_delmulti(parent, (struct sockaddr *)&sdl);
                        if (error)
                                return (error);
                        SLIST_REMOVE_HEAD(&ifv->vlan_mc_listhead, mc_entries);
                        free(mc, M_VLAN);
                }

                vlan_setflags(ifp, 0); /* clear special flags on parent */
#ifdef VLAN_ARRAY
                trunk->vlans[ifv->ifv_tag] = NULL;
                trunk->refcnt--;
#else
                vlan_remhash(trunk, ifv);
#endif
                ifv->ifv_trunk = NULL;

                /*
                 * Check if we were the last.
                 */
                if (trunk->refcnt == 0) {
                        trunk->parent->if_vlantrunk = NULL;
                        /*
                         * XXXGL: If some ithread has already entered
                         * vlan_input() and is now blocked on the trunk
                         * lock, then it should preempt us right after
                         * unlock and finish its work.  Then we will acquire
                         * lock again in trunk_destroy().
                         */
                        TRUNK_UNLOCK(trunk);
                        trunk_destroy(trunk);
                } else
                        TRUNK_UNLOCK(trunk);
        }

        /* Disconnect from parent. */
        if (ifv->ifv_pflags)
                if_printf(ifp, "%s: ifv_pflags unclean\n", __func__);
        ifp->if_mtu = ETHERMTU;
        ifp->if_link_state = LINK_STATE_UNKNOWN;
        ifp->if_drv_flags &= ~IFF_DRV_RUNNING;

        /*
         * Only dispatch an event if vlan was
         * attached, otherwise there is nothing
         * to cleanup anyway.
         */
        if (parent != NULL)
                EVENTHANDLER_INVOKE(vlan_unconfig, parent, ifv->ifv_tag);

        return (0);
}

/* Handle a reference counted flag that should be set on the parent as well */
static int
vlan_setflag(struct ifnet *ifp, int flag, int status,
    int (*func)(struct ifnet *, int))
{
        struct ifvlan *ifv;
        int error;

        /* XXX VLAN_LOCK_ASSERT(); */

        ifv = ifp->if_softc;
        status = status ? (ifp->if_flags & flag) : 0;
        /* Now "status" contains the flag value or 0 */

        /*
         * See if the recorded status of the parent is different from
         * what we want it to be.  If it is, flip it.  We record the
         * parent's status in ifv_pflags so that we won't clear a flag
         * on the parent that we didn't set.  In fact, we don't clear
         * or set the parent's flags directly, but get or release
         * references to them.  That way we can be sure that the
         * recorded flags remain in accord with the actual flags on
         * the parent.
         */
        if (status != (ifv->ifv_pflags & flag)) {
                error = (*func)(PARENT(ifv), status);
                if (error)
                        return (error);
                ifv->ifv_pflags &= ~flag;
                ifv->ifv_pflags |= status;
        }
        return (0);
}

/*
 * Handle IFF_* flags that require certain changes on the parent:
 * if "status" is true, update the parent's flags with respect to our
 * if_flags; if "status" is false, forcibly clear the flags we have set
 * on the parent.
 */
static int
vlan_setflags(struct ifnet *ifp, int status)
{
        int error, i;

        for (i = 0; vlan_pflags[i].flag; i++) {
                error = vlan_setflag(ifp, vlan_pflags[i].flag,
                    status, vlan_pflags[i].func);
                if (error)
                        return (error);
        }
        return (0);
}

/* Inform all vlans that their parent has changed link state */
static void
vlan_link_state(struct ifnet *ifp, int link)
{
        struct ifvlantrunk *trunk = ifp->if_vlantrunk;
        struct ifvlan *ifv;
        int i;

        TRUNK_LOCK(trunk);
#ifdef VLAN_ARRAY
        for (i = 0; i < VLAN_ARRAY_SIZE; i++)
                if (trunk->vlans[i] != NULL) {
                        ifv = trunk->vlans[i];
#else
        for (i = 0; i < (1 << trunk->hwidth); i++)
                LIST_FOREACH(ifv, &trunk->hash[i], ifv_list) {
#endif
                        ifv->ifv_ifp->if_baudrate = trunk->parent->if_baudrate;
                        if_link_state_change(ifv->ifv_ifp,
                            trunk->parent->if_link_state);
                }
        TRUNK_UNLOCK(trunk);
}

static void
vlan_capabilities(struct ifvlan *ifv)
{
        struct ifnet *p = PARENT(ifv);
        struct ifnet *ifp = ifv->ifv_ifp;

        TRUNK_LOCK_ASSERT(TRUNK(ifv));

        /*
         * If the parent interface can do checksum offloading
         * on VLANs, then propagate its hardware-assisted
         * checksumming flags.  Also assert that checksum
         * offloading requires hardware VLAN tagging.
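         * (IFCAP_HWCSUM is the union of IFCAP_RXCSUM and IFCAP_TXCSUM;
         * if_hwassist is copied only when the parent also has hardware
         * tagging enabled, since checksum offloading of our frames only
         * works when the parent inserts the tag itself.)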
         */
        if (p->if_capabilities & IFCAP_VLAN_HWCSUM)
                ifp->if_capabilities = p->if_capabilities & IFCAP_HWCSUM;

        if (p->if_capenable & IFCAP_VLAN_HWCSUM &&
            p->if_capenable & IFCAP_VLAN_HWTAGGING) {
                ifp->if_capenable = p->if_capenable & IFCAP_HWCSUM;
                ifp->if_hwassist = p->if_hwassist;
        } else {
                ifp->if_capenable = 0;
                ifp->if_hwassist = 0;
        }
}

static void
vlan_trunk_capabilities(struct ifnet *ifp)
{
        struct ifvlantrunk *trunk = ifp->if_vlantrunk;
        struct ifvlan *ifv;
        int i;

        TRUNK_LOCK(trunk);
#ifdef VLAN_ARRAY
        for (i = 0; i < VLAN_ARRAY_SIZE; i++)
                if (trunk->vlans[i] != NULL) {
                        ifv = trunk->vlans[i];
#else
        for (i = 0; i < (1 << trunk->hwidth); i++) {
                LIST_FOREACH(ifv, &trunk->hash[i], ifv_list)
#endif
                        vlan_capabilities(ifv);
        }
        TRUNK_UNLOCK(trunk);
}

static int
vlan_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
        struct ifaddr *ifa;
        struct ifnet *p;
        struct ifreq *ifr;
        struct ifvlan *ifv;
        struct vlanreq vlr;
        int error = 0;

        ifr = (struct ifreq *)data;
        ifa = (struct ifaddr *)data;
        ifv = ifp->if_softc;

        switch (cmd) {
        case SIOCGIFMEDIA:
                VLAN_LOCK();
                if (TRUNK(ifv) != NULL) {
                        error = (*PARENT(ifv)->if_ioctl)(PARENT(ifv),
                            SIOCGIFMEDIA, data);
                        VLAN_UNLOCK();
                        /* Limit the result to the parent's current config. */
                        if (error == 0) {
                                struct ifmediareq *ifmr;

                                ifmr = (struct ifmediareq *)data;
                                if (ifmr->ifm_count >= 1 && ifmr->ifm_ulist) {
                                        ifmr->ifm_count = 1;
                                        error = copyout(&ifmr->ifm_current,
                                            ifmr->ifm_ulist,
                                            sizeof(int));
                                }
                        }
                } else {
                        VLAN_UNLOCK();
                        error = EINVAL;
                }
                break;

        case SIOCSIFMEDIA:
                error = EINVAL;
                break;

        case SIOCSIFMTU:
                /*
                 * Set the interface MTU.
                 */
                VLAN_LOCK();
                if (TRUNK(ifv) != NULL) {
                        if (ifr->ifr_mtu >
                            (PARENT(ifv)->if_mtu - ifv->ifv_mtufudge) ||
                            ifr->ifr_mtu <
                            (ifv->ifv_mintu - ifv->ifv_mtufudge))
                                error = EINVAL;
                        else
                                ifp->if_mtu = ifr->ifr_mtu;
                } else
                        error = EINVAL;
                VLAN_UNLOCK();
                break;

        case SIOCSETVLAN:
                error = copyin(ifr->ifr_data, &vlr, sizeof(vlr));
                if (error)
                        break;
                if (vlr.vlr_parent[0] == '\0') {
                        vlan_unconfig(ifp);
                        break;
                }
                p = ifunit(vlr.vlr_parent);
                if (p == NULL) {
                        error = ENOENT;
                        break;
                }
                /*
                 * Don't let the caller set up a VLAN tag with
                 * anything except VLID bits.
                 */
                if (vlr.vlr_tag & ~EVL_VLID_MASK) {
                        error = EINVAL;
                        break;
                }
                error = vlan_config(ifv, p, vlr.vlr_tag);
                if (error)
                        break;

                /* Update flags on the parent, if necessary. */
                vlan_setflags(ifp, 1);
                break;

        case SIOCGETVLAN:
                bzero(&vlr, sizeof(vlr));
                VLAN_LOCK();
                if (TRUNK(ifv) != NULL) {
                        strlcpy(vlr.vlr_parent, PARENT(ifv)->if_xname,
                            sizeof(vlr.vlr_parent));
                        vlr.vlr_tag = ifv->ifv_tag;
                }
                VLAN_UNLOCK();
                error = copyout(&vlr, ifr->ifr_data, sizeof(vlr));
                break;

        case SIOCSIFFLAGS:
                /*
                 * We should propagate selected flags to the parent,
                 * e.g., promiscuous mode.
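                 * The set of flags that is actually propagated is the
                 * vlan_pflags[] table near the top of this file:
                 * IFF_PROMISC via ifpromisc() and IFF_ALLMULTI via
                 * if_allmulti(), applied by vlan_setflags() below.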
                 */
                if (TRUNK(ifv) != NULL)
                        error = vlan_setflags(ifp, 1);
                break;

        case SIOCADDMULTI:
        case SIOCDELMULTI:
                /*
                 * If we don't have a parent, just remember the membership for
                 * when we do.
                 */
                if (TRUNK(ifv) != NULL)
                        error = vlan_setmulti(ifp);
                break;

        default:
                error = ether_ioctl(ifp, cmd, data);
        }

        return (error);
}