/*-
 * Copyright 1998 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * if_vlan.c - pseudo-device driver for IEEE 802.1Q virtual LANs.
 * Might be extended some day to also handle IEEE 802.1p priority
 * tagging.  This is sort of sneaky in the implementation, since
 * we need to pretend to be enough of an Ethernet implementation
 * to make arp work.  The way we do this is by telling everyone
 * that we are an Ethernet, and then catch the packets that
 * ether_output() left on our output queue when it calls
 * if_start(), rewrite them for use by the real outgoing interface,
 * and ask it to send them.
 */

#include "opt_vlan.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rwlock.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vimage.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_clone.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <net/vnet.h>

#define	VLANNAME	"vlan"
#define	VLAN_DEF_HWIDTH	4
#define	VLAN_IFFLAGS	(IFF_BROADCAST | IFF_MULTICAST)

#define	UP_AND_RUNNING(ifp) \
	((ifp)->if_flags & IFF_UP && (ifp)->if_drv_flags & IFF_DRV_RUNNING)

LIST_HEAD(ifvlanhead, ifvlan);

struct ifvlantrunk {
	struct ifnet *parent;		/* parent interface of this trunk */
	struct rwlock rw;
#ifdef VLAN_ARRAY
#define	VLAN_ARRAY_SIZE	(EVL_VLID_MASK + 1)
	struct ifvlan *vlans[VLAN_ARRAY_SIZE];	/* static table */
#else
	struct ifvlanhead *hash;	/* dynamic hash-list table */
	uint16_t hmask;
	uint16_t hwidth;
#endif
	int refcnt;
};

struct vlan_mc_entry {
	struct ether_addr mc_addr;
	SLIST_ENTRY(vlan_mc_entry) mc_entries;
};

struct ifvlan {
	struct ifvlantrunk *ifv_trunk;
	struct ifnet *ifv_ifp;
#define	TRUNK(ifv)	((ifv)->ifv_trunk)
#define	PARENT(ifv)	((ifv)->ifv_trunk->parent)
	int ifv_pflags;			/* special flags we have set on parent */
	struct ifv_linkmib {
		int ifvm_encaplen;	/* encapsulation length */
		int ifvm_mtufudge;	/* MTU fudged by this much */
		int ifvm_mintu;		/* min transmission unit */
		uint16_t ifvm_proto;	/* encapsulation ethertype */
		uint16_t ifvm_tag;	/* tag to apply on packets leaving if */
	} ifv_mib;
	SLIST_HEAD(, vlan_mc_entry) vlan_mc_listhead;
#ifndef VLAN_ARRAY
	LIST_ENTRY(ifvlan) ifv_list;
#endif
};
#define	ifv_proto	ifv_mib.ifvm_proto
#define	ifv_tag		ifv_mib.ifvm_tag
#define	ifv_encaplen	ifv_mib.ifvm_encaplen
#define	ifv_mtufudge	ifv_mib.ifvm_mtufudge
#define	ifv_mintu	ifv_mib.ifvm_mintu

/* Special flags we should propagate to parent. */
static struct {
	int flag;
	int (*func)(struct ifnet *, int);
} vlan_pflags[] = {
	{IFF_PROMISC, ifpromisc},
	{IFF_ALLMULTI, if_allmulti},
	{0, NULL}
};

SYSCTL_DECL(_net_link);
SYSCTL_NODE(_net_link, IFT_L2VLAN, vlan, CTLFLAG_RW, 0, "IEEE 802.1Q VLAN");
SYSCTL_NODE(_net_link_vlan, PF_LINK, link, CTLFLAG_RW, 0, "for consistency");

static int soft_pad = 0;
SYSCTL_INT(_net_link_vlan, OID_AUTO, soft_pad, CTLFLAG_RW, &soft_pad, 0,
    "pad short frames before tagging");

static MALLOC_DEFINE(M_VLAN, VLANNAME, "802.1Q Virtual LAN Interface");

static eventhandler_tag ifdetach_tag;

/*
 * We have a global mutex that is used to serialize configuration
 * changes and is not used in normal packet delivery.
 *
 * We also have a per-trunk rwlock that is locked shared on packet
 * processing and exclusive when configuration is changed.
 *
 * The VLAN_ARRAY option substitutes the dynamic hash with a static
 * array of 4096 entries.  In theory this can give a boost in
 * processing, but in practice it does not, probably because the
 * array is too big to fit into the CPU cache.
 */
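
/*
 * A note on lock ordering, inferred from the code below rather than
 * stated anywhere authoritative: paths that reconfigure a trunk
 * (vlan_config(), vlan_unconfig(), vlan_ifdetach()) take the global
 * mutex first and the trunk rwlock second, while the data path in
 * vlan_input() takes only the trunk rwlock, shared.
 */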
static struct mtx ifv_mtx;
#define	VLAN_LOCK_INIT()	mtx_init(&ifv_mtx, "vlan_global", NULL, MTX_DEF)
#define	VLAN_LOCK_DESTROY()	mtx_destroy(&ifv_mtx)
#define	VLAN_LOCK_ASSERT()	mtx_assert(&ifv_mtx, MA_OWNED)
#define	VLAN_LOCK()		mtx_lock(&ifv_mtx)
#define	VLAN_UNLOCK()		mtx_unlock(&ifv_mtx)
#define	TRUNK_LOCK_INIT(trunk)	rw_init(&(trunk)->rw, VLANNAME)
#define	TRUNK_LOCK_DESTROY(trunk)	rw_destroy(&(trunk)->rw)
#define	TRUNK_LOCK(trunk)	rw_wlock(&(trunk)->rw)
#define	TRUNK_UNLOCK(trunk)	rw_wunlock(&(trunk)->rw)
#define	TRUNK_LOCK_ASSERT(trunk)	rw_assert(&(trunk)->rw, RA_WLOCKED)
#define	TRUNK_RLOCK(trunk)	rw_rlock(&(trunk)->rw)
#define	TRUNK_RUNLOCK(trunk)	rw_runlock(&(trunk)->rw)
#define	TRUNK_LOCK_RASSERT(trunk)	rw_assert(&(trunk)->rw, RA_RLOCKED)

#ifndef VLAN_ARRAY
static void vlan_inithash(struct ifvlantrunk *trunk);
static void vlan_freehash(struct ifvlantrunk *trunk);
static int vlan_inshash(struct ifvlantrunk *trunk, struct ifvlan *ifv);
static int vlan_remhash(struct ifvlantrunk *trunk, struct ifvlan *ifv);
static void vlan_growhash(struct ifvlantrunk *trunk, int howmuch);
static __inline struct ifvlan * vlan_gethash(struct ifvlantrunk *trunk,
    uint16_t tag);
#endif
static void trunk_destroy(struct ifvlantrunk *trunk);

static void vlan_start(struct ifnet *ifp);
static void vlan_init(void *foo);
static void vlan_input(struct ifnet *ifp, struct mbuf *m);
static int vlan_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr);
static int vlan_setflag(struct ifnet *ifp, int flag, int status,
    int (*func)(struct ifnet *, int));
static int vlan_setflags(struct ifnet *ifp, int status);
static int vlan_setmulti(struct ifnet *ifp);
static int vlan_unconfig(struct ifnet *ifp);
static int vlan_unconfig_locked(struct ifnet *ifp);
static int vlan_config(struct ifvlan *ifv, struct ifnet *p, uint16_t tag);
static void vlan_link_state(struct ifnet *ifp, int link);
static void vlan_capabilities(struct ifvlan *ifv);
static void vlan_trunk_capabilities(struct ifnet *ifp);

static struct ifnet *vlan_clone_match_ethertag(struct if_clone *,
    const char *, int *);
static int vlan_clone_match(struct if_clone *, const char *);
static int vlan_clone_create(struct if_clone *, char *, size_t, caddr_t);
static int vlan_clone_destroy(struct if_clone *, struct ifnet *);

static void vlan_ifdetach(void *arg, struct ifnet *ifp);

static struct if_clone vlan_cloner = IFC_CLONE_INITIALIZER(VLANNAME, NULL,
    IF_MAXUNIT, NULL, vlan_clone_match, vlan_clone_create, vlan_clone_destroy);

#ifndef VLAN_ARRAY
#define	HASH(n, m)	((((n) >> 8) ^ ((n) >> 4) ^ (n)) & (m))

static void
vlan_inithash(struct ifvlantrunk *trunk)
{
	int i, n;

	/*
	 * The trunk must not be locked here since we call malloc(M_WAITOK).
	 * That is OK because this function is called before the trunk
	 * struct gets hooked up and becomes visible to other threads.
	 */

	KASSERT(trunk->hwidth == 0 && trunk->hash == NULL,
	    ("%s: hash already initialized", __func__));

	trunk->hwidth = VLAN_DEF_HWIDTH;
	n = 1 << trunk->hwidth;
	trunk->hmask = n - 1;
	trunk->hash = malloc(sizeof(struct ifvlanhead) * n, M_VLAN, M_WAITOK);
	for (i = 0; i < n; i++)
		LIST_INIT(&trunk->hash[i]);
}

static void
vlan_freehash(struct ifvlantrunk *trunk)
{
#ifdef INVARIANTS
	int i;

	KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__));
	for (i = 0; i < (1 << trunk->hwidth); i++)
		KASSERT(LIST_EMPTY(&trunk->hash[i]),
		    ("%s: hash table not empty", __func__));
#endif
	free(trunk->hash, M_VLAN);
	trunk->hash = NULL;
	trunk->hwidth = trunk->hmask = 0;
}

static int
vlan_inshash(struct ifvlantrunk *trunk, struct ifvlan *ifv)
{
	int i, b;
	struct ifvlan *ifv2;

	TRUNK_LOCK_ASSERT(trunk);
	KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__));

	b = 1 << trunk->hwidth;
	i = HASH(ifv->ifv_tag, trunk->hmask);
	LIST_FOREACH(ifv2, &trunk->hash[i], ifv_list)
		if (ifv->ifv_tag == ifv2->ifv_tag)
			return (EEXIST);

	/*
	 * Grow the hash when the number of vlans exceeds half of the number of
	 * hash buckets squared.  This will make the average linked-list length
	 * buckets/2.
	 */
	if (trunk->refcnt > (b * b) / 2) {
		vlan_growhash(trunk, 1);
		i = HASH(ifv->ifv_tag, trunk->hmask);
	}
	LIST_INSERT_HEAD(&trunk->hash[i], ifv, ifv_list);
	trunk->refcnt++;

	return (0);
}

static int
vlan_remhash(struct ifvlantrunk *trunk, struct ifvlan *ifv)
{
	int i, b;
	struct ifvlan *ifv2;

	TRUNK_LOCK_ASSERT(trunk);
	KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__));

	b = 1 << trunk->hwidth;
	i = HASH(ifv->ifv_tag, trunk->hmask);
	LIST_FOREACH(ifv2, &trunk->hash[i], ifv_list)
		if (ifv2 == ifv) {
			trunk->refcnt--;
			LIST_REMOVE(ifv2, ifv_list);
			if (trunk->refcnt < (b * b) / 2)
				vlan_growhash(trunk, -1);
			return (0);
		}

	panic("%s: vlan not found\n", __func__);
	return (ENOENT); /*NOTREACHED*/
}

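/*
 * A quick worked example of the resize policy above (the numbers follow
 * directly from vlan_inshash() and vlan_remhash(), nothing new is assumed):
 * with the default hwidth of 4 there are 1 << 4 = 16 buckets, so the
 * table is grown once more than (16 * 16) / 2 = 128 vlans hash into it.
 * On removal the same b*b/2 test is applied with the current bucket
 * count, and vlan_growhash() refuses to shrink below VLAN_DEF_HWIDTH.
 */
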
/*
 * Grow the hash larger or smaller if memory permits.
 */
static void
vlan_growhash(struct ifvlantrunk *trunk, int howmuch)
{
	struct ifvlan *ifv;
	struct ifvlanhead *hash2;
	int hwidth2, i, j, n, n2;

	TRUNK_LOCK_ASSERT(trunk);
	KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__));

	if (howmuch == 0) {
		/* Harmless yet obvious coding error */
		printf("%s: howmuch is 0\n", __func__);
		return;
	}

	hwidth2 = trunk->hwidth + howmuch;
	n = 1 << trunk->hwidth;
	n2 = 1 << hwidth2;
	/* Do not shrink the table below the default */
	if (hwidth2 < VLAN_DEF_HWIDTH)
		return;

	/* M_NOWAIT because we're called with the trunk lock held */
	hash2 = malloc(sizeof(struct ifvlanhead) * n2, M_VLAN, M_NOWAIT);
	if (hash2 == NULL) {
		printf("%s: out of memory -- hash size not changed\n",
		    __func__);
		return;		/* We can live with the old hash table */
	}
	for (j = 0; j < n2; j++)
		LIST_INIT(&hash2[j]);
	for (i = 0; i < n; i++)
		while ((ifv = LIST_FIRST(&trunk->hash[i])) != NULL) {
			LIST_REMOVE(ifv, ifv_list);
			j = HASH(ifv->ifv_tag, n2 - 1);
			LIST_INSERT_HEAD(&hash2[j], ifv, ifv_list);
		}
	free(trunk->hash, M_VLAN);
	trunk->hash = hash2;
	trunk->hwidth = hwidth2;
	trunk->hmask = n2 - 1;

	if (bootverbose)
		if_printf(trunk->parent,
		    "VLAN hash table resized from %d to %d buckets\n", n, n2);
}

static __inline struct ifvlan *
vlan_gethash(struct ifvlantrunk *trunk, uint16_t tag)
{
	struct ifvlan *ifv;

	TRUNK_LOCK_RASSERT(trunk);

	LIST_FOREACH(ifv, &trunk->hash[HASH(tag, trunk->hmask)], ifv_list)
		if (ifv->ifv_tag == tag)
			return (ifv);
	return (NULL);
}

#if 0
/* Debugging code to view the hashtables. */
static void
vlan_dumphash(struct ifvlantrunk *trunk)
{
	int i;
	struct ifvlan *ifv;

	for (i = 0; i < (1 << trunk->hwidth); i++) {
		printf("%d: ", i);
		LIST_FOREACH(ifv, &trunk->hash[i], ifv_list)
			printf("%s ", ifv->ifv_ifp->if_xname);
		printf("\n");
	}
}
#endif /* 0 */
#endif /* !VLAN_ARRAY */

static void
trunk_destroy(struct ifvlantrunk *trunk)
{
	VLAN_LOCK_ASSERT();

	TRUNK_LOCK(trunk);
#ifndef VLAN_ARRAY
	vlan_freehash(trunk);
#endif
	trunk->parent->if_vlantrunk = NULL;
	TRUNK_UNLOCK(trunk);
	TRUNK_LOCK_DESTROY(trunk);
	free(trunk, M_VLAN);
}

/*
 * Program our multicast filter.  What we're actually doing is
 * programming the multicast filter of the parent.  This has the
 * side effect of causing the parent interface to receive multicast
 * traffic that it doesn't really want, which ends up being discarded
 * later by the upper protocol layers.  Unfortunately, there's no way
 * to avoid this: there really is only one physical interface.
 *
 * XXX: There is a possible race here if more than one thread is
 * modifying the multicast state of the vlan interface at the same time.
 */
static int
vlan_setmulti(struct ifnet *ifp)
{
	struct ifnet *ifp_p;
	struct ifmultiaddr *ifma, *rifma = NULL;
	struct ifvlan *sc;
	struct vlan_mc_entry *mc;
	struct sockaddr_dl sdl;
	int error;

	/*VLAN_LOCK_ASSERT();*/

	/* Find the parent. */
	sc = ifp->if_softc;
	ifp_p = PARENT(sc);

	CURVNET_SET_QUIET(ifp_p->if_vnet);

	bzero((char *)&sdl, sizeof(sdl));
	sdl.sdl_len = sizeof(sdl);
	sdl.sdl_family = AF_LINK;
	sdl.sdl_index = ifp_p->if_index;
	sdl.sdl_type = IFT_ETHER;
	sdl.sdl_alen = ETHER_ADDR_LEN;

	/* First, remove any existing filter entries. */
	while ((mc = SLIST_FIRST(&sc->vlan_mc_listhead)) != NULL) {
		bcopy((char *)&mc->mc_addr, LLADDR(&sdl), ETHER_ADDR_LEN);
		error = if_delmulti(ifp_p, (struct sockaddr *)&sdl);
		if (error)
			return (error);
		SLIST_REMOVE_HEAD(&sc->vlan_mc_listhead, mc_entries);
		free(mc, M_VLAN);
	}

	/* Now program new ones. */
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		mc = malloc(sizeof(struct vlan_mc_entry), M_VLAN, M_NOWAIT);
		if (mc == NULL)
			return (ENOMEM);
		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    (char *)&mc->mc_addr, ETHER_ADDR_LEN);
		SLIST_INSERT_HEAD(&sc->vlan_mc_listhead, mc, mc_entries);
		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    LLADDR(&sdl), ETHER_ADDR_LEN);
		error = if_addmulti(ifp_p, (struct sockaddr *)&sdl, &rifma);
		if (error)
			return (error);
	}

	CURVNET_RESTORE();
	return (0);
}

/*
 * A handler for network interface departure events.
 * Track departure of trunks here so that we don't access invalid
 * pointers or whatever if a trunk is ripped from under us, e.g.,
 * by ejecting its hot-plug card.
 */
static void
vlan_ifdetach(void *arg __unused, struct ifnet *ifp)
{
	struct ifvlan *ifv;
	int i;

	/*
	 * Check if it's a trunk interface first of all
	 * to avoid needless locking.
	 */
	if (ifp->if_vlantrunk == NULL)
		return;

	VLAN_LOCK();
	/*
	 * OK, it's a trunk.  Loop over and detach all vlans on it.
	 * Check the trunk pointer after each vlan_unconfig() call, as
	 * it will be freed and set to NULL after the last vlan is
	 * detached.
	 */
#ifdef VLAN_ARRAY
	for (i = 0; i < VLAN_ARRAY_SIZE; i++)
		if ((ifv = ifp->if_vlantrunk->vlans[i])) {
			vlan_unconfig_locked(ifv->ifv_ifp);
			if (ifp->if_vlantrunk == NULL)
				break;
		}
#else /* VLAN_ARRAY */
restart:
	for (i = 0; i < (1 << ifp->if_vlantrunk->hwidth); i++)
		if ((ifv = LIST_FIRST(&ifp->if_vlantrunk->hash[i]))) {
			vlan_unconfig_locked(ifv->ifv_ifp);
			if (ifp->if_vlantrunk)
				goto restart;	/* trunk->hwidth can change */
			else
				break;
		}
#endif /* VLAN_ARRAY */
	/* Trunk should have been destroyed in vlan_unconfig(). */
	KASSERT(ifp->if_vlantrunk == NULL, ("%s: purge failed", __func__));
	VLAN_UNLOCK();
}

/*
 * VLAN support can be loaded as a module.  The only place in the
 * system that's intimately aware of this is ether_input.  We hook
 * into this code through vlan_input_p, which is defined there and
 * set here.  No one else in the system should be aware of this so
 * we use an explicit reference here.
 */
extern void (*vlan_input_p)(struct ifnet *, struct mbuf *);

/* For if_link_state_change() eyes only... */
extern void (*vlan_link_state_p)(struct ifnet *, int);

static int
vlan_modevent(module_t mod, int type, void *data)
{

	switch (type) {
	case MOD_LOAD:
		ifdetach_tag = EVENTHANDLER_REGISTER(ifnet_departure_event,
		    vlan_ifdetach, NULL, EVENTHANDLER_PRI_ANY);
		if (ifdetach_tag == NULL)
			return (ENOMEM);
		VLAN_LOCK_INIT();
		vlan_input_p = vlan_input;
		vlan_link_state_p = vlan_link_state;
		vlan_trunk_cap_p = vlan_trunk_capabilities;
		if_clone_attach(&vlan_cloner);
		if (bootverbose)
			printf("vlan: initialized, using "
#ifdef VLAN_ARRAY
			    "full-size arrays"
#else
			    "hash tables with chaining"
#endif

			    "\n");
		break;
	case MOD_UNLOAD:
		if_clone_detach(&vlan_cloner);
		EVENTHANDLER_DEREGISTER(ifnet_departure_event, ifdetach_tag);
		vlan_input_p = NULL;
		vlan_link_state_p = NULL;
		vlan_trunk_cap_p = NULL;
		VLAN_LOCK_DESTROY();
		if (bootverbose)
			printf("vlan: unloaded\n");
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (0);
}

static moduledata_t vlan_mod = {
	"if_vlan",
	vlan_modevent,
	0
};

DECLARE_MODULE(if_vlan, vlan_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
MODULE_VERSION(if_vlan, 3);
MODULE_DEPEND(if_vlan, miibus, 1, 1, 1);

static struct ifnet *
vlan_clone_match_ethertag(struct if_clone *ifc, const char *name, int *tag)
{
	INIT_VNET_NET(curvnet);
	const char *cp;
	struct ifnet *ifp;
	int t = 0;

	/* Check for <etherif>.<vlan> style interface names. */
	IFNET_RLOCK();
	TAILQ_FOREACH(ifp, &V_ifnet, if_link) {
		if (ifp->if_type != IFT_ETHER)
			continue;
		if (strncmp(ifp->if_xname, name, strlen(ifp->if_xname)) != 0)
			continue;
		cp = name + strlen(ifp->if_xname);
		if (*cp != '.')
			continue;
		for (; *cp != '\0'; cp++) {
			if (*cp < '0' || *cp > '9')
				continue;
			t = (t * 10) + (*cp - '0');
		}
		if (tag != NULL)
			*tag = t;
		break;
	}
	IFNET_RUNLOCK();

	return (ifp);
}

static int
vlan_clone_match(struct if_clone *ifc, const char *name)
{
	const char *cp;

	if (vlan_clone_match_ethertag(ifc, name, NULL) != NULL)
		return (1);

	if (strncmp(VLANNAME, name, strlen(VLANNAME)) != 0)
		return (0);
	for (cp = name + 4; *cp != '\0'; cp++) {
		if (*cp < '0' || *cp > '9')
			return (0);
	}

	return (1);
}

static int
vlan_clone_create(struct if_clone *ifc, char *name, size_t len, caddr_t params)
{
	char *dp;
	int wildcard;
	int unit;
	int error;
	int tag;
	int ethertag;
	struct ifvlan *ifv;
	struct ifnet *ifp;
	struct ifnet *p;
	struct vlanreq vlr;
	static const u_char eaddr[ETHER_ADDR_LEN];	/* 00:00:00:00:00:00 */

	/*
	 * There are 3 (ugh) ways to specify the cloned device:
	 * o pass a parameter block with the clone request.
	 * o specify parameters in the text of the clone device name
	 * o specify no parameters and get an unattached device that
	 *   must be configured separately.
	 * The first technique is preferred; the latter two are
	 * supported for backwards compatibility.
	 */
	if (params) {
		error = copyin(params, &vlr, sizeof(vlr));
		if (error)
			return error;
		p = ifunit(vlr.vlr_parent);
		if (p == NULL)
			return ENXIO;
		/*
		 * Don't let the caller set up a VLAN tag with
		 * anything except VLID bits.
		 */
		if (vlr.vlr_tag & ~EVL_VLID_MASK)
			return (EINVAL);
		error = ifc_name2unit(name, &unit);
		if (error != 0)
			return (error);

		ethertag = 1;
		tag = vlr.vlr_tag;
		wildcard = (unit < 0);
	} else if ((p = vlan_clone_match_ethertag(ifc, name, &tag)) != NULL) {
		ethertag = 1;
		unit = -1;
		wildcard = 0;

		/*
		 * Don't let the caller set up a VLAN tag with
		 * anything except VLID bits.
		 */
		if (tag & ~EVL_VLID_MASK)
			return (EINVAL);
	} else {
		ethertag = 0;

		error = ifc_name2unit(name, &unit);
		if (error != 0)
			return (error);

		wildcard = (unit < 0);
	}

	error = ifc_alloc_unit(ifc, &unit);
	if (error != 0)
		return (error);

	/* In the wildcard case, we need to update the name. */
	if (wildcard) {
		for (dp = name; *dp != '\0'; dp++);
		if (snprintf(dp, len - (dp-name), "%d", unit) >
		    len - (dp-name) - 1) {
			panic("%s: interface name too long", __func__);
		}
	}

	ifv = malloc(sizeof(struct ifvlan), M_VLAN, M_WAITOK | M_ZERO);
	ifp = ifv->ifv_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		ifc_free_unit(ifc, unit);
		free(ifv, M_VLAN);
		return (ENOSPC);
	}
	SLIST_INIT(&ifv->vlan_mc_listhead);

	ifp->if_softc = ifv;
	/*
	 * Set the name manually rather than using if_initname because
	 * we don't conform to the default naming convention for interfaces.
	 */
	strlcpy(ifp->if_xname, name, IFNAMSIZ);
	ifp->if_dname = ifc->ifc_name;
	ifp->if_dunit = unit;
	/* NB: flags are not set here */
	ifp->if_linkmib = &ifv->ifv_mib;
	ifp->if_linkmiblen = sizeof(ifv->ifv_mib);
	/* NB: mtu is not set here */

	ifp->if_init = vlan_init;
	ifp->if_start = vlan_start;
	ifp->if_ioctl = vlan_ioctl;
	ifp->if_snd.ifq_maxlen = ifqmaxlen;
	ifp->if_flags = VLAN_IFFLAGS;
	ether_ifattach(ifp, eaddr);
	/* Now undo some of the damage... */
	ifp->if_baudrate = 0;
	ifp->if_type = IFT_L2VLAN;
	ifp->if_hdrlen = ETHER_VLAN_ENCAP_LEN;

	if (ethertag) {
		error = vlan_config(ifv, p, tag);
		if (error != 0) {
			/*
			 * Since we've partially failed, we need to back
			 * out all the way, otherwise userland could get
			 * confused.  Thus, we destroy the interface.
			 */
			ether_ifdetach(ifp);
			vlan_unconfig(ifp);
			if_free_type(ifp, IFT_ETHER);
			ifc_free_unit(ifc, unit);
			free(ifv, M_VLAN);

			return (error);
		}

		/* Update flags on the parent, if necessary. */
		vlan_setflags(ifp, 1);
	}

	return (0);
}

static int
vlan_clone_destroy(struct if_clone *ifc, struct ifnet *ifp)
{
	struct ifvlan *ifv = ifp->if_softc;
	int unit = ifp->if_dunit;

	ether_ifdetach(ifp);	/* first, remove it from system-wide lists */
	vlan_unconfig(ifp);	/* now it can be unconfigured and freed */
	if_free_type(ifp, IFT_ETHER);
	free(ifv, M_VLAN);
	ifc_free_unit(ifc, unit);

	return (0);
}

/*
 * The ifp->if_init entry point for vlan(4) is a no-op.
 */
static void
vlan_init(void *foo __unused)
{
}

/*
 * The if_start method for the vlan(4) interface.  It doesn't
 * raise the IFF_DRV_OACTIVE flag, since it is called only from
 * the IFQ_HANDOFF() macro in ether_output_frame().  If the
 * interface queue were full and vlan_start() were not called,
 * the queue would never get emptied and the interface would
 * stall forever.
 */
static void
vlan_start(struct ifnet *ifp)
{
	struct ifvlan *ifv;
	struct ifnet *p;
	struct mbuf *m;
	int error;

	ifv = ifp->if_softc;
	p = PARENT(ifv);

	for (;;) {
		IF_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;
		BPF_MTAP(ifp, m);

		/*
		 * Do not run the parent's if_start() if the parent is not
		 * up, or the parent's driver may crash the system.
		 */
		if (!UP_AND_RUNNING(p)) {
			m_freem(m);
			ifp->if_collisions++;
			continue;
		}

		/*
		 * Pad the frame to the minimum size allowed if told to.
		 * This option is in accord with IEEE Std 802.1Q, 2003 Ed.,
		 * paragraph C.4.4.3.b.  It can help to work around buggy
		 * bridges that violate paragraph C.4.4.3.a from the same
		 * document, i.e., fail to pad short frames after untagging.
		 * E.g., a tagged frame 66 bytes long (incl. FCS) is OK, but
		 * untagging it will produce a 62-byte frame, which is a runt
		 * and requires padding.  There are VLAN-enabled network
		 * devices that just discard such runts or mishandle them
		 * somehow.
		 */
		if (soft_pad) {
			static char pad[8];	/* just zeros */
			int n;

			for (n = ETHERMIN + ETHER_HDR_LEN - m->m_pkthdr.len;
			     n > 0; n -= sizeof(pad))
				if (!m_append(m, min(n, sizeof(pad)), pad))
					break;

			if (n > 0) {
				if_printf(ifp, "cannot pad short frame\n");
				ifp->if_oerrors++;
				m_freem(m);
				continue;
			}
		}

		/*
		 * If the underlying interface can do VLAN tag insertion
		 * itself, just pass the packet along.  However, we need
		 * some way to tell the interface where the packet came
		 * from so that it knows how to find the VLAN tag to use,
		 * so we attach a packet tag that holds it.
		 */
		if (p->if_capenable & IFCAP_VLAN_HWTAGGING) {
			m->m_pkthdr.ether_vtag = ifv->ifv_tag;
			m->m_flags |= M_VLANTAG;
		} else {
			m = ether_vlanencap(m, ifv->ifv_tag);
			if (m == NULL) {
				if_printf(ifp,
				    "unable to prepend VLAN header\n");
				ifp->if_oerrors++;
				continue;
			}
		}

		/*
		 * Send it, precisely as ether_output() would have.
		 * We are already running at splimp.
		 */
		error = (p->if_transmit)(p, m);
		if (!error)
			ifp->if_opackets++;
		else
			ifp->if_oerrors++;
	}
}

static void
vlan_input(struct ifnet *ifp, struct mbuf *m)
{
	struct ifvlantrunk *trunk = ifp->if_vlantrunk;
	struct ifvlan *ifv;
	uint16_t tag;

	KASSERT(trunk != NULL, ("%s: no trunk", __func__));

	if (m->m_flags & M_VLANTAG) {
		/*
		 * Packet is tagged, but m contains a normal
		 * Ethernet frame; the tag is stored out-of-band.
		 */
		tag = EVL_VLANOFTAG(m->m_pkthdr.ether_vtag);
		m->m_flags &= ~M_VLANTAG;
	} else {
		struct ether_vlan_header *evl;

		/*
		 * Packet is tagged in-band as specified by 802.1q.
		 */
		switch (ifp->if_type) {
		case IFT_ETHER:
			if (m->m_len < sizeof(*evl) &&
			    (m = m_pullup(m, sizeof(*evl))) == NULL) {
				if_printf(ifp, "cannot pullup VLAN header\n");
				return;
			}
			evl = mtod(m, struct ether_vlan_header *);
			tag = EVL_VLANOFTAG(ntohs(evl->evl_tag));

			/*
			 * Remove the 802.1q header by copying the Ethernet
			 * addresses over it and adjusting the beginning of
			 * the data in the mbuf.  The encapsulated Ethernet
			 * type field is already in place.
			 */
			bcopy((char *)evl, (char *)evl + ETHER_VLAN_ENCAP_LEN,
			    ETHER_HDR_LEN - ETHER_TYPE_LEN);
			m_adj(m, ETHER_VLAN_ENCAP_LEN);
			break;

		default:
#ifdef INVARIANTS
			panic("%s: %s has unsupported if_type %u",
			    __func__, ifp->if_xname, ifp->if_type);
#endif
			m_freem(m);
			ifp->if_noproto++;
			return;
		}
	}

	TRUNK_RLOCK(trunk);
#ifdef VLAN_ARRAY
	ifv = trunk->vlans[tag];
#else
	ifv = vlan_gethash(trunk, tag);
#endif
	if (ifv == NULL || !UP_AND_RUNNING(ifv->ifv_ifp)) {
		TRUNK_RUNLOCK(trunk);
		m_freem(m);
		ifp->if_noproto++;
		return;
	}
	TRUNK_RUNLOCK(trunk);

	m->m_pkthdr.rcvif = ifv->ifv_ifp;
	ifv->ifv_ifp->if_ipackets++;

	/* Pass it back through the parent's input routine. */
	(*ifp->if_input)(ifv->ifv_ifp, m);
}

static int
vlan_config(struct ifvlan *ifv, struct ifnet *p, uint16_t tag)
{
	struct ifvlantrunk *trunk;
	struct ifnet *ifp;
	int error = 0;

	/* VID numbers 0x0 and 0xFFF are reserved */
	if (tag == 0 || tag == 0xFFF)
		return (EINVAL);
	if (p->if_type != IFT_ETHER)
		return (EPROTONOSUPPORT);
	if ((p->if_flags & VLAN_IFFLAGS) != VLAN_IFFLAGS)
		return (EPROTONOSUPPORT);
	if (ifv->ifv_trunk)
		return (EBUSY);

	if (p->if_vlantrunk == NULL) {
		trunk = malloc(sizeof(struct ifvlantrunk),
		    M_VLAN, M_WAITOK | M_ZERO);
#ifndef VLAN_ARRAY
		vlan_inithash(trunk);
#endif
		VLAN_LOCK();
		if (p->if_vlantrunk != NULL) {
			/* A race that is very unlikely to be hit. */
#ifndef VLAN_ARRAY
			vlan_freehash(trunk);
#endif
			free(trunk, M_VLAN);
			goto exists;
		}
		TRUNK_LOCK_INIT(trunk);
		TRUNK_LOCK(trunk);
		p->if_vlantrunk = trunk;
		trunk->parent = p;
	} else {
		VLAN_LOCK();
exists:
		trunk = p->if_vlantrunk;
		TRUNK_LOCK(trunk);
	}

	ifv->ifv_tag = tag;	/* must set this before vlan_inshash() */
#ifdef VLAN_ARRAY
	if (trunk->vlans[tag] != NULL) {
		error = EEXIST;
		goto done;
	}
	trunk->vlans[tag] = ifv;
	trunk->refcnt++;
#else
	error = vlan_inshash(trunk, ifv);
	if (error)
		goto done;
#endif
	ifv->ifv_proto = ETHERTYPE_VLAN;
	ifv->ifv_encaplen = ETHER_VLAN_ENCAP_LEN;
	ifv->ifv_mintu = ETHERMIN;
	ifv->ifv_pflags = 0;

	/*
	 * If the parent supports the VLAN_MTU capability,
	 * i.e. can Tx/Rx larger than ETHER_MAX_LEN frames,
	 * use it.
	 */
	if (p->if_capenable & IFCAP_VLAN_MTU) {
		/*
		 * No need to fudge the MTU since the parent can
		 * handle extended frames.
		 */
		ifv->ifv_mtufudge = 0;
	} else {
		/*
		 * Fudge the MTU by the encapsulation size.  This
		 * makes us incompatible with strictly compliant
		 * 802.1Q implementations, but allows us to use
		 * the feature with other NetBSD implementations,
		 * which might still be useful.
		 */
		ifv->ifv_mtufudge = ifv->ifv_encaplen;
	}

	ifv->ifv_trunk = trunk;
	ifp = ifv->ifv_ifp;
	ifp->if_mtu = p->if_mtu - ifv->ifv_mtufudge;
	ifp->if_baudrate = p->if_baudrate;
	/*
	 * Copy only a selected subset of flags from the parent.
	 * Other flags are none of our business.
	 */
#define	VLAN_COPY_FLAGS (IFF_SIMPLEX)
	ifp->if_flags &= ~VLAN_COPY_FLAGS;
	ifp->if_flags |= p->if_flags & VLAN_COPY_FLAGS;
#undef VLAN_COPY_FLAGS

	ifp->if_link_state = p->if_link_state;

	vlan_capabilities(ifv);

	/*
	 * Set up our ``Ethernet address'' to reflect the underlying
	 * physical interface's.
	 */
	bcopy(IF_LLADDR(p), IF_LLADDR(ifp), ETHER_ADDR_LEN);

	/*
	 * Configure multicast addresses that may already be
	 * joined on the vlan device.
	 */
	(void)vlan_setmulti(ifp);	/* XXX: VLAN lock held */

	/* We are ready for operation now. */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
done:
	TRUNK_UNLOCK(trunk);
	if (error == 0)
		EVENTHANDLER_INVOKE(vlan_config, p, ifv->ifv_tag);
	VLAN_UNLOCK();

	return (error);
}

static int
vlan_unconfig(struct ifnet *ifp)
{
	int ret;

	VLAN_LOCK();
	ret = vlan_unconfig_locked(ifp);
	VLAN_UNLOCK();
	return (ret);
}

static int
vlan_unconfig_locked(struct ifnet *ifp)
{
	struct ifvlantrunk *trunk;
	struct vlan_mc_entry *mc;
	struct ifvlan *ifv;
	struct ifnet *parent;
	int error;

	VLAN_LOCK_ASSERT();

	ifv = ifp->if_softc;
	trunk = ifv->ifv_trunk;
	parent = NULL;

	if (trunk != NULL) {
		struct sockaddr_dl sdl;

		TRUNK_LOCK(trunk);
		parent = trunk->parent;

		/*
		 * Since the interface is being unconfigured, we need to
		 * remove from the parent's list the multicast groups that
		 * we may have joined while we were alive.
		 */
		bzero((char *)&sdl, sizeof(sdl));
		sdl.sdl_len = sizeof(sdl);
		sdl.sdl_family = AF_LINK;
		sdl.sdl_index = parent->if_index;
		sdl.sdl_type = IFT_ETHER;
		sdl.sdl_alen = ETHER_ADDR_LEN;

		while ((mc = SLIST_FIRST(&ifv->vlan_mc_listhead)) != NULL) {
			bcopy((char *)&mc->mc_addr, LLADDR(&sdl),
			    ETHER_ADDR_LEN);
			error = if_delmulti(parent, (struct sockaddr *)&sdl);
			if (error)
				return (error);
			SLIST_REMOVE_HEAD(&ifv->vlan_mc_listhead, mc_entries);
			free(mc, M_VLAN);
		}

		vlan_setflags(ifp, 0);	/* clear special flags on parent */
#ifdef VLAN_ARRAY
		trunk->vlans[ifv->ifv_tag] = NULL;
		trunk->refcnt--;
#else
		vlan_remhash(trunk, ifv);
#endif
		ifv->ifv_trunk = NULL;

		/*
		 * Check if we were the last.
		 */
		if (trunk->refcnt == 0) {
			trunk->parent->if_vlantrunk = NULL;
			/*
			 * XXXGL: If some ithread has already entered
			 * vlan_input() and is now blocked on the trunk
			 * lock, then it should preempt us right after
			 * unlock and finish its work.  Then we will acquire
			 * lock again in trunk_destroy().
			 */
			TRUNK_UNLOCK(trunk);
			trunk_destroy(trunk);
		} else
			TRUNK_UNLOCK(trunk);
	}

	/* Disconnect from parent. */
	if (ifv->ifv_pflags)
		if_printf(ifp, "%s: ifv_pflags unclean\n", __func__);
	ifp->if_mtu = ETHERMTU;
	ifp->if_link_state = LINK_STATE_UNKNOWN;
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;

	/*
	 * Only dispatch an event if the vlan was
	 * attached, otherwise there is nothing
	 * to clean up anyway.
	 */
	if (parent != NULL)
		EVENTHANDLER_INVOKE(vlan_unconfig, parent, ifv->ifv_tag);

	return (0);
}

/* Handle a reference counted flag that should be set on the parent as well */
static int
vlan_setflag(struct ifnet *ifp, int flag, int status,
    int (*func)(struct ifnet *, int))
{
	struct ifvlan *ifv;
	int error;

	/* XXX VLAN_LOCK_ASSERT(); */

	ifv = ifp->if_softc;
	status = status ? (ifp->if_flags & flag) : 0;
	/* Now "status" contains the flag value or 0 */

	/*
	 * See if the recorded parent's status is different from what
	 * we want it to be.  If it is, flip it.  We record the parent's
	 * status in ifv_pflags so that we won't clear a parent flag
	 * that we haven't set.  In fact, we don't clear or set the
	 * parent's flags directly, but get or release references to
	 * them.  That's why we can be sure that the recorded flags are
	 * still in accord with the parent's actual flags.
	 */
	if (status != (ifv->ifv_pflags & flag)) {
		error = (*func)(PARENT(ifv), status);
		if (error)
			return (error);
		ifv->ifv_pflags &= ~flag;
		ifv->ifv_pflags |= status;
	}
	return (0);
}

/*
 * Handle IFF_* flags that require certain changes on the parent:
 * if "status" is true, update the parent's flags to match our if_flags;
 * if "status" is false, forcibly clear the flags we have set on the parent.
 */
static int
vlan_setflags(struct ifnet *ifp, int status)
{
	int error, i;

	for (i = 0; vlan_pflags[i].flag; i++) {
		error = vlan_setflag(ifp, vlan_pflags[i].flag,
		    status, vlan_pflags[i].func);
		if (error)
			return (error);
	}
	return (0);
}

/* Inform all vlans that their parent has changed link state */
static void
vlan_link_state(struct ifnet *ifp, int link)
{
	struct ifvlantrunk *trunk = ifp->if_vlantrunk;
	struct ifvlan *ifv;
	int i;

	TRUNK_LOCK(trunk);
#ifdef VLAN_ARRAY
	for (i = 0; i < VLAN_ARRAY_SIZE; i++)
		if (trunk->vlans[i] != NULL) {
			ifv = trunk->vlans[i];
#else
	for (i = 0; i < (1 << trunk->hwidth); i++)
		LIST_FOREACH(ifv, &trunk->hash[i], ifv_list) {
#endif
			ifv->ifv_ifp->if_baudrate = trunk->parent->if_baudrate;
			if_link_state_change(ifv->ifv_ifp,
			    trunk->parent->if_link_state);
		}
	TRUNK_UNLOCK(trunk);
}

static void
vlan_capabilities(struct ifvlan *ifv)
{
	struct ifnet *p = PARENT(ifv);
	struct ifnet *ifp = ifv->ifv_ifp;

	TRUNK_LOCK_ASSERT(TRUNK(ifv));

	/*
	 * If the parent interface can do checksum offloading
	 * on VLANs, then propagate its hardware-assisted
	 * checksumming flags.  Also assert that checksum
	 * offloading requires hardware VLAN tagging.
	 */
	if (p->if_capabilities & IFCAP_VLAN_HWCSUM)
		ifp->if_capabilities = p->if_capabilities & IFCAP_HWCSUM;

	if (p->if_capenable & IFCAP_VLAN_HWCSUM &&
	    p->if_capenable & IFCAP_VLAN_HWTAGGING) {
		ifp->if_capenable = p->if_capenable & IFCAP_HWCSUM;
		ifp->if_hwassist = p->if_hwassist;
	} else {
		ifp->if_capenable = 0;
		ifp->if_hwassist = 0;
	}
}

static void
vlan_trunk_capabilities(struct ifnet *ifp)
{
	struct ifvlantrunk *trunk = ifp->if_vlantrunk;
	struct ifvlan *ifv;
	int i;

	TRUNK_LOCK(trunk);
#ifdef VLAN_ARRAY
	for (i = 0; i < VLAN_ARRAY_SIZE; i++)
		if (trunk->vlans[i] != NULL) {
			ifv = trunk->vlans[i];
#else
	for (i = 0; i < (1 << trunk->hwidth); i++) {
		LIST_FOREACH(ifv, &trunk->hash[i], ifv_list)
#endif
			vlan_capabilities(ifv);
	}
	TRUNK_UNLOCK(trunk);
}

static int
vlan_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct ifnet *p;
	struct ifreq *ifr;
	struct ifvlan *ifv;
	struct vlanreq vlr;
	int error = 0;

	ifr = (struct ifreq *)data;
	ifv = ifp->if_softc;

	switch (cmd) {
	case SIOCGIFMEDIA:
		VLAN_LOCK();
		if (TRUNK(ifv) != NULL) {
			error = (*PARENT(ifv)->if_ioctl)(PARENT(ifv),
			    SIOCGIFMEDIA, data);
			VLAN_UNLOCK();
			/* Limit the result to the parent's current config. */
			if (error == 0) {
				struct ifmediareq *ifmr;

				ifmr = (struct ifmediareq *)data;
				if (ifmr->ifm_count >= 1 && ifmr->ifm_ulist) {
					ifmr->ifm_count = 1;
					error = copyout(&ifmr->ifm_current,
					    ifmr->ifm_ulist,
					    sizeof(int));
				}
			}
		} else {
			VLAN_UNLOCK();
			error = EINVAL;
		}
		break;

	case SIOCSIFMEDIA:
		error = EINVAL;
		break;

	case SIOCSIFMTU:
		/*
		 * Set the interface MTU.
		 */
		VLAN_LOCK();
		if (TRUNK(ifv) != NULL) {
			if (ifr->ifr_mtu >
			    (PARENT(ifv)->if_mtu - ifv->ifv_mtufudge) ||
			    ifr->ifr_mtu <
			    (ifv->ifv_mintu - ifv->ifv_mtufudge))
				error = EINVAL;
			else
				ifp->if_mtu = ifr->ifr_mtu;
		} else
			error = EINVAL;
		VLAN_UNLOCK();
		break;

	case SIOCSETVLAN:
		error = copyin(ifr->ifr_data, &vlr, sizeof(vlr));
		if (error)
			break;
		if (vlr.vlr_parent[0] == '\0') {
			vlan_unconfig(ifp);
			break;
		}
		p = ifunit(vlr.vlr_parent);
		if (p == 0) {
			error = ENOENT;
			break;
		}
		/*
		 * Don't let the caller set up a VLAN tag with
		 * anything except VLID bits.
		 */
		if (vlr.vlr_tag & ~EVL_VLID_MASK) {
			error = EINVAL;
			break;
		}
		error = vlan_config(ifv, p, vlr.vlr_tag);
		if (error)
			break;

		/* Update flags on the parent, if necessary. */
		vlan_setflags(ifp, 1);
		break;

	case SIOCGETVLAN:
		bzero(&vlr, sizeof(vlr));
		VLAN_LOCK();
		if (TRUNK(ifv) != NULL) {
			strlcpy(vlr.vlr_parent, PARENT(ifv)->if_xname,
			    sizeof(vlr.vlr_parent));
			vlr.vlr_tag = ifv->ifv_tag;
		}
		VLAN_UNLOCK();
		error = copyout(&vlr, ifr->ifr_data, sizeof(vlr));
		break;

	case SIOCSIFFLAGS:
		/*
		 * We should propagate selected flags to the parent,
		 * e.g., promiscuous mode.
		 */
		if (TRUNK(ifv) != NULL)
			error = vlan_setflags(ifp, 1);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/*
		 * If we don't have a parent, just remember the membership for
		 * when we do.
		 */
		if (TRUNK(ifv) != NULL)
			error = vlan_setmulti(ifp);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
	}

	return (error);
}
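
/*
 * Usage sketch, assuming the standard ifconfig(8) front end; the three
 * creation styles handled in vlan_clone_create() correspond roughly to:
 *
 *   ifconfig vlan0 create vlan 5 vlandev em0   # parameters with the request
 *   ifconfig em0.5 create                      # parameters in the name
 *   ifconfig vlan0 create                      # unattached, configured later
 *
 * The exact command syntax depends on the userland in use; "em0" and the
 * VLAN ID 5 are placeholders.
 */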