1 /*- 2 * Copyright 1998 Massachusetts Institute of Technology 3 * Copyright 2012 ADARA Networks, Inc. 4 * Copyright 2017 Dell EMC Isilon 5 * 6 * Portions of this software were developed by Robert N. M. Watson under 7 * contract to ADARA Networks, Inc. 8 * 9 * Permission to use, copy, modify, and distribute this software and 10 * its documentation for any purpose and without fee is hereby 11 * granted, provided that both the above copyright notice and this 12 * permission notice appear in all copies, that both the above 13 * copyright notice and this permission notice appear in all 14 * supporting documentation, and that the name of M.I.T. not be used 15 * in advertising or publicity pertaining to distribution of the 16 * software without specific, written prior permission. M.I.T. makes 17 * no representations about the suitability of this software for any 18 * purpose. It is provided "as is" without express or implied 19 * warranty. 20 * 21 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS 22 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE, 23 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 24 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT 25 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF 28 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 29 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 */ 34 35 /* 36 * if_vlan.c - pseudo-device driver for IEEE 802.1Q virtual LANs. 37 * This is sort of sneaky in the implementation, since 38 * we need to pretend to be enough of an Ethernet implementation 39 * to make arp work. The way we do this is by telling everyone 40 * that we are an Ethernet, and then catch the packets that 41 * ether_output() sends to us via if_transmit(), rewrite them for 42 * use by the real outgoing interface, and ask it to send them. 
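 *
 * A minimal usage sketch (em0 is just an example parent; both spellings are
 * the usual ifconfig(8) forms and end up in vlan_clone_create()):
 *
 *	ifconfig em0.42 create                     # parent and VID from the name
 *	ifconfig vlan0 create vlan 42 vlandev em0  # explicit parameters
 *
 * Outbound frames are then re-queued to the parent with an 802.1Q header
 * inserted, or with the tag left out-of-band in m_pkthdr.ether_vtag when
 * the parent does hardware tagging.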
43 */ 44 45 #include <sys/cdefs.h> 46 __FBSDID("$FreeBSD$"); 47 48 #include "opt_inet.h" 49 #include "opt_vlan.h" 50 #include "opt_ratelimit.h" 51 52 #include <sys/param.h> 53 #include <sys/eventhandler.h> 54 #include <sys/kernel.h> 55 #include <sys/lock.h> 56 #include <sys/malloc.h> 57 #include <sys/mbuf.h> 58 #include <sys/module.h> 59 #include <sys/rmlock.h> 60 #include <sys/priv.h> 61 #include <sys/queue.h> 62 #include <sys/socket.h> 63 #include <sys/sockio.h> 64 #include <sys/sysctl.h> 65 #include <sys/systm.h> 66 #include <sys/sx.h> 67 #include <sys/taskqueue.h> 68 69 #include <net/bpf.h> 70 #include <net/ethernet.h> 71 #include <net/if.h> 72 #include <net/if_var.h> 73 #include <net/if_clone.h> 74 #include <net/if_dl.h> 75 #include <net/if_types.h> 76 #include <net/if_vlan_var.h> 77 #include <net/vnet.h> 78 79 #ifdef INET 80 #include <netinet/in.h> 81 #include <netinet/if_ether.h> 82 #endif 83 84 #define VLAN_DEF_HWIDTH 4 85 #define VLAN_IFFLAGS (IFF_BROADCAST | IFF_MULTICAST) 86 87 #define UP_AND_RUNNING(ifp) \ 88 ((ifp)->if_flags & IFF_UP && (ifp)->if_drv_flags & IFF_DRV_RUNNING) 89 90 CK_SLIST_HEAD(ifvlanhead, ifvlan); 91 92 struct ifvlantrunk { 93 struct ifnet *parent; /* parent interface of this trunk */ 94 struct mtx lock; 95 #ifdef VLAN_ARRAY 96 #define VLAN_ARRAY_SIZE (EVL_VLID_MASK + 1) 97 struct ifvlan *vlans[VLAN_ARRAY_SIZE]; /* static table */ 98 #else 99 struct ifvlanhead *hash; /* dynamic hash-list table */ 100 uint16_t hmask; 101 uint16_t hwidth; 102 #endif 103 int refcnt; 104 }; 105 106 /* 107 * This macro provides a facility to iterate over every vlan on a trunk with 108 * the assumption that none will be added/removed during iteration. 109 */ 110 #ifdef VLAN_ARRAY 111 #define VLAN_FOREACH(_ifv, _trunk) \ 112 size_t _i; \ 113 for (_i = 0; _i < VLAN_ARRAY_SIZE; _i++) \ 114 if (((_ifv) = (_trunk)->vlans[_i]) != NULL) 115 #else /* VLAN_ARRAY */ 116 #define VLAN_FOREACH(_ifv, _trunk) \ 117 struct ifvlan *_next; \ 118 size_t _i; \ 119 for (_i = 0; _i < (1 << (_trunk)->hwidth); _i++) \ 120 CK_SLIST_FOREACH_SAFE((_ifv), &(_trunk)->hash[_i], ifv_list, _next) 121 #endif /* VLAN_ARRAY */ 122 123 /* 124 * This macro provides a facility to iterate over every vlan on a trunk while 125 * also modifying the number of vlans on the trunk. The iteration continues 126 * until some condition is met or there are no more vlans on the trunk. 127 */ 128 #ifdef VLAN_ARRAY 129 /* The VLAN_ARRAY case is simple -- just a for loop using the condition. */ 130 #define VLAN_FOREACH_UNTIL_SAFE(_ifv, _trunk, _cond) \ 131 size_t _i; \ 132 for (_i = 0; !(_cond) && _i < VLAN_ARRAY_SIZE; _i++) \ 133 if (((_ifv) = (_trunk)->vlans[_i])) 134 #else /* VLAN_ARRAY */ 135 /* 136 * The hash table case is more complicated. We allow for the hash table to be 137 * modified (i.e. vlans removed) while we are iterating over it. To allow for 138 * this we must restart the iteration every time we "touch" something during 139 * the iteration, since removal will resize the hash table and invalidate our 140 * current position. If acting on the touched element causes the trunk to be 141 * emptied, then iteration also stops. 142 */ 143 #define VLAN_FOREACH_UNTIL_SAFE(_ifv, _trunk, _cond) \ 144 size_t _i; \ 145 bool _touch = false; \ 146 for (_i = 0; \ 147 !(_cond) && _i < (1 << (_trunk)->hwidth); \ 148 _i = (_touch && ((_trunk) != NULL) ? 
0 : _i + 1), _touch = false) \
	if (((_ifv) = CK_SLIST_FIRST(&(_trunk)->hash[_i])) != NULL && \
	    (_touch = true))
#endif /* VLAN_ARRAY */

struct vlan_mc_entry {
	struct sockaddr_dl		mc_addr;
	CK_SLIST_ENTRY(vlan_mc_entry)	mc_entries;
	struct epoch_context		mc_epoch_ctx;
};

struct ifvlan {
	struct ifvlantrunk *ifv_trunk;
	struct ifnet *ifv_ifp;
#define	TRUNK(ifv)	((ifv)->ifv_trunk)
#define	PARENT(ifv)	((ifv)->ifv_trunk->parent)
	void *ifv_cookie;
	int ifv_pflags;		/* special flags we have set on parent */
	int ifv_capenable;
	struct ifv_linkmib {
		int	ifvm_encaplen;	/* encapsulation length */
		int	ifvm_mtufudge;	/* MTU fudged by this much */
		int	ifvm_mintu;	/* min transmission unit */
		uint16_t ifvm_proto;	/* encapsulation ethertype */
		uint16_t ifvm_tag;	/* tag to apply on packets leaving if */
		uint16_t ifvm_vid;	/* VLAN ID */
		uint8_t	ifvm_pcp;	/* Priority Code Point (PCP). */
	} ifv_mib;
	struct task lladdr_task;
	CK_SLIST_HEAD(, vlan_mc_entry) vlan_mc_listhead;
#ifndef VLAN_ARRAY
	CK_SLIST_ENTRY(ifvlan) ifv_list;
#endif
};
#define	ifv_proto	ifv_mib.ifvm_proto
#define	ifv_tag		ifv_mib.ifvm_tag
#define	ifv_vid		ifv_mib.ifvm_vid
#define	ifv_pcp		ifv_mib.ifvm_pcp
#define	ifv_encaplen	ifv_mib.ifvm_encaplen
#define	ifv_mtufudge	ifv_mib.ifvm_mtufudge
#define	ifv_mintu	ifv_mib.ifvm_mintu

/* Special flags we should propagate to parent. */
static struct {
	int flag;
	int (*func)(struct ifnet *, int);
} vlan_pflags[] = {
	{IFF_PROMISC, ifpromisc},
	{IFF_ALLMULTI, if_allmulti},
	{0, NULL}
};

extern int vlan_mtag_pcp;

static const char vlanname[] = "vlan";
static MALLOC_DEFINE(M_VLAN, vlanname, "802.1Q Virtual LAN Interface");

static eventhandler_tag ifdetach_tag;
static eventhandler_tag iflladdr_tag;

/*
 * if_vlan uses two module-level synchronization primitives to allow concurrent
 * modification of vlan interfaces and (mostly) allow vlans to be destroyed
 * while they are being used for tx/rx.  To accomplish this in a way that has
 * acceptable performance and cooperation with other parts of the network stack
 * there is a non-sleepable epoch(9) and an sx(9).
 *
 * The performance-sensitive paths that warrant using the epoch(9) are
 * vlan_transmit and vlan_input.  Both have to check for the vlan interface's
 * existence using if_vlantrunk, and being in the network tx/rx paths the use
 * of an epoch(9) gives a measurable improvement in performance.
 *
 * The reason for having an sx(9) is mostly because there are still areas that
 * must be sleepable and also have safe concurrent access to a vlan interface.
 * Since the sx(9) exists, it is used by default in most paths unless sleeping
 * is not permitted, or if it is not clear whether sleeping is permitted.
 */
#define	_VLAN_SX_ID	ifv_sx

static struct sx _VLAN_SX_ID;

#define	VLAN_LOCKING_INIT() \
	sx_init(&_VLAN_SX_ID, "vlan_sx")

#define	VLAN_LOCKING_DESTROY() \
	sx_destroy(&_VLAN_SX_ID)

#define	VLAN_RLOCK()		NET_EPOCH_ENTER()
#define	VLAN_RUNLOCK()		NET_EPOCH_EXIT()
#define	VLAN_RLOCK_ASSERT()	MPASS(in_epoch(net_epoch_preempt))

#define	VLAN_SLOCK()		sx_slock(&_VLAN_SX_ID)
#define	VLAN_SUNLOCK()		sx_sunlock(&_VLAN_SX_ID)
#define	VLAN_XLOCK()		sx_xlock(&_VLAN_SX_ID)
#define	VLAN_XUNLOCK()		sx_xunlock(&_VLAN_SX_ID)
#define	VLAN_SLOCK_ASSERT()	sx_assert(&_VLAN_SX_ID, SA_SLOCKED)
#define	VLAN_XLOCK_ASSERT()	sx_assert(&_VLAN_SX_ID, SA_XLOCKED)
#define	VLAN_SXLOCK_ASSERT()	sx_assert(&_VLAN_SX_ID, SA_LOCKED)

/*
 * We also have a per-trunk mutex that should be acquired when changing
 * its state.
 */
#define	TRUNK_LOCK_INIT(trunk)		mtx_init(&(trunk)->lock, vlanname, NULL, MTX_DEF)
#define	TRUNK_LOCK_DESTROY(trunk)	mtx_destroy(&(trunk)->lock)
#define	TRUNK_RLOCK(trunk)		NET_EPOCH_ENTER()
#define	TRUNK_WLOCK(trunk)		mtx_lock(&(trunk)->lock)
#define	TRUNK_RUNLOCK(trunk)		NET_EPOCH_EXIT()
#define	TRUNK_WUNLOCK(trunk)		mtx_unlock(&(trunk)->lock)
#define	TRUNK_RLOCK_ASSERT(trunk)	MPASS(in_epoch(net_epoch_preempt))
#define	TRUNK_LOCK_ASSERT(trunk)	MPASS(in_epoch(net_epoch_preempt) || mtx_owned(&(trunk)->lock))
#define	TRUNK_WLOCK_ASSERT(trunk)	mtx_assert(&(trunk)->lock, MA_OWNED)

/*
 * Defining VLAN_ARRAY replaces the dynamic hash with a static array of
 * 4096 entries.  In theory this can give a boost in processing; in
 * practice it does not, probably because the array is too big to fit
 * into the CPU cache.
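 * (Rough arithmetic: EVL_VLID_MASK + 1 = 4096 slots of one pointer each
 * is 32 KB on LP64 platforms, on the order of an entire L1 data cache,
 * whereas the default hash starts with just 1 << VLAN_DEF_HWIDTH = 16
 * list heads and grows only as vlans are added.)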
268 */ 269 #ifndef VLAN_ARRAY 270 static void vlan_inithash(struct ifvlantrunk *trunk); 271 static void vlan_freehash(struct ifvlantrunk *trunk); 272 static int vlan_inshash(struct ifvlantrunk *trunk, struct ifvlan *ifv); 273 static int vlan_remhash(struct ifvlantrunk *trunk, struct ifvlan *ifv); 274 static void vlan_growhash(struct ifvlantrunk *trunk, int howmuch); 275 static __inline struct ifvlan * vlan_gethash(struct ifvlantrunk *trunk, 276 uint16_t vid); 277 #endif 278 static void trunk_destroy(struct ifvlantrunk *trunk); 279 280 static void vlan_init(void *foo); 281 static void vlan_input(struct ifnet *ifp, struct mbuf *m); 282 static int vlan_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr); 283 #ifdef RATELIMIT 284 static int vlan_snd_tag_alloc(struct ifnet *, 285 union if_snd_tag_alloc_params *, struct m_snd_tag **); 286 #endif 287 static void vlan_qflush(struct ifnet *ifp); 288 static int vlan_setflag(struct ifnet *ifp, int flag, int status, 289 int (*func)(struct ifnet *, int)); 290 static int vlan_setflags(struct ifnet *ifp, int status); 291 static int vlan_setmulti(struct ifnet *ifp); 292 static int vlan_transmit(struct ifnet *ifp, struct mbuf *m); 293 static void vlan_unconfig(struct ifnet *ifp); 294 static void vlan_unconfig_locked(struct ifnet *ifp, int departing); 295 static int vlan_config(struct ifvlan *ifv, struct ifnet *p, uint16_t tag); 296 static void vlan_link_state(struct ifnet *ifp); 297 static void vlan_capabilities(struct ifvlan *ifv); 298 static void vlan_trunk_capabilities(struct ifnet *ifp); 299 300 static struct ifnet *vlan_clone_match_ethervid(const char *, int *); 301 static int vlan_clone_match(struct if_clone *, const char *); 302 static int vlan_clone_create(struct if_clone *, char *, size_t, caddr_t); 303 static int vlan_clone_destroy(struct if_clone *, struct ifnet *); 304 305 static void vlan_ifdetach(void *arg, struct ifnet *ifp); 306 static void vlan_iflladdr(void *arg, struct ifnet *ifp); 307 308 static void vlan_lladdr_fn(void *arg, int pending); 309 310 static struct if_clone *vlan_cloner; 311 312 #ifdef VIMAGE 313 VNET_DEFINE_STATIC(struct if_clone *, vlan_cloner); 314 #define V_vlan_cloner VNET(vlan_cloner) 315 #endif 316 317 #ifndef VLAN_ARRAY 318 #define HASH(n, m) ((((n) >> 8) ^ ((n) >> 4) ^ (n)) & (m)) 319 320 static void 321 vlan_mc_free(struct epoch_context *ctx) 322 { 323 struct vlan_mc_entry *mc = __containerof(ctx, struct vlan_mc_entry, mc_epoch_ctx); 324 free(mc, M_VLAN); 325 } 326 327 static void 328 vlan_inithash(struct ifvlantrunk *trunk) 329 { 330 int i, n; 331 332 /* 333 * The trunk must not be locked here since we call malloc(M_WAITOK). 334 * It is OK in case this function is called before the trunk struct 335 * gets hooked up and becomes visible from other threads. 
336 */ 337 338 KASSERT(trunk->hwidth == 0 && trunk->hash == NULL, 339 ("%s: hash already initialized", __func__)); 340 341 trunk->hwidth = VLAN_DEF_HWIDTH; 342 n = 1 << trunk->hwidth; 343 trunk->hmask = n - 1; 344 trunk->hash = malloc(sizeof(struct ifvlanhead) * n, M_VLAN, M_WAITOK); 345 for (i = 0; i < n; i++) 346 CK_SLIST_INIT(&trunk->hash[i]); 347 } 348 349 static void 350 vlan_freehash(struct ifvlantrunk *trunk) 351 { 352 #ifdef INVARIANTS 353 int i; 354 355 KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__)); 356 for (i = 0; i < (1 << trunk->hwidth); i++) 357 KASSERT(CK_SLIST_EMPTY(&trunk->hash[i]), 358 ("%s: hash table not empty", __func__)); 359 #endif 360 free(trunk->hash, M_VLAN); 361 trunk->hash = NULL; 362 trunk->hwidth = trunk->hmask = 0; 363 } 364 365 static int 366 vlan_inshash(struct ifvlantrunk *trunk, struct ifvlan *ifv) 367 { 368 int i, b; 369 struct ifvlan *ifv2; 370 371 VLAN_XLOCK_ASSERT(); 372 KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__)); 373 374 b = 1 << trunk->hwidth; 375 i = HASH(ifv->ifv_vid, trunk->hmask); 376 CK_SLIST_FOREACH(ifv2, &trunk->hash[i], ifv_list) 377 if (ifv->ifv_vid == ifv2->ifv_vid) 378 return (EEXIST); 379 380 /* 381 * Grow the hash when the number of vlans exceeds half of the number of 382 * hash buckets squared. This will make the average linked-list length 383 * buckets/2. 384 */ 385 if (trunk->refcnt > (b * b) / 2) { 386 vlan_growhash(trunk, 1); 387 i = HASH(ifv->ifv_vid, trunk->hmask); 388 } 389 CK_SLIST_INSERT_HEAD(&trunk->hash[i], ifv, ifv_list); 390 trunk->refcnt++; 391 392 return (0); 393 } 394 395 static int 396 vlan_remhash(struct ifvlantrunk *trunk, struct ifvlan *ifv) 397 { 398 int i, b; 399 struct ifvlan *ifv2; 400 401 VLAN_XLOCK_ASSERT(); 402 KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__)); 403 404 b = 1 << trunk->hwidth; 405 i = HASH(ifv->ifv_vid, trunk->hmask); 406 CK_SLIST_FOREACH(ifv2, &trunk->hash[i], ifv_list) 407 if (ifv2 == ifv) { 408 trunk->refcnt--; 409 CK_SLIST_REMOVE(&trunk->hash[i], ifv2, ifvlan, ifv_list); 410 if (trunk->refcnt < (b * b) / 2) 411 vlan_growhash(trunk, -1); 412 return (0); 413 } 414 415 panic("%s: vlan not found\n", __func__); 416 return (ENOENT); /*NOTREACHED*/ 417 } 418 419 /* 420 * Grow the hash larger or smaller if memory permits. 
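 * For example, at the default hwidth of 4 there are b = 16 buckets, so
 * vlan_inshash() above widens the table once the vlan count exceeds
 * b * b / 2 = 128, i.e. once average chains would otherwise grow past
 * b / 2 = 8 entries; shrinking uses the same threshold at the current
 * width, and the table is never shrunk below VLAN_DEF_HWIDTH.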
421 */ 422 static void 423 vlan_growhash(struct ifvlantrunk *trunk, int howmuch) 424 { 425 struct ifvlan *ifv; 426 struct ifvlanhead *hash2; 427 int hwidth2, i, j, n, n2; 428 429 VLAN_XLOCK_ASSERT(); 430 KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__)); 431 432 if (howmuch == 0) { 433 /* Harmless yet obvious coding error */ 434 printf("%s: howmuch is 0\n", __func__); 435 return; 436 } 437 438 hwidth2 = trunk->hwidth + howmuch; 439 n = 1 << trunk->hwidth; 440 n2 = 1 << hwidth2; 441 /* Do not shrink the table below the default */ 442 if (hwidth2 < VLAN_DEF_HWIDTH) 443 return; 444 445 hash2 = malloc(sizeof(struct ifvlanhead) * n2, M_VLAN, M_WAITOK); 446 if (hash2 == NULL) { 447 printf("%s: out of memory -- hash size not changed\n", 448 __func__); 449 return; /* We can live with the old hash table */ 450 } 451 for (j = 0; j < n2; j++) 452 CK_SLIST_INIT(&hash2[j]); 453 for (i = 0; i < n; i++) 454 while ((ifv = CK_SLIST_FIRST(&trunk->hash[i])) != NULL) { 455 CK_SLIST_REMOVE(&trunk->hash[i], ifv, ifvlan, ifv_list); 456 j = HASH(ifv->ifv_vid, n2 - 1); 457 CK_SLIST_INSERT_HEAD(&hash2[j], ifv, ifv_list); 458 } 459 NET_EPOCH_WAIT(); 460 free(trunk->hash, M_VLAN); 461 trunk->hash = hash2; 462 trunk->hwidth = hwidth2; 463 trunk->hmask = n2 - 1; 464 465 if (bootverbose) 466 if_printf(trunk->parent, 467 "VLAN hash table resized from %d to %d buckets\n", n, n2); 468 } 469 470 static __inline struct ifvlan * 471 vlan_gethash(struct ifvlantrunk *trunk, uint16_t vid) 472 { 473 struct ifvlan *ifv; 474 475 TRUNK_RLOCK_ASSERT(trunk); 476 477 CK_SLIST_FOREACH(ifv, &trunk->hash[HASH(vid, trunk->hmask)], ifv_list) 478 if (ifv->ifv_vid == vid) 479 return (ifv); 480 return (NULL); 481 } 482 483 #if 0 484 /* Debugging code to view the hashtables. */ 485 static void 486 vlan_dumphash(struct ifvlantrunk *trunk) 487 { 488 int i; 489 struct ifvlan *ifv; 490 491 for (i = 0; i < (1 << trunk->hwidth); i++) { 492 printf("%d: ", i); 493 CK_SLIST_FOREACH(ifv, &trunk->hash[i], ifv_list) 494 printf("%s ", ifv->ifv_ifp->if_xname); 495 printf("\n"); 496 } 497 } 498 #endif /* 0 */ 499 #else 500 501 static __inline struct ifvlan * 502 vlan_gethash(struct ifvlantrunk *trunk, uint16_t vid) 503 { 504 505 return trunk->vlans[vid]; 506 } 507 508 static __inline int 509 vlan_inshash(struct ifvlantrunk *trunk, struct ifvlan *ifv) 510 { 511 512 if (trunk->vlans[ifv->ifv_vid] != NULL) 513 return EEXIST; 514 trunk->vlans[ifv->ifv_vid] = ifv; 515 trunk->refcnt++; 516 517 return (0); 518 } 519 520 static __inline int 521 vlan_remhash(struct ifvlantrunk *trunk, struct ifvlan *ifv) 522 { 523 524 trunk->vlans[ifv->ifv_vid] = NULL; 525 trunk->refcnt--; 526 527 return (0); 528 } 529 530 static __inline void 531 vlan_freehash(struct ifvlantrunk *trunk) 532 { 533 } 534 535 static __inline void 536 vlan_inithash(struct ifvlantrunk *trunk) 537 { 538 } 539 540 #endif /* !VLAN_ARRAY */ 541 542 static void 543 trunk_destroy(struct ifvlantrunk *trunk) 544 { 545 VLAN_XLOCK_ASSERT(); 546 547 vlan_freehash(trunk); 548 trunk->parent->if_vlantrunk = NULL; 549 TRUNK_LOCK_DESTROY(trunk); 550 if_rele(trunk->parent); 551 free(trunk, M_VLAN); 552 } 553 554 /* 555 * Program our multicast filter. What we're actually doing is 556 * programming the multicast filter of the parent. This has the 557 * side effect of causing the parent interface to receive multicast 558 * traffic that it doesn't really want, which ends up being discarded 559 * later by the upper protocol layers. 
Unfortunately, there's no way
 * to avoid this: there really is only one physical interface.
 */
static int
vlan_setmulti(struct ifnet *ifp)
{
	struct ifnet *ifp_p;
	struct ifmultiaddr *ifma;
	struct ifvlan *sc;
	struct vlan_mc_entry *mc;
	int error;

	VLAN_XLOCK_ASSERT();

	/* Find the parent. */
	sc = ifp->if_softc;
	ifp_p = PARENT(sc);

	CURVNET_SET_QUIET(ifp_p->if_vnet);

	/* First, remove any existing filter entries. */
	while ((mc = CK_SLIST_FIRST(&sc->vlan_mc_listhead)) != NULL) {
		CK_SLIST_REMOVE_HEAD(&sc->vlan_mc_listhead, mc_entries);
		(void)if_delmulti(ifp_p, (struct sockaddr *)&mc->mc_addr);
		epoch_call(net_epoch_preempt, &mc->mc_epoch_ctx, vlan_mc_free);
	}

	/* Now program new ones. */
	IF_ADDR_WLOCK(ifp);
	CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		mc = malloc(sizeof(struct vlan_mc_entry), M_VLAN, M_NOWAIT);
		if (mc == NULL) {
			IF_ADDR_WUNLOCK(ifp);
			CURVNET_RESTORE();
			return (ENOMEM);
		}
		bcopy(ifma->ifma_addr, &mc->mc_addr, ifma->ifma_addr->sa_len);
		mc->mc_addr.sdl_index = ifp_p->if_index;
		CK_SLIST_INSERT_HEAD(&sc->vlan_mc_listhead, mc, mc_entries);
	}
	IF_ADDR_WUNLOCK(ifp);
	CK_SLIST_FOREACH(mc, &sc->vlan_mc_listhead, mc_entries) {
		error = if_addmulti(ifp_p, (struct sockaddr *)&mc->mc_addr,
		    NULL);
		if (error) {
			CURVNET_RESTORE();
			return (error);
		}
	}

	CURVNET_RESTORE();
	return (0);
}

/*
 * A handler for parent interface link layer address changes.
 * If the parent interface link layer address is changed we
 * should also change it on all child vlans.
 */
static void
vlan_iflladdr(void *arg __unused, struct ifnet *ifp)
{
	struct ifvlan *ifv;
	struct ifnet *ifv_ifp;
	struct ifvlantrunk *trunk;
	struct sockaddr_dl *sdl;

	/* Need the epoch since this is run on taskqueue_swi. */
	VLAN_RLOCK();
	trunk = ifp->if_vlantrunk;
	if (trunk == NULL) {
		VLAN_RUNLOCK();
		return;
	}

	/*
	 * OK, it's a trunk.  Loop over it and change the lladdr of every
	 * vlan on it.  We need an exclusive lock here to prevent concurrent
	 * SIOCSIFLLADDR ioctl calls on the parent garbling the lladdr of the
	 * child vlan.
	 */
	TRUNK_WLOCK(trunk);
	VLAN_FOREACH(ifv, trunk) {
		/*
		 * Copy the new lladdr into the ifv_ifp and enqueue a task
		 * to actually call if_setlladdr.  if_setlladdr needs to
		 * be deferred to a taskqueue because it will call into
		 * the if_vlan ioctl path and try to acquire the global
		 * lock.
		 */
		ifv_ifp = ifv->ifv_ifp;
		bcopy(IF_LLADDR(ifp), IF_LLADDR(ifv_ifp),
		    ifp->if_addrlen);
		sdl = (struct sockaddr_dl *)ifv_ifp->if_addr->ifa_addr;
		sdl->sdl_alen = ifp->if_addrlen;
		taskqueue_enqueue(taskqueue_thread, &ifv->lladdr_task);
	}
	TRUNK_WUNLOCK(trunk);
	VLAN_RUNLOCK();
}

/*
 * A handler for network interface departure events.
 * Track departure of trunks here so that we don't access invalid
 * pointers or whatever if a trunk is ripped from under us, e.g.,
 * by ejecting its hot-plug card.  However, if an ifnet is simply
 * being renamed, then there's no need to tear down the state.
 */
static void
vlan_ifdetach(void *arg __unused, struct ifnet *ifp)
{
	struct ifvlan *ifv;
	struct ifvlantrunk *trunk;

	/* If the ifnet is just being renamed, don't do anything.
*/ 672 if (ifp->if_flags & IFF_RENAMING) 673 return; 674 VLAN_XLOCK(); 675 trunk = ifp->if_vlantrunk; 676 if (trunk == NULL) { 677 VLAN_XUNLOCK(); 678 return; 679 } 680 681 /* 682 * OK, it's a trunk. Loop over and detach all vlan's on it. 683 * Check trunk pointer after each vlan_unconfig() as it will 684 * free it and set to NULL after the last vlan was detached. 685 */ 686 VLAN_FOREACH_UNTIL_SAFE(ifv, ifp->if_vlantrunk, 687 ifp->if_vlantrunk == NULL) 688 vlan_unconfig_locked(ifv->ifv_ifp, 1); 689 690 /* Trunk should have been destroyed in vlan_unconfig(). */ 691 KASSERT(ifp->if_vlantrunk == NULL, ("%s: purge failed", __func__)); 692 VLAN_XUNLOCK(); 693 } 694 695 /* 696 * Return the trunk device for a virtual interface. 697 */ 698 static struct ifnet * 699 vlan_trunkdev(struct ifnet *ifp) 700 { 701 struct ifvlan *ifv; 702 703 if (ifp->if_type != IFT_L2VLAN) 704 return (NULL); 705 706 VLAN_RLOCK(); 707 ifv = ifp->if_softc; 708 ifp = NULL; 709 if (ifv->ifv_trunk) 710 ifp = PARENT(ifv); 711 VLAN_RUNLOCK(); 712 return (ifp); 713 } 714 715 /* 716 * Return the 12-bit VLAN VID for this interface, for use by external 717 * components such as Infiniband. 718 * 719 * XXXRW: Note that the function name here is historical; it should be named 720 * vlan_vid(). 721 */ 722 static int 723 vlan_tag(struct ifnet *ifp, uint16_t *vidp) 724 { 725 struct ifvlan *ifv; 726 727 if (ifp->if_type != IFT_L2VLAN) 728 return (EINVAL); 729 ifv = ifp->if_softc; 730 *vidp = ifv->ifv_vid; 731 return (0); 732 } 733 734 static int 735 vlan_pcp(struct ifnet *ifp, uint16_t *pcpp) 736 { 737 struct ifvlan *ifv; 738 739 if (ifp->if_type != IFT_L2VLAN) 740 return (EINVAL); 741 ifv = ifp->if_softc; 742 *pcpp = ifv->ifv_pcp; 743 return (0); 744 } 745 746 /* 747 * Return a driver specific cookie for this interface. Synchronization 748 * with setcookie must be provided by the driver. 749 */ 750 static void * 751 vlan_cookie(struct ifnet *ifp) 752 { 753 struct ifvlan *ifv; 754 755 if (ifp->if_type != IFT_L2VLAN) 756 return (NULL); 757 ifv = ifp->if_softc; 758 return (ifv->ifv_cookie); 759 } 760 761 /* 762 * Store a cookie in our softc that drivers can use to store driver 763 * private per-instance data in. 764 */ 765 static int 766 vlan_setcookie(struct ifnet *ifp, void *cookie) 767 { 768 struct ifvlan *ifv; 769 770 if (ifp->if_type != IFT_L2VLAN) 771 return (EINVAL); 772 ifv = ifp->if_softc; 773 ifv->ifv_cookie = cookie; 774 return (0); 775 } 776 777 /* 778 * Return the vlan device present at the specific VID. 779 */ 780 static struct ifnet * 781 vlan_devat(struct ifnet *ifp, uint16_t vid) 782 { 783 struct ifvlantrunk *trunk; 784 struct ifvlan *ifv; 785 786 VLAN_RLOCK(); 787 trunk = ifp->if_vlantrunk; 788 if (trunk == NULL) { 789 VLAN_RUNLOCK(); 790 return (NULL); 791 } 792 ifp = NULL; 793 ifv = vlan_gethash(trunk, vid); 794 if (ifv) 795 ifp = ifv->ifv_ifp; 796 VLAN_RUNLOCK(); 797 return (ifp); 798 } 799 800 /* 801 * Recalculate the cached VLAN tag exposed via the MIB. 802 */ 803 static void 804 vlan_tag_recalculate(struct ifvlan *ifv) 805 { 806 807 ifv->ifv_tag = EVL_MAKETAG(ifv->ifv_vid, ifv->ifv_pcp, 0); 808 } 809 810 /* 811 * VLAN support can be loaded as a module. The only place in the 812 * system that's intimately aware of this is ether_input. We hook 813 * into this code through vlan_input_p which is defined there and 814 * set here. No one else in the system should be aware of this so 815 * we use an explicit reference here. 
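 * (Concretely, when the Ethernet input path in sys/net/if_ethersubr.c sees
 * a frame carrying an 802.1Q tag, either in-band or as the out-of-band
 * M_VLANTAG mbuf flag, and vlan_input_p is non-NULL, it hands the mbuf to
 * vlan_input() below through this pointer.)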
816 */ 817 extern void (*vlan_input_p)(struct ifnet *, struct mbuf *); 818 819 /* For if_link_state_change() eyes only... */ 820 extern void (*vlan_link_state_p)(struct ifnet *); 821 822 static int 823 vlan_modevent(module_t mod, int type, void *data) 824 { 825 826 switch (type) { 827 case MOD_LOAD: 828 ifdetach_tag = EVENTHANDLER_REGISTER(ifnet_departure_event, 829 vlan_ifdetach, NULL, EVENTHANDLER_PRI_ANY); 830 if (ifdetach_tag == NULL) 831 return (ENOMEM); 832 iflladdr_tag = EVENTHANDLER_REGISTER(iflladdr_event, 833 vlan_iflladdr, NULL, EVENTHANDLER_PRI_ANY); 834 if (iflladdr_tag == NULL) 835 return (ENOMEM); 836 VLAN_LOCKING_INIT(); 837 vlan_input_p = vlan_input; 838 vlan_link_state_p = vlan_link_state; 839 vlan_trunk_cap_p = vlan_trunk_capabilities; 840 vlan_trunkdev_p = vlan_trunkdev; 841 vlan_cookie_p = vlan_cookie; 842 vlan_setcookie_p = vlan_setcookie; 843 vlan_tag_p = vlan_tag; 844 vlan_pcp_p = vlan_pcp; 845 vlan_devat_p = vlan_devat; 846 #ifndef VIMAGE 847 vlan_cloner = if_clone_advanced(vlanname, 0, vlan_clone_match, 848 vlan_clone_create, vlan_clone_destroy); 849 #endif 850 if (bootverbose) 851 printf("vlan: initialized, using " 852 #ifdef VLAN_ARRAY 853 "full-size arrays" 854 #else 855 "hash tables with chaining" 856 #endif 857 858 "\n"); 859 break; 860 case MOD_UNLOAD: 861 #ifndef VIMAGE 862 if_clone_detach(vlan_cloner); 863 #endif 864 EVENTHANDLER_DEREGISTER(ifnet_departure_event, ifdetach_tag); 865 EVENTHANDLER_DEREGISTER(iflladdr_event, iflladdr_tag); 866 vlan_input_p = NULL; 867 vlan_link_state_p = NULL; 868 vlan_trunk_cap_p = NULL; 869 vlan_trunkdev_p = NULL; 870 vlan_tag_p = NULL; 871 vlan_cookie_p = NULL; 872 vlan_setcookie_p = NULL; 873 vlan_devat_p = NULL; 874 VLAN_LOCKING_DESTROY(); 875 if (bootverbose) 876 printf("vlan: unloaded\n"); 877 break; 878 default: 879 return (EOPNOTSUPP); 880 } 881 return (0); 882 } 883 884 static moduledata_t vlan_mod = { 885 "if_vlan", 886 vlan_modevent, 887 0 888 }; 889 890 DECLARE_MODULE(if_vlan, vlan_mod, SI_SUB_PSEUDO, SI_ORDER_ANY); 891 MODULE_VERSION(if_vlan, 3); 892 893 #ifdef VIMAGE 894 static void 895 vnet_vlan_init(const void *unused __unused) 896 { 897 898 vlan_cloner = if_clone_advanced(vlanname, 0, vlan_clone_match, 899 vlan_clone_create, vlan_clone_destroy); 900 V_vlan_cloner = vlan_cloner; 901 } 902 VNET_SYSINIT(vnet_vlan_init, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY, 903 vnet_vlan_init, NULL); 904 905 static void 906 vnet_vlan_uninit(const void *unused __unused) 907 { 908 909 if_clone_detach(V_vlan_cloner); 910 } 911 VNET_SYSUNINIT(vnet_vlan_uninit, SI_SUB_INIT_IF, SI_ORDER_FIRST, 912 vnet_vlan_uninit, NULL); 913 #endif 914 915 /* 916 * Check for <etherif>.<vlan> style interface names. 917 */ 918 static struct ifnet * 919 vlan_clone_match_ethervid(const char *name, int *vidp) 920 { 921 char ifname[IFNAMSIZ]; 922 char *cp; 923 struct ifnet *ifp; 924 int vid; 925 926 strlcpy(ifname, name, IFNAMSIZ); 927 if ((cp = strchr(ifname, '.')) == NULL) 928 return (NULL); 929 *cp = '\0'; 930 if ((ifp = ifunit_ref(ifname)) == NULL) 931 return (NULL); 932 /* Parse VID. 
*/ 933 if (*++cp == '\0') { 934 if_rele(ifp); 935 return (NULL); 936 } 937 vid = 0; 938 for(; *cp >= '0' && *cp <= '9'; cp++) 939 vid = (vid * 10) + (*cp - '0'); 940 if (*cp != '\0') { 941 if_rele(ifp); 942 return (NULL); 943 } 944 if (vidp != NULL) 945 *vidp = vid; 946 947 return (ifp); 948 } 949 950 static int 951 vlan_clone_match(struct if_clone *ifc, const char *name) 952 { 953 const char *cp; 954 955 if (vlan_clone_match_ethervid(name, NULL) != NULL) 956 return (1); 957 958 if (strncmp(vlanname, name, strlen(vlanname)) != 0) 959 return (0); 960 for (cp = name + 4; *cp != '\0'; cp++) { 961 if (*cp < '0' || *cp > '9') 962 return (0); 963 } 964 965 return (1); 966 } 967 968 static int 969 vlan_clone_create(struct if_clone *ifc, char *name, size_t len, caddr_t params) 970 { 971 char *dp; 972 int wildcard; 973 int unit; 974 int error; 975 int vid; 976 struct ifvlan *ifv; 977 struct ifnet *ifp; 978 struct ifnet *p; 979 struct ifaddr *ifa; 980 struct sockaddr_dl *sdl; 981 struct vlanreq vlr; 982 static const u_char eaddr[ETHER_ADDR_LEN]; /* 00:00:00:00:00:00 */ 983 984 /* 985 * There are 3 (ugh) ways to specify the cloned device: 986 * o pass a parameter block with the clone request. 987 * o specify parameters in the text of the clone device name 988 * o specify no parameters and get an unattached device that 989 * must be configured separately. 990 * The first technique is preferred; the latter two are 991 * supported for backwards compatibility. 992 * 993 * XXXRW: Note historic use of the word "tag" here. New ioctls may be 994 * called for. 995 */ 996 if (params) { 997 error = copyin(params, &vlr, sizeof(vlr)); 998 if (error) 999 return error; 1000 p = ifunit_ref(vlr.vlr_parent); 1001 if (p == NULL) 1002 return (ENXIO); 1003 error = ifc_name2unit(name, &unit); 1004 if (error != 0) { 1005 if_rele(p); 1006 return (error); 1007 } 1008 vid = vlr.vlr_tag; 1009 wildcard = (unit < 0); 1010 } else if ((p = vlan_clone_match_ethervid(name, &vid)) != NULL) { 1011 unit = -1; 1012 wildcard = 0; 1013 } else { 1014 p = NULL; 1015 error = ifc_name2unit(name, &unit); 1016 if (error != 0) 1017 return (error); 1018 1019 wildcard = (unit < 0); 1020 } 1021 1022 error = ifc_alloc_unit(ifc, &unit); 1023 if (error != 0) { 1024 if (p != NULL) 1025 if_rele(p); 1026 return (error); 1027 } 1028 1029 /* In the wildcard case, we need to update the name. */ 1030 if (wildcard) { 1031 for (dp = name; *dp != '\0'; dp++); 1032 if (snprintf(dp, len - (dp-name), "%d", unit) > 1033 len - (dp-name) - 1) { 1034 panic("%s: interface name too long", __func__); 1035 } 1036 } 1037 1038 ifv = malloc(sizeof(struct ifvlan), M_VLAN, M_WAITOK | M_ZERO); 1039 ifp = ifv->ifv_ifp = if_alloc(IFT_ETHER); 1040 if (ifp == NULL) { 1041 ifc_free_unit(ifc, unit); 1042 free(ifv, M_VLAN); 1043 if (p != NULL) 1044 if_rele(p); 1045 return (ENOSPC); 1046 } 1047 CK_SLIST_INIT(&ifv->vlan_mc_listhead); 1048 ifp->if_softc = ifv; 1049 /* 1050 * Set the name manually rather than using if_initname because 1051 * we don't conform to the default naming convention for interfaces. 
1052 */ 1053 strlcpy(ifp->if_xname, name, IFNAMSIZ); 1054 ifp->if_dname = vlanname; 1055 ifp->if_dunit = unit; 1056 /* NB: flags are not set here */ 1057 ifp->if_linkmib = &ifv->ifv_mib; 1058 ifp->if_linkmiblen = sizeof(ifv->ifv_mib); 1059 /* NB: mtu is not set here */ 1060 1061 ifp->if_init = vlan_init; 1062 ifp->if_transmit = vlan_transmit; 1063 ifp->if_qflush = vlan_qflush; 1064 ifp->if_ioctl = vlan_ioctl; 1065 #ifdef RATELIMIT 1066 ifp->if_snd_tag_alloc = vlan_snd_tag_alloc; 1067 #endif 1068 ifp->if_flags = VLAN_IFFLAGS; 1069 ether_ifattach(ifp, eaddr); 1070 /* Now undo some of the damage... */ 1071 ifp->if_baudrate = 0; 1072 ifp->if_type = IFT_L2VLAN; 1073 ifp->if_hdrlen = ETHER_VLAN_ENCAP_LEN; 1074 ifa = ifp->if_addr; 1075 sdl = (struct sockaddr_dl *)ifa->ifa_addr; 1076 sdl->sdl_type = IFT_L2VLAN; 1077 1078 if (p != NULL) { 1079 error = vlan_config(ifv, p, vid); 1080 if_rele(p); 1081 if (error != 0) { 1082 /* 1083 * Since we've partially failed, we need to back 1084 * out all the way, otherwise userland could get 1085 * confused. Thus, we destroy the interface. 1086 */ 1087 ether_ifdetach(ifp); 1088 vlan_unconfig(ifp); 1089 if_free(ifp); 1090 ifc_free_unit(ifc, unit); 1091 free(ifv, M_VLAN); 1092 1093 return (error); 1094 } 1095 } 1096 1097 return (0); 1098 } 1099 1100 static int 1101 vlan_clone_destroy(struct if_clone *ifc, struct ifnet *ifp) 1102 { 1103 struct ifvlan *ifv = ifp->if_softc; 1104 int unit = ifp->if_dunit; 1105 1106 ether_ifdetach(ifp); /* first, remove it from system-wide lists */ 1107 vlan_unconfig(ifp); /* now it can be unconfigured and freed */ 1108 /* 1109 * We should have the only reference to the ifv now, so we can now 1110 * drain any remaining lladdr task before freeing the ifnet and the 1111 * ifvlan. 1112 */ 1113 taskqueue_drain(taskqueue_thread, &ifv->lladdr_task); 1114 NET_EPOCH_WAIT(); 1115 if_free(ifp); 1116 free(ifv, M_VLAN); 1117 ifc_free_unit(ifc, unit); 1118 1119 return (0); 1120 } 1121 1122 /* 1123 * The ifp->if_init entry point for vlan(4) is a no-op. 1124 */ 1125 static void 1126 vlan_init(void *foo __unused) 1127 { 1128 } 1129 1130 /* 1131 * The if_transmit method for vlan(4) interface. 1132 */ 1133 static int 1134 vlan_transmit(struct ifnet *ifp, struct mbuf *m) 1135 { 1136 struct ifvlan *ifv; 1137 struct ifnet *p; 1138 int error, len, mcast; 1139 1140 VLAN_RLOCK(); 1141 ifv = ifp->if_softc; 1142 if (TRUNK(ifv) == NULL) { 1143 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 1144 VLAN_RUNLOCK(); 1145 m_freem(m); 1146 return (ENETDOWN); 1147 } 1148 p = PARENT(ifv); 1149 len = m->m_pkthdr.len; 1150 mcast = (m->m_flags & (M_MCAST | M_BCAST)) ? 1 : 0; 1151 1152 BPF_MTAP(ifp, m); 1153 1154 /* 1155 * Do not run parent's if_transmit() if the parent is not up, 1156 * or parent's driver will cause a system crash. 1157 */ 1158 if (!UP_AND_RUNNING(p)) { 1159 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 1160 VLAN_RUNLOCK(); 1161 m_freem(m); 1162 return (ENETDOWN); 1163 } 1164 1165 if (!ether_8021q_frame(&m, ifp, p, ifv->ifv_vid, ifv->ifv_pcp)) { 1166 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 1167 VLAN_RUNLOCK(); 1168 return (0); 1169 } 1170 1171 /* 1172 * Send it, precisely as ether_output() would have. 
1173 */ 1174 error = (p->if_transmit)(p, m); 1175 if (error == 0) { 1176 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); 1177 if_inc_counter(ifp, IFCOUNTER_OBYTES, len); 1178 if_inc_counter(ifp, IFCOUNTER_OMCASTS, mcast); 1179 } else 1180 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 1181 VLAN_RUNLOCK(); 1182 return (error); 1183 } 1184 1185 /* 1186 * The ifp->if_qflush entry point for vlan(4) is a no-op. 1187 */ 1188 static void 1189 vlan_qflush(struct ifnet *ifp __unused) 1190 { 1191 } 1192 1193 static void 1194 vlan_input(struct ifnet *ifp, struct mbuf *m) 1195 { 1196 struct ifvlantrunk *trunk; 1197 struct ifvlan *ifv; 1198 struct m_tag *mtag; 1199 uint16_t vid, tag; 1200 1201 VLAN_RLOCK(); 1202 trunk = ifp->if_vlantrunk; 1203 if (trunk == NULL) { 1204 VLAN_RUNLOCK(); 1205 m_freem(m); 1206 return; 1207 } 1208 1209 if (m->m_flags & M_VLANTAG) { 1210 /* 1211 * Packet is tagged, but m contains a normal 1212 * Ethernet frame; the tag is stored out-of-band. 1213 */ 1214 tag = m->m_pkthdr.ether_vtag; 1215 m->m_flags &= ~M_VLANTAG; 1216 } else { 1217 struct ether_vlan_header *evl; 1218 1219 /* 1220 * Packet is tagged in-band as specified by 802.1q. 1221 */ 1222 switch (ifp->if_type) { 1223 case IFT_ETHER: 1224 if (m->m_len < sizeof(*evl) && 1225 (m = m_pullup(m, sizeof(*evl))) == NULL) { 1226 if_printf(ifp, "cannot pullup VLAN header\n"); 1227 VLAN_RUNLOCK(); 1228 return; 1229 } 1230 evl = mtod(m, struct ether_vlan_header *); 1231 tag = ntohs(evl->evl_tag); 1232 1233 /* 1234 * Remove the 802.1q header by copying the Ethernet 1235 * addresses over it and adjusting the beginning of 1236 * the data in the mbuf. The encapsulated Ethernet 1237 * type field is already in place. 1238 */ 1239 bcopy((char *)evl, (char *)evl + ETHER_VLAN_ENCAP_LEN, 1240 ETHER_HDR_LEN - ETHER_TYPE_LEN); 1241 m_adj(m, ETHER_VLAN_ENCAP_LEN); 1242 break; 1243 1244 default: 1245 #ifdef INVARIANTS 1246 panic("%s: %s has unsupported if_type %u", 1247 __func__, ifp->if_xname, ifp->if_type); 1248 #endif 1249 if_inc_counter(ifp, IFCOUNTER_NOPROTO, 1); 1250 VLAN_RUNLOCK(); 1251 m_freem(m); 1252 return; 1253 } 1254 } 1255 1256 vid = EVL_VLANOFTAG(tag); 1257 1258 ifv = vlan_gethash(trunk, vid); 1259 if (ifv == NULL || !UP_AND_RUNNING(ifv->ifv_ifp)) { 1260 VLAN_RUNLOCK(); 1261 if_inc_counter(ifp, IFCOUNTER_NOPROTO, 1); 1262 m_freem(m); 1263 return; 1264 } 1265 1266 if (vlan_mtag_pcp) { 1267 /* 1268 * While uncommon, it is possible that we will find a 802.1q 1269 * packet encapsulated inside another packet that also had an 1270 * 802.1q header. For example, ethernet tunneled over IPSEC 1271 * arriving over ethernet. In that case, we replace the 1272 * existing 802.1q PCP m_tag value. 1273 */ 1274 mtag = m_tag_locate(m, MTAG_8021Q, MTAG_8021Q_PCP_IN, NULL); 1275 if (mtag == NULL) { 1276 mtag = m_tag_alloc(MTAG_8021Q, MTAG_8021Q_PCP_IN, 1277 sizeof(uint8_t), M_NOWAIT); 1278 if (mtag == NULL) { 1279 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); 1280 VLAN_RUNLOCK(); 1281 m_freem(m); 1282 return; 1283 } 1284 m_tag_prepend(m, mtag); 1285 } 1286 *(uint8_t *)(mtag + 1) = EVL_PRIOFTAG(tag); 1287 } 1288 1289 m->m_pkthdr.rcvif = ifv->ifv_ifp; 1290 if_inc_counter(ifv->ifv_ifp, IFCOUNTER_IPACKETS, 1); 1291 VLAN_RUNLOCK(); 1292 1293 /* Pass it back through the parent's input routine. 
*/ 1294 (*ifv->ifv_ifp->if_input)(ifv->ifv_ifp, m); 1295 } 1296 1297 static void 1298 vlan_lladdr_fn(void *arg, int pending __unused) 1299 { 1300 struct ifvlan *ifv; 1301 struct ifnet *ifp; 1302 1303 ifv = (struct ifvlan *)arg; 1304 ifp = ifv->ifv_ifp; 1305 /* The ifv_ifp already has the lladdr copied in. */ 1306 if_setlladdr(ifp, IF_LLADDR(ifp), ifp->if_addrlen); 1307 } 1308 1309 static int 1310 vlan_config(struct ifvlan *ifv, struct ifnet *p, uint16_t vid) 1311 { 1312 struct ifvlantrunk *trunk; 1313 struct ifnet *ifp; 1314 int error = 0; 1315 1316 /* 1317 * We can handle non-ethernet hardware types as long as 1318 * they handle the tagging and headers themselves. 1319 */ 1320 if (p->if_type != IFT_ETHER && 1321 (p->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) 1322 return (EPROTONOSUPPORT); 1323 if ((p->if_flags & VLAN_IFFLAGS) != VLAN_IFFLAGS) 1324 return (EPROTONOSUPPORT); 1325 /* 1326 * Don't let the caller set up a VLAN VID with 1327 * anything except VLID bits. 1328 * VID numbers 0x0 and 0xFFF are reserved. 1329 */ 1330 if (vid == 0 || vid == 0xFFF || (vid & ~EVL_VLID_MASK)) 1331 return (EINVAL); 1332 if (ifv->ifv_trunk) 1333 return (EBUSY); 1334 1335 VLAN_XLOCK(); 1336 if (p->if_vlantrunk == NULL) { 1337 trunk = malloc(sizeof(struct ifvlantrunk), 1338 M_VLAN, M_WAITOK | M_ZERO); 1339 vlan_inithash(trunk); 1340 TRUNK_LOCK_INIT(trunk); 1341 TRUNK_WLOCK(trunk); 1342 p->if_vlantrunk = trunk; 1343 trunk->parent = p; 1344 if_ref(trunk->parent); 1345 TRUNK_WUNLOCK(trunk); 1346 } else { 1347 trunk = p->if_vlantrunk; 1348 } 1349 1350 ifv->ifv_vid = vid; /* must set this before vlan_inshash() */ 1351 ifv->ifv_pcp = 0; /* Default: best effort delivery. */ 1352 vlan_tag_recalculate(ifv); 1353 error = vlan_inshash(trunk, ifv); 1354 if (error) 1355 goto done; 1356 ifv->ifv_proto = ETHERTYPE_VLAN; 1357 ifv->ifv_encaplen = ETHER_VLAN_ENCAP_LEN; 1358 ifv->ifv_mintu = ETHERMIN; 1359 ifv->ifv_pflags = 0; 1360 ifv->ifv_capenable = -1; 1361 1362 /* 1363 * If the parent supports the VLAN_MTU capability, 1364 * i.e. can Tx/Rx larger than ETHER_MAX_LEN frames, 1365 * use it. 1366 */ 1367 if (p->if_capenable & IFCAP_VLAN_MTU) { 1368 /* 1369 * No need to fudge the MTU since the parent can 1370 * handle extended frames. 1371 */ 1372 ifv->ifv_mtufudge = 0; 1373 } else { 1374 /* 1375 * Fudge the MTU by the encapsulation size. This 1376 * makes us incompatible with strictly compliant 1377 * 802.1Q implementations, but allows us to use 1378 * the feature with other NetBSD implementations, 1379 * which might still be useful. 1380 */ 1381 ifv->ifv_mtufudge = ifv->ifv_encaplen; 1382 } 1383 1384 ifv->ifv_trunk = trunk; 1385 ifp = ifv->ifv_ifp; 1386 /* 1387 * Initialize fields from our parent. This duplicates some 1388 * work with ether_ifattach() but allows for non-ethernet 1389 * interfaces to also work. 1390 */ 1391 ifp->if_mtu = p->if_mtu - ifv->ifv_mtufudge; 1392 ifp->if_baudrate = p->if_baudrate; 1393 ifp->if_output = p->if_output; 1394 ifp->if_input = p->if_input; 1395 ifp->if_resolvemulti = p->if_resolvemulti; 1396 ifp->if_addrlen = p->if_addrlen; 1397 ifp->if_broadcastaddr = p->if_broadcastaddr; 1398 ifp->if_pcp = ifv->ifv_pcp; 1399 1400 /* 1401 * Copy only a selected subset of flags from the parent. 1402 * Other flags are none of our business. 
1403 */ 1404 #define VLAN_COPY_FLAGS (IFF_SIMPLEX) 1405 ifp->if_flags &= ~VLAN_COPY_FLAGS; 1406 ifp->if_flags |= p->if_flags & VLAN_COPY_FLAGS; 1407 #undef VLAN_COPY_FLAGS 1408 1409 ifp->if_link_state = p->if_link_state; 1410 1411 TRUNK_RLOCK(TRUNK(ifv)); 1412 vlan_capabilities(ifv); 1413 TRUNK_RUNLOCK(TRUNK(ifv)); 1414 1415 /* 1416 * Set up our interface address to reflect the underlying 1417 * physical interface's. 1418 */ 1419 bcopy(IF_LLADDR(p), IF_LLADDR(ifp), p->if_addrlen); 1420 ((struct sockaddr_dl *)ifp->if_addr->ifa_addr)->sdl_alen = 1421 p->if_addrlen; 1422 1423 TASK_INIT(&ifv->lladdr_task, 0, vlan_lladdr_fn, ifv); 1424 1425 /* We are ready for operation now. */ 1426 ifp->if_drv_flags |= IFF_DRV_RUNNING; 1427 1428 /* Update flags on the parent, if necessary. */ 1429 vlan_setflags(ifp, 1); 1430 1431 /* 1432 * Configure multicast addresses that may already be 1433 * joined on the vlan device. 1434 */ 1435 (void)vlan_setmulti(ifp); 1436 1437 done: 1438 if (error == 0) 1439 EVENTHANDLER_INVOKE(vlan_config, p, ifv->ifv_vid); 1440 VLAN_XUNLOCK(); 1441 1442 return (error); 1443 } 1444 1445 static void 1446 vlan_unconfig(struct ifnet *ifp) 1447 { 1448 1449 VLAN_XLOCK(); 1450 vlan_unconfig_locked(ifp, 0); 1451 VLAN_XUNLOCK(); 1452 } 1453 1454 static void 1455 vlan_unconfig_locked(struct ifnet *ifp, int departing) 1456 { 1457 struct ifvlantrunk *trunk; 1458 struct vlan_mc_entry *mc; 1459 struct ifvlan *ifv; 1460 struct ifnet *parent; 1461 int error; 1462 1463 VLAN_XLOCK_ASSERT(); 1464 1465 ifv = ifp->if_softc; 1466 trunk = ifv->ifv_trunk; 1467 parent = NULL; 1468 1469 if (trunk != NULL) { 1470 parent = trunk->parent; 1471 1472 /* 1473 * Since the interface is being unconfigured, we need to 1474 * empty the list of multicast groups that we may have joined 1475 * while we were alive from the parent's list. 1476 */ 1477 while ((mc = CK_SLIST_FIRST(&ifv->vlan_mc_listhead)) != NULL) { 1478 /* 1479 * If the parent interface is being detached, 1480 * all its multicast addresses have already 1481 * been removed. Warn about errors if 1482 * if_delmulti() does fail, but don't abort as 1483 * all callers expect vlan destruction to 1484 * succeed. 1485 */ 1486 if (!departing) { 1487 error = if_delmulti(parent, 1488 (struct sockaddr *)&mc->mc_addr); 1489 if (error) 1490 if_printf(ifp, 1491 "Failed to delete multicast address from parent: %d\n", 1492 error); 1493 } 1494 CK_SLIST_REMOVE_HEAD(&ifv->vlan_mc_listhead, mc_entries); 1495 epoch_call(net_epoch_preempt, &mc->mc_epoch_ctx, vlan_mc_free); 1496 } 1497 1498 vlan_setflags(ifp, 0); /* clear special flags on parent */ 1499 1500 vlan_remhash(trunk, ifv); 1501 ifv->ifv_trunk = NULL; 1502 1503 /* 1504 * Check if we were the last. 1505 */ 1506 if (trunk->refcnt == 0) { 1507 parent->if_vlantrunk = NULL; 1508 NET_EPOCH_WAIT(); 1509 trunk_destroy(trunk); 1510 } 1511 } 1512 1513 /* Disconnect from parent. */ 1514 if (ifv->ifv_pflags) 1515 if_printf(ifp, "%s: ifv_pflags unclean\n", __func__); 1516 ifp->if_mtu = ETHERMTU; 1517 ifp->if_link_state = LINK_STATE_UNKNOWN; 1518 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1519 1520 /* 1521 * Only dispatch an event if vlan was 1522 * attached, otherwise there is nothing 1523 * to cleanup anyway. 
1524 */ 1525 if (parent != NULL) 1526 EVENTHANDLER_INVOKE(vlan_unconfig, parent, ifv->ifv_vid); 1527 } 1528 1529 /* Handle a reference counted flag that should be set on the parent as well */ 1530 static int 1531 vlan_setflag(struct ifnet *ifp, int flag, int status, 1532 int (*func)(struct ifnet *, int)) 1533 { 1534 struct ifvlan *ifv; 1535 int error; 1536 1537 VLAN_SXLOCK_ASSERT(); 1538 1539 ifv = ifp->if_softc; 1540 status = status ? (ifp->if_flags & flag) : 0; 1541 /* Now "status" contains the flag value or 0 */ 1542 1543 /* 1544 * See if recorded parent's status is different from what 1545 * we want it to be. If it is, flip it. We record parent's 1546 * status in ifv_pflags so that we won't clear parent's flag 1547 * we haven't set. In fact, we don't clear or set parent's 1548 * flags directly, but get or release references to them. 1549 * That's why we can be sure that recorded flags still are 1550 * in accord with actual parent's flags. 1551 */ 1552 if (status != (ifv->ifv_pflags & flag)) { 1553 error = (*func)(PARENT(ifv), status); 1554 if (error) 1555 return (error); 1556 ifv->ifv_pflags &= ~flag; 1557 ifv->ifv_pflags |= status; 1558 } 1559 return (0); 1560 } 1561 1562 /* 1563 * Handle IFF_* flags that require certain changes on the parent: 1564 * if "status" is true, update parent's flags respective to our if_flags; 1565 * if "status" is false, forcedly clear the flags set on parent. 1566 */ 1567 static int 1568 vlan_setflags(struct ifnet *ifp, int status) 1569 { 1570 int error, i; 1571 1572 for (i = 0; vlan_pflags[i].flag; i++) { 1573 error = vlan_setflag(ifp, vlan_pflags[i].flag, 1574 status, vlan_pflags[i].func); 1575 if (error) 1576 return (error); 1577 } 1578 return (0); 1579 } 1580 1581 /* Inform all vlans that their parent has changed link state */ 1582 static void 1583 vlan_link_state(struct ifnet *ifp) 1584 { 1585 struct ifvlantrunk *trunk; 1586 struct ifvlan *ifv; 1587 1588 /* Called from a taskqueue_swi task, so we cannot sleep. */ 1589 VLAN_RLOCK(); 1590 trunk = ifp->if_vlantrunk; 1591 if (trunk == NULL) { 1592 VLAN_RUNLOCK(); 1593 return; 1594 } 1595 1596 TRUNK_WLOCK(trunk); 1597 VLAN_FOREACH(ifv, trunk) { 1598 ifv->ifv_ifp->if_baudrate = trunk->parent->if_baudrate; 1599 if_link_state_change(ifv->ifv_ifp, 1600 trunk->parent->if_link_state); 1601 } 1602 TRUNK_WUNLOCK(trunk); 1603 VLAN_RUNLOCK(); 1604 } 1605 1606 static void 1607 vlan_capabilities(struct ifvlan *ifv) 1608 { 1609 struct ifnet *p; 1610 struct ifnet *ifp; 1611 struct ifnet_hw_tsomax hw_tsomax; 1612 int cap = 0, ena = 0, mena; 1613 u_long hwa = 0; 1614 1615 VLAN_SXLOCK_ASSERT(); 1616 TRUNK_RLOCK_ASSERT(TRUNK(ifv)); 1617 p = PARENT(ifv); 1618 ifp = ifv->ifv_ifp; 1619 1620 /* Mask parent interface enabled capabilities disabled by user. */ 1621 mena = p->if_capenable & ifv->ifv_capenable; 1622 1623 /* 1624 * If the parent interface can do checksum offloading 1625 * on VLANs, then propagate its hardware-assisted 1626 * checksumming flags. Also assert that checksum 1627 * offloading requires hardware VLAN tagging. 
1628 */ 1629 if (p->if_capabilities & IFCAP_VLAN_HWCSUM) 1630 cap |= p->if_capabilities & (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6); 1631 if (p->if_capenable & IFCAP_VLAN_HWCSUM && 1632 p->if_capenable & IFCAP_VLAN_HWTAGGING) { 1633 ena |= mena & (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6); 1634 if (ena & IFCAP_TXCSUM) 1635 hwa |= p->if_hwassist & (CSUM_IP | CSUM_TCP | 1636 CSUM_UDP | CSUM_SCTP); 1637 if (ena & IFCAP_TXCSUM_IPV6) 1638 hwa |= p->if_hwassist & (CSUM_TCP_IPV6 | 1639 CSUM_UDP_IPV6 | CSUM_SCTP_IPV6); 1640 } 1641 1642 /* 1643 * If the parent interface can do TSO on VLANs then 1644 * propagate the hardware-assisted flag. TSO on VLANs 1645 * does not necessarily require hardware VLAN tagging. 1646 */ 1647 memset(&hw_tsomax, 0, sizeof(hw_tsomax)); 1648 if_hw_tsomax_common(p, &hw_tsomax); 1649 if_hw_tsomax_update(ifp, &hw_tsomax); 1650 if (p->if_capabilities & IFCAP_VLAN_HWTSO) 1651 cap |= p->if_capabilities & IFCAP_TSO; 1652 if (p->if_capenable & IFCAP_VLAN_HWTSO) { 1653 ena |= mena & IFCAP_TSO; 1654 if (ena & IFCAP_TSO) 1655 hwa |= p->if_hwassist & CSUM_TSO; 1656 } 1657 1658 /* 1659 * If the parent interface can do LRO and checksum offloading on 1660 * VLANs, then guess it may do LRO on VLANs. False positive here 1661 * cost nothing, while false negative may lead to some confusions. 1662 */ 1663 if (p->if_capabilities & IFCAP_VLAN_HWCSUM) 1664 cap |= p->if_capabilities & IFCAP_LRO; 1665 if (p->if_capenable & IFCAP_VLAN_HWCSUM) 1666 ena |= p->if_capenable & IFCAP_LRO; 1667 1668 /* 1669 * If the parent interface can offload TCP connections over VLANs then 1670 * propagate its TOE capability to the VLAN interface. 1671 * 1672 * All TOE drivers in the tree today can deal with VLANs. If this 1673 * changes then IFCAP_VLAN_TOE should be promoted to a full capability 1674 * with its own bit. 1675 */ 1676 #define IFCAP_VLAN_TOE IFCAP_TOE 1677 if (p->if_capabilities & IFCAP_VLAN_TOE) 1678 cap |= p->if_capabilities & IFCAP_TOE; 1679 if (p->if_capenable & IFCAP_VLAN_TOE) { 1680 TOEDEV(ifp) = TOEDEV(p); 1681 ena |= mena & IFCAP_TOE; 1682 } 1683 1684 /* 1685 * If the parent interface supports dynamic link state, so does the 1686 * VLAN interface. 1687 */ 1688 cap |= (p->if_capabilities & IFCAP_LINKSTATE); 1689 ena |= (mena & IFCAP_LINKSTATE); 1690 1691 #ifdef RATELIMIT 1692 /* 1693 * If the parent interface supports ratelimiting, so does the 1694 * VLAN interface. 
1695 */ 1696 cap |= (p->if_capabilities & IFCAP_TXRTLMT); 1697 ena |= (mena & IFCAP_TXRTLMT); 1698 #endif 1699 1700 ifp->if_capabilities = cap; 1701 ifp->if_capenable = ena; 1702 ifp->if_hwassist = hwa; 1703 } 1704 1705 static void 1706 vlan_trunk_capabilities(struct ifnet *ifp) 1707 { 1708 struct ifvlantrunk *trunk; 1709 struct ifvlan *ifv; 1710 1711 VLAN_SLOCK(); 1712 trunk = ifp->if_vlantrunk; 1713 if (trunk == NULL) { 1714 VLAN_SUNLOCK(); 1715 return; 1716 } 1717 TRUNK_RLOCK(trunk); 1718 VLAN_FOREACH(ifv, trunk) { 1719 vlan_capabilities(ifv); 1720 } 1721 TRUNK_RUNLOCK(trunk); 1722 VLAN_SUNLOCK(); 1723 } 1724 1725 static int 1726 vlan_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 1727 { 1728 struct ifnet *p; 1729 struct ifreq *ifr; 1730 struct ifaddr *ifa; 1731 struct ifvlan *ifv; 1732 struct ifvlantrunk *trunk; 1733 struct vlanreq vlr; 1734 int error = 0; 1735 1736 ifr = (struct ifreq *)data; 1737 ifa = (struct ifaddr *) data; 1738 ifv = ifp->if_softc; 1739 1740 switch (cmd) { 1741 case SIOCSIFADDR: 1742 ifp->if_flags |= IFF_UP; 1743 #ifdef INET 1744 if (ifa->ifa_addr->sa_family == AF_INET) 1745 arp_ifinit(ifp, ifa); 1746 #endif 1747 break; 1748 case SIOCGIFADDR: 1749 bcopy(IF_LLADDR(ifp), &ifr->ifr_addr.sa_data[0], 1750 ifp->if_addrlen); 1751 break; 1752 case SIOCGIFMEDIA: 1753 VLAN_SLOCK(); 1754 if (TRUNK(ifv) != NULL) { 1755 p = PARENT(ifv); 1756 if_ref(p); 1757 error = (*p->if_ioctl)(p, SIOCGIFMEDIA, data); 1758 if_rele(p); 1759 /* Limit the result to the parent's current config. */ 1760 if (error == 0) { 1761 struct ifmediareq *ifmr; 1762 1763 ifmr = (struct ifmediareq *)data; 1764 if (ifmr->ifm_count >= 1 && ifmr->ifm_ulist) { 1765 ifmr->ifm_count = 1; 1766 error = copyout(&ifmr->ifm_current, 1767 ifmr->ifm_ulist, 1768 sizeof(int)); 1769 } 1770 } 1771 } else { 1772 error = EINVAL; 1773 } 1774 VLAN_SUNLOCK(); 1775 break; 1776 1777 case SIOCSIFMEDIA: 1778 error = EINVAL; 1779 break; 1780 1781 case SIOCSIFMTU: 1782 /* 1783 * Set the interface MTU. 1784 */ 1785 VLAN_SLOCK(); 1786 trunk = TRUNK(ifv); 1787 if (trunk != NULL) { 1788 TRUNK_WLOCK(trunk); 1789 if (ifr->ifr_mtu > 1790 (PARENT(ifv)->if_mtu - ifv->ifv_mtufudge) || 1791 ifr->ifr_mtu < 1792 (ifv->ifv_mintu - ifv->ifv_mtufudge)) 1793 error = EINVAL; 1794 else 1795 ifp->if_mtu = ifr->ifr_mtu; 1796 TRUNK_WUNLOCK(trunk); 1797 } else 1798 error = EINVAL; 1799 VLAN_SUNLOCK(); 1800 break; 1801 1802 case SIOCSETVLAN: 1803 #ifdef VIMAGE 1804 /* 1805 * XXXRW/XXXBZ: The goal in these checks is to allow a VLAN 1806 * interface to be delegated to a jail without allowing the 1807 * jail to change what underlying interface/VID it is 1808 * associated with. We are not entirely convinced that this 1809 * is the right way to accomplish that policy goal. 
1810 */ 1811 if (ifp->if_vnet != ifp->if_home_vnet) { 1812 error = EPERM; 1813 break; 1814 } 1815 #endif 1816 error = copyin(ifr_data_get_ptr(ifr), &vlr, sizeof(vlr)); 1817 if (error) 1818 break; 1819 if (vlr.vlr_parent[0] == '\0') { 1820 vlan_unconfig(ifp); 1821 break; 1822 } 1823 p = ifunit_ref(vlr.vlr_parent); 1824 if (p == NULL) { 1825 error = ENOENT; 1826 break; 1827 } 1828 error = vlan_config(ifv, p, vlr.vlr_tag); 1829 if_rele(p); 1830 break; 1831 1832 case SIOCGETVLAN: 1833 #ifdef VIMAGE 1834 if (ifp->if_vnet != ifp->if_home_vnet) { 1835 error = EPERM; 1836 break; 1837 } 1838 #endif 1839 bzero(&vlr, sizeof(vlr)); 1840 VLAN_SLOCK(); 1841 if (TRUNK(ifv) != NULL) { 1842 strlcpy(vlr.vlr_parent, PARENT(ifv)->if_xname, 1843 sizeof(vlr.vlr_parent)); 1844 vlr.vlr_tag = ifv->ifv_vid; 1845 } 1846 VLAN_SUNLOCK(); 1847 error = copyout(&vlr, ifr_data_get_ptr(ifr), sizeof(vlr)); 1848 break; 1849 1850 case SIOCSIFFLAGS: 1851 /* 1852 * We should propagate selected flags to the parent, 1853 * e.g., promiscuous mode. 1854 */ 1855 VLAN_XLOCK(); 1856 if (TRUNK(ifv) != NULL) 1857 error = vlan_setflags(ifp, 1); 1858 VLAN_XUNLOCK(); 1859 break; 1860 1861 case SIOCADDMULTI: 1862 case SIOCDELMULTI: 1863 /* 1864 * If we don't have a parent, just remember the membership for 1865 * when we do. 1866 * 1867 * XXX We need the rmlock here to avoid sleeping while 1868 * holding in6_multi_mtx. 1869 */ 1870 VLAN_XLOCK(); 1871 trunk = TRUNK(ifv); 1872 if (trunk != NULL) 1873 error = vlan_setmulti(ifp); 1874 VLAN_XUNLOCK(); 1875 1876 break; 1877 case SIOCGVLANPCP: 1878 #ifdef VIMAGE 1879 if (ifp->if_vnet != ifp->if_home_vnet) { 1880 error = EPERM; 1881 break; 1882 } 1883 #endif 1884 ifr->ifr_vlan_pcp = ifv->ifv_pcp; 1885 break; 1886 1887 case SIOCSVLANPCP: 1888 #ifdef VIMAGE 1889 if (ifp->if_vnet != ifp->if_home_vnet) { 1890 error = EPERM; 1891 break; 1892 } 1893 #endif 1894 error = priv_check(curthread, PRIV_NET_SETVLANPCP); 1895 if (error) 1896 break; 1897 if (ifr->ifr_vlan_pcp > 7) { 1898 error = EINVAL; 1899 break; 1900 } 1901 ifv->ifv_pcp = ifr->ifr_vlan_pcp; 1902 ifp->if_pcp = ifv->ifv_pcp; 1903 vlan_tag_recalculate(ifv); 1904 /* broadcast event about PCP change */ 1905 EVENTHANDLER_INVOKE(ifnet_event, ifp, IFNET_EVENT_PCP); 1906 break; 1907 1908 case SIOCSIFCAP: 1909 VLAN_SLOCK(); 1910 ifv->ifv_capenable = ifr->ifr_reqcap; 1911 trunk = TRUNK(ifv); 1912 if (trunk != NULL) { 1913 TRUNK_RLOCK(trunk); 1914 vlan_capabilities(ifv); 1915 TRUNK_RUNLOCK(trunk); 1916 } 1917 VLAN_SUNLOCK(); 1918 break; 1919 1920 default: 1921 error = EINVAL; 1922 break; 1923 } 1924 1925 return (error); 1926 } 1927 1928 #ifdef RATELIMIT 1929 static int 1930 vlan_snd_tag_alloc(struct ifnet *ifp, 1931 union if_snd_tag_alloc_params *params, 1932 struct m_snd_tag **ppmt) 1933 { 1934 1935 /* get trunk device */ 1936 ifp = vlan_trunkdev(ifp); 1937 if (ifp == NULL || (ifp->if_capenable & IFCAP_TXRTLMT) == 0) 1938 return (EOPNOTSUPP); 1939 /* forward allocation request */ 1940 return (ifp->if_snd_tag_alloc(ifp, params, ppmt)); 1941 } 1942 #endif 1943