1 /*- 2 * Copyright 1998 Massachusetts Institute of Technology 3 * Copyright 2012 ADARA Networks, Inc. 4 * Copyright 2017 Dell EMC Isilon 5 * 6 * Portions of this software were developed by Robert N. M. Watson under 7 * contract to ADARA Networks, Inc. 8 * 9 * Permission to use, copy, modify, and distribute this software and 10 * its documentation for any purpose and without fee is hereby 11 * granted, provided that both the above copyright notice and this 12 * permission notice appear in all copies, that both the above 13 * copyright notice and this permission notice appear in all 14 * supporting documentation, and that the name of M.I.T. not be used 15 * in advertising or publicity pertaining to distribution of the 16 * software without specific, written prior permission. M.I.T. makes 17 * no representations about the suitability of this software for any 18 * purpose. It is provided "as is" without express or implied 19 * warranty. 20 * 21 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS 22 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE, 23 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 24 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT 25 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF 28 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 29 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 */ 34 35 /* 36 * if_vlan.c - pseudo-device driver for IEEE 802.1Q virtual LANs. 37 * This is sort of sneaky in the implementation, since 38 * we need to pretend to be enough of an Ethernet implementation 39 * to make arp work. 
The way we do this is by telling everyone 40 * that we are an Ethernet, and then catch the packets that 41 * ether_output() sends to us via if_transmit(), rewrite them for 42 * use by the real outgoing interface, and ask it to send them. 43 */ 44 45 #include <sys/cdefs.h> 46 __FBSDID("$FreeBSD$"); 47 48 #include "opt_inet.h" 49 #include "opt_vlan.h" 50 #include "opt_ratelimit.h" 51 52 #include <sys/param.h> 53 #include <sys/eventhandler.h> 54 #include <sys/kernel.h> 55 #include <sys/lock.h> 56 #include <sys/malloc.h> 57 #include <sys/mbuf.h> 58 #include <sys/module.h> 59 #include <sys/rmlock.h> 60 #include <sys/priv.h> 61 #include <sys/queue.h> 62 #include <sys/socket.h> 63 #include <sys/sockio.h> 64 #include <sys/sysctl.h> 65 #include <sys/systm.h> 66 #include <sys/sx.h> 67 #include <sys/taskqueue.h> 68 69 #include <net/bpf.h> 70 #include <net/ethernet.h> 71 #include <net/if.h> 72 #include <net/if_var.h> 73 #include <net/if_clone.h> 74 #include <net/if_dl.h> 75 #include <net/if_types.h> 76 #include <net/if_vlan_var.h> 77 #include <net/vnet.h> 78 79 #ifdef INET 80 #include <netinet/in.h> 81 #include <netinet/if_ether.h> 82 #endif 83 84 #define VLAN_DEF_HWIDTH 4 85 #define VLAN_IFFLAGS (IFF_BROADCAST | IFF_MULTICAST) 86 87 #define UP_AND_RUNNING(ifp) \ 88 ((ifp)->if_flags & IFF_UP && (ifp)->if_drv_flags & IFF_DRV_RUNNING) 89 90 LIST_HEAD(ifvlanhead, ifvlan); 91 92 struct ifvlantrunk { 93 struct ifnet *parent; /* parent interface of this trunk */ 94 struct rmlock lock; 95 #ifdef VLAN_ARRAY 96 #define VLAN_ARRAY_SIZE (EVL_VLID_MASK + 1) 97 struct ifvlan *vlans[VLAN_ARRAY_SIZE]; /* static table */ 98 #else 99 struct ifvlanhead *hash; /* dynamic hash-list table */ 100 uint16_t hmask; 101 uint16_t hwidth; 102 #endif 103 int refcnt; 104 }; 105 106 /* 107 * This macro provides a facility to iterate over every vlan on a trunk with 108 * the assumption that none will be added/removed during iteration. 
109 */ 110 #ifdef VLAN_ARRAY 111 #define VLAN_FOREACH(_ifv, _trunk) \ 112 size_t _i; \ 113 for (_i = 0; _i < VLAN_ARRAY_SIZE; _i++) \ 114 if (((_ifv) = (_trunk)->vlans[_i]) != NULL) 115 #else /* VLAN_ARRAY */ 116 #define VLAN_FOREACH(_ifv, _trunk) \ 117 struct ifvlan *_next; \ 118 size_t _i; \ 119 for (_i = 0; _i < (1 << (_trunk)->hwidth); _i++) \ 120 LIST_FOREACH_SAFE((_ifv), &(_trunk)->hash[_i], ifv_list, _next) 121 #endif /* VLAN_ARRAY */ 122 123 /* 124 * This macro provides a facility to iterate over every vlan on a trunk while 125 * also modifying the number of vlans on the trunk. The iteration continues 126 * until some condition is met or there are no more vlans on the trunk. 127 */ 128 #ifdef VLAN_ARRAY 129 /* The VLAN_ARRAY case is simple -- just a for loop using the condition. */ 130 #define VLAN_FOREACH_UNTIL_SAFE(_ifv, _trunk, _cond) \ 131 size_t _i; \ 132 for (_i = 0; !(_cond) && _i < VLAN_ARRAY_SIZE; _i++) \ 133 if (((_ifv) = (_trunk)->vlans[_i])) 134 #else /* VLAN_ARRAY */ 135 /* 136 * The hash table case is more complicated. We allow for the hash table to be 137 * modified (i.e. vlans removed) while we are iterating over it. To allow for 138 * this we must restart the iteration every time we "touch" something during 139 * the iteration, since removal will resize the hash table and invalidate our 140 * current position. If acting on the touched element causes the trunk to be 141 * emptied, then iteration also stops. 142 */ 143 #define VLAN_FOREACH_UNTIL_SAFE(_ifv, _trunk, _cond) \ 144 size_t _i; \ 145 bool _touch = false; \ 146 for (_i = 0; \ 147 !(_cond) && _i < (1 << (_trunk)->hwidth); \ 148 _i = (_touch && ((_trunk) != NULL) ? 
0 : _i + 1), _touch = false) \ 149 if (((_ifv) = LIST_FIRST(&(_trunk)->hash[_i])) != NULL && \ 150 (_touch = true)) 151 #endif /* VLAN_ARRAY */ 152 153 struct vlan_mc_entry { 154 struct sockaddr_dl mc_addr; 155 SLIST_ENTRY(vlan_mc_entry) mc_entries; 156 }; 157 158 struct ifvlan { 159 struct ifvlantrunk *ifv_trunk; 160 struct ifnet *ifv_ifp; 161 #define TRUNK(ifv) ((ifv)->ifv_trunk) 162 #define PARENT(ifv) ((ifv)->ifv_trunk->parent) 163 void *ifv_cookie; 164 int ifv_pflags; /* special flags we have set on parent */ 165 int ifv_capenable; 166 struct ifv_linkmib { 167 int ifvm_encaplen; /* encapsulation length */ 168 int ifvm_mtufudge; /* MTU fudged by this much */ 169 int ifvm_mintu; /* min transmission unit */ 170 uint16_t ifvm_proto; /* encapsulation ethertype */ 171 uint16_t ifvm_tag; /* tag to apply on packets leaving if */ 172 uint16_t ifvm_vid; /* VLAN ID */ 173 uint8_t ifvm_pcp; /* Priority Code Point (PCP). */ 174 } ifv_mib; 175 struct task lladdr_task; 176 SLIST_HEAD(, vlan_mc_entry) vlan_mc_listhead; 177 #ifndef VLAN_ARRAY 178 LIST_ENTRY(ifvlan) ifv_list; 179 #endif 180 }; 181 #define ifv_proto ifv_mib.ifvm_proto 182 #define ifv_tag ifv_mib.ifvm_tag 183 #define ifv_vid ifv_mib.ifvm_vid 184 #define ifv_pcp ifv_mib.ifvm_pcp 185 #define ifv_encaplen ifv_mib.ifvm_encaplen 186 #define ifv_mtufudge ifv_mib.ifvm_mtufudge 187 #define ifv_mintu ifv_mib.ifvm_mintu 188 189 /* Special flags we should propagate to parent. 
*/ 190 static struct { 191 int flag; 192 int (*func)(struct ifnet *, int); 193 } vlan_pflags[] = { 194 {IFF_PROMISC, ifpromisc}, 195 {IFF_ALLMULTI, if_allmulti}, 196 {0, NULL} 197 }; 198 199 extern int vlan_mtag_pcp; 200 201 static const char vlanname[] = "vlan"; 202 static MALLOC_DEFINE(M_VLAN, vlanname, "802.1Q Virtual LAN Interface"); 203 204 static eventhandler_tag ifdetach_tag; 205 static eventhandler_tag iflladdr_tag; 206 207 /* 208 * if_vlan uses two module-level locks to allow concurrent modification of vlan 209 * interfaces and (mostly) allow for vlans to be destroyed while they are being 210 * used for tx/rx. To accomplish this in a way that has acceptable performance 211 * and cooperation with other parts of the network stack there is a 212 * non-sleepable rmlock(9) and an sx(9). Both locks are exclusively acquired 213 * when destroying a vlan interface, i.e. when the if_vlantrunk field of struct 214 * ifnet is de-allocated and NULL'd. Thus a reader holding either lock has a 215 * guarantee that the struct ifvlantrunk references a valid vlan trunk. 216 * 217 * The performance-sensitive paths that warrant using the rmlock(9) are 218 * vlan_transmit and vlan_input. Both have to check for the vlan interface's 219 * existence using if_vlantrunk, and being in the network tx/rx paths the use 220 * of an rmlock(9) gives a measureable improvement in performance. 221 * 222 * The reason for having an sx(9) is mostly because there are still areas that 223 * must be sleepable and also have safe concurrent access to a vlan interface. 224 * Since the sx(9) exists, it is used by default in most paths unless sleeping 225 * is not permitted, or if it is not clear whether sleeping is permitted. 226 * 227 * Note that despite these protections, there is still an inherent race in the 228 * destruction of vlans since there's no guarantee that the ifnet hasn't been 229 * freed/reused when the tx/rx functions are called by the stack. 
This can only 230 * be fixed by addressing ifnet's lifetime issues. 231 */ 232 #define _VLAN_RM_ID ifv_rm_lock 233 #define _VLAN_SX_ID ifv_sx 234 235 static struct rmlock _VLAN_RM_ID; 236 static struct sx _VLAN_SX_ID; 237 238 #define VLAN_LOCKING_INIT() \ 239 rm_init(&_VLAN_RM_ID, "vlan_rm"); \ 240 sx_init(&_VLAN_SX_ID, "vlan_sx") 241 242 #define VLAN_LOCKING_DESTROY() \ 243 rm_destroy(&_VLAN_RM_ID); \ 244 sx_destroy(&_VLAN_SX_ID) 245 246 #define _VLAN_RM_TRACKER _vlan_rm_tracker 247 #define VLAN_RLOCK() rm_rlock(&_VLAN_RM_ID, \ 248 &_VLAN_RM_TRACKER) 249 #define VLAN_RUNLOCK() rm_runlock(&_VLAN_RM_ID, \ 250 &_VLAN_RM_TRACKER) 251 #define VLAN_WLOCK() rm_wlock(&_VLAN_RM_ID) 252 #define VLAN_WUNLOCK() rm_wunlock(&_VLAN_RM_ID) 253 #define VLAN_RLOCK_ASSERT() rm_assert(&_VLAN_RM_ID, RA_RLOCKED) 254 #define VLAN_WLOCK_ASSERT() rm_assert(&_VLAN_RM_ID, RA_WLOCKED) 255 #define VLAN_RWLOCK_ASSERT() rm_assert(&_VLAN_RM_ID, RA_LOCKED) 256 #define VLAN_LOCK_READER struct rm_priotracker _VLAN_RM_TRACKER 257 258 #define VLAN_SLOCK() sx_slock(&_VLAN_SX_ID) 259 #define VLAN_SUNLOCK() sx_sunlock(&_VLAN_SX_ID) 260 #define VLAN_XLOCK() sx_xlock(&_VLAN_SX_ID) 261 #define VLAN_XUNLOCK() sx_xunlock(&_VLAN_SX_ID) 262 #define VLAN_SLOCK_ASSERT() sx_assert(&_VLAN_SX_ID, SA_SLOCKED) 263 #define VLAN_XLOCK_ASSERT() sx_assert(&_VLAN_SX_ID, SA_XLOCKED) 264 #define VLAN_SXLOCK_ASSERT() sx_assert(&_VLAN_SX_ID, SA_LOCKED) 265 266 267 /* 268 * We also have a per-trunk rmlock(9), that is locked shared on packet 269 * processing and exclusive when configuration is changed. Note: This should 270 * only be acquired while there is a shared lock on either of the global locks 271 * via VLAN_SLOCK or VLAN_RLOCK. Thus, an exclusive lock on the global locks 272 * makes a call to TRUNK_RLOCK/TRUNK_WLOCK technically superfluous. 
273 */ 274 #define _TRUNK_RM_TRACKER _trunk_rm_tracker 275 #define TRUNK_LOCK_INIT(trunk) rm_init(&(trunk)->lock, vlanname) 276 #define TRUNK_LOCK_DESTROY(trunk) rm_destroy(&(trunk)->lock) 277 #define TRUNK_RLOCK(trunk) rm_rlock(&(trunk)->lock, \ 278 &_TRUNK_RM_TRACKER) 279 #define TRUNK_WLOCK(trunk) rm_wlock(&(trunk)->lock) 280 #define TRUNK_RUNLOCK(trunk) rm_runlock(&(trunk)->lock, \ 281 &_TRUNK_RM_TRACKER) 282 #define TRUNK_WUNLOCK(trunk) rm_wunlock(&(trunk)->lock) 283 #define TRUNK_RLOCK_ASSERT(trunk) rm_assert(&(trunk)->lock, RA_RLOCKED) 284 #define TRUNK_LOCK_ASSERT(trunk) rm_assert(&(trunk)->lock, RA_LOCKED) 285 #define TRUNK_WLOCK_ASSERT(trunk) rm_assert(&(trunk)->lock, RA_WLOCKED) 286 #define TRUNK_LOCK_READER struct rm_priotracker _TRUNK_RM_TRACKER 287 288 /* 289 * The VLAN_ARRAY substitutes the dynamic hash with a static array 290 * with 4096 entries. In theory this can give a boost in processing, 291 * however in practice it does not. Probably this is because the array 292 * is too big to fit into CPU cache. 
293 */ 294 #ifndef VLAN_ARRAY 295 static void vlan_inithash(struct ifvlantrunk *trunk); 296 static void vlan_freehash(struct ifvlantrunk *trunk); 297 static int vlan_inshash(struct ifvlantrunk *trunk, struct ifvlan *ifv); 298 static int vlan_remhash(struct ifvlantrunk *trunk, struct ifvlan *ifv); 299 static void vlan_growhash(struct ifvlantrunk *trunk, int howmuch); 300 static __inline struct ifvlan * vlan_gethash(struct ifvlantrunk *trunk, 301 uint16_t vid); 302 #endif 303 static void trunk_destroy(struct ifvlantrunk *trunk); 304 305 static void vlan_init(void *foo); 306 static void vlan_input(struct ifnet *ifp, struct mbuf *m); 307 static int vlan_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr); 308 #ifdef RATELIMIT 309 static int vlan_snd_tag_alloc(struct ifnet *, 310 union if_snd_tag_alloc_params *, struct m_snd_tag **); 311 #endif 312 static void vlan_qflush(struct ifnet *ifp); 313 static int vlan_setflag(struct ifnet *ifp, int flag, int status, 314 int (*func)(struct ifnet *, int)); 315 static int vlan_setflags(struct ifnet *ifp, int status); 316 static int vlan_setmulti(struct ifnet *ifp); 317 static int vlan_transmit(struct ifnet *ifp, struct mbuf *m); 318 static void vlan_unconfig(struct ifnet *ifp); 319 static void vlan_unconfig_locked(struct ifnet *ifp, int departing); 320 static int vlan_config(struct ifvlan *ifv, struct ifnet *p, uint16_t tag); 321 static void vlan_link_state(struct ifnet *ifp); 322 static void vlan_capabilities(struct ifvlan *ifv); 323 static void vlan_trunk_capabilities(struct ifnet *ifp); 324 325 static struct ifnet *vlan_clone_match_ethervid(const char *, int *); 326 static int vlan_clone_match(struct if_clone *, const char *); 327 static int vlan_clone_create(struct if_clone *, char *, size_t, caddr_t); 328 static int vlan_clone_destroy(struct if_clone *, struct ifnet *); 329 330 static void vlan_ifdetach(void *arg, struct ifnet *ifp); 331 static void vlan_iflladdr(void *arg, struct ifnet *ifp); 332 333 static void 
vlan_lladdr_fn(void *arg, int pending); 334 335 static struct if_clone *vlan_cloner; 336 337 #ifdef VIMAGE 338 VNET_DEFINE_STATIC(struct if_clone *, vlan_cloner); 339 #define V_vlan_cloner VNET(vlan_cloner) 340 #endif 341 342 #ifndef VLAN_ARRAY 343 #define HASH(n, m) ((((n) >> 8) ^ ((n) >> 4) ^ (n)) & (m)) 344 345 static void 346 vlan_inithash(struct ifvlantrunk *trunk) 347 { 348 int i, n; 349 350 /* 351 * The trunk must not be locked here since we call malloc(M_WAITOK). 352 * It is OK in case this function is called before the trunk struct 353 * gets hooked up and becomes visible from other threads. 354 */ 355 356 KASSERT(trunk->hwidth == 0 && trunk->hash == NULL, 357 ("%s: hash already initialized", __func__)); 358 359 trunk->hwidth = VLAN_DEF_HWIDTH; 360 n = 1 << trunk->hwidth; 361 trunk->hmask = n - 1; 362 trunk->hash = malloc(sizeof(struct ifvlanhead) * n, M_VLAN, M_WAITOK); 363 for (i = 0; i < n; i++) 364 LIST_INIT(&trunk->hash[i]); 365 } 366 367 static void 368 vlan_freehash(struct ifvlantrunk *trunk) 369 { 370 #ifdef INVARIANTS 371 int i; 372 373 KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__)); 374 for (i = 0; i < (1 << trunk->hwidth); i++) 375 KASSERT(LIST_EMPTY(&trunk->hash[i]), 376 ("%s: hash table not empty", __func__)); 377 #endif 378 free(trunk->hash, M_VLAN); 379 trunk->hash = NULL; 380 trunk->hwidth = trunk->hmask = 0; 381 } 382 383 static int 384 vlan_inshash(struct ifvlantrunk *trunk, struct ifvlan *ifv) 385 { 386 int i, b; 387 struct ifvlan *ifv2; 388 389 TRUNK_WLOCK_ASSERT(trunk); 390 KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__)); 391 392 b = 1 << trunk->hwidth; 393 i = HASH(ifv->ifv_vid, trunk->hmask); 394 LIST_FOREACH(ifv2, &trunk->hash[i], ifv_list) 395 if (ifv->ifv_vid == ifv2->ifv_vid) 396 return (EEXIST); 397 398 /* 399 * Grow the hash when the number of vlans exceeds half of the number of 400 * hash buckets squared. This will make the average linked-list length 401 * buckets/2. 
402 */ 403 if (trunk->refcnt > (b * b) / 2) { 404 vlan_growhash(trunk, 1); 405 i = HASH(ifv->ifv_vid, trunk->hmask); 406 } 407 LIST_INSERT_HEAD(&trunk->hash[i], ifv, ifv_list); 408 trunk->refcnt++; 409 410 return (0); 411 } 412 413 static int 414 vlan_remhash(struct ifvlantrunk *trunk, struct ifvlan *ifv) 415 { 416 int i, b; 417 struct ifvlan *ifv2; 418 419 TRUNK_WLOCK_ASSERT(trunk); 420 KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__)); 421 422 b = 1 << trunk->hwidth; 423 i = HASH(ifv->ifv_vid, trunk->hmask); 424 LIST_FOREACH(ifv2, &trunk->hash[i], ifv_list) 425 if (ifv2 == ifv) { 426 trunk->refcnt--; 427 LIST_REMOVE(ifv2, ifv_list); 428 if (trunk->refcnt < (b * b) / 2) 429 vlan_growhash(trunk, -1); 430 return (0); 431 } 432 433 panic("%s: vlan not found\n", __func__); 434 return (ENOENT); /*NOTREACHED*/ 435 } 436 437 /* 438 * Grow the hash larger or smaller if memory permits. 439 */ 440 static void 441 vlan_growhash(struct ifvlantrunk *trunk, int howmuch) 442 { 443 struct ifvlan *ifv; 444 struct ifvlanhead *hash2; 445 int hwidth2, i, j, n, n2; 446 447 TRUNK_WLOCK_ASSERT(trunk); 448 KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__)); 449 450 if (howmuch == 0) { 451 /* Harmless yet obvious coding error */ 452 printf("%s: howmuch is 0\n", __func__); 453 return; 454 } 455 456 hwidth2 = trunk->hwidth + howmuch; 457 n = 1 << trunk->hwidth; 458 n2 = 1 << hwidth2; 459 /* Do not shrink the table below the default */ 460 if (hwidth2 < VLAN_DEF_HWIDTH) 461 return; 462 463 /* M_NOWAIT because we're called with trunk mutex held */ 464 hash2 = malloc(sizeof(struct ifvlanhead) * n2, M_VLAN, M_NOWAIT); 465 if (hash2 == NULL) { 466 printf("%s: out of memory -- hash size not changed\n", 467 __func__); 468 return; /* We can live with the old hash table */ 469 } 470 for (j = 0; j < n2; j++) 471 LIST_INIT(&hash2[j]); 472 for (i = 0; i < n; i++) 473 while ((ifv = LIST_FIRST(&trunk->hash[i])) != NULL) { 474 LIST_REMOVE(ifv, ifv_list); 475 j = 
HASH(ifv->ifv_vid, n2 - 1); 476 LIST_INSERT_HEAD(&hash2[j], ifv, ifv_list); 477 } 478 free(trunk->hash, M_VLAN); 479 trunk->hash = hash2; 480 trunk->hwidth = hwidth2; 481 trunk->hmask = n2 - 1; 482 483 if (bootverbose) 484 if_printf(trunk->parent, 485 "VLAN hash table resized from %d to %d buckets\n", n, n2); 486 } 487 488 static __inline struct ifvlan * 489 vlan_gethash(struct ifvlantrunk *trunk, uint16_t vid) 490 { 491 struct ifvlan *ifv; 492 493 TRUNK_RLOCK_ASSERT(trunk); 494 495 LIST_FOREACH(ifv, &trunk->hash[HASH(vid, trunk->hmask)], ifv_list) 496 if (ifv->ifv_vid == vid) 497 return (ifv); 498 return (NULL); 499 } 500 501 #if 0 502 /* Debugging code to view the hashtables. */ 503 static void 504 vlan_dumphash(struct ifvlantrunk *trunk) 505 { 506 int i; 507 struct ifvlan *ifv; 508 509 for (i = 0; i < (1 << trunk->hwidth); i++) { 510 printf("%d: ", i); 511 LIST_FOREACH(ifv, &trunk->hash[i], ifv_list) 512 printf("%s ", ifv->ifv_ifp->if_xname); 513 printf("\n"); 514 } 515 } 516 #endif /* 0 */ 517 #else 518 519 static __inline struct ifvlan * 520 vlan_gethash(struct ifvlantrunk *trunk, uint16_t vid) 521 { 522 523 return trunk->vlans[vid]; 524 } 525 526 static __inline int 527 vlan_inshash(struct ifvlantrunk *trunk, struct ifvlan *ifv) 528 { 529 530 if (trunk->vlans[ifv->ifv_vid] != NULL) 531 return EEXIST; 532 trunk->vlans[ifv->ifv_vid] = ifv; 533 trunk->refcnt++; 534 535 return (0); 536 } 537 538 static __inline int 539 vlan_remhash(struct ifvlantrunk *trunk, struct ifvlan *ifv) 540 { 541 542 trunk->vlans[ifv->ifv_vid] = NULL; 543 trunk->refcnt--; 544 545 return (0); 546 } 547 548 static __inline void 549 vlan_freehash(struct ifvlantrunk *trunk) 550 { 551 } 552 553 static __inline void 554 vlan_inithash(struct ifvlantrunk *trunk) 555 { 556 } 557 558 #endif /* !VLAN_ARRAY */ 559 560 static void 561 trunk_destroy(struct ifvlantrunk *trunk) 562 { 563 VLAN_XLOCK_ASSERT(); 564 VLAN_WLOCK_ASSERT(); 565 566 vlan_freehash(trunk); 567 trunk->parent->if_vlantrunk = NULL; 
568 TRUNK_LOCK_DESTROY(trunk); 569 if_rele(trunk->parent); 570 free(trunk, M_VLAN); 571 } 572 573 /* 574 * Program our multicast filter. What we're actually doing is 575 * programming the multicast filter of the parent. This has the 576 * side effect of causing the parent interface to receive multicast 577 * traffic that it doesn't really want, which ends up being discarded 578 * later by the upper protocol layers. Unfortunately, there's no way 579 * to avoid this: there really is only one physical interface. 580 */ 581 static int 582 vlan_setmulti(struct ifnet *ifp) 583 { 584 struct ifnet *ifp_p; 585 struct ifmultiaddr *ifma; 586 struct ifvlan *sc; 587 struct vlan_mc_entry *mc; 588 int error; 589 590 /* 591 * XXX This stupidly needs the rmlock to avoid sleeping while holding 592 * the in6_multi_mtx (see in6_mc_join_locked). 593 */ 594 VLAN_RWLOCK_ASSERT(); 595 596 /* Find the parent. */ 597 sc = ifp->if_softc; 598 TRUNK_WLOCK_ASSERT(TRUNK(sc)); 599 ifp_p = PARENT(sc); 600 601 CURVNET_SET_QUIET(ifp_p->if_vnet); 602 603 /* First, remove any existing filter entries. */ 604 while ((mc = SLIST_FIRST(&sc->vlan_mc_listhead)) != NULL) { 605 SLIST_REMOVE_HEAD(&sc->vlan_mc_listhead, mc_entries); 606 (void)if_delmulti(ifp_p, (struct sockaddr *)&mc->mc_addr); 607 free(mc, M_VLAN); 608 } 609 610 /* Now program new ones. 
*/ 611 IF_ADDR_WLOCK(ifp); 612 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 613 if (ifma->ifma_addr->sa_family != AF_LINK) 614 continue; 615 mc = malloc(sizeof(struct vlan_mc_entry), M_VLAN, M_NOWAIT); 616 if (mc == NULL) { 617 IF_ADDR_WUNLOCK(ifp); 618 return (ENOMEM); 619 } 620 bcopy(ifma->ifma_addr, &mc->mc_addr, ifma->ifma_addr->sa_len); 621 mc->mc_addr.sdl_index = ifp_p->if_index; 622 SLIST_INSERT_HEAD(&sc->vlan_mc_listhead, mc, mc_entries); 623 } 624 IF_ADDR_WUNLOCK(ifp); 625 SLIST_FOREACH (mc, &sc->vlan_mc_listhead, mc_entries) { 626 error = if_addmulti(ifp_p, (struct sockaddr *)&mc->mc_addr, 627 NULL); 628 if (error) 629 return (error); 630 } 631 632 CURVNET_RESTORE(); 633 return (0); 634 } 635 636 /* 637 * A handler for parent interface link layer address changes. 638 * If the parent interface link layer address is changed we 639 * should also change it on all children vlans. 640 */ 641 static void 642 vlan_iflladdr(void *arg __unused, struct ifnet *ifp) 643 { 644 struct ifvlan *ifv; 645 struct ifnet *ifv_ifp; 646 struct ifvlantrunk *trunk; 647 struct sockaddr_dl *sdl; 648 VLAN_LOCK_READER; 649 650 /* Need the rmlock since this is run on taskqueue_swi. */ 651 VLAN_RLOCK(); 652 trunk = ifp->if_vlantrunk; 653 if (trunk == NULL) { 654 VLAN_RUNLOCK(); 655 return; 656 } 657 658 /* 659 * OK, it's a trunk. Loop over and change all vlan's lladdrs on it. 660 * We need an exclusive lock here to prevent concurrent SIOCSIFLLADDR 661 * ioctl calls on the parent garbling the lladdr of the child vlan. 662 */ 663 TRUNK_WLOCK(trunk); 664 VLAN_FOREACH(ifv, trunk) { 665 /* 666 * Copy new new lladdr into the ifv_ifp, enqueue a task 667 * to actually call if_setlladdr. if_setlladdr needs to 668 * be deferred to a taskqueue because it will call into 669 * the if_vlan ioctl path and try to acquire the global 670 * lock. 
671 */ 672 ifv_ifp = ifv->ifv_ifp; 673 bcopy(IF_LLADDR(ifp), IF_LLADDR(ifv_ifp), 674 ifp->if_addrlen); 675 sdl = (struct sockaddr_dl *)ifv_ifp->if_addr->ifa_addr; 676 sdl->sdl_alen = ifp->if_addrlen; 677 taskqueue_enqueue(taskqueue_thread, &ifv->lladdr_task); 678 } 679 TRUNK_WUNLOCK(trunk); 680 VLAN_RUNLOCK(); 681 } 682 683 /* 684 * A handler for network interface departure events. 685 * Track departure of trunks here so that we don't access invalid 686 * pointers or whatever if a trunk is ripped from under us, e.g., 687 * by ejecting its hot-plug card. However, if an ifnet is simply 688 * being renamed, then there's no need to tear down the state. 689 */ 690 static void 691 vlan_ifdetach(void *arg __unused, struct ifnet *ifp) 692 { 693 struct ifvlan *ifv; 694 struct ifvlantrunk *trunk; 695 696 /* If the ifnet is just being renamed, don't do anything. */ 697 if (ifp->if_flags & IFF_RENAMING) 698 return; 699 VLAN_XLOCK(); 700 trunk = ifp->if_vlantrunk; 701 if (trunk == NULL) { 702 VLAN_XUNLOCK(); 703 return; 704 } 705 706 /* 707 * OK, it's a trunk. Loop over and detach all vlan's on it. 708 * Check trunk pointer after each vlan_unconfig() as it will 709 * free it and set to NULL after the last vlan was detached. 710 */ 711 VLAN_FOREACH_UNTIL_SAFE(ifv, ifp->if_vlantrunk, 712 ifp->if_vlantrunk == NULL) 713 vlan_unconfig_locked(ifv->ifv_ifp, 1); 714 715 /* Trunk should have been destroyed in vlan_unconfig(). */ 716 KASSERT(ifp->if_vlantrunk == NULL, ("%s: purge failed", __func__)); 717 VLAN_XUNLOCK(); 718 } 719 720 /* 721 * Return the trunk device for a virtual interface. 722 */ 723 static struct ifnet * 724 vlan_trunkdev(struct ifnet *ifp) 725 { 726 struct ifvlan *ifv; 727 VLAN_LOCK_READER; 728 729 if (ifp->if_type != IFT_L2VLAN) 730 return (NULL); 731 732 /* Not clear if callers are sleepable, so acquire the rmlock. 
*/ 733 VLAN_RLOCK(); 734 ifv = ifp->if_softc; 735 ifp = NULL; 736 if (ifv->ifv_trunk) 737 ifp = PARENT(ifv); 738 VLAN_RUNLOCK(); 739 return (ifp); 740 } 741 742 /* 743 * Return the 12-bit VLAN VID for this interface, for use by external 744 * components such as Infiniband. 745 * 746 * XXXRW: Note that the function name here is historical; it should be named 747 * vlan_vid(). 748 */ 749 static int 750 vlan_tag(struct ifnet *ifp, uint16_t *vidp) 751 { 752 struct ifvlan *ifv; 753 754 if (ifp->if_type != IFT_L2VLAN) 755 return (EINVAL); 756 ifv = ifp->if_softc; 757 *vidp = ifv->ifv_vid; 758 return (0); 759 } 760 761 static int 762 vlan_pcp(struct ifnet *ifp, uint16_t *pcpp) 763 { 764 struct ifvlan *ifv; 765 766 if (ifp->if_type != IFT_L2VLAN) 767 return (EINVAL); 768 ifv = ifp->if_softc; 769 *pcpp = ifv->ifv_pcp; 770 return (0); 771 } 772 773 /* 774 * Return a driver specific cookie for this interface. Synchronization 775 * with setcookie must be provided by the driver. 776 */ 777 static void * 778 vlan_cookie(struct ifnet *ifp) 779 { 780 struct ifvlan *ifv; 781 782 if (ifp->if_type != IFT_L2VLAN) 783 return (NULL); 784 ifv = ifp->if_softc; 785 return (ifv->ifv_cookie); 786 } 787 788 /* 789 * Store a cookie in our softc that drivers can use to store driver 790 * private per-instance data in. 791 */ 792 static int 793 vlan_setcookie(struct ifnet *ifp, void *cookie) 794 { 795 struct ifvlan *ifv; 796 797 if (ifp->if_type != IFT_L2VLAN) 798 return (EINVAL); 799 ifv = ifp->if_softc; 800 ifv->ifv_cookie = cookie; 801 return (0); 802 } 803 804 /* 805 * Return the vlan device present at the specific VID. 806 */ 807 static struct ifnet * 808 vlan_devat(struct ifnet *ifp, uint16_t vid) 809 { 810 struct ifvlantrunk *trunk; 811 struct ifvlan *ifv; 812 VLAN_LOCK_READER; 813 TRUNK_LOCK_READER; 814 815 /* Not clear if callers are sleepable, so acquire the rmlock. 
*/ 816 VLAN_RLOCK(); 817 trunk = ifp->if_vlantrunk; 818 if (trunk == NULL) { 819 VLAN_RUNLOCK(); 820 return (NULL); 821 } 822 ifp = NULL; 823 TRUNK_RLOCK(trunk); 824 ifv = vlan_gethash(trunk, vid); 825 if (ifv) 826 ifp = ifv->ifv_ifp; 827 TRUNK_RUNLOCK(trunk); 828 VLAN_RUNLOCK(); 829 return (ifp); 830 } 831 832 /* 833 * Recalculate the cached VLAN tag exposed via the MIB. 834 */ 835 static void 836 vlan_tag_recalculate(struct ifvlan *ifv) 837 { 838 839 ifv->ifv_tag = EVL_MAKETAG(ifv->ifv_vid, ifv->ifv_pcp, 0); 840 } 841 842 /* 843 * VLAN support can be loaded as a module. The only place in the 844 * system that's intimately aware of this is ether_input. We hook 845 * into this code through vlan_input_p which is defined there and 846 * set here. No one else in the system should be aware of this so 847 * we use an explicit reference here. 848 */ 849 extern void (*vlan_input_p)(struct ifnet *, struct mbuf *); 850 851 /* For if_link_state_change() eyes only... */ 852 extern void (*vlan_link_state_p)(struct ifnet *); 853 854 static int 855 vlan_modevent(module_t mod, int type, void *data) 856 { 857 858 switch (type) { 859 case MOD_LOAD: 860 ifdetach_tag = EVENTHANDLER_REGISTER(ifnet_departure_event, 861 vlan_ifdetach, NULL, EVENTHANDLER_PRI_ANY); 862 if (ifdetach_tag == NULL) 863 return (ENOMEM); 864 iflladdr_tag = EVENTHANDLER_REGISTER(iflladdr_event, 865 vlan_iflladdr, NULL, EVENTHANDLER_PRI_ANY); 866 if (iflladdr_tag == NULL) 867 return (ENOMEM); 868 VLAN_LOCKING_INIT(); 869 vlan_input_p = vlan_input; 870 vlan_link_state_p = vlan_link_state; 871 vlan_trunk_cap_p = vlan_trunk_capabilities; 872 vlan_trunkdev_p = vlan_trunkdev; 873 vlan_cookie_p = vlan_cookie; 874 vlan_setcookie_p = vlan_setcookie; 875 vlan_tag_p = vlan_tag; 876 vlan_pcp_p = vlan_pcp; 877 vlan_devat_p = vlan_devat; 878 #ifndef VIMAGE 879 vlan_cloner = if_clone_advanced(vlanname, 0, vlan_clone_match, 880 vlan_clone_create, vlan_clone_destroy); 881 #endif 882 if (bootverbose) 883 printf("vlan: 
initialized, using " 884 #ifdef VLAN_ARRAY 885 "full-size arrays" 886 #else 887 "hash tables with chaining" 888 #endif 889 890 "\n"); 891 break; 892 case MOD_UNLOAD: 893 #ifndef VIMAGE 894 if_clone_detach(vlan_cloner); 895 #endif 896 EVENTHANDLER_DEREGISTER(ifnet_departure_event, ifdetach_tag); 897 EVENTHANDLER_DEREGISTER(iflladdr_event, iflladdr_tag); 898 vlan_input_p = NULL; 899 vlan_link_state_p = NULL; 900 vlan_trunk_cap_p = NULL; 901 vlan_trunkdev_p = NULL; 902 vlan_tag_p = NULL; 903 vlan_cookie_p = NULL; 904 vlan_setcookie_p = NULL; 905 vlan_devat_p = NULL; 906 VLAN_LOCKING_DESTROY(); 907 if (bootverbose) 908 printf("vlan: unloaded\n"); 909 break; 910 default: 911 return (EOPNOTSUPP); 912 } 913 return (0); 914 } 915 916 static moduledata_t vlan_mod = { 917 "if_vlan", 918 vlan_modevent, 919 0 920 }; 921 922 DECLARE_MODULE(if_vlan, vlan_mod, SI_SUB_PSEUDO, SI_ORDER_ANY); 923 MODULE_VERSION(if_vlan, 3); 924 925 #ifdef VIMAGE 926 static void 927 vnet_vlan_init(const void *unused __unused) 928 { 929 930 vlan_cloner = if_clone_advanced(vlanname, 0, vlan_clone_match, 931 vlan_clone_create, vlan_clone_destroy); 932 V_vlan_cloner = vlan_cloner; 933 } 934 VNET_SYSINIT(vnet_vlan_init, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY, 935 vnet_vlan_init, NULL); 936 937 static void 938 vnet_vlan_uninit(const void *unused __unused) 939 { 940 941 if_clone_detach(V_vlan_cloner); 942 } 943 VNET_SYSUNINIT(vnet_vlan_uninit, SI_SUB_INIT_IF, SI_ORDER_FIRST, 944 vnet_vlan_uninit, NULL); 945 #endif 946 947 /* 948 * Check for <etherif>.<vlan> style interface names. 949 */ 950 static struct ifnet * 951 vlan_clone_match_ethervid(const char *name, int *vidp) 952 { 953 char ifname[IFNAMSIZ]; 954 char *cp; 955 struct ifnet *ifp; 956 int vid; 957 958 strlcpy(ifname, name, IFNAMSIZ); 959 if ((cp = strchr(ifname, '.')) == NULL) 960 return (NULL); 961 *cp = '\0'; 962 if ((ifp = ifunit_ref(ifname)) == NULL) 963 return (NULL); 964 /* Parse VID. 
*/ 965 if (*++cp == '\0') { 966 if_rele(ifp); 967 return (NULL); 968 } 969 vid = 0; 970 for(; *cp >= '0' && *cp <= '9'; cp++) 971 vid = (vid * 10) + (*cp - '0'); 972 if (*cp != '\0') { 973 if_rele(ifp); 974 return (NULL); 975 } 976 if (vidp != NULL) 977 *vidp = vid; 978 979 return (ifp); 980 } 981 982 static int 983 vlan_clone_match(struct if_clone *ifc, const char *name) 984 { 985 const char *cp; 986 987 if (vlan_clone_match_ethervid(name, NULL) != NULL) 988 return (1); 989 990 if (strncmp(vlanname, name, strlen(vlanname)) != 0) 991 return (0); 992 for (cp = name + 4; *cp != '\0'; cp++) { 993 if (*cp < '0' || *cp > '9') 994 return (0); 995 } 996 997 return (1); 998 } 999 1000 static int 1001 vlan_clone_create(struct if_clone *ifc, char *name, size_t len, caddr_t params) 1002 { 1003 char *dp; 1004 int wildcard; 1005 int unit; 1006 int error; 1007 int vid; 1008 struct ifvlan *ifv; 1009 struct ifnet *ifp; 1010 struct ifnet *p; 1011 struct ifaddr *ifa; 1012 struct sockaddr_dl *sdl; 1013 struct vlanreq vlr; 1014 static const u_char eaddr[ETHER_ADDR_LEN]; /* 00:00:00:00:00:00 */ 1015 1016 /* 1017 * There are 3 (ugh) ways to specify the cloned device: 1018 * o pass a parameter block with the clone request. 1019 * o specify parameters in the text of the clone device name 1020 * o specify no parameters and get an unattached device that 1021 * must be configured separately. 1022 * The first technique is preferred; the latter two are 1023 * supported for backwards compatibility. 1024 * 1025 * XXXRW: Note historic use of the word "tag" here. New ioctls may be 1026 * called for. 
	 */
	if (params) {
		/* Parameter block supplied by the clone request. */
		error = copyin(params, &vlr, sizeof(vlr));
		if (error)
			return error;
		p = ifunit_ref(vlr.vlr_parent);
		if (p == NULL)
			return (ENXIO);
		error = ifc_name2unit(name, &unit);
		if (error != 0) {
			if_rele(p);
			return (error);
		}
		vid = vlr.vlr_tag;
		wildcard = (unit < 0);
	} else if ((p = vlan_clone_match_ethervid(name, &vid)) != NULL) {
		/* "<etherif>.<vid>" style name; parent and VID come from it. */
		unit = -1;
		wildcard = 0;
	} else {
		/* Bare "vlan<unit>" / "vlan": create unattached. */
		p = NULL;
		error = ifc_name2unit(name, &unit);
		if (error != 0)
			return (error);

		wildcard = (unit < 0);
	}

	error = ifc_alloc_unit(ifc, &unit);
	if (error != 0) {
		if (p != NULL)
			if_rele(p);
		return (error);
	}

	/* In the wildcard case, we need to update the name. */
	if (wildcard) {
		for (dp = name; *dp != '\0'; dp++);
		if (snprintf(dp, len - (dp-name), "%d", unit) >
		    len - (dp-name) - 1) {
			panic("%s: interface name too long", __func__);
		}
	}

	ifv = malloc(sizeof(struct ifvlan), M_VLAN, M_WAITOK | M_ZERO);
	ifp = ifv->ifv_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		/* Undo the unit allocation before bailing. */
		ifc_free_unit(ifc, unit);
		free(ifv, M_VLAN);
		if (p != NULL)
			if_rele(p);
		return (ENOSPC);
	}
	SLIST_INIT(&ifv->vlan_mc_listhead);
	ifp->if_softc = ifv;
	/*
	 * Set the name manually rather than using if_initname because
	 * we don't conform to the default naming convention for interfaces.
	 */
	strlcpy(ifp->if_xname, name, IFNAMSIZ);
	ifp->if_dname = vlanname;
	ifp->if_dunit = unit;
	/* NB: flags are not set here */
	ifp->if_linkmib = &ifv->ifv_mib;
	ifp->if_linkmiblen = sizeof(ifv->ifv_mib);
	/* NB: mtu is not set here */

	ifp->if_init = vlan_init;
	ifp->if_transmit = vlan_transmit;
	ifp->if_qflush = vlan_qflush;
	ifp->if_ioctl = vlan_ioctl;
#ifdef RATELIMIT
	ifp->if_snd_tag_alloc = vlan_snd_tag_alloc;
#endif
	ifp->if_flags = VLAN_IFFLAGS;
	/* Attach as an ordinary ethernet with an all-zero lladdr... */
	ether_ifattach(ifp, eaddr);
	/* Now undo some of the damage... */
	ifp->if_baudrate = 0;
	ifp->if_type = IFT_L2VLAN;
	ifp->if_hdrlen = ETHER_VLAN_ENCAP_LEN;
	ifa = ifp->if_addr;
	sdl = (struct sockaddr_dl *)ifa->ifa_addr;
	sdl->sdl_type = IFT_L2VLAN;

	if (p != NULL) {
		error = vlan_config(ifv, p, vid);
		if_rele(p);
		if (error != 0) {
			/*
			 * Since we've partially failed, we need to back
			 * out all the way, otherwise userland could get
			 * confused.  Thus, we destroy the interface.
			 */
			ether_ifdetach(ifp);
			vlan_unconfig(ifp);
			if_free(ifp);
			ifc_free_unit(ifc, unit);
			free(ifv, M_VLAN);

			return (error);
		}
	}

	return (0);
}

static int
vlan_clone_destroy(struct if_clone *ifc, struct ifnet *ifp)
{
	struct ifvlan *ifv = ifp->if_softc;
	int unit = ifp->if_dunit;

	ether_ifdetach(ifp);	/* first, remove it from system-wide lists */
	vlan_unconfig(ifp);	/* now it can be unconfigured and freed */
	/*
	 * We should have the only reference to the ifv now, so we can now
	 * drain any remaining lladdr task before freeing the ifnet and the
	 * ifvlan.
	 */
	taskqueue_drain(taskqueue_thread, &ifv->lladdr_task);
	if_free(ifp);
	free(ifv, M_VLAN);
	ifc_free_unit(ifc, unit);

	return (0);
}

/*
 * The ifp->if_init entry point for vlan(4) is a no-op.
 */
static void
vlan_init(void *foo __unused)
{
}

/*
 * The if_transmit method for vlan(4) interface.
 *
 * Tags the frame for our VID/PCP (via ether_8021q_frame()) and hands it
 * to the parent's if_transmit; holds the VLAN rmlock read-locked for the
 * duration so the trunk/parent cannot be unconfigured underneath us.
 */
static int
vlan_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct ifvlan *ifv;
	struct ifnet *p;
	int error, len, mcast;
	VLAN_LOCK_READER;

	VLAN_RLOCK();
	ifv = ifp->if_softc;
	if (TRUNK(ifv) == NULL) {
		/* Not attached to a parent: count the error and drop. */
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		VLAN_RUNLOCK();
		m_freem(m);
		return (ENETDOWN);
	}
	p = PARENT(ifv);
	/* Snapshot length/mcast before the mbuf may be rewritten/freed. */
	len = m->m_pkthdr.len;
	mcast = (m->m_flags & (M_MCAST | M_BCAST)) ? 1 : 0;

	BPF_MTAP(ifp, m);

	/*
	 * Do not run parent's if_transmit() if the parent is not up,
	 * or parent's driver will cause a system crash.
	 */
	if (!UP_AND_RUNNING(p)) {
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		VLAN_RUNLOCK();
		m_freem(m);
		return (ENETDOWN);
	}

	/* NB: ether_8021q_frame() consumes the mbuf on failure. */
	if (!ether_8021q_frame(&m, ifp, p, ifv->ifv_vid, ifv->ifv_pcp)) {
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		VLAN_RUNLOCK();
		return (0);
	}

	/*
	 * Send it, precisely as ether_output() would have.
	 */
	error = (p->if_transmit)(p, m);
	if (error == 0) {
		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		if_inc_counter(ifp, IFCOUNTER_OBYTES, len);
		if_inc_counter(ifp, IFCOUNTER_OMCASTS, mcast);
	} else
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	VLAN_RUNLOCK();
	return (error);
}

/*
 * The ifp->if_qflush entry point for vlan(4) is a no-op.
 */
static void
vlan_qflush(struct ifnet *ifp __unused)
{
}

/*
 * Demultiplex an incoming tagged frame from the parent onto the matching
 * vlan(4) interface.  Accepts either an out-of-band tag (M_VLANTAG) or an
 * in-band 802.1q header, which is stripped here.
 */
static void
vlan_input(struct ifnet *ifp, struct mbuf *m)
{
	struct ifvlantrunk *trunk;
	struct ifvlan *ifv;
	VLAN_LOCK_READER;
	TRUNK_LOCK_READER;
	struct m_tag *mtag;
	uint16_t vid, tag;

	VLAN_RLOCK();
	trunk = ifp->if_vlantrunk;
	if (trunk == NULL) {
		/* Parent has no vlans configured; drop. */
		VLAN_RUNLOCK();
		m_freem(m);
		return;
	}

	if (m->m_flags & M_VLANTAG) {
		/*
		 * Packet is tagged, but m contains a normal
		 * Ethernet frame; the tag is stored out-of-band.
		 */
		tag = m->m_pkthdr.ether_vtag;
		m->m_flags &= ~M_VLANTAG;
	} else {
		struct ether_vlan_header *evl;

		/*
		 * Packet is tagged in-band as specified by 802.1q.
		 */
		switch (ifp->if_type) {
		case IFT_ETHER:
			if (m->m_len < sizeof(*evl) &&
			    (m = m_pullup(m, sizeof(*evl))) == NULL) {
				/* m_pullup() freed the chain on failure. */
				if_printf(ifp, "cannot pullup VLAN header\n");
				VLAN_RUNLOCK();
				return;
			}
			evl = mtod(m, struct ether_vlan_header *);
			tag = ntohs(evl->evl_tag);

			/*
			 * Remove the 802.1q header by copying the Ethernet
			 * addresses over it and adjusting the beginning of
			 * the data in the mbuf.  The encapsulated Ethernet
			 * type field is already in place.
			 */
			bcopy((char *)evl, (char *)evl + ETHER_VLAN_ENCAP_LEN,
			    ETHER_HDR_LEN - ETHER_TYPE_LEN);
			m_adj(m, ETHER_VLAN_ENCAP_LEN);
			break;

		default:
#ifdef INVARIANTS
			panic("%s: %s has unsupported if_type %u",
			    __func__, ifp->if_xname, ifp->if_type);
#endif
			if_inc_counter(ifp, IFCOUNTER_NOPROTO, 1);
			VLAN_RUNLOCK();
			m_freem(m);
			return;
		}
	}

	vid = EVL_VLANOFTAG(tag);

	/* Trunk read lock only spans the hash lookup. */
	TRUNK_RLOCK(trunk);
	ifv = vlan_gethash(trunk, vid);
	if (ifv == NULL || !UP_AND_RUNNING(ifv->ifv_ifp)) {
		TRUNK_RUNLOCK(trunk);
		if_inc_counter(ifp, IFCOUNTER_NOPROTO, 1);
		VLAN_RUNLOCK();
		m_freem(m);
		return;
	}
	TRUNK_RUNLOCK(trunk);

	if (vlan_mtag_pcp) {
		/*
		 * While uncommon, it is possible that we will find a 802.1q
		 * packet encapsulated inside another packet that also had an
		 * 802.1q header.  For example, ethernet tunneled over IPSEC
		 * arriving over ethernet.  In that case, we replace the
		 * existing 802.1q PCP m_tag value.
		 */
		mtag = m_tag_locate(m, MTAG_8021Q, MTAG_8021Q_PCP_IN, NULL);
		if (mtag == NULL) {
			mtag = m_tag_alloc(MTAG_8021Q, MTAG_8021Q_PCP_IN,
			    sizeof(uint8_t), M_NOWAIT);
			if (mtag == NULL) {
				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
				VLAN_RUNLOCK();
				m_freem(m);
				return;
			}
			m_tag_prepend(m, mtag);
		}
		/* Tag payload is the received frame's priority bits. */
		*(uint8_t *)(mtag + 1) = EVL_PRIOFTAG(tag);
	}

	m->m_pkthdr.rcvif = ifv->ifv_ifp;
	if_inc_counter(ifv->ifv_ifp, IFCOUNTER_IPACKETS, 1);
	VLAN_RUNLOCK();

	/* Pass it back through the parent's input routine. */
	(*ifv->ifv_ifp->if_input)(ifv->ifv_ifp, m);
}

/*
 * Taskqueue callback: re-apply the link-level address after it has been
 * changed (see TASK_INIT in vlan_config()).
 */
static void
vlan_lladdr_fn(void *arg, int pending __unused)
{
	struct ifvlan *ifv;
	struct ifnet *ifp;

	ifv = (struct ifvlan *)arg;
	ifp = ifv->ifv_ifp;
	/* The ifv_ifp already has the lladdr copied in.
 */
	if_setlladdr(ifp, IF_LLADDR(ifp), ifp->if_addrlen);
}

/*
 * Attach ifv to parent interface p with 802.1q VID vid: create or reuse
 * the parent's trunk, insert ifv into its hash, and inherit parameters
 * (MTU, lladdr, capabilities, multicast memberships) from the parent.
 * Returns 0 or an errno.
 */
static int
vlan_config(struct ifvlan *ifv, struct ifnet *p, uint16_t vid)
{
	struct ifvlantrunk *trunk;
	struct ifnet *ifp;
	int error = 0;

	/*
	 * We can handle non-ethernet hardware types as long as
	 * they handle the tagging and headers themselves.
	 */
	if (p->if_type != IFT_ETHER &&
	    (p->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
		return (EPROTONOSUPPORT);
	if ((p->if_flags & VLAN_IFFLAGS) != VLAN_IFFLAGS)
		return (EPROTONOSUPPORT);
	/*
	 * Don't let the caller set up a VLAN VID with
	 * anything except VLID bits.
	 * VID numbers 0x0 and 0xFFF are reserved.
	 */
	if (vid == 0 || vid == 0xFFF || (vid & ~EVL_VLID_MASK))
		return (EINVAL);
	if (ifv->ifv_trunk)
		return (EBUSY);

	/* Acquire rmlock after the branch so we can M_WAITOK. */
	VLAN_XLOCK();
	if (p->if_vlantrunk == NULL) {
		/* First vlan on this parent: allocate its trunk. */
		trunk = malloc(sizeof(struct ifvlantrunk),
		    M_VLAN, M_WAITOK | M_ZERO);
		vlan_inithash(trunk);
		TRUNK_LOCK_INIT(trunk);
		VLAN_WLOCK();
		TRUNK_WLOCK(trunk);
		p->if_vlantrunk = trunk;
		trunk->parent = p;
		if_ref(trunk->parent);
	} else {
		VLAN_WLOCK();
		trunk = p->if_vlantrunk;
		TRUNK_WLOCK(trunk);
	}

	ifv->ifv_vid = vid;	/* must set this before vlan_inshash() */
	ifv->ifv_pcp = 0;       /* Default: best effort delivery. */
	vlan_tag_recalculate(ifv);
	error = vlan_inshash(trunk, ifv);
	if (error)
		goto done;
	ifv->ifv_proto = ETHERTYPE_VLAN;
	ifv->ifv_encaplen = ETHER_VLAN_ENCAP_LEN;
	ifv->ifv_mintu = ETHERMIN;
	ifv->ifv_pflags = 0;
	ifv->ifv_capenable = -1;

	/*
	 * If the parent supports the VLAN_MTU capability,
	 * i.e. can Tx/Rx larger than ETHER_MAX_LEN frames,
	 * use it.
	 */
	if (p->if_capenable & IFCAP_VLAN_MTU) {
		/*
		 * No need to fudge the MTU since the parent can
		 * handle extended frames.
		 */
		ifv->ifv_mtufudge = 0;
	} else {
		/*
		 * Fudge the MTU by the encapsulation size.  This
		 * makes us incompatible with strictly compliant
		 * 802.1Q implementations, but allows us to use
		 * the feature with other NetBSD implementations,
		 * which might still be useful.
		 */
		ifv->ifv_mtufudge = ifv->ifv_encaplen;
	}

	ifv->ifv_trunk = trunk;
	ifp = ifv->ifv_ifp;
	/*
	 * Initialize fields from our parent.  This duplicates some
	 * work with ether_ifattach() but allows for non-ethernet
	 * interfaces to also work.
	 */
	ifp->if_mtu = p->if_mtu - ifv->ifv_mtufudge;
	ifp->if_baudrate = p->if_baudrate;
	ifp->if_output = p->if_output;
	ifp->if_input = p->if_input;
	ifp->if_resolvemulti = p->if_resolvemulti;
	ifp->if_addrlen = p->if_addrlen;
	ifp->if_broadcastaddr = p->if_broadcastaddr;
	ifp->if_pcp = ifv->ifv_pcp;

	/*
	 * Copy only a selected subset of flags from the parent.
	 * Other flags are none of our business.
	 */
#define VLAN_COPY_FLAGS (IFF_SIMPLEX)
	ifp->if_flags &= ~VLAN_COPY_FLAGS;
	ifp->if_flags |= p->if_flags & VLAN_COPY_FLAGS;
#undef VLAN_COPY_FLAGS

	ifp->if_link_state = p->if_link_state;

	vlan_capabilities(ifv);

	/*
	 * Set up our interface address to reflect the underlying
	 * physical interface's.
	 */
	bcopy(IF_LLADDR(p), IF_LLADDR(ifp), p->if_addrlen);
	((struct sockaddr_dl *)ifp->if_addr->ifa_addr)->sdl_alen =
	    p->if_addrlen;

	/*
	 * Configure multicast addresses that may already be
	 * joined on the vlan device.
	 */
	(void)vlan_setmulti(ifp);

	TASK_INIT(&ifv->lladdr_task, 0, vlan_lladdr_fn, ifv);

	/* We are ready for operation now.
 */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;

	/* Update flags on the parent, if necessary. */
	vlan_setflags(ifp, 1);
done:
	/*
	 * We need to drop the non-sleepable rmlock so that the underlying
	 * devices can sleep in their vlan_config hooks.
	 */
	TRUNK_WUNLOCK(trunk);
	VLAN_WUNLOCK();
	if (error == 0)
		EVENTHANDLER_INVOKE(vlan_config, p, ifv->ifv_vid);
	VLAN_XUNLOCK();

	return (error);
}

/* Unlocked wrapper around vlan_unconfig_locked(). */
static void
vlan_unconfig(struct ifnet *ifp)
{

	VLAN_XLOCK();
	vlan_unconfig_locked(ifp, 0);
	VLAN_XUNLOCK();
}

/*
 * Detach ifp from its parent trunk.  'departing' is non-zero when the
 * parent interface itself is going away (see the multicast handling
 * below).  Caller holds the VLAN sx lock exclusively.
 */
static void
vlan_unconfig_locked(struct ifnet *ifp, int departing)
{
	struct ifvlantrunk *trunk;
	struct vlan_mc_entry *mc;
	struct ifvlan *ifv;
	struct ifnet *parent;
	int error;

	VLAN_XLOCK_ASSERT();

	ifv = ifp->if_softc;
	trunk = ifv->ifv_trunk;
	parent = NULL;

	if (trunk != NULL) {
		/*
		 * Both vlan_transmit and vlan_input rely on the trunk fields
		 * being NULL to determine whether to bail, so we need to get
		 * an exclusive lock here to prevent them from using bad
		 * ifvlans.
		 */
		VLAN_WLOCK();
		parent = trunk->parent;

		/*
		 * Since the interface is being unconfigured, we need to
		 * empty the list of multicast groups that we may have joined
		 * while we were alive from the parent's list.
		 */
		while ((mc = SLIST_FIRST(&ifv->vlan_mc_listhead)) != NULL) {
			/*
			 * If the parent interface is being detached,
			 * all its multicast addresses have already
			 * been removed.  Warn about errors if
			 * if_delmulti() does fail, but don't abort as
			 * all callers expect vlan destruction to
			 * succeed.
			 */
			if (!departing) {
				error = if_delmulti(parent,
				    (struct sockaddr *)&mc->mc_addr);
				if (error)
					if_printf(ifp,
		"Failed to delete multicast address from parent: %d\n",
					    error);
			}
			SLIST_REMOVE_HEAD(&ifv->vlan_mc_listhead, mc_entries);
			free(mc, M_VLAN);
		}

		vlan_setflags(ifp, 0); /* clear special flags on parent */

		/*
		 * The trunk lock isn't actually required here, but
		 * vlan_remhash expects it.
		 */
		TRUNK_WLOCK(trunk);
		vlan_remhash(trunk, ifv);
		TRUNK_WUNLOCK(trunk);
		ifv->ifv_trunk = NULL;

		/*
		 * Check if we were the last.
		 */
		if (trunk->refcnt == 0) {
			parent->if_vlantrunk = NULL;
			trunk_destroy(trunk);
		}
		VLAN_WUNLOCK();
	}

	/* Disconnect from parent. */
	if (ifv->ifv_pflags)
		if_printf(ifp, "%s: ifv_pflags unclean\n", __func__);
	ifp->if_mtu = ETHERMTU;
	ifp->if_link_state = LINK_STATE_UNKNOWN;
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;

	/*
	 * Only dispatch an event if vlan was
	 * attached, otherwise there is nothing
	 * to cleanup anyway.
	 */
	if (parent != NULL)
		EVENTHANDLER_INVOKE(vlan_unconfig, parent, ifv->ifv_vid);
}

/* Handle a reference counted flag that should be set on the parent as well */
static int
vlan_setflag(struct ifnet *ifp, int flag, int status,
	     int (*func)(struct ifnet *, int))
{
	struct ifvlan *ifv;
	int error;

	VLAN_SXLOCK_ASSERT();

	ifv = ifp->if_softc;
	status = status ? (ifp->if_flags & flag) : 0;
	/* Now "status" contains the flag value or 0 */

	/*
	 * See if recorded parent's status is different from what
	 * we want it to be.  If it is, flip it.  We record parent's
	 * status in ifv_pflags so that we won't clear parent's flag
	 * we haven't set.  In fact, we don't clear or set parent's
	 * flags directly, but get or release references to them.
	 * That's why we can be sure that recorded flags still are
	 * in accord with actual parent's flags.
	 */
	if (status != (ifv->ifv_pflags & flag)) {
		error = (*func)(PARENT(ifv), status);
		if (error)
			return (error);
		ifv->ifv_pflags &= ~flag;
		ifv->ifv_pflags |= status;
	}
	return (0);
}

/*
 * Handle IFF_* flags that require certain changes on the parent:
 * if "status" is true, update parent's flags respective to our if_flags;
 * if "status" is false, forcedly clear the flags set on parent.
 */
static int
vlan_setflags(struct ifnet *ifp, int status)
{
	int error, i;

	/* vlan_pflags[] is a sentinel-terminated table of flag/func pairs. */
	for (i = 0; vlan_pflags[i].flag; i++) {
		error = vlan_setflag(ifp, vlan_pflags[i].flag,
				     status, vlan_pflags[i].func);
		if (error)
			return (error);
	}
	return (0);
}

/* Inform all vlans that their parent has changed link state */
static void
vlan_link_state(struct ifnet *ifp)
{
	struct ifvlantrunk *trunk;
	struct ifvlan *ifv;
	VLAN_LOCK_READER;

	/* Called from a taskqueue_swi task, so we cannot sleep. */
	VLAN_RLOCK();
	trunk = ifp->if_vlantrunk;
	if (trunk == NULL) {
		VLAN_RUNLOCK();
		return;
	}

	TRUNK_WLOCK(trunk);
	VLAN_FOREACH(ifv, trunk) {
		/* Mirror the parent's baudrate and link state. */
		ifv->ifv_ifp->if_baudrate = trunk->parent->if_baudrate;
		if_link_state_change(ifv->ifv_ifp,
		    trunk->parent->if_link_state);
	}
	TRUNK_WUNLOCK(trunk);
	VLAN_RUNLOCK();
}

/*
 * Recompute this vlan interface's capabilities, enabled capabilities and
 * hwassist bits from its parent's.  Called with the sx lock held and the
 * trunk write-locked (asserted below).
 */
static void
vlan_capabilities(struct ifvlan *ifv)
{
	struct ifnet *p;
	struct ifnet *ifp;
	struct ifnet_hw_tsomax hw_tsomax;
	int cap = 0, ena = 0, mena;
	u_long hwa = 0;

	VLAN_SXLOCK_ASSERT();
	TRUNK_WLOCK_ASSERT(TRUNK(ifv));
	p = PARENT(ifv);
	ifp = ifv->ifv_ifp;

	/* Mask parent interface enabled capabilities disabled by user.
 */
	mena = p->if_capenable & ifv->ifv_capenable;

	/*
	 * If the parent interface can do checksum offloading
	 * on VLANs, then propagate its hardware-assisted
	 * checksumming flags. Also assert that checksum
	 * offloading requires hardware VLAN tagging.
	 */
	if (p->if_capabilities & IFCAP_VLAN_HWCSUM)
		cap |= p->if_capabilities & (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6);
	if (p->if_capenable & IFCAP_VLAN_HWCSUM &&
	    p->if_capenable & IFCAP_VLAN_HWTAGGING) {
		ena |= mena & (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6);
		if (ena & IFCAP_TXCSUM)
			hwa |= p->if_hwassist & (CSUM_IP | CSUM_TCP |
			    CSUM_UDP | CSUM_SCTP);
		if (ena & IFCAP_TXCSUM_IPV6)
			hwa |= p->if_hwassist & (CSUM_TCP_IPV6 |
			    CSUM_UDP_IPV6 | CSUM_SCTP_IPV6);
	}

	/*
	 * If the parent interface can do TSO on VLANs then
	 * propagate the hardware-assisted flag. TSO on VLANs
	 * does not necessarily require hardware VLAN tagging.
	 */
	memset(&hw_tsomax, 0, sizeof(hw_tsomax));
	/* Inherit the parent's TSO limits. */
	if_hw_tsomax_common(p, &hw_tsomax);
	if_hw_tsomax_update(ifp, &hw_tsomax);
	if (p->if_capabilities & IFCAP_VLAN_HWTSO)
		cap |= p->if_capabilities & IFCAP_TSO;
	if (p->if_capenable & IFCAP_VLAN_HWTSO) {
		ena |= mena & IFCAP_TSO;
		if (ena & IFCAP_TSO)
			hwa |= p->if_hwassist & CSUM_TSO;
	}

	/*
	 * If the parent interface can do LRO and checksum offloading on
	 * VLANs, then guess it may do LRO on VLANs.  False positive here
	 * cost nothing, while false negative may lead to some confusions.
	 */
	if (p->if_capabilities & IFCAP_VLAN_HWCSUM)
		cap |= p->if_capabilities & IFCAP_LRO;
	if (p->if_capenable & IFCAP_VLAN_HWCSUM)
		ena |= p->if_capenable & IFCAP_LRO;

	/*
	 * If the parent interface can offload TCP connections over VLANs then
	 * propagate its TOE capability to the VLAN interface.
	 *
	 * All TOE drivers in the tree today can deal with VLANs.
 If this
	 * changes then IFCAP_VLAN_TOE should be promoted to a full capability
	 * with its own bit.
	 */
#define	IFCAP_VLAN_TOE IFCAP_TOE
	if (p->if_capabilities & IFCAP_VLAN_TOE)
		cap |= p->if_capabilities & IFCAP_TOE;
	if (p->if_capenable & IFCAP_VLAN_TOE) {
		TOEDEV(ifp) = TOEDEV(p);
		ena |= mena & IFCAP_TOE;
	}

	/*
	 * If the parent interface supports dynamic link state, so does the
	 * VLAN interface.
	 */
	cap |= (p->if_capabilities & IFCAP_LINKSTATE);
	ena |= (mena & IFCAP_LINKSTATE);

#ifdef RATELIMIT
	/*
	 * If the parent interface supports ratelimiting, so does the
	 * VLAN interface.
	 */
	cap |= (p->if_capabilities & IFCAP_TXRTLMT);
	ena |= (mena & IFCAP_TXRTLMT);
#endif

	ifp->if_capabilities = cap;
	ifp->if_capenable = ena;
	ifp->if_hwassist = hwa;
}

/* Recompute capabilities for every vlan hanging off this parent. */
static void
vlan_trunk_capabilities(struct ifnet *ifp)
{
	struct ifvlantrunk *trunk;
	struct ifvlan *ifv;

	VLAN_SLOCK();
	trunk = ifp->if_vlantrunk;
	if (trunk == NULL) {
		VLAN_SUNLOCK();
		return;
	}
	TRUNK_WLOCK(trunk);
	VLAN_FOREACH(ifv, trunk) {
		vlan_capabilities(ifv);
	}
	TRUNK_WUNLOCK(trunk);
	VLAN_SUNLOCK();
}

/* The if_ioctl entry point for vlan(4). */
static int
vlan_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct ifnet *p;
	struct ifreq *ifr;
	struct ifaddr *ifa;
	struct ifvlan *ifv;
	struct ifvlantrunk *trunk;
	struct vlanreq vlr;
	int error = 0;
	VLAN_LOCK_READER;

	ifr = (struct ifreq *)data;
	ifa = (struct ifaddr *) data;
	ifv = ifp->if_softc;

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(ifp, ifa);
#endif
		break;
	case SIOCGIFADDR:
		bcopy(IF_LLADDR(ifp), &ifr->ifr_addr.sa_data[0],
		    ifp->if_addrlen);
		break;
	case
 SIOCGIFMEDIA:
		VLAN_SLOCK();
		if (TRUNK(ifv) != NULL) {
			/* Forward the media query to the parent. */
			p = PARENT(ifv);
			if_ref(p);
			error = (*p->if_ioctl)(p, SIOCGIFMEDIA, data);
			if_rele(p);
			/* Limit the result to the parent's current config. */
			if (error == 0) {
				struct ifmediareq *ifmr;

				ifmr = (struct ifmediareq *)data;
				if (ifmr->ifm_count >= 1 && ifmr->ifm_ulist) {
					ifmr->ifm_count = 1;
					error = copyout(&ifmr->ifm_current,
					    ifmr->ifm_ulist,
					    sizeof(int));
				}
			}
		} else {
			error = EINVAL;
		}
		VLAN_SUNLOCK();
		break;

	case SIOCSIFMEDIA:
		error = EINVAL;
		break;

	case SIOCSIFMTU:
		/*
		 * Set the interface MTU.
		 * Bounded by parent MTU minus the encapsulation fudge on
		 * the high side, and our minimum frame size on the low side.
		 */
		VLAN_SLOCK();
		trunk = TRUNK(ifv);
		if (trunk != NULL) {
			TRUNK_WLOCK(trunk);
			if (ifr->ifr_mtu >
			    (PARENT(ifv)->if_mtu - ifv->ifv_mtufudge) ||
			    ifr->ifr_mtu <
			    (ifv->ifv_mintu - ifv->ifv_mtufudge))
				error = EINVAL;
			else
				ifp->if_mtu = ifr->ifr_mtu;
			TRUNK_WUNLOCK(trunk);
		} else
			error = EINVAL;
		VLAN_SUNLOCK();
		break;

	case SIOCSETVLAN:
#ifdef VIMAGE
		/*
		 * XXXRW/XXXBZ: The goal in these checks is to allow a VLAN
		 * interface to be delegated to a jail without allowing the
		 * jail to change what underlying interface/VID it is
		 * associated with.  We are not entirely convinced that this
		 * is the right way to accomplish that policy goal.
	 */
		if (ifp->if_vnet != ifp->if_home_vnet) {
			error = EPERM;
			break;
		}
#endif
		error = copyin(ifr_data_get_ptr(ifr), &vlr, sizeof(vlr));
		if (error)
			break;
		if (vlr.vlr_parent[0] == '\0') {
			/* Empty parent name means detach from the trunk. */
			vlan_unconfig(ifp);
			break;
		}
		p = ifunit_ref(vlr.vlr_parent);
		if (p == NULL) {
			error = ENOENT;
			break;
		}
		error = vlan_config(ifv, p, vlr.vlr_tag);
		if_rele(p);
		break;

	case SIOCGETVLAN:
#ifdef VIMAGE
		if (ifp->if_vnet != ifp->if_home_vnet) {
			error = EPERM;
			break;
		}
#endif
		bzero(&vlr, sizeof(vlr));
		VLAN_SLOCK();
		if (TRUNK(ifv) != NULL) {
			strlcpy(vlr.vlr_parent, PARENT(ifv)->if_xname,
			    sizeof(vlr.vlr_parent));
			vlr.vlr_tag = ifv->ifv_vid;
		}
		VLAN_SUNLOCK();
		error = copyout(&vlr, ifr_data_get_ptr(ifr), sizeof(vlr));
		break;

	case SIOCSIFFLAGS:
		/*
		 * We should propagate selected flags to the parent,
		 * e.g., promiscuous mode.
		 */
		VLAN_XLOCK();
		if (TRUNK(ifv) != NULL)
			error = vlan_setflags(ifp, 1);
		VLAN_XUNLOCK();
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/*
		 * If we don't have a parent, just remember the membership for
		 * when we do.
		 *
		 * XXX We need the rmlock here to avoid sleeping while
		 * holding in6_multi_mtx.
	 */
		VLAN_RLOCK();
		trunk = TRUNK(ifv);
		if (trunk != NULL) {
			TRUNK_WLOCK(trunk);
			error = vlan_setmulti(ifp);
			TRUNK_WUNLOCK(trunk);
		}
		VLAN_RUNLOCK();
		break;

	case SIOCGVLANPCP:
#ifdef VIMAGE
		if (ifp->if_vnet != ifp->if_home_vnet) {
			error = EPERM;
			break;
		}
#endif
		ifr->ifr_vlan_pcp = ifv->ifv_pcp;
		break;

	case SIOCSVLANPCP:
#ifdef VIMAGE
		if (ifp->if_vnet != ifp->if_home_vnet) {
			error = EPERM;
			break;
		}
#endif
		error = priv_check(curthread, PRIV_NET_SETVLANPCP);
		if (error)
			break;
		/* 802.1q priority is a 3-bit field. */
		if (ifr->ifr_vlan_pcp > 7) {
			error = EINVAL;
			break;
		}
		ifv->ifv_pcp = ifr->ifr_vlan_pcp;
		ifp->if_pcp = ifv->ifv_pcp;
		vlan_tag_recalculate(ifv);
		/* broadcast event about PCP change */
		EVENTHANDLER_INVOKE(ifnet_event, ifp, IFNET_EVENT_PCP);
		break;

	case SIOCSIFCAP:
		VLAN_SLOCK();
		ifv->ifv_capenable = ifr->ifr_reqcap;
		trunk = TRUNK(ifv);
		if (trunk != NULL) {
			/* Re-derive our capabilities from the parent's. */
			TRUNK_WLOCK(trunk);
			vlan_capabilities(ifv);
			TRUNK_WUNLOCK(trunk);
		}
		VLAN_SUNLOCK();
		break;

	default:
		error = EINVAL;
		break;
	}

	return (error);
}

#ifdef RATELIMIT
/*
 * Forward send-tag allocation to the trunk (parent) device, which owns
 * the actual rate-limiting hardware.
 */
static int
vlan_snd_tag_alloc(struct ifnet *ifp,
    union if_snd_tag_alloc_params *params,
    struct m_snd_tag **ppmt)
{

	/* get trunk device */
	ifp = vlan_trunkdev(ifp);
	if (ifp == NULL || (ifp->if_capenable & IFCAP_TXRTLMT) == 0)
		return (EOPNOTSUPP);
	/* forward allocation request */
	return (ifp->if_snd_tag_alloc(ifp, params, ppmt));
}
#endif