/*-
 * Copyright 1998 Massachusetts Institute of Technology
 * Copyright 2012 ADARA Networks, Inc.
 * Copyright 2017 Dell EMC Isilon
 *
 * Portions of this software were developed by Robert N. M. Watson under
 * contract to ADARA Networks, Inc.
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * if_vlan.c - pseudo-device driver for IEEE 802.1Q virtual LANs.
 * This is sort of sneaky in the implementation, since
 * we need to pretend to be enough of an Ethernet implementation
 * to make arp work.  The way we do this is by telling everyone
 * that we are an Ethernet, and then catch the packets that
 * ether_output() sends to us via if_transmit(), rewrite them for
 * use by the real outgoing interface, and ask it to send them.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_kern_tls.h"
#include "opt_vlan.h"
#include "opt_ratelimit.h"

#include <sys/param.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rmlock.h>
#include <sys/priv.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/sx.h>
#include <sys/taskqueue.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_clone.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <net/route.h>
#include <net/vnet.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#ifdef INET6
/*
 * XXX: declare here to avoid including many inet6 related files..
 * should be more generalized?
 */
extern	void nd6_setmtu(struct ifnet *);
#endif

#define	VLAN_DEF_HWIDTH	4
#define	VLAN_IFFLAGS	(IFF_BROADCAST | IFF_MULTICAST)

#define	UP_AND_RUNNING(ifp) \
    ((ifp)->if_flags & IFF_UP && (ifp)->if_drv_flags & IFF_DRV_RUNNING)

CK_SLIST_HEAD(ifvlanhead, ifvlan);

struct ifvlantrunk {
	struct	ifnet	*parent;	/* parent interface of this trunk */
	struct	mtx	lock;
#ifdef VLAN_ARRAY
#define	VLAN_ARRAY_SIZE	(EVL_VLID_MASK + 1)
	struct	ifvlan	*vlans[VLAN_ARRAY_SIZE]; /* static table */
#else
	struct	ifvlanhead *hash;	/* dynamic hash-list table */
	uint16_t	hmask;
	uint16_t	hwidth;
#endif
	int		refcnt;
};

#if defined(KERN_TLS) || defined(RATELIMIT)
struct vlan_snd_tag {
	struct m_snd_tag com;
	struct m_snd_tag *tag;
};

static inline struct vlan_snd_tag *
mst_to_vst(struct m_snd_tag *mst)
{

	return (__containerof(mst, struct vlan_snd_tag, com));
}
#endif

/*
 * This macro provides a facility to iterate over every vlan on a trunk with
 * the assumption that none will be added/removed during iteration.
 */
#ifdef VLAN_ARRAY
#define VLAN_FOREACH(_ifv, _trunk) \
	size_t _i; \
	for (_i = 0; _i < VLAN_ARRAY_SIZE; _i++) \
		if (((_ifv) = (_trunk)->vlans[_i]) != NULL)
#else /* VLAN_ARRAY */
#define VLAN_FOREACH(_ifv, _trunk) \
	struct ifvlan *_next; \
	size_t _i; \
	for (_i = 0; _i < (1 << (_trunk)->hwidth); _i++) \
		CK_SLIST_FOREACH_SAFE((_ifv), &(_trunk)->hash[_i], ifv_list, _next)
#endif /* VLAN_ARRAY */

/*
 * This macro provides a facility to iterate over every vlan on a trunk while
 * also modifying the number of vlans on the trunk. The iteration continues
 * until some condition is met or there are no more vlans on the trunk.
 */
#ifdef VLAN_ARRAY
/* The VLAN_ARRAY case is simple -- just a for loop using the condition. */
#define VLAN_FOREACH_UNTIL_SAFE(_ifv, _trunk, _cond) \
	size_t _i; \
	for (_i = 0; !(_cond) && _i < VLAN_ARRAY_SIZE; _i++) \
		if (((_ifv) = (_trunk)->vlans[_i]))
#else /* VLAN_ARRAY */
/*
 * The hash table case is more complicated. We allow for the hash table to be
 * modified (i.e. vlans removed) while we are iterating over it. To allow for
 * this we must restart the iteration every time we "touch" something during
 * the iteration, since removal will resize the hash table and invalidate our
 * current position. If acting on the touched element causes the trunk to be
 * emptied, then iteration also stops.
 */
#define VLAN_FOREACH_UNTIL_SAFE(_ifv, _trunk, _cond) \
	size_t _i; \
	bool _touch = false; \
	for (_i = 0; \
	    !(_cond) && _i < (1 << (_trunk)->hwidth); \
	    _i = (_touch && ((_trunk) != NULL) ? \
		0 : _i + 1), _touch = false) \
		if (((_ifv) = CK_SLIST_FIRST(&(_trunk)->hash[_i])) != NULL && \
		    (_touch = true))
#endif /* VLAN_ARRAY */

struct vlan_mc_entry {
	struct sockaddr_dl		mc_addr;
	CK_SLIST_ENTRY(vlan_mc_entry)	mc_entries;
	struct epoch_context		mc_epoch_ctx;
};

struct ifvlan {
	struct	ifvlantrunk *ifv_trunk;
	struct	ifnet *ifv_ifp;
#define	TRUNK(ifv)	((ifv)->ifv_trunk)
#define	PARENT(ifv)	((ifv)->ifv_trunk->parent)
	void	*ifv_cookie;
	int	ifv_pflags;	/* special flags we have set on parent */
	int	ifv_capenable;
	int	ifv_encaplen;	/* encapsulation length */
	int	ifv_mtufudge;	/* MTU fudged by this much */
	int	ifv_mintu;	/* min transmission unit */
	uint16_t ifv_proto;	/* encapsulation ethertype */
	uint16_t ifv_tag;	/* tag to apply on packets leaving if */
	uint16_t ifv_vid;	/* VLAN ID */
	uint8_t	ifv_pcp;	/* Priority Code Point (PCP). */
	struct task lladdr_task;
	CK_SLIST_HEAD(, vlan_mc_entry) vlan_mc_listhead;
#ifndef VLAN_ARRAY
	CK_SLIST_ENTRY(ifvlan) ifv_list;
#endif
};

/* Special flags we should propagate to parent. */
static struct {
	int flag;
	int (*func)(struct ifnet *, int);
} vlan_pflags[] = {
	{IFF_PROMISC, ifpromisc},
	{IFF_ALLMULTI, if_allmulti},
	{0, NULL}
};

extern int vlan_mtag_pcp;

static const char vlanname[] = "vlan";
static MALLOC_DEFINE(M_VLAN, vlanname, "802.1Q Virtual LAN Interface");

static eventhandler_tag ifdetach_tag;
static eventhandler_tag iflladdr_tag;

/*
 * if_vlan uses two module-level synchronization primitives to allow concurrent
 * modification of vlan interfaces and (mostly) allow for vlans to be destroyed
 * while they are being used for tx/rx. To accomplish this in a way that has
 * acceptable performance and cooperation with other parts of the network stack
 * there is a non-sleepable epoch(9) and an sx(9).
 *
 * The performance-sensitive paths that warrant using the epoch(9) are
 * vlan_transmit and vlan_input. Both have to check for the vlan interface's
 * existence using if_vlantrunk, and being in the network tx/rx paths the use
 * of an epoch(9) gives a measurable improvement in performance.
 *
 * The reason for having an sx(9) is mostly because there are still areas that
 * must be sleepable and also have safe concurrent access to a vlan interface.
 * Since the sx(9) exists, it is used by default in most paths unless sleeping
 * is not permitted, or if it is not clear whether sleeping is permitted.
 */
#define _VLAN_SX_ID ifv_sx

static struct sx _VLAN_SX_ID;

#define VLAN_LOCKING_INIT() \
	sx_init(&_VLAN_SX_ID, "vlan_sx")

#define VLAN_LOCKING_DESTROY() \
	sx_destroy(&_VLAN_SX_ID)

#define	VLAN_SLOCK()			sx_slock(&_VLAN_SX_ID)
#define	VLAN_SUNLOCK()			sx_sunlock(&_VLAN_SX_ID)
#define	VLAN_XLOCK()			sx_xlock(&_VLAN_SX_ID)
#define	VLAN_XUNLOCK()			sx_xunlock(&_VLAN_SX_ID)
#define	VLAN_SLOCK_ASSERT()		sx_assert(&_VLAN_SX_ID, SA_SLOCKED)
#define	VLAN_XLOCK_ASSERT()		sx_assert(&_VLAN_SX_ID, SA_XLOCKED)
#define	VLAN_SXLOCK_ASSERT()		sx_assert(&_VLAN_SX_ID, SA_LOCKED)

/*
 * We also have a per-trunk mutex that should be acquired when changing
 * its state.
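 *
 * A rough sketch of how the two locks are combined elsewhere in this file
 * (see vlan_config() and the SIOCSIFMTU case in vlan_ioctl()); this is an
 * illustration of the ordering only, not an additional API:
 *
 *	VLAN_XLOCK();			(or VLAN_SLOCK() on read-mostly paths)
 *	TRUNK_WLOCK(trunk);
 *	... modify trunk state ...
 *	TRUNK_WUNLOCK(trunk);
 *	VLAN_XUNLOCK();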
 */
#define	TRUNK_LOCK_INIT(trunk)		mtx_init(&(trunk)->lock, vlanname, NULL, MTX_DEF)
#define	TRUNK_LOCK_DESTROY(trunk)	mtx_destroy(&(trunk)->lock)
#define	TRUNK_WLOCK(trunk)		mtx_lock(&(trunk)->lock)
#define	TRUNK_WUNLOCK(trunk)		mtx_unlock(&(trunk)->lock)
#define	TRUNK_WLOCK_ASSERT(trunk)	mtx_assert(&(trunk)->lock, MA_OWNED);

/*
 * The VLAN_ARRAY substitutes the dynamic hash with a static array
 * with 4096 entries. In theory this can give a boost in processing,
 * however in practice it does not. Probably this is because the array
 * is too big to fit into CPU cache.
 */
#ifndef VLAN_ARRAY
static	void vlan_inithash(struct ifvlantrunk *trunk);
static	void vlan_freehash(struct ifvlantrunk *trunk);
static	int vlan_inshash(struct ifvlantrunk *trunk, struct ifvlan *ifv);
static	int vlan_remhash(struct ifvlantrunk *trunk, struct ifvlan *ifv);
static	void vlan_growhash(struct ifvlantrunk *trunk, int howmuch);
static __inline struct ifvlan * vlan_gethash(struct ifvlantrunk *trunk,
	uint16_t vid);
#endif
static	void trunk_destroy(struct ifvlantrunk *trunk);

static	void vlan_init(void *foo);
static	void vlan_input(struct ifnet *ifp, struct mbuf *m);
static	int vlan_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr);
#if defined(KERN_TLS) || defined(RATELIMIT)
static	int vlan_snd_tag_alloc(struct ifnet *,
    union if_snd_tag_alloc_params *, struct m_snd_tag **);
static	int vlan_snd_tag_modify(struct m_snd_tag *,
    union if_snd_tag_modify_params *);
static	int vlan_snd_tag_query(struct m_snd_tag *,
    union if_snd_tag_query_params *);
static	void vlan_snd_tag_free(struct m_snd_tag *);
#endif
static	void vlan_qflush(struct ifnet *ifp);
static	int vlan_setflag(struct ifnet *ifp, int flag, int status,
    int (*func)(struct ifnet *, int));
static	int vlan_setflags(struct ifnet *ifp, int status);
static	int vlan_setmulti(struct ifnet *ifp);
static	int vlan_transmit(struct ifnet *ifp, struct mbuf *m);
static	int vlan_output(struct ifnet *ifp, struct mbuf *m,
    const struct sockaddr *dst, struct route *ro);
static	void vlan_unconfig(struct ifnet *ifp);
static	void vlan_unconfig_locked(struct ifnet *ifp, int departing);
static	int vlan_config(struct ifvlan *ifv, struct ifnet *p, uint16_t tag);
static	void vlan_link_state(struct ifnet *ifp);
static	void vlan_capabilities(struct ifvlan *ifv);
static	void vlan_trunk_capabilities(struct ifnet *ifp);

static	struct ifnet *vlan_clone_match_ethervid(const char *, int *);
static	int vlan_clone_match(struct if_clone *, const char *);
static	int vlan_clone_create(struct if_clone *, char *, size_t, caddr_t);
static	int vlan_clone_destroy(struct if_clone *, struct ifnet *);

static	void vlan_ifdetach(void *arg, struct ifnet *ifp);
static	void vlan_iflladdr(void *arg, struct ifnet *ifp);

static	void vlan_lladdr_fn(void *arg, int pending);

static struct if_clone *vlan_cloner;

#ifdef VIMAGE
VNET_DEFINE_STATIC(struct if_clone *, vlan_cloner);
#define	V_vlan_cloner	VNET(vlan_cloner)
#endif

static void
vlan_mc_free(struct epoch_context *ctx)
{
	struct vlan_mc_entry *mc = __containerof(ctx, struct vlan_mc_entry, mc_epoch_ctx);
	free(mc, M_VLAN);
}

#ifndef VLAN_ARRAY
#define HASH(n, m)	((((n) >> 8) ^ ((n) >> 4) ^ (n)) & (m))

static void
vlan_inithash(struct ifvlantrunk *trunk)
{
	int i, n;

	/*
	 * The trunk must not be locked here since we call malloc(M_WAITOK).
	 * It is OK in case this function is called before the trunk struct
	 * gets hooked up and becomes visible from other threads.
	 */

	KASSERT(trunk->hwidth == 0 && trunk->hash == NULL,
	    ("%s: hash already initialized", __func__));

	trunk->hwidth = VLAN_DEF_HWIDTH;
	n = 1 << trunk->hwidth;
	trunk->hmask = n - 1;
	trunk->hash = malloc(sizeof(struct ifvlanhead) * n, M_VLAN, M_WAITOK);
	for (i = 0; i < n; i++)
		CK_SLIST_INIT(&trunk->hash[i]);
}

static void
vlan_freehash(struct ifvlantrunk *trunk)
{
#ifdef INVARIANTS
	int i;

	KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__));
	for (i = 0; i < (1 << trunk->hwidth); i++)
		KASSERT(CK_SLIST_EMPTY(&trunk->hash[i]),
		    ("%s: hash table not empty", __func__));
#endif
	free(trunk->hash, M_VLAN);
	trunk->hash = NULL;
	trunk->hwidth = trunk->hmask = 0;
}

static int
vlan_inshash(struct ifvlantrunk *trunk, struct ifvlan *ifv)
{
	int i, b;
	struct ifvlan *ifv2;

	VLAN_XLOCK_ASSERT();
	KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__));

	b = 1 << trunk->hwidth;
	i = HASH(ifv->ifv_vid, trunk->hmask);
	CK_SLIST_FOREACH(ifv2, &trunk->hash[i], ifv_list)
		if (ifv->ifv_vid == ifv2->ifv_vid)
			return (EEXIST);

	/*
	 * Grow the hash when the number of vlans exceeds half of the number of
	 * hash buckets squared. This will make the average linked-list length
	 * buckets/2.
	 */
	if (trunk->refcnt > (b * b) / 2) {
		vlan_growhash(trunk, 1);
		i = HASH(ifv->ifv_vid, trunk->hmask);
	}
	CK_SLIST_INSERT_HEAD(&trunk->hash[i], ifv, ifv_list);
	trunk->refcnt++;

	return (0);
}

static int
vlan_remhash(struct ifvlantrunk *trunk, struct ifvlan *ifv)
{
	int i, b;
	struct ifvlan *ifv2;

	VLAN_XLOCK_ASSERT();
	KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__));

	b = 1 << trunk->hwidth;
	i = HASH(ifv->ifv_vid, trunk->hmask);
	CK_SLIST_FOREACH(ifv2, &trunk->hash[i], ifv_list)
		if (ifv2 == ifv) {
			trunk->refcnt--;
			CK_SLIST_REMOVE(&trunk->hash[i], ifv2, ifvlan, ifv_list);
			if (trunk->refcnt < (b * b) / 2)
				vlan_growhash(trunk, -1);
			return (0);
		}

	panic("%s: vlan not found\n", __func__);
	return (ENOENT); /*NOTREACHED*/
}

/*
 * Grow the hash larger or smaller if memory permits.
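 *
 * A short worked example of the growth threshold used in vlan_inshash()
 * above (numbers are illustrative only): with the default hwidth of
 * VLAN_DEF_HWIDTH (4) there are b = 16 buckets, so the table is grown one
 * step once more than (16 * 16) / 2 = 128 vlans hash onto the trunk, which
 * keeps the average chain length around buckets/2 (128 / 16 = 8).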
 */
static void
vlan_growhash(struct ifvlantrunk *trunk, int howmuch)
{
	struct ifvlan *ifv;
	struct ifvlanhead *hash2;
	int hwidth2, i, j, n, n2;

	VLAN_XLOCK_ASSERT();
	KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__));

	if (howmuch == 0) {
		/* Harmless yet obvious coding error */
		printf("%s: howmuch is 0\n", __func__);
		return;
	}

	hwidth2 = trunk->hwidth + howmuch;
	n = 1 << trunk->hwidth;
	n2 = 1 << hwidth2;
	/* Do not shrink the table below the default */
	if (hwidth2 < VLAN_DEF_HWIDTH)
		return;

	hash2 = malloc(sizeof(struct ifvlanhead) * n2, M_VLAN, M_WAITOK);
	if (hash2 == NULL) {
		printf("%s: out of memory -- hash size not changed\n",
		    __func__);
		return;		/* We can live with the old hash table */
	}
	for (j = 0; j < n2; j++)
		CK_SLIST_INIT(&hash2[j]);
	for (i = 0; i < n; i++)
		while ((ifv = CK_SLIST_FIRST(&trunk->hash[i])) != NULL) {
			CK_SLIST_REMOVE(&trunk->hash[i], ifv, ifvlan, ifv_list);
			j = HASH(ifv->ifv_vid, n2 - 1);
			CK_SLIST_INSERT_HEAD(&hash2[j], ifv, ifv_list);
		}
	NET_EPOCH_WAIT();
	free(trunk->hash, M_VLAN);
	trunk->hash = hash2;
	trunk->hwidth = hwidth2;
	trunk->hmask = n2 - 1;

	if (bootverbose)
		if_printf(trunk->parent,
		    "VLAN hash table resized from %d to %d buckets\n", n, n2);
}

static __inline struct ifvlan *
vlan_gethash(struct ifvlantrunk *trunk, uint16_t vid)
{
	struct ifvlan *ifv;

	NET_EPOCH_ASSERT();

	CK_SLIST_FOREACH(ifv, &trunk->hash[HASH(vid, trunk->hmask)], ifv_list)
		if (ifv->ifv_vid == vid)
			return (ifv);
	return (NULL);
}

#if 0
/* Debugging code to view the hashtables. */
static void
vlan_dumphash(struct ifvlantrunk *trunk)
{
	int i;
	struct ifvlan *ifv;

	for (i = 0; i < (1 << trunk->hwidth); i++) {
		printf("%d: ", i);
		CK_SLIST_FOREACH(ifv, &trunk->hash[i], ifv_list)
			printf("%s ", ifv->ifv_ifp->if_xname);
		printf("\n");
	}
}
#endif /* 0 */
#else

static __inline struct ifvlan *
vlan_gethash(struct ifvlantrunk *trunk, uint16_t vid)
{

	return trunk->vlans[vid];
}

static __inline int
vlan_inshash(struct ifvlantrunk *trunk, struct ifvlan *ifv)
{

	if (trunk->vlans[ifv->ifv_vid] != NULL)
		return EEXIST;
	trunk->vlans[ifv->ifv_vid] = ifv;
	trunk->refcnt++;

	return (0);
}

static __inline int
vlan_remhash(struct ifvlantrunk *trunk, struct ifvlan *ifv)
{

	trunk->vlans[ifv->ifv_vid] = NULL;
	trunk->refcnt--;

	return (0);
}

static __inline void
vlan_freehash(struct ifvlantrunk *trunk)
{
}

static __inline void
vlan_inithash(struct ifvlantrunk *trunk)
{
}

#endif /* !VLAN_ARRAY */

static void
trunk_destroy(struct ifvlantrunk *trunk)
{
	VLAN_XLOCK_ASSERT();

	vlan_freehash(trunk);
	trunk->parent->if_vlantrunk = NULL;
	TRUNK_LOCK_DESTROY(trunk);
	if_rele(trunk->parent);
	free(trunk, M_VLAN);
}

/*
 * Program our multicast filter. What we're actually doing is
 * programming the multicast filter of the parent. This has the
 * side effect of causing the parent interface to receive multicast
 * traffic that it doesn't really want, which ends up being discarded
 * later by the upper protocol layers.
 * Unfortunately, there's no way to avoid this: there really is only one
 * physical interface.
 */
static int
vlan_setmulti(struct ifnet *ifp)
{
	struct ifnet		*ifp_p;
	struct ifmultiaddr	*ifma;
	struct ifvlan		*sc;
	struct vlan_mc_entry	*mc;
	int			error;

	VLAN_XLOCK_ASSERT();

	/* Find the parent. */
	sc = ifp->if_softc;
	ifp_p = PARENT(sc);

	CURVNET_SET_QUIET(ifp_p->if_vnet);

	/* First, remove any existing filter entries. */
	while ((mc = CK_SLIST_FIRST(&sc->vlan_mc_listhead)) != NULL) {
		CK_SLIST_REMOVE_HEAD(&sc->vlan_mc_listhead, mc_entries);
		(void)if_delmulti(ifp_p, (struct sockaddr *)&mc->mc_addr);
		NET_EPOCH_CALL(vlan_mc_free, &mc->mc_epoch_ctx);
	}

	/* Now program new ones. */
	IF_ADDR_WLOCK(ifp);
	CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		mc = malloc(sizeof(struct vlan_mc_entry), M_VLAN, M_NOWAIT);
		if (mc == NULL) {
			IF_ADDR_WUNLOCK(ifp);
			return (ENOMEM);
		}
		bcopy(ifma->ifma_addr, &mc->mc_addr, ifma->ifma_addr->sa_len);
		mc->mc_addr.sdl_index = ifp_p->if_index;
		CK_SLIST_INSERT_HEAD(&sc->vlan_mc_listhead, mc, mc_entries);
	}
	IF_ADDR_WUNLOCK(ifp);
	CK_SLIST_FOREACH (mc, &sc->vlan_mc_listhead, mc_entries) {
		error = if_addmulti(ifp_p, (struct sockaddr *)&mc->mc_addr,
		    NULL);
		if (error)
			return (error);
	}

	CURVNET_RESTORE();
	return (0);
}

/*
 * A handler for parent interface link layer address changes.
 * If the parent interface link layer address is changed we
 * should also change it on all children vlans.
 */
static void
vlan_iflladdr(void *arg __unused, struct ifnet *ifp)
{
	struct epoch_tracker et;
	struct ifvlan *ifv;
	struct ifnet *ifv_ifp;
	struct ifvlantrunk *trunk;
	struct sockaddr_dl *sdl;

	/* Need the epoch since this is run on taskqueue_swi. */
	NET_EPOCH_ENTER(et);
	trunk = ifp->if_vlantrunk;
	if (trunk == NULL) {
		NET_EPOCH_EXIT(et);
		return;
	}

	/*
	 * OK, it's a trunk.  Loop over and change all vlan's lladdrs on it.
	 * We need an exclusive lock here to prevent concurrent SIOCSIFLLADDR
	 * ioctl calls on the parent garbling the lladdr of the child vlan.
	 */
	TRUNK_WLOCK(trunk);
	VLAN_FOREACH(ifv, trunk) {
		/*
		 * Copy the new lladdr into the ifv_ifp and enqueue a task
		 * to actually call if_setlladdr.  if_setlladdr needs to
		 * be deferred to a taskqueue because it will call into
		 * the if_vlan ioctl path and try to acquire the global
		 * lock.
		 */
		ifv_ifp = ifv->ifv_ifp;
		bcopy(IF_LLADDR(ifp), IF_LLADDR(ifv_ifp),
		    ifp->if_addrlen);
		sdl = (struct sockaddr_dl *)ifv_ifp->if_addr->ifa_addr;
		sdl->sdl_alen = ifp->if_addrlen;
		taskqueue_enqueue(taskqueue_thread, &ifv->lladdr_task);
	}
	TRUNK_WUNLOCK(trunk);
	NET_EPOCH_EXIT(et);
}

/*
 * A handler for network interface departure events.
 * Track departure of trunks here so that we don't access invalid
 * pointers or whatever if a trunk is ripped from under us, e.g.,
 * by ejecting its hot-plug card.  However, if an ifnet is simply
 * being renamed, then there's no need to tear down the state.
 */
static void
vlan_ifdetach(void *arg __unused, struct ifnet *ifp)
{
	struct ifvlan *ifv;
	struct ifvlantrunk *trunk;

	/* If the ifnet is just being renamed, don't do anything.
	 */
	if (ifp->if_flags & IFF_RENAMING)
		return;
	VLAN_XLOCK();
	trunk = ifp->if_vlantrunk;
	if (trunk == NULL) {
		VLAN_XUNLOCK();
		return;
	}

	/*
	 * OK, it's a trunk.  Loop over and detach all vlan's on it.
	 * Check trunk pointer after each vlan_unconfig() as it will
	 * free it and set to NULL after the last vlan was detached.
	 */
	VLAN_FOREACH_UNTIL_SAFE(ifv, ifp->if_vlantrunk,
	    ifp->if_vlantrunk == NULL)
		vlan_unconfig_locked(ifv->ifv_ifp, 1);

	/* Trunk should have been destroyed in vlan_unconfig(). */
	KASSERT(ifp->if_vlantrunk == NULL, ("%s: purge failed", __func__));
	VLAN_XUNLOCK();
}

/*
 * Return the trunk device for a virtual interface.
 */
static struct ifnet  *
vlan_trunkdev(struct ifnet *ifp)
{
	struct ifvlan *ifv;

	NET_EPOCH_ASSERT();

	if (ifp->if_type != IFT_L2VLAN)
		return (NULL);

	ifv = ifp->if_softc;
	ifp = NULL;
	if (ifv->ifv_trunk)
		ifp = PARENT(ifv);
	return (ifp);
}

/*
 * Return the 12-bit VLAN VID for this interface, for use by external
 * components such as Infiniband.
 *
 * XXXRW: Note that the function name here is historical; it should be named
 * vlan_vid().
 */
static int
vlan_tag(struct ifnet *ifp, uint16_t *vidp)
{
	struct ifvlan *ifv;

	if (ifp->if_type != IFT_L2VLAN)
		return (EINVAL);
	ifv = ifp->if_softc;
	*vidp = ifv->ifv_vid;
	return (0);
}

static int
vlan_pcp(struct ifnet *ifp, uint16_t *pcpp)
{
	struct ifvlan *ifv;

	if (ifp->if_type != IFT_L2VLAN)
		return (EINVAL);
	ifv = ifp->if_softc;
	*pcpp = ifv->ifv_pcp;
	return (0);
}

/*
 * Return a driver specific cookie for this interface.  Synchronization
 * with setcookie must be provided by the driver.
 */
static void *
vlan_cookie(struct ifnet *ifp)
{
	struct ifvlan *ifv;

	if (ifp->if_type != IFT_L2VLAN)
		return (NULL);
	ifv = ifp->if_softc;
	return (ifv->ifv_cookie);
}

/*
 * Store a cookie in our softc that drivers can use to store driver
 * private per-instance data in.
 */
static int
vlan_setcookie(struct ifnet *ifp, void *cookie)
{
	struct ifvlan *ifv;

	if (ifp->if_type != IFT_L2VLAN)
		return (EINVAL);
	ifv = ifp->if_softc;
	ifv->ifv_cookie = cookie;
	return (0);
}

/*
 * Return the vlan device present at the specific VID.
 */
static struct ifnet *
vlan_devat(struct ifnet *ifp, uint16_t vid)
{
	struct ifvlantrunk *trunk;
	struct ifvlan *ifv;

	NET_EPOCH_ASSERT();

	trunk = ifp->if_vlantrunk;
	if (trunk == NULL)
		return (NULL);
	ifp = NULL;
	ifv = vlan_gethash(trunk, vid);
	if (ifv)
		ifp = ifv->ifv_ifp;
	return (ifp);
}

/*
 * Recalculate the cached VLAN tag exposed via the MIB.
 */
static void
vlan_tag_recalculate(struct ifvlan *ifv)
{

	ifv->ifv_tag = EVL_MAKETAG(ifv->ifv_vid, ifv->ifv_pcp, 0);
}

/*
 * VLAN support can be loaded as a module.  The only place in the
 * system that's intimately aware of this is ether_input.  We hook
 * into this code through vlan_input_p which is defined there and
 * set here.  No one else in the system should be aware of this so
 * we use an explicit reference here.
 */
extern	void (*vlan_input_p)(struct ifnet *, struct mbuf *);

/* For if_link_state_change() eyes only...
 */
extern	void (*vlan_link_state_p)(struct ifnet *);

static int
vlan_modevent(module_t mod, int type, void *data)
{

	switch (type) {
	case MOD_LOAD:
		ifdetach_tag = EVENTHANDLER_REGISTER(ifnet_departure_event,
		    vlan_ifdetach, NULL, EVENTHANDLER_PRI_ANY);
		if (ifdetach_tag == NULL)
			return (ENOMEM);
		iflladdr_tag = EVENTHANDLER_REGISTER(iflladdr_event,
		    vlan_iflladdr, NULL, EVENTHANDLER_PRI_ANY);
		if (iflladdr_tag == NULL)
			return (ENOMEM);
		VLAN_LOCKING_INIT();
		vlan_input_p = vlan_input;
		vlan_link_state_p = vlan_link_state;
		vlan_trunk_cap_p = vlan_trunk_capabilities;
		vlan_trunkdev_p = vlan_trunkdev;
		vlan_cookie_p = vlan_cookie;
		vlan_setcookie_p = vlan_setcookie;
		vlan_tag_p = vlan_tag;
		vlan_pcp_p = vlan_pcp;
		vlan_devat_p = vlan_devat;
#ifndef VIMAGE
		vlan_cloner = if_clone_advanced(vlanname, 0, vlan_clone_match,
		    vlan_clone_create, vlan_clone_destroy);
#endif
		if (bootverbose)
			printf("vlan: initialized, using "
#ifdef VLAN_ARRAY
			       "full-size arrays"
#else
			       "hash tables with chaining"
#endif

			       "\n");
		break;
	case MOD_UNLOAD:
#ifndef VIMAGE
		if_clone_detach(vlan_cloner);
#endif
		EVENTHANDLER_DEREGISTER(ifnet_departure_event, ifdetach_tag);
		EVENTHANDLER_DEREGISTER(iflladdr_event, iflladdr_tag);
		vlan_input_p = NULL;
		vlan_link_state_p = NULL;
		vlan_trunk_cap_p = NULL;
		vlan_trunkdev_p = NULL;
		vlan_tag_p = NULL;
		vlan_cookie_p = NULL;
		vlan_setcookie_p = NULL;
		vlan_devat_p = NULL;
		VLAN_LOCKING_DESTROY();
		if (bootverbose)
			printf("vlan: unloaded\n");
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (0);
}

static moduledata_t vlan_mod = {
	"if_vlan",
	vlan_modevent,
	0
};

DECLARE_MODULE(if_vlan, vlan_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
MODULE_VERSION(if_vlan, 3);

#ifdef VIMAGE
static void
vnet_vlan_init(const void *unused __unused)
{

	vlan_cloner = if_clone_advanced(vlanname, 0, vlan_clone_match,
	    vlan_clone_create, vlan_clone_destroy);
	V_vlan_cloner = vlan_cloner;
}
VNET_SYSINIT(vnet_vlan_init, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY,
    vnet_vlan_init, NULL);

static void
vnet_vlan_uninit(const void *unused __unused)
{

	if_clone_detach(V_vlan_cloner);
}
VNET_SYSUNINIT(vnet_vlan_uninit, SI_SUB_INIT_IF, SI_ORDER_FIRST,
    vnet_vlan_uninit, NULL);
#endif

/*
 * Check for <etherif>.<vlan> style interface names.
 */
static struct ifnet *
vlan_clone_match_ethervid(const char *name, int *vidp)
{
	char ifname[IFNAMSIZ];
	char *cp;
	struct ifnet *ifp;
	int vid;

	strlcpy(ifname, name, IFNAMSIZ);
	if ((cp = strchr(ifname, '.')) == NULL)
		return (NULL);
	*cp = '\0';
	if ((ifp = ifunit_ref(ifname)) == NULL)
		return (NULL);
	/* Parse VID.
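	 *
	 * Illustrative example (hypothetical interface name): for "em0.42"
	 * the strchr()/ifunit_ref() steps above yield the parent "em0", and
	 * the digit loop below leaves vid == 42; a name such as "em0.4x"
	 * fails the trailing-character check and is rejected.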
	 */
	if (*++cp == '\0') {
		if_rele(ifp);
		return (NULL);
	}
	vid = 0;
	for (; *cp >= '0' && *cp <= '9'; cp++)
		vid = (vid * 10) + (*cp - '0');
	if (*cp != '\0') {
		if_rele(ifp);
		return (NULL);
	}
	if (vidp != NULL)
		*vidp = vid;

	return (ifp);
}

static int
vlan_clone_match(struct if_clone *ifc, const char *name)
{
	const char *cp;

	if (vlan_clone_match_ethervid(name, NULL) != NULL)
		return (1);

	if (strncmp(vlanname, name, strlen(vlanname)) != 0)
		return (0);
	for (cp = name + 4; *cp != '\0'; cp++) {
		if (*cp < '0' || *cp > '9')
			return (0);
	}

	return (1);
}

static int
vlan_clone_create(struct if_clone *ifc, char *name, size_t len, caddr_t params)
{
	char *dp;
	int wildcard;
	int unit;
	int error;
	int vid;
	struct ifvlan *ifv;
	struct ifnet *ifp;
	struct ifnet *p;
	struct ifaddr *ifa;
	struct sockaddr_dl *sdl;
	struct vlanreq vlr;
	static const u_char eaddr[ETHER_ADDR_LEN];	/* 00:00:00:00:00:00 */

	/*
	 * There are 3 (ugh) ways to specify the cloned device:
	 * o pass a parameter block with the clone request.
	 * o specify parameters in the text of the clone device name
	 * o specify no parameters and get an unattached device that
	 *   must be configured separately.
	 * The first technique is preferred; the latter two are supported
	 * for backwards compatibility.
	 *
	 * XXXRW: Note historic use of the word "tag" here.  New ioctls may be
	 * called for.
	 */
	if (params) {
		error = copyin(params, &vlr, sizeof(vlr));
		if (error)
			return error;
		p = ifunit_ref(vlr.vlr_parent);
		if (p == NULL)
			return (ENXIO);
		error = ifc_name2unit(name, &unit);
		if (error != 0) {
			if_rele(p);
			return (error);
		}
		vid = vlr.vlr_tag;
		wildcard = (unit < 0);
	} else if ((p = vlan_clone_match_ethervid(name, &vid)) != NULL) {
		unit = -1;
		wildcard = 0;
	} else {
		p = NULL;
		error = ifc_name2unit(name, &unit);
		if (error != 0)
			return (error);

		wildcard = (unit < 0);
	}

	error = ifc_alloc_unit(ifc, &unit);
	if (error != 0) {
		if (p != NULL)
			if_rele(p);
		return (error);
	}

	/* In the wildcard case, we need to update the name. */
	if (wildcard) {
		for (dp = name; *dp != '\0'; dp++);
		if (snprintf(dp, len - (dp-name), "%d", unit) >
		    len - (dp-name) - 1) {
			panic("%s: interface name too long", __func__);
		}
	}

	ifv = malloc(sizeof(struct ifvlan), M_VLAN, M_WAITOK | M_ZERO);
	ifp = ifv->ifv_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		ifc_free_unit(ifc, unit);
		free(ifv, M_VLAN);
		if (p != NULL)
			if_rele(p);
		return (ENOSPC);
	}
	CK_SLIST_INIT(&ifv->vlan_mc_listhead);
	ifp->if_softc = ifv;
	/*
	 * Set the name manually rather than using if_initname because
	 * we don't conform to the default naming convention for interfaces.
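	 *
	 * For illustration: a wildcard create ends up with a name such as
	 * "vlan0" (vlanname plus the unit chosen above), while the
	 * <parent>.<vid> form, e.g. the hypothetical "em0.42", keeps the
	 * caller-supplied name even though if_dname/if_dunit are still set
	 * below.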
	 */
	strlcpy(ifp->if_xname, name, IFNAMSIZ);
	ifp->if_dname = vlanname;
	ifp->if_dunit = unit;

	ifp->if_init = vlan_init;
	ifp->if_transmit = vlan_transmit;
	ifp->if_qflush = vlan_qflush;
	ifp->if_ioctl = vlan_ioctl;
#if defined(KERN_TLS) || defined(RATELIMIT)
	ifp->if_snd_tag_alloc = vlan_snd_tag_alloc;
	ifp->if_snd_tag_modify = vlan_snd_tag_modify;
	ifp->if_snd_tag_query = vlan_snd_tag_query;
	ifp->if_snd_tag_free = vlan_snd_tag_free;
#endif
	ifp->if_flags = VLAN_IFFLAGS;
	ether_ifattach(ifp, eaddr);
	/* Now undo some of the damage... */
	ifp->if_baudrate = 0;
	ifp->if_type = IFT_L2VLAN;
	ifp->if_hdrlen = ETHER_VLAN_ENCAP_LEN;
	ifa = ifp->if_addr;
	sdl = (struct sockaddr_dl *)ifa->ifa_addr;
	sdl->sdl_type = IFT_L2VLAN;

	if (p != NULL) {
		error = vlan_config(ifv, p, vid);
		if_rele(p);
		if (error != 0) {
			/*
			 * Since we've partially failed, we need to back
			 * out all the way, otherwise userland could get
			 * confused.  Thus, we destroy the interface.
			 */
			ether_ifdetach(ifp);
			vlan_unconfig(ifp);
			if_free(ifp);
			ifc_free_unit(ifc, unit);
			free(ifv, M_VLAN);

			return (error);
		}
	}

	return (0);
}

static int
vlan_clone_destroy(struct if_clone *ifc, struct ifnet *ifp)
{
	struct ifvlan *ifv = ifp->if_softc;
	int unit = ifp->if_dunit;

	ether_ifdetach(ifp);	/* first, remove it from system-wide lists */
	vlan_unconfig(ifp);	/* now it can be unconfigured and freed */
	/*
	 * We should have the only reference to the ifv now, so we can now
	 * drain any remaining lladdr task before freeing the ifnet and the
	 * ifvlan.
	 */
	taskqueue_drain(taskqueue_thread, &ifv->lladdr_task);
	NET_EPOCH_WAIT();
	if_free(ifp);
	free(ifv, M_VLAN);
	ifc_free_unit(ifc, unit);

	return (0);
}

/*
 * The ifp->if_init entry point for vlan(4) is a no-op.
 */
static void
vlan_init(void *foo __unused)
{
}

/*
 * The if_transmit method for the vlan(4) interface.
 */
static int
vlan_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct ifvlan *ifv;
	struct ifnet *p;
	int error, len, mcast;

	NET_EPOCH_ASSERT();

	ifv = ifp->if_softc;
	if (TRUNK(ifv) == NULL) {
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		m_freem(m);
		return (ENETDOWN);
	}
	p = PARENT(ifv);
	len = m->m_pkthdr.len;
	mcast = (m->m_flags & (M_MCAST | M_BCAST)) ? 1 : 0;

	BPF_MTAP(ifp, m);

#if defined(KERN_TLS) || defined(RATELIMIT)
	if (m->m_pkthdr.csum_flags & CSUM_SND_TAG) {
		struct vlan_snd_tag *vst;
		struct m_snd_tag *mst;

		MPASS(m->m_pkthdr.snd_tag->ifp == ifp);
		mst = m->m_pkthdr.snd_tag;
		vst = mst_to_vst(mst);
		if (vst->tag->ifp != p) {
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			m_freem(m);
			return (EAGAIN);
		}

		m->m_pkthdr.snd_tag = m_snd_tag_ref(vst->tag);
		m_snd_tag_rele(mst);
	}
#endif

	/*
	 * Do not run parent's if_transmit() if the parent is not up,
	 * or parent's driver will cause a system crash.
	 */
	if (!UP_AND_RUNNING(p)) {
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		m_freem(m);
		return (ENETDOWN);
	}

	if (!ether_8021q_frame(&m, ifp, p, ifv->ifv_vid, ifv->ifv_pcp)) {
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		return (0);
	}

	/*
	 * Send it, precisely as ether_output() would have.
	 */
	error = (p->if_transmit)(p, m);
	if (error == 0) {
		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		if_inc_counter(ifp, IFCOUNTER_OBYTES, len);
		if_inc_counter(ifp, IFCOUNTER_OMCASTS, mcast);
	} else
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	return (error);
}

static int
vlan_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst,
    struct route *ro)
{
	struct ifvlan *ifv;
	struct ifnet *p;

	NET_EPOCH_ASSERT();

	ifv = ifp->if_softc;
	if (TRUNK(ifv) == NULL) {
		m_freem(m);
		return (ENETDOWN);
	}
	p = PARENT(ifv);
	return p->if_output(ifp, m, dst, ro);
}

/*
 * The ifp->if_qflush entry point for vlan(4) is a no-op.
 */
static void
vlan_qflush(struct ifnet *ifp __unused)
{
}

static void
vlan_input(struct ifnet *ifp, struct mbuf *m)
{
	struct ifvlantrunk *trunk;
	struct ifvlan *ifv;
	struct m_tag *mtag;
	uint16_t vid, tag;

	NET_EPOCH_ASSERT();

	trunk = ifp->if_vlantrunk;
	if (trunk == NULL) {
		m_freem(m);
		return;
	}

	if (m->m_flags & M_VLANTAG) {
		/*
		 * Packet is tagged, but m contains a normal
		 * Ethernet frame; the tag is stored out-of-band.
		 */
		tag = m->m_pkthdr.ether_vtag;
		m->m_flags &= ~M_VLANTAG;
	} else {
		struct ether_vlan_header *evl;

		/*
		 * Packet is tagged in-band as specified by 802.1q.
		 */
		switch (ifp->if_type) {
		case IFT_ETHER:
			if (m->m_len < sizeof(*evl) &&
			    (m = m_pullup(m, sizeof(*evl))) == NULL) {
				if_printf(ifp, "cannot pullup VLAN header\n");
				return;
			}
			evl = mtod(m, struct ether_vlan_header *);
			tag = ntohs(evl->evl_tag);

			/*
			 * Remove the 802.1q header by copying the Ethernet
			 * addresses over it and adjusting the beginning of
			 * the data in the mbuf.  The encapsulated Ethernet
			 * type field is already in place.
			 */
			bcopy((char *)evl, (char *)evl + ETHER_VLAN_ENCAP_LEN,
			    ETHER_HDR_LEN - ETHER_TYPE_LEN);
			m_adj(m, ETHER_VLAN_ENCAP_LEN);
			break;

		default:
#ifdef INVARIANTS
			panic("%s: %s has unsupported if_type %u",
			    __func__, ifp->if_xname, ifp->if_type);
#endif
			if_inc_counter(ifp, IFCOUNTER_NOPROTO, 1);
			m_freem(m);
			return;
		}
	}

	vid = EVL_VLANOFTAG(tag);

	ifv = vlan_gethash(trunk, vid);
	if (ifv == NULL || !UP_AND_RUNNING(ifv->ifv_ifp)) {
		if_inc_counter(ifp, IFCOUNTER_NOPROTO, 1);
		m_freem(m);
		return;
	}

	if (vlan_mtag_pcp) {
		/*
		 * While uncommon, it is possible that we will find an 802.1q
		 * packet encapsulated inside another packet that also had an
		 * 802.1q header.  For example, ethernet tunneled over IPSEC
		 * arriving over ethernet.  In that case, we replace the
		 * existing 802.1q PCP m_tag value.
		 */
		mtag = m_tag_locate(m, MTAG_8021Q, MTAG_8021Q_PCP_IN, NULL);
		if (mtag == NULL) {
			mtag = m_tag_alloc(MTAG_8021Q, MTAG_8021Q_PCP_IN,
			    sizeof(uint8_t), M_NOWAIT);
			if (mtag == NULL) {
				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
				m_freem(m);
				return;
			}
			m_tag_prepend(m, mtag);
		}
		*(uint8_t *)(mtag + 1) = EVL_PRIOFTAG(tag);
	}

	m->m_pkthdr.rcvif = ifv->ifv_ifp;
	if_inc_counter(ifv->ifv_ifp, IFCOUNTER_IPACKETS, 1);

	/* Pass it back through the parent's input routine. */
	(*ifv->ifv_ifp->if_input)(ifv->ifv_ifp, m);
}

static void
vlan_lladdr_fn(void *arg, int pending __unused)
{
	struct ifvlan *ifv;
	struct ifnet *ifp;

	ifv = (struct ifvlan *)arg;
	ifp = ifv->ifv_ifp;

	CURVNET_SET(ifp->if_vnet);

	/* The ifv_ifp already has the lladdr copied in. */
	if_setlladdr(ifp, IF_LLADDR(ifp), ifp->if_addrlen);

	CURVNET_RESTORE();
}

static int
vlan_config(struct ifvlan *ifv, struct ifnet *p, uint16_t vid)
{
	struct epoch_tracker et;
	struct ifvlantrunk *trunk;
	struct ifnet *ifp;
	int error = 0;

	/*
	 * We can handle non-ethernet hardware types as long as
	 * they handle the tagging and headers themselves.
	 */
	if (p->if_type != IFT_ETHER &&
	    (p->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
		return (EPROTONOSUPPORT);
	if ((p->if_flags & VLAN_IFFLAGS) != VLAN_IFFLAGS)
		return (EPROTONOSUPPORT);
	/*
	 * Don't let the caller set up a VLAN VID with
	 * anything except VLID bits.
	 * VID numbers 0x0 and 0xFFF are reserved.
	 */
	if (vid == 0 || vid == 0xFFF || (vid & ~EVL_VLID_MASK))
		return (EINVAL);
	if (ifv->ifv_trunk)
		return (EBUSY);

	VLAN_XLOCK();
	if (p->if_vlantrunk == NULL) {
		trunk = malloc(sizeof(struct ifvlantrunk),
		    M_VLAN, M_WAITOK | M_ZERO);
		vlan_inithash(trunk);
		TRUNK_LOCK_INIT(trunk);
		TRUNK_WLOCK(trunk);
		p->if_vlantrunk = trunk;
		trunk->parent = p;
		if_ref(trunk->parent);
		TRUNK_WUNLOCK(trunk);
	} else {
		trunk = p->if_vlantrunk;
	}

	ifv->ifv_vid = vid;	/* must set this before vlan_inshash() */
	ifv->ifv_pcp = 0;	/* Default: best effort delivery. */
	vlan_tag_recalculate(ifv);
	error = vlan_inshash(trunk, ifv);
	if (error)
		goto done;
	ifv->ifv_proto = ETHERTYPE_VLAN;
	ifv->ifv_encaplen = ETHER_VLAN_ENCAP_LEN;
	ifv->ifv_mintu = ETHERMIN;
	ifv->ifv_pflags = 0;
	ifv->ifv_capenable = -1;

	/*
	 * If the parent supports the VLAN_MTU capability,
	 * i.e. can Tx/Rx larger than ETHER_MAX_LEN frames,
	 * use it.
	 */
	if (p->if_capenable & IFCAP_VLAN_MTU) {
		/*
		 * No need to fudge the MTU since the parent can
		 * handle extended frames.
		 */
		ifv->ifv_mtufudge = 0;
	} else {
		/*
		 * Fudge the MTU by the encapsulation size.  This
		 * makes us incompatible with strictly compliant
		 * 802.1Q implementations, but allows us to use
		 * the feature with other NetBSD implementations,
		 * which might still be useful.
		 */
		ifv->ifv_mtufudge = ifv->ifv_encaplen;
	}

	ifv->ifv_trunk = trunk;
	ifp = ifv->ifv_ifp;
	/*
	 * Initialize fields from our parent.  This duplicates some
	 * work with ether_ifattach() but allows for non-ethernet
	 * interfaces to also work.
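	 *
	 * Illustrative MTU arithmetic (example numbers only): with a
	 * 1500-byte parent MTU and no IFCAP_VLAN_MTU, ifv_mtufudge was set
	 * above to ETHER_VLAN_ENCAP_LEN (4), so the assignment below gives
	 * the vlan interface an MTU of 1496; a parent that handles oversize
	 * frames leaves the vlan MTU at the full 1500.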
	 */
	ifp->if_mtu = p->if_mtu - ifv->ifv_mtufudge;
	ifp->if_baudrate = p->if_baudrate;
	ifp->if_input = p->if_input;
	ifp->if_resolvemulti = p->if_resolvemulti;
	ifp->if_addrlen = p->if_addrlen;
	ifp->if_broadcastaddr = p->if_broadcastaddr;
	ifp->if_pcp = ifv->ifv_pcp;

	/*
	 * We wrap the parent's if_output using vlan_output to ensure that it
	 * can't become stale.
	 */
	ifp->if_output = vlan_output;

	/*
	 * Copy only a selected subset of flags from the parent.
	 * Other flags are none of our business.
	 */
#define VLAN_COPY_FLAGS (IFF_SIMPLEX)
	ifp->if_flags &= ~VLAN_COPY_FLAGS;
	ifp->if_flags |= p->if_flags & VLAN_COPY_FLAGS;
#undef VLAN_COPY_FLAGS

	ifp->if_link_state = p->if_link_state;

	NET_EPOCH_ENTER(et);
	vlan_capabilities(ifv);
	NET_EPOCH_EXIT(et);

	/*
	 * Set up our interface address to reflect the underlying
	 * physical interface's.
	 */
	TASK_INIT(&ifv->lladdr_task, 0, vlan_lladdr_fn, ifv);
	((struct sockaddr_dl *)ifp->if_addr->ifa_addr)->sdl_alen =
	    p->if_addrlen;

	/*
	 * Do not schedule link address update if it was the same
	 * as previous parent's.  This helps avoid updating for each
	 * associated llentry.
	 */
	if (memcmp(IF_LLADDR(p), IF_LLADDR(ifp), p->if_addrlen) != 0) {
		bcopy(IF_LLADDR(p), IF_LLADDR(ifp), p->if_addrlen);
		taskqueue_enqueue(taskqueue_thread, &ifv->lladdr_task);
	}

	/* We are ready for operation now. */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;

	/* Update flags on the parent, if necessary. */
	vlan_setflags(ifp, 1);

	/*
	 * Configure multicast addresses that may already be
	 * joined on the vlan device.
	 */
	(void)vlan_setmulti(ifp);

done:
	if (error == 0)
		EVENTHANDLER_INVOKE(vlan_config, p, ifv->ifv_vid);
	VLAN_XUNLOCK();

	return (error);
}

static void
vlan_unconfig(struct ifnet *ifp)
{

	VLAN_XLOCK();
	vlan_unconfig_locked(ifp, 0);
	VLAN_XUNLOCK();
}

static void
vlan_unconfig_locked(struct ifnet *ifp, int departing)
{
	struct ifvlantrunk *trunk;
	struct vlan_mc_entry *mc;
	struct ifvlan *ifv;
	struct ifnet  *parent;
	int error;

	VLAN_XLOCK_ASSERT();

	ifv = ifp->if_softc;
	trunk = ifv->ifv_trunk;
	parent = NULL;

	if (trunk != NULL) {
		parent = trunk->parent;

		/*
		 * Since the interface is being unconfigured, we need to
		 * empty the list of multicast groups that we may have joined
		 * while we were alive from the parent's list.
		 */
		while ((mc = CK_SLIST_FIRST(&ifv->vlan_mc_listhead)) != NULL) {
			/*
			 * If the parent interface is being detached,
			 * all its multicast addresses have already
			 * been removed.  Warn about errors if
			 * if_delmulti() does fail, but don't abort as
			 * all callers expect vlan destruction to
			 * succeed.
			 */
			if (!departing) {
				error = if_delmulti(parent,
				    (struct sockaddr *)&mc->mc_addr);
				if (error)
					if_printf(ifp,
					    "Failed to delete multicast address from parent: %d\n",
					    error);
			}
			CK_SLIST_REMOVE_HEAD(&ifv->vlan_mc_listhead, mc_entries);
			NET_EPOCH_CALL(vlan_mc_free, &mc->mc_epoch_ctx);
		}

		vlan_setflags(ifp, 0); /* clear special flags on parent */

		vlan_remhash(trunk, ifv);
		ifv->ifv_trunk = NULL;

		/*
		 * Check if we were the last.
		 */
		if (trunk->refcnt == 0) {
			parent->if_vlantrunk = NULL;
			NET_EPOCH_WAIT();
			trunk_destroy(trunk);
		}
	}

	/* Disconnect from parent. */
	if (ifv->ifv_pflags)
		if_printf(ifp, "%s: ifv_pflags unclean\n", __func__);
	ifp->if_mtu = ETHERMTU;
	ifp->if_link_state = LINK_STATE_UNKNOWN;
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;

	/*
	 * Only dispatch an event if vlan was
	 * attached, otherwise there is nothing
	 * to cleanup anyway.
	 */
	if (parent != NULL)
		EVENTHANDLER_INVOKE(vlan_unconfig, parent, ifv->ifv_vid);
}

/* Handle a reference counted flag that should be set on the parent as well */
static int
vlan_setflag(struct ifnet *ifp, int flag, int status,
    int (*func)(struct ifnet *, int))
{
	struct ifvlan *ifv;
	int error;

	VLAN_SXLOCK_ASSERT();

	ifv = ifp->if_softc;
	status = status ? (ifp->if_flags & flag) : 0;
	/* Now "status" contains the flag value or 0 */

	/*
	 * See if recorded parent's status is different from what
	 * we want it to be.  If it is, flip it.  We record parent's
	 * status in ifv_pflags so that we won't clear parent's flag
	 * we haven't set.  In fact, we don't clear or set parent's
	 * flags directly, but get or release references to them.
	 * That's why we can be sure that recorded flags still are
	 * in accord with actual parent's flags.
	 */
	if (status != (ifv->ifv_pflags & flag)) {
		error = (*func)(PARENT(ifv), status);
		if (error)
			return (error);
		ifv->ifv_pflags &= ~flag;
		ifv->ifv_pflags |= status;
	}
	return (0);
}

/*
 * Handle IFF_* flags that require certain changes on the parent:
 * if "status" is true, update parent's flags respective to our if_flags;
 * if "status" is false, forcibly clear the flags set on parent.
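 *
 * Illustrative example (no new code path): setting IFF_PROMISC on the vlan
 * interface makes the SIOCSIFFLAGS handler call vlan_setflags(ifp, 1), which
 * walks vlan_pflags[] and ends up invoking ifpromisc() on the parent;
 * vlan_unconfig_locked() later calls vlan_setflags(ifp, 0) to release that
 * reference.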
 */
static int
vlan_setflags(struct ifnet *ifp, int status)
{
	int error, i;

	for (i = 0; vlan_pflags[i].flag; i++) {
		error = vlan_setflag(ifp, vlan_pflags[i].flag,
		    status, vlan_pflags[i].func);
		if (error)
			return (error);
	}
	return (0);
}

/* Inform all vlans that their parent has changed link state */
static void
vlan_link_state(struct ifnet *ifp)
{
	struct epoch_tracker et;
	struct ifvlantrunk *trunk;
	struct ifvlan *ifv;

	NET_EPOCH_ENTER(et);
	trunk = ifp->if_vlantrunk;
	if (trunk == NULL) {
		NET_EPOCH_EXIT(et);
		return;
	}

	TRUNK_WLOCK(trunk);
	VLAN_FOREACH(ifv, trunk) {
		ifv->ifv_ifp->if_baudrate = trunk->parent->if_baudrate;
		if_link_state_change(ifv->ifv_ifp,
		    trunk->parent->if_link_state);
	}
	TRUNK_WUNLOCK(trunk);
	NET_EPOCH_EXIT(et);
}

static void
vlan_capabilities(struct ifvlan *ifv)
{
	struct ifnet *p;
	struct ifnet *ifp;
	struct ifnet_hw_tsomax hw_tsomax;
	int cap = 0, ena = 0, mena;
	u_long hwa = 0;

	NET_EPOCH_ASSERT();
	VLAN_SXLOCK_ASSERT();

	p = PARENT(ifv);
	ifp = ifv->ifv_ifp;

	/* Mask parent interface enabled capabilities disabled by user. */
	mena = p->if_capenable & ifv->ifv_capenable;

	/*
	 * If the parent interface can do checksum offloading
	 * on VLANs, then propagate its hardware-assisted
	 * checksumming flags. Also assert that checksum
	 * offloading requires hardware VLAN tagging.
	 */
	if (p->if_capabilities & IFCAP_VLAN_HWCSUM)
		cap |= p->if_capabilities & (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6);
	if (p->if_capenable & IFCAP_VLAN_HWCSUM &&
	    p->if_capenable & IFCAP_VLAN_HWTAGGING) {
		ena |= mena & (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6);
		if (ena & IFCAP_TXCSUM)
			hwa |= p->if_hwassist & (CSUM_IP | CSUM_TCP |
			    CSUM_UDP | CSUM_SCTP);
		if (ena & IFCAP_TXCSUM_IPV6)
			hwa |= p->if_hwassist & (CSUM_TCP_IPV6 |
			    CSUM_UDP_IPV6 | CSUM_SCTP_IPV6);
	}

	/*
	 * If the parent interface can do TSO on VLANs then
	 * propagate the hardware-assisted flag. TSO on VLANs
	 * does not necessarily require hardware VLAN tagging.
	 */
	memset(&hw_tsomax, 0, sizeof(hw_tsomax));
	if_hw_tsomax_common(p, &hw_tsomax);
	if_hw_tsomax_update(ifp, &hw_tsomax);
	if (p->if_capabilities & IFCAP_VLAN_HWTSO)
		cap |= p->if_capabilities & IFCAP_TSO;
	if (p->if_capenable & IFCAP_VLAN_HWTSO) {
		ena |= mena & IFCAP_TSO;
		if (ena & IFCAP_TSO)
			hwa |= p->if_hwassist & CSUM_TSO;
	}

	/*
	 * If the parent interface can do LRO and checksum offloading on
	 * VLANs, then guess it may do LRO on VLANs.  A false positive here
	 * costs nothing, while a false negative may lead to some confusion.
	 */
	if (p->if_capabilities & IFCAP_VLAN_HWCSUM)
		cap |= p->if_capabilities & IFCAP_LRO;
	if (p->if_capenable & IFCAP_VLAN_HWCSUM)
		ena |= p->if_capenable & IFCAP_LRO;

	/*
	 * If the parent interface can offload TCP connections over VLANs then
	 * propagate its TOE capability to the VLAN interface.
	 *
	 * All TOE drivers in the tree today can deal with VLANs.  If this
	 * changes then IFCAP_VLAN_TOE should be promoted to a full capability
	 * with its own bit.
	 */
#define	IFCAP_VLAN_TOE IFCAP_TOE
	if (p->if_capabilities & IFCAP_VLAN_TOE)
		cap |= p->if_capabilities & IFCAP_TOE;
	if (p->if_capenable & IFCAP_VLAN_TOE) {
		TOEDEV(ifp) = TOEDEV(p);
		ena |= mena & IFCAP_TOE;
	}

	/*
	 * If the parent interface supports dynamic link state, so does the
	 * VLAN interface.
	 */
	cap |= (p->if_capabilities & IFCAP_LINKSTATE);
	ena |= (mena & IFCAP_LINKSTATE);

#ifdef RATELIMIT
	/*
	 * If the parent interface supports ratelimiting, so does the
	 * VLAN interface.
	 */
	cap |= (p->if_capabilities & IFCAP_TXRTLMT);
	ena |= (mena & IFCAP_TXRTLMT);
#endif

	/*
	 * If the parent interface supports unmapped mbufs, so does
	 * the VLAN interface.  Note that this should be fine even for
	 * interfaces that don't support hardware tagging as headers
	 * are prepended in normal mbufs to unmapped mbufs holding
	 * payload data.
	 */
	cap |= (p->if_capabilities & IFCAP_NOMAP);
	ena |= (mena & IFCAP_NOMAP);

	/*
	 * If the parent interface can offload encryption and segmentation
	 * of TLS records over TCP, propagate its capability to the VLAN
	 * interface.
	 *
	 * All TLS drivers in the tree today can deal with VLANs.  If
	 * this ever changes, then a new IFCAP_VLAN_TXTLS can be
	 * defined.
	 */
	if (p->if_capabilities & IFCAP_TXTLS)
		cap |= p->if_capabilities & IFCAP_TXTLS;
	if (p->if_capenable & IFCAP_TXTLS)
		ena |= mena & IFCAP_TXTLS;

	ifp->if_capabilities = cap;
	ifp->if_capenable = ena;
	ifp->if_hwassist = hwa;
}

static void
vlan_trunk_capabilities(struct ifnet *ifp)
{
	struct epoch_tracker et;
	struct ifvlantrunk *trunk;
	struct ifvlan *ifv;

	VLAN_SLOCK();
	trunk = ifp->if_vlantrunk;
	if (trunk == NULL) {
		VLAN_SUNLOCK();
		return;
	}
	NET_EPOCH_ENTER(et);
	VLAN_FOREACH(ifv, trunk)
		vlan_capabilities(ifv);
	NET_EPOCH_EXIT(et);
	VLAN_SUNLOCK();
}

static int
vlan_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct ifnet *p;
	struct ifreq *ifr;
	struct ifaddr *ifa;
	struct ifvlan *ifv;
	struct ifvlantrunk *trunk;
	struct vlanreq vlr;
	int error = 0, oldmtu;

	ifr = (struct ifreq *)data;
	ifa = (struct ifaddr *) data;
	ifv = ifp->if_softc;

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(ifp, ifa);
#endif
		break;
	case SIOCGIFADDR:
		bcopy(IF_LLADDR(ifp), &ifr->ifr_addr.sa_data[0],
		    ifp->if_addrlen);
		break;
	case SIOCGIFMEDIA:
		VLAN_SLOCK();
		if (TRUNK(ifv) != NULL) {
			p = PARENT(ifv);
			if_ref(p);
			error = (*p->if_ioctl)(p, SIOCGIFMEDIA, data);
			if_rele(p);
			/* Limit the result to the parent's current config. */
			if (error == 0) {
				struct ifmediareq *ifmr;

				ifmr = (struct ifmediareq *)data;
				if (ifmr->ifm_count >= 1 && ifmr->ifm_ulist) {
					ifmr->ifm_count = 1;
					error = copyout(&ifmr->ifm_current,
					    ifmr->ifm_ulist,
					    sizeof(int));
				}
			}
		} else {
			error = EINVAL;
		}
		VLAN_SUNLOCK();
		break;

	case SIOCSIFMEDIA:
		error = EINVAL;
		break;

	case SIOCSIFMTU:
		/*
		 * Set the interface MTU.
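		 *
		 * Worked example (illustrative, assuming ETHERMIN is 46): for
		 * a parent with a 1500-byte MTU and no IFCAP_VLAN_MTU, the
		 * fudge is 4, so the checks below accept values between
		 * 46 - 4 = 42 and 1500 - 4 = 1496.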
		 */
		VLAN_SLOCK();
		trunk = TRUNK(ifv);
		if (trunk != NULL) {
			TRUNK_WLOCK(trunk);
			if (ifr->ifr_mtu >
			    (PARENT(ifv)->if_mtu - ifv->ifv_mtufudge) ||
			    ifr->ifr_mtu <
			    (ifv->ifv_mintu - ifv->ifv_mtufudge))
				error = EINVAL;
			else
				ifp->if_mtu = ifr->ifr_mtu;
			TRUNK_WUNLOCK(trunk);
		} else
			error = EINVAL;
		VLAN_SUNLOCK();
		break;

	case SIOCSETVLAN:
#ifdef VIMAGE
		/*
		 * XXXRW/XXXBZ: The goal in these checks is to allow a VLAN
		 * interface to be delegated to a jail without allowing the
		 * jail to change what underlying interface/VID it is
		 * associated with.  We are not entirely convinced that this
		 * is the right way to accomplish that policy goal.
		 */
		if (ifp->if_vnet != ifp->if_home_vnet) {
			error = EPERM;
			break;
		}
#endif
		error = copyin(ifr_data_get_ptr(ifr), &vlr, sizeof(vlr));
		if (error)
			break;
		if (vlr.vlr_parent[0] == '\0') {
			vlan_unconfig(ifp);
			break;
		}
		p = ifunit_ref(vlr.vlr_parent);
		if (p == NULL) {
			error = ENOENT;
			break;
		}
		oldmtu = ifp->if_mtu;
		error = vlan_config(ifv, p, vlr.vlr_tag);
		if_rele(p);

		/*
		 * VLAN MTU may change during addition of the vlandev.
		 * If it did, perform the network-layer-specific procedures.
		 */
		if (ifp->if_mtu != oldmtu) {
#ifdef INET6
			nd6_setmtu(ifp);
#endif
			rt_updatemtu(ifp);
		}
		break;

	case SIOCGETVLAN:
#ifdef VIMAGE
		if (ifp->if_vnet != ifp->if_home_vnet) {
			error = EPERM;
			break;
		}
#endif
		bzero(&vlr, sizeof(vlr));
		VLAN_SLOCK();
		if (TRUNK(ifv) != NULL) {
			strlcpy(vlr.vlr_parent, PARENT(ifv)->if_xname,
			    sizeof(vlr.vlr_parent));
			vlr.vlr_tag = ifv->ifv_vid;
		}
		VLAN_SUNLOCK();
		error = copyout(&vlr, ifr_data_get_ptr(ifr), sizeof(vlr));
		break;

	case SIOCSIFFLAGS:
		/*
		 * We should propagate selected flags to the parent,
		 * e.g., promiscuous mode.
		 */
		VLAN_XLOCK();
		if (TRUNK(ifv) != NULL)
			error = vlan_setflags(ifp, 1);
		VLAN_XUNLOCK();
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/*
		 * If we don't have a parent, just remember the membership for
		 * when we do.
		 *
		 * XXX We need the rmlock here to avoid sleeping while
		 * holding in6_multi_mtx.
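		 *
		 * (Memberships recorded while there is no parent are not
		 * lost: vlan_config() replays them by calling vlan_setmulti()
		 * once a parent is attached, as noted above.)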
		 */
		VLAN_XLOCK();
		trunk = TRUNK(ifv);
		if (trunk != NULL)
			error = vlan_setmulti(ifp);
		VLAN_XUNLOCK();

		break;
	case SIOCGVLANPCP:
#ifdef	VIMAGE
		if (ifp->if_vnet != ifp->if_home_vnet) {
			error = EPERM;
			break;
		}
#endif
		ifr->ifr_vlan_pcp = ifv->ifv_pcp;
		break;

	case SIOCSVLANPCP:
#ifdef	VIMAGE
		if (ifp->if_vnet != ifp->if_home_vnet) {
			error = EPERM;
			break;
		}
#endif
		error = priv_check(curthread, PRIV_NET_SETVLANPCP);
		if (error)
			break;
		if (ifr->ifr_vlan_pcp > 7) {
			error = EINVAL;
			break;
		}
		ifv->ifv_pcp = ifr->ifr_vlan_pcp;
		ifp->if_pcp = ifv->ifv_pcp;
		vlan_tag_recalculate(ifv);
		/* broadcast event about PCP change */
		EVENTHANDLER_INVOKE(ifnet_event, ifp, IFNET_EVENT_PCP);
		break;

	case SIOCSIFCAP:
		VLAN_SLOCK();
		ifv->ifv_capenable = ifr->ifr_reqcap;
		trunk = TRUNK(ifv);
		if (trunk != NULL) {
			struct epoch_tracker et;

			NET_EPOCH_ENTER(et);
			vlan_capabilities(ifv);
			NET_EPOCH_EXIT(et);
		}
		VLAN_SUNLOCK();
		break;

	default:
		error = EINVAL;
		break;
	}

	return (error);
}

#if defined(KERN_TLS) || defined(RATELIMIT)
static int
vlan_snd_tag_alloc(struct ifnet *ifp,
    union if_snd_tag_alloc_params *params,
    struct m_snd_tag **ppmt)
{
	struct epoch_tracker et;
	struct vlan_snd_tag *vst;
	struct ifvlan *ifv;
	struct ifnet *parent;
	int error;

	NET_EPOCH_ENTER(et);
	ifv = ifp->if_softc;
	if (ifv->ifv_trunk != NULL)
		parent = PARENT(ifv);
	else
		parent = NULL;
	if (parent == NULL || parent->if_snd_tag_alloc == NULL) {
		NET_EPOCH_EXIT(et);
		return (EOPNOTSUPP);
	}
	if_ref(parent);
	NET_EPOCH_EXIT(et);

	vst = malloc(sizeof(*vst), M_VLAN, M_NOWAIT);
	if (vst == NULL) {
		if_rele(parent);
		return (ENOMEM);
	}

	error = parent->if_snd_tag_alloc(parent, params, &vst->tag);
	if_rele(parent);
	if (error) {
		free(vst, M_VLAN);
		return (error);
	}

	m_snd_tag_init(&vst->com, ifp);

	*ppmt = &vst->com;
	return (0);
}

static int
vlan_snd_tag_modify(struct m_snd_tag *mst,
    union if_snd_tag_modify_params *params)
{
	struct vlan_snd_tag *vst;

	vst = mst_to_vst(mst);
	return (vst->tag->ifp->if_snd_tag_modify(vst->tag, params));
}

static int
vlan_snd_tag_query(struct m_snd_tag *mst,
    union if_snd_tag_query_params *params)
{
	struct vlan_snd_tag *vst;

	vst = mst_to_vst(mst);
	return (vst->tag->ifp->if_snd_tag_query(vst->tag, params));
}

static void
vlan_snd_tag_free(struct m_snd_tag *mst)
{
	struct vlan_snd_tag *vst;

	vst = mst_to_vst(mst);
	m_snd_tag_rele(vst->tag);
	free(vst, M_VLAN);
}
#endif