/*-
 * Copyright 1998 Massachusetts Institute of Technology
 * Copyright 2012 ADARA Networks, Inc.
 * Copyright 2017 Dell EMC Isilon
 *
 * Portions of this software were developed by Robert N. M. Watson under
 * contract to ADARA Networks, Inc.
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * if_vlan.c - pseudo-device driver for IEEE 802.1Q virtual LANs.
 * This is sort of sneaky in the implementation, since
 * we need to pretend to be enough of an Ethernet implementation
 * to make arp work.  The way we do this is by telling everyone
 * that we are an Ethernet, and then catch the packets that
 * ether_output() sends to us via if_transmit(), rewrite them for
 * use by the real outgoing interface, and ask it to send them.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_kern_tls.h"
#include "opt_vlan.h"
#include "opt_ratelimit.h"

#include <sys/param.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rmlock.h>
#include <sys/priv.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/sx.h>
#include <sys/taskqueue.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_clone.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <net/vnet.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#define	VLAN_DEF_HWIDTH	4
#define	VLAN_IFFLAGS	(IFF_BROADCAST | IFF_MULTICAST)

#define	UP_AND_RUNNING(ifp) \
    ((ifp)->if_flags & IFF_UP && (ifp)->if_drv_flags & IFF_DRV_RUNNING)

CK_SLIST_HEAD(ifvlanhead, ifvlan);

struct ifvlantrunk {
	struct ifnet	*parent;	/* parent interface of this trunk */
	struct mtx	lock;
#ifdef VLAN_ARRAY
#define	VLAN_ARRAY_SIZE	(EVL_VLID_MASK + 1)
	struct ifvlan	*vlans[VLAN_ARRAY_SIZE]; /* static table */
#else
	struct ifvlanhead *hash;	/* dynamic hash-list table */
	uint16_t	hmask;
	uint16_t	hwidth;
#endif
	int		refcnt;
};

#if defined(KERN_TLS) || defined(RATELIMIT)
struct vlan_snd_tag {
	struct m_snd_tag com;
	struct m_snd_tag *tag;
};

static inline struct vlan_snd_tag *
mst_to_vst(struct m_snd_tag *mst)
{

	return (__containerof(mst, struct vlan_snd_tag, com));
}
#endif

/*
 * This macro provides a facility to iterate over every vlan on a trunk with
 * the assumption that none will be added/removed during iteration.
 */
#ifdef VLAN_ARRAY
#define VLAN_FOREACH(_ifv, _trunk) \
	size_t _i; \
	for (_i = 0; _i < VLAN_ARRAY_SIZE; _i++) \
		if (((_ifv) = (_trunk)->vlans[_i]) != NULL)
#else /* VLAN_ARRAY */
#define VLAN_FOREACH(_ifv, _trunk) \
	struct ifvlan *_next; \
	size_t _i; \
	for (_i = 0; _i < (1 << (_trunk)->hwidth); _i++) \
		CK_SLIST_FOREACH_SAFE((_ifv), &(_trunk)->hash[_i], ifv_list, _next)
#endif /* VLAN_ARRAY */

/*
 * This macro provides a facility to iterate over every vlan on a trunk while
 * also modifying the number of vlans on the trunk. The iteration continues
 * until some condition is met or there are no more vlans on the trunk.
 */
#ifdef VLAN_ARRAY
/* The VLAN_ARRAY case is simple -- just a for loop using the condition. */
#define VLAN_FOREACH_UNTIL_SAFE(_ifv, _trunk, _cond) \
	size_t _i; \
	for (_i = 0; !(_cond) && _i < VLAN_ARRAY_SIZE; _i++) \
		if (((_ifv) = (_trunk)->vlans[_i]))
#else /* VLAN_ARRAY */
/*
 * The hash table case is more complicated. We allow for the hash table to be
 * modified (i.e. vlans removed) while we are iterating over it. To allow for
 * this we must restart the iteration every time we "touch" something during
 * the iteration, since removal will resize the hash table and invalidate our
 * current position. If acting on the touched element causes the trunk to be
 * emptied, then iteration also stops.
 */
#define VLAN_FOREACH_UNTIL_SAFE(_ifv, _trunk, _cond) \
	size_t _i; \
	bool _touch = false; \
	for (_i = 0; \
	    !(_cond) && _i < (1 << (_trunk)->hwidth); \
	    _i = (_touch && ((_trunk) != NULL) ? 0 : _i + 1), _touch = false) \
		if (((_ifv) = CK_SLIST_FIRST(&(_trunk)->hash[_i])) != NULL && \
		    (_touch = true))
#endif /* VLAN_ARRAY */

struct vlan_mc_entry {
	struct sockaddr_dl		mc_addr;
	CK_SLIST_ENTRY(vlan_mc_entry)	mc_entries;
	struct epoch_context		mc_epoch_ctx;
};

struct ifvlan {
	struct	ifvlantrunk *ifv_trunk;
	struct	ifnet *ifv_ifp;
#define	TRUNK(ifv)	((ifv)->ifv_trunk)
#define	PARENT(ifv)	((ifv)->ifv_trunk->parent)
	void	*ifv_cookie;
	int	ifv_pflags;	/* special flags we have set on parent */
	int	ifv_capenable;
	int	ifv_encaplen;	/* encapsulation length */
	int	ifv_mtufudge;	/* MTU fudged by this much */
	int	ifv_mintu;	/* min transmission unit */
	uint16_t ifv_proto;	/* encapsulation ethertype */
	uint16_t ifv_tag;	/* tag to apply on packets leaving if */
	uint16_t ifv_vid;	/* VLAN ID */
	uint8_t	ifv_pcp;	/* Priority Code Point (PCP). */
	struct task lladdr_task;
	CK_SLIST_HEAD(, vlan_mc_entry) vlan_mc_listhead;
#ifndef VLAN_ARRAY
	CK_SLIST_ENTRY(ifvlan) ifv_list;
#endif
};

/* Special flags we should propagate to parent. */
static struct {
	int flag;
	int (*func)(struct ifnet *, int);
} vlan_pflags[] = {
	{IFF_PROMISC, ifpromisc},
	{IFF_ALLMULTI, if_allmulti},
	{0, NULL}
};

extern int vlan_mtag_pcp;

static const char vlanname[] = "vlan";
static MALLOC_DEFINE(M_VLAN, vlanname, "802.1Q Virtual LAN Interface");

static eventhandler_tag ifdetach_tag;
static eventhandler_tag iflladdr_tag;

/*
 * if_vlan uses two module-level synchronization primitives to allow concurrent
 * modification of vlan interfaces and (mostly) allow for vlans to be destroyed
 * while they are being used for tx/rx. To accomplish this in a way that has
 * acceptable performance and cooperation with other parts of the network stack
 * there is a non-sleepable epoch(9) and an sx(9).
 *
 * The performance-sensitive paths that warrant using the epoch(9) are
 * vlan_transmit and vlan_input. Both have to check for the vlan interface's
 * existence using if_vlantrunk, and being in the network tx/rx paths the use
 * of an epoch(9) gives a measurable improvement in performance.
 *
 * The reason for having an sx(9) is mostly because there are still areas that
 * must be sleepable and also have safe concurrent access to a vlan interface.
 * Since the sx(9) exists, it is used by default in most paths unless sleeping
 * is not permitted, or if it is not clear whether sleeping is permitted.
 */
#define	_VLAN_SX_ID	ifv_sx

static struct sx _VLAN_SX_ID;

#define	VLAN_LOCKING_INIT() \
	sx_init(&_VLAN_SX_ID, "vlan_sx")

#define	VLAN_LOCKING_DESTROY() \
	sx_destroy(&_VLAN_SX_ID)

#define	VLAN_SLOCK()			sx_slock(&_VLAN_SX_ID)
#define	VLAN_SUNLOCK()			sx_sunlock(&_VLAN_SX_ID)
#define	VLAN_XLOCK()			sx_xlock(&_VLAN_SX_ID)
#define	VLAN_XUNLOCK()			sx_xunlock(&_VLAN_SX_ID)
#define	VLAN_SLOCK_ASSERT()		sx_assert(&_VLAN_SX_ID, SA_SLOCKED)
#define	VLAN_XLOCK_ASSERT()		sx_assert(&_VLAN_SX_ID, SA_XLOCKED)
#define	VLAN_SXLOCK_ASSERT()		sx_assert(&_VLAN_SX_ID, SA_LOCKED)

/*
 * We also have a per-trunk mutex that should be acquired when changing
 * its state.
 */
#define	TRUNK_LOCK_INIT(trunk)		mtx_init(&(trunk)->lock, vlanname, NULL, MTX_DEF)
#define	TRUNK_LOCK_DESTROY(trunk)	mtx_destroy(&(trunk)->lock)
#define	TRUNK_WLOCK(trunk)		mtx_lock(&(trunk)->lock)
#define	TRUNK_WUNLOCK(trunk)		mtx_unlock(&(trunk)->lock)
#define	TRUNK_LOCK_ASSERT(trunk)	MPASS(in_epoch(net_epoch_preempt) || mtx_owned(&(trunk)->lock))
#define	TRUNK_WLOCK_ASSERT(trunk)	mtx_assert(&(trunk)->lock, MA_OWNED);

/*
 * The VLAN_ARRAY substitutes the dynamic hash with a static array
 * with 4096 entries. In theory this can give a boost in processing,
 * however in practice it does not. Probably this is because the array
 * is too big to fit into CPU cache.
 */
#ifndef VLAN_ARRAY
static	void vlan_inithash(struct ifvlantrunk *trunk);
static	void vlan_freehash(struct ifvlantrunk *trunk);
static	int vlan_inshash(struct ifvlantrunk *trunk, struct ifvlan *ifv);
static	int vlan_remhash(struct ifvlantrunk *trunk, struct ifvlan *ifv);
static	void vlan_growhash(struct ifvlantrunk *trunk, int howmuch);
static __inline struct ifvlan * vlan_gethash(struct ifvlantrunk *trunk,
	uint16_t vid);
#endif
static	void trunk_destroy(struct ifvlantrunk *trunk);

static	void vlan_init(void *foo);
static	void vlan_input(struct ifnet *ifp, struct mbuf *m);
static	int vlan_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr);
#if defined(KERN_TLS) || defined(RATELIMIT)
static	int vlan_snd_tag_alloc(struct ifnet *,
    union if_snd_tag_alloc_params *, struct m_snd_tag **);
static	int vlan_snd_tag_modify(struct m_snd_tag *,
    union if_snd_tag_modify_params *);
static	int vlan_snd_tag_query(struct m_snd_tag *,
    union if_snd_tag_query_params *);
static	void vlan_snd_tag_free(struct m_snd_tag *);
#endif
static	void vlan_qflush(struct ifnet *ifp);
static	int vlan_setflag(struct ifnet *ifp, int flag, int status,
    int (*func)(struct ifnet *, int));
static	int vlan_setflags(struct ifnet *ifp, int status);
static	int vlan_setmulti(struct ifnet *ifp);
static	int vlan_transmit(struct ifnet *ifp, struct mbuf *m);
static	int vlan_output(struct ifnet *ifp, struct mbuf *m,
    const struct sockaddr *dst, struct route *ro);
static	void vlan_unconfig(struct ifnet *ifp);
static	void vlan_unconfig_locked(struct ifnet *ifp, int departing);
static	int vlan_config(struct ifvlan *ifv, struct ifnet *p, uint16_t tag);
static	void vlan_link_state(struct ifnet *ifp);
static	void vlan_capabilities(struct ifvlan *ifv);
static	void vlan_trunk_capabilities(struct ifnet *ifp);

static	struct ifnet *vlan_clone_match_ethervid(const char *, int *);
static	int vlan_clone_match(struct if_clone *, const char *);
static	int vlan_clone_create(struct if_clone *, char *, size_t, caddr_t);
static	int vlan_clone_destroy(struct if_clone *, struct ifnet *);

static	void vlan_ifdetach(void *arg, struct ifnet *ifp);
static	void vlan_iflladdr(void *arg, struct ifnet *ifp);

static	void vlan_lladdr_fn(void *arg, int pending);

static struct if_clone *vlan_cloner;

#ifdef VIMAGE
VNET_DEFINE_STATIC(struct if_clone *, vlan_cloner);
#define	V_vlan_cloner	VNET(vlan_cloner)
#endif

static void
vlan_mc_free(struct epoch_context *ctx)
{
	struct vlan_mc_entry *mc = __containerof(ctx, struct vlan_mc_entry, mc_epoch_ctx);
	free(mc, M_VLAN);
}

#ifndef VLAN_ARRAY
#define HASH(n, m)	((((n) >> 8) ^ ((n) >> 4) ^ (n)) & (m))

static void
vlan_inithash(struct ifvlantrunk *trunk)
{
	int i, n;

	/*
	 * The trunk must not be locked here since we call malloc(M_WAITOK).
	 * It is OK in case this function is called before the trunk struct
	 * gets hooked up and becomes visible from other threads.
	 */

	KASSERT(trunk->hwidth == 0 && trunk->hash == NULL,
	    ("%s: hash already initialized", __func__));

	trunk->hwidth = VLAN_DEF_HWIDTH;
	n = 1 << trunk->hwidth;
	trunk->hmask = n - 1;
	trunk->hash = malloc(sizeof(struct ifvlanhead) * n, M_VLAN, M_WAITOK);
	for (i = 0; i < n; i++)
		CK_SLIST_INIT(&trunk->hash[i]);
}

static void
vlan_freehash(struct ifvlantrunk *trunk)
{
#ifdef INVARIANTS
	int i;

	KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__));
	for (i = 0; i < (1 << trunk->hwidth); i++)
		KASSERT(CK_SLIST_EMPTY(&trunk->hash[i]),
		    ("%s: hash table not empty", __func__));
#endif
	free(trunk->hash, M_VLAN);
	trunk->hash = NULL;
	trunk->hwidth = trunk->hmask = 0;
}

static int
vlan_inshash(struct ifvlantrunk *trunk, struct ifvlan *ifv)
{
	int i, b;
	struct ifvlan *ifv2;

	VLAN_XLOCK_ASSERT();
	KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__));

	b = 1 << trunk->hwidth;
	i = HASH(ifv->ifv_vid, trunk->hmask);
	CK_SLIST_FOREACH(ifv2, &trunk->hash[i], ifv_list)
		if (ifv->ifv_vid == ifv2->ifv_vid)
			return (EEXIST);

	/*
	 * Grow the hash when the number of vlans exceeds half of the number of
	 * hash buckets squared. This will make the average linked-list length
	 * buckets/2.
	 */
	if (trunk->refcnt > (b * b) / 2) {
		vlan_growhash(trunk, 1);
		i = HASH(ifv->ifv_vid, trunk->hmask);
	}
	CK_SLIST_INSERT_HEAD(&trunk->hash[i], ifv, ifv_list);
	trunk->refcnt++;

	return (0);
}

static int
vlan_remhash(struct ifvlantrunk *trunk, struct ifvlan *ifv)
{
	int i, b;
	struct ifvlan *ifv2;

	VLAN_XLOCK_ASSERT();
	KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__));

	b = 1 << trunk->hwidth;
	i = HASH(ifv->ifv_vid, trunk->hmask);
	CK_SLIST_FOREACH(ifv2, &trunk->hash[i], ifv_list)
		if (ifv2 == ifv) {
			trunk->refcnt--;
			CK_SLIST_REMOVE(&trunk->hash[i], ifv2, ifvlan, ifv_list);
			if (trunk->refcnt < (b * b) / 2)
				vlan_growhash(trunk, -1);
			return (0);
		}

	panic("%s: vlan not found\n", __func__);
	return (ENOENT); /*NOTREACHED*/
}

/*
 * Grow the hash larger or smaller if memory permits.
 */
static void
vlan_growhash(struct ifvlantrunk *trunk, int howmuch)
{
	struct ifvlan *ifv;
	struct ifvlanhead *hash2;
	int hwidth2, i, j, n, n2;

	VLAN_XLOCK_ASSERT();
	KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__));

	if (howmuch == 0) {
		/* Harmless yet obvious coding error */
		printf("%s: howmuch is 0\n", __func__);
		return;
	}

	hwidth2 = trunk->hwidth + howmuch;
	n = 1 << trunk->hwidth;
	n2 = 1 << hwidth2;
	/* Do not shrink the table below the default */
	if (hwidth2 < VLAN_DEF_HWIDTH)
		return;

	hash2 = malloc(sizeof(struct ifvlanhead) * n2, M_VLAN, M_WAITOK);
	if (hash2 == NULL) {
		printf("%s: out of memory -- hash size not changed\n",
		    __func__);
		return;		/* We can live with the old hash table */
	}
	for (j = 0; j < n2; j++)
		CK_SLIST_INIT(&hash2[j]);
	for (i = 0; i < n; i++)
		while ((ifv = CK_SLIST_FIRST(&trunk->hash[i])) != NULL) {
			CK_SLIST_REMOVE(&trunk->hash[i], ifv, ifvlan, ifv_list);
			j = HASH(ifv->ifv_vid, n2 - 1);
			CK_SLIST_INSERT_HEAD(&hash2[j], ifv, ifv_list);
		}
	NET_EPOCH_WAIT();
	free(trunk->hash, M_VLAN);
	trunk->hash = hash2;
	trunk->hwidth = hwidth2;
	trunk->hmask = n2 - 1;

	if (bootverbose)
		if_printf(trunk->parent,
		    "VLAN hash table resized from %d to %d buckets\n", n, n2);
}

static __inline struct ifvlan *
vlan_gethash(struct ifvlantrunk *trunk, uint16_t vid)
{
	struct ifvlan *ifv;

	NET_EPOCH_ASSERT();

	CK_SLIST_FOREACH(ifv, &trunk->hash[HASH(vid, trunk->hmask)], ifv_list)
		if (ifv->ifv_vid == vid)
			return (ifv);
	return (NULL);
}

#if 0
/* Debugging code to view the hashtables. */
static void
vlan_dumphash(struct ifvlantrunk *trunk)
{
	int i;
	struct ifvlan *ifv;

	for (i = 0; i < (1 << trunk->hwidth); i++) {
		printf("%d: ", i);
		CK_SLIST_FOREACH(ifv, &trunk->hash[i], ifv_list)
			printf("%s ", ifv->ifv_ifp->if_xname);
		printf("\n");
	}
}
#endif /* 0 */
#else

static __inline struct ifvlan *
vlan_gethash(struct ifvlantrunk *trunk, uint16_t vid)
{

	return trunk->vlans[vid];
}

static __inline int
vlan_inshash(struct ifvlantrunk *trunk, struct ifvlan *ifv)
{

	if (trunk->vlans[ifv->ifv_vid] != NULL)
		return EEXIST;
	trunk->vlans[ifv->ifv_vid] = ifv;
	trunk->refcnt++;

	return (0);
}

static __inline int
vlan_remhash(struct ifvlantrunk *trunk, struct ifvlan *ifv)
{

	trunk->vlans[ifv->ifv_vid] = NULL;
	trunk->refcnt--;

	return (0);
}

static __inline void
vlan_freehash(struct ifvlantrunk *trunk)
{
}

static __inline void
vlan_inithash(struct ifvlantrunk *trunk)
{
}

#endif /* !VLAN_ARRAY */

static void
trunk_destroy(struct ifvlantrunk *trunk)
{
	VLAN_XLOCK_ASSERT();

	vlan_freehash(trunk);
	trunk->parent->if_vlantrunk = NULL;
	TRUNK_LOCK_DESTROY(trunk);
	if_rele(trunk->parent);
	free(trunk, M_VLAN);
}

/*
 * Program our multicast filter. What we're actually doing is
 * programming the multicast filter of the parent. This has the
 * side effect of causing the parent interface to receive multicast
 * traffic that it doesn't really want, which ends up being discarded
 * later by the upper protocol layers.  Unfortunately, there's no way
 * to avoid this: there really is only one physical interface.
 */
static int
vlan_setmulti(struct ifnet *ifp)
{
	struct ifnet		*ifp_p;
	struct ifmultiaddr	*ifma;
	struct ifvlan		*sc;
	struct vlan_mc_entry	*mc;
	int			error;

	VLAN_XLOCK_ASSERT();

	/* Find the parent. */
	sc = ifp->if_softc;
	ifp_p = PARENT(sc);

	CURVNET_SET_QUIET(ifp_p->if_vnet);

	/* First, remove any existing filter entries. */
	while ((mc = CK_SLIST_FIRST(&sc->vlan_mc_listhead)) != NULL) {
		CK_SLIST_REMOVE_HEAD(&sc->vlan_mc_listhead, mc_entries);
		(void)if_delmulti(ifp_p, (struct sockaddr *)&mc->mc_addr);
		epoch_call(net_epoch_preempt, &mc->mc_epoch_ctx, vlan_mc_free);
	}

	/* Now program new ones. */
	IF_ADDR_WLOCK(ifp);
	CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		mc = malloc(sizeof(struct vlan_mc_entry), M_VLAN, M_NOWAIT);
		if (mc == NULL) {
			IF_ADDR_WUNLOCK(ifp);
			CURVNET_RESTORE();
			return (ENOMEM);
		}
		bcopy(ifma->ifma_addr, &mc->mc_addr, ifma->ifma_addr->sa_len);
		mc->mc_addr.sdl_index = ifp_p->if_index;
		CK_SLIST_INSERT_HEAD(&sc->vlan_mc_listhead, mc, mc_entries);
	}
	IF_ADDR_WUNLOCK(ifp);
	CK_SLIST_FOREACH (mc, &sc->vlan_mc_listhead, mc_entries) {
		error = if_addmulti(ifp_p, (struct sockaddr *)&mc->mc_addr,
		    NULL);
		if (error) {
			CURVNET_RESTORE();
			return (error);
		}
	}

	CURVNET_RESTORE();
	return (0);
}

/*
 * A handler for parent interface link layer address changes.
 * If the parent interface link layer address is changed we
 * should also change it on all children vlans.
 */
static void
vlan_iflladdr(void *arg __unused, struct ifnet *ifp)
{
	struct epoch_tracker et;
	struct ifvlan *ifv;
	struct ifnet *ifv_ifp;
	struct ifvlantrunk *trunk;
	struct sockaddr_dl *sdl;

	/* Need the epoch since this is run on taskqueue_swi. */
	NET_EPOCH_ENTER(et);
	trunk = ifp->if_vlantrunk;
	if (trunk == NULL) {
		NET_EPOCH_EXIT(et);
		return;
	}

	/*
	 * OK, it's a trunk.  Loop over and change all vlan's lladdrs on it.
	 * We need an exclusive lock here to prevent concurrent SIOCSIFLLADDR
	 * ioctl calls on the parent garbling the lladdr of the child vlan.
	 */
	TRUNK_WLOCK(trunk);
	VLAN_FOREACH(ifv, trunk) {
		/*
		 * Copy the new lladdr into the ifv_ifp, enqueue a task
		 * to actually call if_setlladdr. if_setlladdr needs to
		 * be deferred to a taskqueue because it will call into
		 * the if_vlan ioctl path and try to acquire the global
		 * lock.
		 */
		ifv_ifp = ifv->ifv_ifp;
		bcopy(IF_LLADDR(ifp), IF_LLADDR(ifv_ifp),
		    ifp->if_addrlen);
		sdl = (struct sockaddr_dl *)ifv_ifp->if_addr->ifa_addr;
		sdl->sdl_alen = ifp->if_addrlen;
		taskqueue_enqueue(taskqueue_thread, &ifv->lladdr_task);
	}
	TRUNK_WUNLOCK(trunk);
	NET_EPOCH_EXIT(et);
}

/*
 * A handler for network interface departure events.
 * Track departure of trunks here so that we don't access invalid
 * pointers or whatever if a trunk is ripped from under us, e.g.,
 * by ejecting its hot-plug card.  However, if an ifnet is simply
 * being renamed, then there's no need to tear down the state.
 */
static void
vlan_ifdetach(void *arg __unused, struct ifnet *ifp)
{
	struct ifvlan *ifv;
	struct ifvlantrunk *trunk;

	/* If the ifnet is just being renamed, don't do anything. */
	if (ifp->if_flags & IFF_RENAMING)
		return;
	VLAN_XLOCK();
	trunk = ifp->if_vlantrunk;
	if (trunk == NULL) {
		VLAN_XUNLOCK();
		return;
	}

	/*
	 * OK, it's a trunk.  Loop over and detach all vlan's on it.
	 * Check trunk pointer after each vlan_unconfig() as it will
	 * free it and set to NULL after the last vlan was detached.
	 */
	VLAN_FOREACH_UNTIL_SAFE(ifv, ifp->if_vlantrunk,
	    ifp->if_vlantrunk == NULL)
		vlan_unconfig_locked(ifv->ifv_ifp, 1);

	/* Trunk should have been destroyed in vlan_unconfig(). */
	KASSERT(ifp->if_vlantrunk == NULL, ("%s: purge failed", __func__));
	VLAN_XUNLOCK();
}

/*
 * Return the trunk device for a virtual interface.
 */
static struct ifnet *
vlan_trunkdev(struct ifnet *ifp)
{
	struct epoch_tracker et;
	struct ifvlan *ifv;

	if (ifp->if_type != IFT_L2VLAN)
		return (NULL);

	NET_EPOCH_ENTER(et);
	ifv = ifp->if_softc;
	ifp = NULL;
	if (ifv->ifv_trunk)
		ifp = PARENT(ifv);
	NET_EPOCH_EXIT(et);
	return (ifp);
}

/*
 * Return the 12-bit VLAN VID for this interface, for use by external
 * components such as Infiniband.
 *
 * XXXRW: Note that the function name here is historical; it should be named
 * vlan_vid().
 */
static int
vlan_tag(struct ifnet *ifp, uint16_t *vidp)
{
	struct ifvlan *ifv;

	if (ifp->if_type != IFT_L2VLAN)
		return (EINVAL);
	ifv = ifp->if_softc;
	*vidp = ifv->ifv_vid;
	return (0);
}

static int
vlan_pcp(struct ifnet *ifp, uint16_t *pcpp)
{
	struct ifvlan *ifv;

	if (ifp->if_type != IFT_L2VLAN)
		return (EINVAL);
	ifv = ifp->if_softc;
	*pcpp = ifv->ifv_pcp;
	return (0);
}

/*
 * Return a driver specific cookie for this interface.  Synchronization
 * with setcookie must be provided by the driver.
 */
static void *
vlan_cookie(struct ifnet *ifp)
{
	struct ifvlan *ifv;

	if (ifp->if_type != IFT_L2VLAN)
		return (NULL);
	ifv = ifp->if_softc;
	return (ifv->ifv_cookie);
}

/*
 * Store a cookie in our softc that drivers can use to store driver
 * private per-instance data in.
 */
static int
vlan_setcookie(struct ifnet *ifp, void *cookie)
{
	struct ifvlan *ifv;

	if (ifp->if_type != IFT_L2VLAN)
		return (EINVAL);
	ifv = ifp->if_softc;
	ifv->ifv_cookie = cookie;
	return (0);
}

/*
 * Return the vlan device present at the specific VID.
 */
static struct ifnet *
vlan_devat(struct ifnet *ifp, uint16_t vid)
{
	struct epoch_tracker et;
	struct ifvlantrunk *trunk;
	struct ifvlan *ifv;

	NET_EPOCH_ENTER(et);
	trunk = ifp->if_vlantrunk;
	if (trunk == NULL) {
		NET_EPOCH_EXIT(et);
		return (NULL);
	}
	ifp = NULL;
	ifv = vlan_gethash(trunk, vid);
	if (ifv)
		ifp = ifv->ifv_ifp;
	NET_EPOCH_EXIT(et);
	return (ifp);
}

/*
 * Recalculate the cached VLAN tag exposed via the MIB.
 */
static void
vlan_tag_recalculate(struct ifvlan *ifv)
{

	ifv->ifv_tag = EVL_MAKETAG(ifv->ifv_vid, ifv->ifv_pcp, 0);
}

/*
 * VLAN support can be loaded as a module.  The only place in the
 * system that's intimately aware of this is ether_input.  We hook
 * into this code through vlan_input_p which is defined there and
 * set here.  No one else in the system should be aware of this so
 * we use an explicit reference here.
 */
extern	void (*vlan_input_p)(struct ifnet *, struct mbuf *);

/* For if_link_state_change() eyes only... */
extern	void (*vlan_link_state_p)(struct ifnet *);

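/*
 * Module event handler: on load, register the departure/lladdr event
 * handlers and hook the function pointers used by ether_input() and
 * friends; on unload, undo all of that.
 */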
static int
vlan_modevent(module_t mod, int type, void *data)
{

	switch (type) {
	case MOD_LOAD:
		ifdetach_tag = EVENTHANDLER_REGISTER(ifnet_departure_event,
		    vlan_ifdetach, NULL, EVENTHANDLER_PRI_ANY);
		if (ifdetach_tag == NULL)
			return (ENOMEM);
		iflladdr_tag = EVENTHANDLER_REGISTER(iflladdr_event,
		    vlan_iflladdr, NULL, EVENTHANDLER_PRI_ANY);
		if (iflladdr_tag == NULL)
			return (ENOMEM);
		VLAN_LOCKING_INIT();
		vlan_input_p = vlan_input;
		vlan_link_state_p = vlan_link_state;
		vlan_trunk_cap_p = vlan_trunk_capabilities;
		vlan_trunkdev_p = vlan_trunkdev;
		vlan_cookie_p = vlan_cookie;
		vlan_setcookie_p = vlan_setcookie;
		vlan_tag_p = vlan_tag;
		vlan_pcp_p = vlan_pcp;
		vlan_devat_p = vlan_devat;
#ifndef VIMAGE
		vlan_cloner = if_clone_advanced(vlanname, 0, vlan_clone_match,
		    vlan_clone_create, vlan_clone_destroy);
#endif
		if (bootverbose)
			printf("vlan: initialized, using "
#ifdef VLAN_ARRAY
			    "full-size arrays"
#else
			    "hash tables with chaining"
#endif
			    "\n");
		break;
	case MOD_UNLOAD:
#ifndef VIMAGE
		if_clone_detach(vlan_cloner);
#endif
		EVENTHANDLER_DEREGISTER(ifnet_departure_event, ifdetach_tag);
		EVENTHANDLER_DEREGISTER(iflladdr_event, iflladdr_tag);
		vlan_input_p = NULL;
		vlan_link_state_p = NULL;
		vlan_trunk_cap_p = NULL;
		vlan_trunkdev_p = NULL;
		vlan_tag_p = NULL;
		vlan_cookie_p = NULL;
		vlan_setcookie_p = NULL;
		vlan_devat_p = NULL;
		VLAN_LOCKING_DESTROY();
		if (bootverbose)
			printf("vlan: unloaded\n");
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (0);
}

static moduledata_t vlan_mod = {
	"if_vlan",
	vlan_modevent,
	0
};

DECLARE_MODULE(if_vlan, vlan_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
MODULE_VERSION(if_vlan, 3);

#ifdef VIMAGE
static void
vnet_vlan_init(const void *unused __unused)
{

	vlan_cloner = if_clone_advanced(vlanname, 0, vlan_clone_match,
	    vlan_clone_create, vlan_clone_destroy);
	V_vlan_cloner = vlan_cloner;
}
VNET_SYSINIT(vnet_vlan_init, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY,
    vnet_vlan_init, NULL);

static void
vnet_vlan_uninit(const void *unused __unused)
{

	if_clone_detach(V_vlan_cloner);
}
VNET_SYSUNINIT(vnet_vlan_uninit, SI_SUB_INIT_IF, SI_ORDER_FIRST,
    vnet_vlan_uninit, NULL);
#endif

/*
 * Check for <etherif>.<vlan> style interface names.
 */
static struct ifnet *
vlan_clone_match_ethervid(const char *name, int *vidp)
{
	char ifname[IFNAMSIZ];
	char *cp;
	struct ifnet *ifp;
	int vid;

	strlcpy(ifname, name, IFNAMSIZ);
	if ((cp = strchr(ifname, '.')) == NULL)
		return (NULL);
	*cp = '\0';
	if ((ifp = ifunit_ref(ifname)) == NULL)
		return (NULL);
	/* Parse VID. */
	if (*++cp == '\0') {
		if_rele(ifp);
		return (NULL);
	}
	vid = 0;
	for (; *cp >= '0' && *cp <= '9'; cp++)
		vid = (vid * 10) + (*cp - '0');
	if (*cp != '\0') {
		if_rele(ifp);
		return (NULL);
	}
	if (vidp != NULL)
		*vidp = vid;

	return (ifp);
}

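/*
 * Return non-zero if the name is one we can clone: either an
 * <etherif>.<vlan> style name or "vlan" followed by a unit number.
 */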
static int
vlan_clone_match(struct if_clone *ifc, const char *name)
{
	const char *cp;

	if (vlan_clone_match_ethervid(name, NULL) != NULL)
		return (1);

	if (strncmp(vlanname, name, strlen(vlanname)) != 0)
		return (0);
	for (cp = name + 4; *cp != '\0'; cp++) {
		if (*cp < '0' || *cp > '9')
			return (0);
	}

	return (1);
}

static int
vlan_clone_create(struct if_clone *ifc, char *name, size_t len, caddr_t params)
{
	char *dp;
	int wildcard;
	int unit;
	int error;
	int vid;
	struct ifvlan *ifv;
	struct ifnet *ifp;
	struct ifnet *p;
	struct ifaddr *ifa;
	struct sockaddr_dl *sdl;
	struct vlanreq vlr;
	static const u_char eaddr[ETHER_ADDR_LEN];	/* 00:00:00:00:00:00 */

	/*
	 * There are 3 (ugh) ways to specify the cloned device:
	 * o pass a parameter block with the clone request.
	 * o specify parameters in the text of the clone device name
	 * o specify no parameters and get an unattached device that
	 *   must be configured separately.
	 * The first technique is preferred; the latter two are
	 * supported for backwards compatibility.
	 *
	 * XXXRW: Note historic use of the word "tag" here.  New ioctls may be
	 * called for.
	 */
	if (params) {
		error = copyin(params, &vlr, sizeof(vlr));
		if (error)
			return error;
		p = ifunit_ref(vlr.vlr_parent);
		if (p == NULL)
			return (ENXIO);
		error = ifc_name2unit(name, &unit);
		if (error != 0) {
			if_rele(p);
			return (error);
		}
		vid = vlr.vlr_tag;
		wildcard = (unit < 0);
	} else if ((p = vlan_clone_match_ethervid(name, &vid)) != NULL) {
		unit = -1;
		wildcard = 0;
	} else {
		p = NULL;
		error = ifc_name2unit(name, &unit);
		if (error != 0)
			return (error);

		wildcard = (unit < 0);
	}

	error = ifc_alloc_unit(ifc, &unit);
	if (error != 0) {
		if (p != NULL)
			if_rele(p);
		return (error);
	}

	/* In the wildcard case, we need to update the name. */
	if (wildcard) {
		for (dp = name; *dp != '\0'; dp++);
		if (snprintf(dp, len - (dp-name), "%d", unit) >
		    len - (dp-name) - 1) {
			panic("%s: interface name too long", __func__);
		}
	}

	ifv = malloc(sizeof(struct ifvlan), M_VLAN, M_WAITOK | M_ZERO);
	ifp = ifv->ifv_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		ifc_free_unit(ifc, unit);
		free(ifv, M_VLAN);
		if (p != NULL)
			if_rele(p);
		return (ENOSPC);
	}
	CK_SLIST_INIT(&ifv->vlan_mc_listhead);
	ifp->if_softc = ifv;
	/*
	 * Set the name manually rather than using if_initname because
	 * we don't conform to the default naming convention for interfaces.
	 */
	strlcpy(ifp->if_xname, name, IFNAMSIZ);
	ifp->if_dname = vlanname;
	ifp->if_dunit = unit;

	ifp->if_init = vlan_init;
	ifp->if_transmit = vlan_transmit;
	ifp->if_qflush = vlan_qflush;
	ifp->if_ioctl = vlan_ioctl;
#if defined(KERN_TLS) || defined(RATELIMIT)
	ifp->if_snd_tag_alloc = vlan_snd_tag_alloc;
	ifp->if_snd_tag_modify = vlan_snd_tag_modify;
	ifp->if_snd_tag_query = vlan_snd_tag_query;
	ifp->if_snd_tag_free = vlan_snd_tag_free;
#endif
	ifp->if_flags = VLAN_IFFLAGS;
	ether_ifattach(ifp, eaddr);
	/* Now undo some of the damage... */
	ifp->if_baudrate = 0;
	ifp->if_type = IFT_L2VLAN;
	ifp->if_hdrlen = ETHER_VLAN_ENCAP_LEN;
	ifa = ifp->if_addr;
	sdl = (struct sockaddr_dl *)ifa->ifa_addr;
	sdl->sdl_type = IFT_L2VLAN;

	if (p != NULL) {
		error = vlan_config(ifv, p, vid);
		if_rele(p);
		if (error != 0) {
			/*
			 * Since we've partially failed, we need to back
			 * out all the way, otherwise userland could get
			 * confused.  Thus, we destroy the interface.
			 */
			ether_ifdetach(ifp);
			vlan_unconfig(ifp);
			if_free(ifp);
			ifc_free_unit(ifc, unit);
			free(ifv, M_VLAN);

			return (error);
		}
	}

	return (0);
}

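/*
 * Destroy a cloned vlan interface: detach it from the system, unconfigure
 * it from its parent and release all associated resources.
 */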
static int
vlan_clone_destroy(struct if_clone *ifc, struct ifnet *ifp)
{
	struct ifvlan *ifv = ifp->if_softc;
	int unit = ifp->if_dunit;

	ether_ifdetach(ifp);	/* first, remove it from system-wide lists */
	vlan_unconfig(ifp);	/* now it can be unconfigured and freed */
	/*
	 * We should have the only reference to the ifv now, so we can now
	 * drain any remaining lladdr task before freeing the ifnet and the
	 * ifvlan.
	 */
	taskqueue_drain(taskqueue_thread, &ifv->lladdr_task);
	NET_EPOCH_WAIT();
	if_free(ifp);
	free(ifv, M_VLAN);
	ifc_free_unit(ifc, unit);

	return (0);
}

/*
 * The ifp->if_init entry point for vlan(4) is a no-op.
 */
static void
vlan_init(void *foo __unused)
{
}

/*
 * The if_transmit method for vlan(4) interface.
 */
static int
vlan_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct epoch_tracker et;
	struct ifvlan *ifv;
	struct ifnet *p;
	int error, len, mcast;

	NET_EPOCH_ENTER(et);
	ifv = ifp->if_softc;
	if (TRUNK(ifv) == NULL) {
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		NET_EPOCH_EXIT(et);
		m_freem(m);
		return (ENETDOWN);
	}
	p = PARENT(ifv);
	len = m->m_pkthdr.len;
	mcast = (m->m_flags & (M_MCAST | M_BCAST)) ? 1 : 0;

	BPF_MTAP(ifp, m);

#if defined(KERN_TLS) || defined(RATELIMIT)
	if (m->m_pkthdr.csum_flags & CSUM_SND_TAG) {
		struct vlan_snd_tag *vst;
		struct m_snd_tag *mst;

		MPASS(m->m_pkthdr.snd_tag->ifp == ifp);
		mst = m->m_pkthdr.snd_tag;
		vst = mst_to_vst(mst);
		if (vst->tag->ifp != p) {
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			NET_EPOCH_EXIT(et);
			m_freem(m);
			return (EAGAIN);
		}

		m->m_pkthdr.snd_tag = m_snd_tag_ref(vst->tag);
		m_snd_tag_rele(mst);
	}
#endif

	/*
	 * Do not run parent's if_transmit() if the parent is not up,
	 * or parent's driver will cause a system crash.
	 */
	if (!UP_AND_RUNNING(p)) {
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		NET_EPOCH_EXIT(et);
		m_freem(m);
		return (ENETDOWN);
	}

	if (!ether_8021q_frame(&m, ifp, p, ifv->ifv_vid, ifv->ifv_pcp)) {
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		NET_EPOCH_EXIT(et);
		return (0);
	}

	/*
	 * Send it, precisely as ether_output() would have.
	 */
	error = (p->if_transmit)(p, m);
	if (error == 0) {
		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		if_inc_counter(ifp, IFCOUNTER_OBYTES, len);
		if_inc_counter(ifp, IFCOUNTER_OMCASTS, mcast);
	} else
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	NET_EPOCH_EXIT(et);
	return (error);
}

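/*
 * The if_output method for vlan(4): look up the current parent and hand
 * the packet to the parent's if_output, so that a cached copy of the
 * parent's routine can never become stale.
 */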
static int
vlan_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst,
    struct route *ro)
{
	struct epoch_tracker et;
	struct ifvlan *ifv;
	struct ifnet *p;

	NET_EPOCH_ENTER(et);
	ifv = ifp->if_softc;
	if (TRUNK(ifv) == NULL) {
		NET_EPOCH_EXIT(et);
		m_freem(m);
		return (ENETDOWN);
	}
	p = PARENT(ifv);
	NET_EPOCH_EXIT(et);
	return p->if_output(ifp, m, dst, ro);
}

/*
 * The ifp->if_qflush entry point for vlan(4) is a no-op.
 */
static void
vlan_qflush(struct ifnet *ifp __unused)
{
}

static void
vlan_input(struct ifnet *ifp, struct mbuf *m)
{
	struct epoch_tracker et;
	struct ifvlantrunk *trunk;
	struct ifvlan *ifv;
	struct m_tag *mtag;
	uint16_t vid, tag;

	NET_EPOCH_ENTER(et);
	trunk = ifp->if_vlantrunk;
	if (trunk == NULL) {
		NET_EPOCH_EXIT(et);
		m_freem(m);
		return;
	}

	if (m->m_flags & M_VLANTAG) {
		/*
		 * Packet is tagged, but m contains a normal
		 * Ethernet frame; the tag is stored out-of-band.
		 */
		tag = m->m_pkthdr.ether_vtag;
		m->m_flags &= ~M_VLANTAG;
	} else {
		struct ether_vlan_header *evl;

		/*
		 * Packet is tagged in-band as specified by 802.1q.
		 */
		switch (ifp->if_type) {
		case IFT_ETHER:
			if (m->m_len < sizeof(*evl) &&
			    (m = m_pullup(m, sizeof(*evl))) == NULL) {
				if_printf(ifp, "cannot pullup VLAN header\n");
				NET_EPOCH_EXIT(et);
				return;
			}
			evl = mtod(m, struct ether_vlan_header *);
			tag = ntohs(evl->evl_tag);

			/*
			 * Remove the 802.1q header by copying the Ethernet
			 * addresses over it and adjusting the beginning of
			 * the data in the mbuf.  The encapsulated Ethernet
			 * type field is already in place.
			 */
			bcopy((char *)evl, (char *)evl + ETHER_VLAN_ENCAP_LEN,
			    ETHER_HDR_LEN - ETHER_TYPE_LEN);
			m_adj(m, ETHER_VLAN_ENCAP_LEN);
			break;

		default:
#ifdef INVARIANTS
			panic("%s: %s has unsupported if_type %u",
			    __func__, ifp->if_xname, ifp->if_type);
#endif
			if_inc_counter(ifp, IFCOUNTER_NOPROTO, 1);
			NET_EPOCH_EXIT(et);
			m_freem(m);
			return;
		}
	}

	vid = EVL_VLANOFTAG(tag);

	ifv = vlan_gethash(trunk, vid);
	if (ifv == NULL || !UP_AND_RUNNING(ifv->ifv_ifp)) {
		NET_EPOCH_EXIT(et);
		if_inc_counter(ifp, IFCOUNTER_NOPROTO, 1);
		m_freem(m);
		return;
	}

	if (vlan_mtag_pcp) {
		/*
		 * While uncommon, it is possible that we will find an 802.1q
		 * packet encapsulated inside another packet that also had an
		 * 802.1q header.  For example, ethernet tunneled over IPSEC
		 * arriving over ethernet.  In that case, we replace the
		 * existing 802.1q PCP m_tag value.
		 */
		mtag = m_tag_locate(m, MTAG_8021Q, MTAG_8021Q_PCP_IN, NULL);
		if (mtag == NULL) {
			mtag = m_tag_alloc(MTAG_8021Q, MTAG_8021Q_PCP_IN,
			    sizeof(uint8_t), M_NOWAIT);
			if (mtag == NULL) {
				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
				NET_EPOCH_EXIT(et);
				m_freem(m);
				return;
			}
			m_tag_prepend(m, mtag);
		}
		*(uint8_t *)(mtag + 1) = EVL_PRIOFTAG(tag);
	}

	m->m_pkthdr.rcvif = ifv->ifv_ifp;
	if_inc_counter(ifv->ifv_ifp, IFCOUNTER_IPACKETS, 1);
	NET_EPOCH_EXIT(et);

	/* Pass it back through the parent's input routine. */
	(*ifv->ifv_ifp->if_input)(ifv->ifv_ifp, m);
}

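/*
 * Taskqueue function enqueued by vlan_iflladdr(): the new link-layer
 * address has already been copied into the vlan's ifnet, so just push it
 * through if_setlladdr() in the proper vnet context.
 */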
static void
vlan_lladdr_fn(void *arg, int pending __unused)
{
	struct ifvlan *ifv;
	struct ifnet *ifp;

	ifv = (struct ifvlan *)arg;
	ifp = ifv->ifv_ifp;

	CURVNET_SET(ifp->if_vnet);

	/* The ifv_ifp already has the lladdr copied in. */
	if_setlladdr(ifp, IF_LLADDR(ifp), ifp->if_addrlen);

	CURVNET_RESTORE();
}

static int
vlan_config(struct ifvlan *ifv, struct ifnet *p, uint16_t vid)
{
	struct epoch_tracker et;
	struct ifvlantrunk *trunk;
	struct ifnet *ifp;
	int error = 0;

	/*
	 * We can handle non-ethernet hardware types as long as
	 * they handle the tagging and headers themselves.
	 */
	if (p->if_type != IFT_ETHER &&
	    (p->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
		return (EPROTONOSUPPORT);
	if ((p->if_flags & VLAN_IFFLAGS) != VLAN_IFFLAGS)
		return (EPROTONOSUPPORT);
	/*
	 * Don't let the caller set up a VLAN VID with
	 * anything except VLID bits.
	 * VID numbers 0x0 and 0xFFF are reserved.
	 */
	if (vid == 0 || vid == 0xFFF || (vid & ~EVL_VLID_MASK))
		return (EINVAL);
	if (ifv->ifv_trunk)
		return (EBUSY);

	VLAN_XLOCK();
	if (p->if_vlantrunk == NULL) {
		trunk = malloc(sizeof(struct ifvlantrunk),
		    M_VLAN, M_WAITOK | M_ZERO);
		vlan_inithash(trunk);
		TRUNK_LOCK_INIT(trunk);
		TRUNK_WLOCK(trunk);
		p->if_vlantrunk = trunk;
		trunk->parent = p;
		if_ref(trunk->parent);
		TRUNK_WUNLOCK(trunk);
	} else {
		trunk = p->if_vlantrunk;
	}

	ifv->ifv_vid = vid;	/* must set this before vlan_inshash() */
	ifv->ifv_pcp = 0;	/* Default: best effort delivery. */
	vlan_tag_recalculate(ifv);
	error = vlan_inshash(trunk, ifv);
	if (error)
		goto done;
	ifv->ifv_proto = ETHERTYPE_VLAN;
	ifv->ifv_encaplen = ETHER_VLAN_ENCAP_LEN;
	ifv->ifv_mintu = ETHERMIN;
	ifv->ifv_pflags = 0;
	ifv->ifv_capenable = -1;

	/*
	 * If the parent supports the VLAN_MTU capability,
	 * i.e. can Tx/Rx larger than ETHER_MAX_LEN frames,
	 * use it.
	 */
	if (p->if_capenable & IFCAP_VLAN_MTU) {
		/*
		 * No need to fudge the MTU since the parent can
		 * handle extended frames.
		 */
		ifv->ifv_mtufudge = 0;
	} else {
		/*
		 * Fudge the MTU by the encapsulation size.  This
		 * makes us incompatible with strictly compliant
		 * 802.1Q implementations, but allows us to use
		 * the feature with other NetBSD implementations,
		 * which might still be useful.
		 */
		ifv->ifv_mtufudge = ifv->ifv_encaplen;
	}

	ifv->ifv_trunk = trunk;
	ifp = ifv->ifv_ifp;
	/*
	 * Initialize fields from our parent.  This duplicates some
	 * work with ether_ifattach() but allows for non-ethernet
	 * interfaces to also work.
	 */
	ifp->if_mtu = p->if_mtu - ifv->ifv_mtufudge;
	ifp->if_baudrate = p->if_baudrate;
	ifp->if_input = p->if_input;
	ifp->if_resolvemulti = p->if_resolvemulti;
	ifp->if_addrlen = p->if_addrlen;
	ifp->if_broadcastaddr = p->if_broadcastaddr;
	ifp->if_pcp = ifv->ifv_pcp;

	/*
	 * We wrap the parent's if_output using vlan_output to ensure that it
	 * can't become stale.
	 */
	ifp->if_output = vlan_output;

	/*
	 * Copy only a selected subset of flags from the parent.
	 * Other flags are none of our business.
	 */
#define VLAN_COPY_FLAGS (IFF_SIMPLEX)
	ifp->if_flags &= ~VLAN_COPY_FLAGS;
	ifp->if_flags |= p->if_flags & VLAN_COPY_FLAGS;
#undef VLAN_COPY_FLAGS

	ifp->if_link_state = p->if_link_state;

	NET_EPOCH_ENTER(et);
	vlan_capabilities(ifv);
	NET_EPOCH_EXIT(et);

	/*
	 * Set up our interface address to reflect the underlying
	 * physical interface's.
	 */
	bcopy(IF_LLADDR(p), IF_LLADDR(ifp), p->if_addrlen);
	((struct sockaddr_dl *)ifp->if_addr->ifa_addr)->sdl_alen =
	    p->if_addrlen;

	TASK_INIT(&ifv->lladdr_task, 0, vlan_lladdr_fn, ifv);

	/* We are ready for operation now. */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;

	/* Update flags on the parent, if necessary. */
	vlan_setflags(ifp, 1);

	/*
	 * Configure multicast addresses that may already be
	 * joined on the vlan device.
	 */
	(void)vlan_setmulti(ifp);

done:
	if (error == 0)
		EVENTHANDLER_INVOKE(vlan_config, p, ifv->ifv_vid);
	VLAN_XUNLOCK();

	return (error);
}

static void
vlan_unconfig(struct ifnet *ifp)
{

	VLAN_XLOCK();
	vlan_unconfig_locked(ifp, 0);
	VLAN_XUNLOCK();
}

static void
vlan_unconfig_locked(struct ifnet *ifp, int departing)
{
	struct ifvlantrunk *trunk;
	struct vlan_mc_entry *mc;
	struct ifvlan *ifv;
	struct ifnet *parent;
	int error;

	VLAN_XLOCK_ASSERT();

	ifv = ifp->if_softc;
	trunk = ifv->ifv_trunk;
	parent = NULL;

	if (trunk != NULL) {
		parent = trunk->parent;

		/*
		 * Since the interface is being unconfigured, we need to
		 * empty the list of multicast groups that we may have joined
		 * while we were alive from the parent's list.
		 */
		while ((mc = CK_SLIST_FIRST(&ifv->vlan_mc_listhead)) != NULL) {
			/*
			 * If the parent interface is being detached,
			 * all its multicast addresses have already
			 * been removed.  Warn about errors if
			 * if_delmulti() does fail, but don't abort as
			 * all callers expect vlan destruction to
			 * succeed.
			 */
			if (!departing) {
				error = if_delmulti(parent,
				    (struct sockaddr *)&mc->mc_addr);
				if (error)
					if_printf(ifp,
					    "Failed to delete multicast address from parent: %d\n",
					    error);
			}
			CK_SLIST_REMOVE_HEAD(&ifv->vlan_mc_listhead, mc_entries);
			epoch_call(net_epoch_preempt, &mc->mc_epoch_ctx, vlan_mc_free);
		}

		vlan_setflags(ifp, 0); /* clear special flags on parent */

		vlan_remhash(trunk, ifv);
		ifv->ifv_trunk = NULL;

		/*
		 * Check if we were the last.
		 */
		if (trunk->refcnt == 0) {
			parent->if_vlantrunk = NULL;
			NET_EPOCH_WAIT();
			trunk_destroy(trunk);
		}
	}

	/* Disconnect from parent. */
	if (ifv->ifv_pflags)
		if_printf(ifp, "%s: ifv_pflags unclean\n", __func__);
	ifp->if_mtu = ETHERMTU;
	ifp->if_link_state = LINK_STATE_UNKNOWN;
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;

	/*
	 * Only dispatch an event if vlan was
	 * attached, otherwise there is nothing
	 * to cleanup anyway.
	 */
	if (parent != NULL)
		EVENTHANDLER_INVOKE(vlan_unconfig, parent, ifv->ifv_vid);
}

/* Handle a reference counted flag that should be set on the parent as well */
static int
vlan_setflag(struct ifnet *ifp, int flag, int status,
    int (*func)(struct ifnet *, int))
{
	struct ifvlan *ifv;
	int error;

	VLAN_SXLOCK_ASSERT();

	ifv = ifp->if_softc;
	status = status ? (ifp->if_flags & flag) : 0;
	/* Now "status" contains the flag value or 0 */

	/*
	 * See if recorded parent's status is different from what
	 * we want it to be.  If it is, flip it.  We record parent's
	 * status in ifv_pflags so that we won't clear a parent flag
	 * that we haven't set.  In fact, we don't clear or set parent's
	 * flags directly, but get or release references to them.
	 * That's why we can be sure that recorded flags still are
	 * in accord with actual parent's flags.
	 */
	if (status != (ifv->ifv_pflags & flag)) {
		error = (*func)(PARENT(ifv), status);
		if (error)
			return (error);
		ifv->ifv_pflags &= ~flag;
		ifv->ifv_pflags |= status;
	}
	return (0);
}

/*
 * Handle IFF_* flags that require certain changes on the parent:
 * if "status" is true, update parent's flags respective to our if_flags;
 * if "status" is false, forcibly clear the flags set on parent.
 */
static int
vlan_setflags(struct ifnet *ifp, int status)
{
	int error, i;

	for (i = 0; vlan_pflags[i].flag; i++) {
		error = vlan_setflag(ifp, vlan_pflags[i].flag,
		    status, vlan_pflags[i].func);
		if (error)
			return (error);
	}
	return (0);
}

/* Inform all vlans that their parent has changed link state */
static void
vlan_link_state(struct ifnet *ifp)
{
	struct epoch_tracker et;
	struct ifvlantrunk *trunk;
	struct ifvlan *ifv;

	/* Called from a taskqueue_swi task, so we cannot sleep. */
	NET_EPOCH_ENTER(et);
	trunk = ifp->if_vlantrunk;
	if (trunk == NULL) {
		NET_EPOCH_EXIT(et);
		return;
	}

	TRUNK_WLOCK(trunk);
	VLAN_FOREACH(ifv, trunk) {
		ifv->ifv_ifp->if_baudrate = trunk->parent->if_baudrate;
		if_link_state_change(ifv->ifv_ifp,
		    trunk->parent->if_link_state);
	}
	TRUNK_WUNLOCK(trunk);
	NET_EPOCH_EXIT(et);
}

static void
vlan_capabilities(struct ifvlan *ifv)
{
	struct ifnet *p;
	struct ifnet *ifp;
	struct ifnet_hw_tsomax hw_tsomax;
	int cap = 0, ena = 0, mena;
	u_long hwa = 0;

	VLAN_SXLOCK_ASSERT();
	NET_EPOCH_ASSERT();
	p = PARENT(ifv);
	ifp = ifv->ifv_ifp;

	/* Mask parent interface enabled capabilities disabled by user. */
	mena = p->if_capenable & ifv->ifv_capenable;

	/*
	 * If the parent interface can do checksum offloading
	 * on VLANs, then propagate its hardware-assisted
	 * checksumming flags.  Also assert that checksum
	 * offloading requires hardware VLAN tagging.
	 */
	if (p->if_capabilities & IFCAP_VLAN_HWCSUM)
		cap |= p->if_capabilities & (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6);
	if (p->if_capenable & IFCAP_VLAN_HWCSUM &&
	    p->if_capenable & IFCAP_VLAN_HWTAGGING) {
		ena |= mena & (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6);
		if (ena & IFCAP_TXCSUM)
			hwa |= p->if_hwassist & (CSUM_IP | CSUM_TCP |
			    CSUM_UDP | CSUM_SCTP);
		if (ena & IFCAP_TXCSUM_IPV6)
			hwa |= p->if_hwassist & (CSUM_TCP_IPV6 |
			    CSUM_UDP_IPV6 | CSUM_SCTP_IPV6);
	}

	/*
	 * If the parent interface can do TSO on VLANs then
	 * propagate the hardware-assisted flag. TSO on VLANs
	 * does not necessarily require hardware VLAN tagging.
	 */
	memset(&hw_tsomax, 0, sizeof(hw_tsomax));
	if_hw_tsomax_common(p, &hw_tsomax);
	if_hw_tsomax_update(ifp, &hw_tsomax);
	if (p->if_capabilities & IFCAP_VLAN_HWTSO)
		cap |= p->if_capabilities & IFCAP_TSO;
	if (p->if_capenable & IFCAP_VLAN_HWTSO) {
		ena |= mena & IFCAP_TSO;
		if (ena & IFCAP_TSO)
			hwa |= p->if_hwassist & CSUM_TSO;
	}

	/*
	 * If the parent interface can do LRO and checksum offloading on
	 * VLANs, then guess it may do LRO on VLANs.  A false positive here
	 * costs nothing, while a false negative may lead to some confusion.
	 */
	if (p->if_capabilities & IFCAP_VLAN_HWCSUM)
		cap |= p->if_capabilities & IFCAP_LRO;
	if (p->if_capenable & IFCAP_VLAN_HWCSUM)
		ena |= p->if_capenable & IFCAP_LRO;

	/*
	 * If the parent interface can offload TCP connections over VLANs then
	 * propagate its TOE capability to the VLAN interface.
	 *
	 * All TOE drivers in the tree today can deal with VLANs.  If this
	 * changes then IFCAP_VLAN_TOE should be promoted to a full capability
	 * with its own bit.
	 */
#define	IFCAP_VLAN_TOE IFCAP_TOE
	if (p->if_capabilities & IFCAP_VLAN_TOE)
		cap |= p->if_capabilities & IFCAP_TOE;
	if (p->if_capenable & IFCAP_VLAN_TOE) {
		TOEDEV(ifp) = TOEDEV(p);
		ena |= mena & IFCAP_TOE;
	}

	/*
	 * If the parent interface supports dynamic link state, so does the
	 * VLAN interface.
	 */
	cap |= (p->if_capabilities & IFCAP_LINKSTATE);
	ena |= (mena & IFCAP_LINKSTATE);

#ifdef RATELIMIT
	/*
	 * If the parent interface supports ratelimiting, so does the
	 * VLAN interface.
	 */
	cap |= (p->if_capabilities & IFCAP_TXRTLMT);
	ena |= (mena & IFCAP_TXRTLMT);
#endif

	/*
	 * If the parent interface supports unmapped mbufs, so does
	 * the VLAN interface.  Note that this should be fine even for
	 * interfaces that don't support hardware tagging as headers
	 * are prepended in normal mbufs to unmapped mbufs holding
	 * payload data.
	 */
	cap |= (p->if_capabilities & IFCAP_NOMAP);
	ena |= (mena & IFCAP_NOMAP);

	/*
	 * If the parent interface can offload encryption and segmentation
	 * of TLS records over TCP, propagate its capability to the VLAN
	 * interface.
	 *
	 * All TLS drivers in the tree today can deal with VLANs.  If
	 * this ever changes, then a new IFCAP_VLAN_TXTLS can be
	 * defined.
	 */
	if (p->if_capabilities & IFCAP_TXTLS)
		cap |= p->if_capabilities & IFCAP_TXTLS;
	if (p->if_capenable & IFCAP_TXTLS)
		ena |= mena & IFCAP_TXTLS;

	ifp->if_capabilities = cap;
	ifp->if_capenable = ena;
	ifp->if_hwassist = hwa;
}

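/*
 * Recompute the capabilities of every vlan interface on the trunk after
 * the parent's capabilities or administrative settings have changed.
 */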
static void
vlan_trunk_capabilities(struct ifnet *ifp)
{
	struct epoch_tracker et;
	struct ifvlantrunk *trunk;
	struct ifvlan *ifv;

	VLAN_SLOCK();
	trunk = ifp->if_vlantrunk;
	if (trunk == NULL) {
		VLAN_SUNLOCK();
		return;
	}
	NET_EPOCH_ENTER(et);
	VLAN_FOREACH(ifv, trunk) {
		vlan_capabilities(ifv);
	}
	NET_EPOCH_EXIT(et);
	VLAN_SUNLOCK();
}

static int
vlan_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct ifnet *p;
	struct ifreq *ifr;
	struct ifaddr *ifa;
	struct ifvlan *ifv;
	struct ifvlantrunk *trunk;
	struct vlanreq vlr;
	int error = 0;

	ifr = (struct ifreq *)data;
	ifa = (struct ifaddr *) data;
	ifv = ifp->if_softc;

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(ifp, ifa);
#endif
		break;
	case SIOCGIFADDR:
		bcopy(IF_LLADDR(ifp), &ifr->ifr_addr.sa_data[0],
		    ifp->if_addrlen);
		break;
	case SIOCGIFMEDIA:
		VLAN_SLOCK();
		if (TRUNK(ifv) != NULL) {
			p = PARENT(ifv);
			if_ref(p);
			error = (*p->if_ioctl)(p, SIOCGIFMEDIA, data);
			if_rele(p);
			/* Limit the result to the parent's current config. */
			if (error == 0) {
				struct ifmediareq *ifmr;

				ifmr = (struct ifmediareq *)data;
				if (ifmr->ifm_count >= 1 && ifmr->ifm_ulist) {
					ifmr->ifm_count = 1;
					error = copyout(&ifmr->ifm_current,
					    ifmr->ifm_ulist,
					    sizeof(int));
				}
			}
		} else {
			error = EINVAL;
		}
		VLAN_SUNLOCK();
		break;

	case SIOCSIFMEDIA:
		error = EINVAL;
		break;

	case SIOCSIFMTU:
		/*
		 * Set the interface MTU.
		 */
		VLAN_SLOCK();
		trunk = TRUNK(ifv);
		if (trunk != NULL) {
			TRUNK_WLOCK(trunk);
			if (ifr->ifr_mtu >
			    (PARENT(ifv)->if_mtu - ifv->ifv_mtufudge) ||
			    ifr->ifr_mtu <
			    (ifv->ifv_mintu - ifv->ifv_mtufudge))
				error = EINVAL;
			else
				ifp->if_mtu = ifr->ifr_mtu;
			TRUNK_WUNLOCK(trunk);
		} else
			error = EINVAL;
		VLAN_SUNLOCK();
		break;

	case SIOCSETVLAN:
#ifdef VIMAGE
		/*
		 * XXXRW/XXXBZ: The goal in these checks is to allow a VLAN
		 * interface to be delegated to a jail without allowing the
		 * jail to change what underlying interface/VID it is
		 * associated with.  We are not entirely convinced that this
		 * is the right way to accomplish that policy goal.
		 */
		if (ifp->if_vnet != ifp->if_home_vnet) {
			error = EPERM;
			break;
		}
#endif
		error = copyin(ifr_data_get_ptr(ifr), &vlr, sizeof(vlr));
		if (error)
			break;
		if (vlr.vlr_parent[0] == '\0') {
			vlan_unconfig(ifp);
			break;
		}
		p = ifunit_ref(vlr.vlr_parent);
		if (p == NULL) {
			error = ENOENT;
			break;
		}
		error = vlan_config(ifv, p, vlr.vlr_tag);
		if_rele(p);
		break;

	case SIOCGETVLAN:
#ifdef VIMAGE
		if (ifp->if_vnet != ifp->if_home_vnet) {
			error = EPERM;
			break;
		}
#endif
		bzero(&vlr, sizeof(vlr));
		VLAN_SLOCK();
		if (TRUNK(ifv) != NULL) {
			strlcpy(vlr.vlr_parent, PARENT(ifv)->if_xname,
			    sizeof(vlr.vlr_parent));
			vlr.vlr_tag = ifv->ifv_vid;
		}
		VLAN_SUNLOCK();
		error = copyout(&vlr, ifr_data_get_ptr(ifr), sizeof(vlr));
		break;

	case SIOCSIFFLAGS:
		/*
		 * We should propagate selected flags to the parent,
		 * e.g., promiscuous mode.
		 */
		VLAN_XLOCK();
		if (TRUNK(ifv) != NULL)
			error = vlan_setflags(ifp, 1);
		VLAN_XUNLOCK();
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/*
		 * If we don't have a parent, just remember the membership for
		 * when we do.
		 *
		 * XXX We need the rmlock here to avoid sleeping while
		 * holding in6_multi_mtx.
		 */
		VLAN_XLOCK();
		trunk = TRUNK(ifv);
		if (trunk != NULL)
			error = vlan_setmulti(ifp);
		VLAN_XUNLOCK();

		break;
	case SIOCGVLANPCP:
#ifdef VIMAGE
		if (ifp->if_vnet != ifp->if_home_vnet) {
			error = EPERM;
			break;
		}
#endif
		ifr->ifr_vlan_pcp = ifv->ifv_pcp;
		break;

	case SIOCSVLANPCP:
#ifdef VIMAGE
		if (ifp->if_vnet != ifp->if_home_vnet) {
			error = EPERM;
			break;
		}
#endif
		error = priv_check(curthread, PRIV_NET_SETVLANPCP);
		if (error)
			break;
		if (ifr->ifr_vlan_pcp > 7) {
			error = EINVAL;
			break;
		}
		ifv->ifv_pcp = ifr->ifr_vlan_pcp;
		ifp->if_pcp = ifv->ifv_pcp;
		vlan_tag_recalculate(ifv);
		/* broadcast event about PCP change */
		EVENTHANDLER_INVOKE(ifnet_event, ifp, IFNET_EVENT_PCP);
		break;

	case SIOCSIFCAP:
		VLAN_SLOCK();
		ifv->ifv_capenable = ifr->ifr_reqcap;
		trunk = TRUNK(ifv);
		if (trunk != NULL) {
			struct epoch_tracker et;

			NET_EPOCH_ENTER(et);
			vlan_capabilities(ifv);
			NET_EPOCH_EXIT(et);
		}
		VLAN_SUNLOCK();
		break;

	default:
		error = EINVAL;
		break;
	}

	return (error);
}

#if defined(KERN_TLS) || defined(RATELIMIT)
static int
vlan_snd_tag_alloc(struct ifnet *ifp,
    union if_snd_tag_alloc_params *params,
    struct m_snd_tag **ppmt)
{
	struct epoch_tracker et;
	struct vlan_snd_tag *vst;
	struct ifvlan *ifv;
	struct ifnet *parent;
	int error;

	NET_EPOCH_ENTER(et);
	ifv = ifp->if_softc;
	if (ifv->ifv_trunk != NULL)
		parent = PARENT(ifv);
	else
		parent = NULL;
	if (parent == NULL || parent->if_snd_tag_alloc == NULL) {
		NET_EPOCH_EXIT(et);
		return (EOPNOTSUPP);
	}
	if_ref(parent);
	NET_EPOCH_EXIT(et);

	vst = malloc(sizeof(*vst), M_VLAN, M_NOWAIT);
	if (vst == NULL) {
		if_rele(parent);
		return (ENOMEM);
	}

	error = parent->if_snd_tag_alloc(parent, params, &vst->tag);
	if_rele(parent);
	if (error) {
		free(vst, M_VLAN);
		return (error);
	}

	m_snd_tag_init(&vst->com, ifp);

	*ppmt = &vst->com;
	return (0);
}

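/*
 * The remaining send tag methods wrap the tag allocated from the parent
 * interface: modify, query and free requests are simply forwarded to it.
 */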
static int
vlan_snd_tag_modify(struct m_snd_tag *mst,
    union if_snd_tag_modify_params *params)
{
	struct vlan_snd_tag *vst;

	vst = mst_to_vst(mst);
	return (vst->tag->ifp->if_snd_tag_modify(vst->tag, params));
}

static int
vlan_snd_tag_query(struct m_snd_tag *mst,
    union if_snd_tag_query_params *params)
{
	struct vlan_snd_tag *vst;

	vst = mst_to_vst(mst);
	return (vst->tag->ifp->if_snd_tag_query(vst->tag, params));
}

static void
vlan_snd_tag_free(struct m_snd_tag *mst)
{
	struct vlan_snd_tag *vst;

	vst = mst_to_vst(mst);
	m_snd_tag_rele(vst->tag);
	free(vst, M_VLAN);
}
#endif