/*-
 * Copyright 1998 Massachusetts Institute of Technology
 * Copyright 2012 ADARA Networks, Inc.
 * Copyright 2017 Dell EMC Isilon
 *
 * Portions of this software were developed by Robert N. M. Watson under
 * contract to ADARA Networks, Inc.
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * if_vlan.c - pseudo-device driver for IEEE 802.1Q virtual LANs.
 * This is sort of sneaky in the implementation, since
 * we need to pretend to be enough of an Ethernet implementation
 * to make arp work.  The way we do this is by telling everyone
 * that we are an Ethernet, and then catch the packets that
 * ether_output() sends to us via if_transmit(), rewrite them for
 * use by the real outgoing interface, and ask it to send them.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_kern_tls.h"
#include "opt_vlan.h"
#include "opt_ratelimit.h"

#include <sys/param.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rmlock.h>
#include <sys/priv.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/sx.h>
#include <sys/taskqueue.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_clone.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <net/vnet.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#define	VLAN_DEF_HWIDTH	4
#define	VLAN_IFFLAGS	(IFF_BROADCAST | IFF_MULTICAST)

#define	UP_AND_RUNNING(ifp) \
    ((ifp)->if_flags & IFF_UP && (ifp)->if_drv_flags & IFF_DRV_RUNNING)

CK_SLIST_HEAD(ifvlanhead, ifvlan);

struct ifvlantrunk {
	struct	ifnet *parent;		/* parent interface of this trunk */
	struct	mtx lock;
#ifdef VLAN_ARRAY
#define	VLAN_ARRAY_SIZE	(EVL_VLID_MASK + 1)
	struct	ifvlan *vlans[VLAN_ARRAY_SIZE]; /* static table */
#else
	struct	ifvlanhead *hash;	/* dynamic hash-list table */
	uint16_t	hmask;
	uint16_t	hwidth;
#endif
	int		refcnt;
};

#if defined(KERN_TLS) || defined(RATELIMIT)
struct vlan_snd_tag {
	struct m_snd_tag com;
	struct m_snd_tag *tag;
};

static inline struct vlan_snd_tag *
mst_to_vst(struct m_snd_tag *mst)
{

	return (__containerof(mst, struct vlan_snd_tag, com));
}
#endif

/*
 * This macro provides a facility to iterate over every vlan on a trunk with
 * the assumption that none will be added/removed during iteration.
 */
#ifdef VLAN_ARRAY
#define	VLAN_FOREACH(_ifv, _trunk) \
	size_t _i; \
	for (_i = 0; _i < VLAN_ARRAY_SIZE; _i++) \
		if (((_ifv) = (_trunk)->vlans[_i]) != NULL)
#else /* VLAN_ARRAY */
#define	VLAN_FOREACH(_ifv, _trunk) \
	struct ifvlan *_next; \
	size_t _i; \
	for (_i = 0; _i < (1 << (_trunk)->hwidth); _i++) \
		CK_SLIST_FOREACH_SAFE((_ifv), &(_trunk)->hash[_i], ifv_list, _next)
#endif /* VLAN_ARRAY */

/*
 * This macro provides a facility to iterate over every vlan on a trunk while
 * also modifying the number of vlans on the trunk.  The iteration continues
 * until some condition is met or there are no more vlans on the trunk.
 */
#ifdef VLAN_ARRAY
/* The VLAN_ARRAY case is simple -- just a for loop using the condition. */
#define	VLAN_FOREACH_UNTIL_SAFE(_ifv, _trunk, _cond) \
	size_t _i; \
	for (_i = 0; !(_cond) && _i < VLAN_ARRAY_SIZE; _i++) \
		if (((_ifv) = (_trunk)->vlans[_i]))
#else /* VLAN_ARRAY */
/*
 * The hash table case is more complicated.  We allow for the hash table to be
 * modified (i.e. vlans removed) while we are iterating over it.  To allow for
 * this we must restart the iteration every time we "touch" something during
 * the iteration, since removal will resize the hash table and invalidate our
 * current position.  If acting on the touched element causes the trunk to be
 * emptied, then iteration also stops.
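 *
 * For example, vlan_ifdetach() below uses this macro to unconfigure every
 * vlan on a departing trunk, stopping once the trunk pointer itself goes
 * away:
 *
 *	VLAN_FOREACH_UNTIL_SAFE(ifv, ifp->if_vlantrunk,
 *	    ifp->if_vlantrunk == NULL)
 *		vlan_unconfig_locked(ifv->ifv_ifp, 1);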
 */
#define	VLAN_FOREACH_UNTIL_SAFE(_ifv, _trunk, _cond) \
	size_t _i; \
	bool _touch = false; \
	for (_i = 0; \
	    !(_cond) && _i < (1 << (_trunk)->hwidth); \
	    _i = (_touch && ((_trunk) != NULL) ? 0 : _i + 1), _touch = false) \
		if (((_ifv) = CK_SLIST_FIRST(&(_trunk)->hash[_i])) != NULL && \
		    (_touch = true))
#endif /* VLAN_ARRAY */

struct vlan_mc_entry {
	struct sockaddr_dl		mc_addr;
	CK_SLIST_ENTRY(vlan_mc_entry)	mc_entries;
	struct epoch_context		mc_epoch_ctx;
};

struct ifvlan {
	struct	ifvlantrunk *ifv_trunk;
	struct	ifnet *ifv_ifp;
#define	TRUNK(ifv)	((ifv)->ifv_trunk)
#define	PARENT(ifv)	((ifv)->ifv_trunk->parent)
	void	*ifv_cookie;
	int	ifv_pflags;	/* special flags we have set on parent */
	int	ifv_capenable;
	int	ifv_encaplen;	/* encapsulation length */
	int	ifv_mtufudge;	/* MTU fudged by this much */
	int	ifv_mintu;	/* min transmission unit */
	uint16_t ifv_proto;	/* encapsulation ethertype */
	uint16_t ifv_tag;	/* tag to apply on packets leaving if */
	uint16_t ifv_vid;	/* VLAN ID */
	uint8_t	ifv_pcp;	/* Priority Code Point (PCP). */
	struct task lladdr_task;
	CK_SLIST_HEAD(, vlan_mc_entry) vlan_mc_listhead;
#ifndef VLAN_ARRAY
	CK_SLIST_ENTRY(ifvlan) ifv_list;
#endif
};

/* Special flags we should propagate to parent. */
static struct {
	int flag;
	int (*func)(struct ifnet *, int);
} vlan_pflags[] = {
	{IFF_PROMISC, ifpromisc},
	{IFF_ALLMULTI, if_allmulti},
	{0, NULL}
};

extern int vlan_mtag_pcp;

static const char vlanname[] = "vlan";
static MALLOC_DEFINE(M_VLAN, vlanname, "802.1Q Virtual LAN Interface");

static eventhandler_tag ifdetach_tag;
static eventhandler_tag iflladdr_tag;

/*
 * if_vlan uses two module-level synchronization primitives to allow concurrent
 * modification of vlan interfaces and (mostly) allow for vlans to be destroyed
 * while they are being used for tx/rx.  To accomplish this in a way that has
 * acceptable performance and cooperation with other parts of the network stack
 * there is a non-sleepable epoch(9) and an sx(9).
 *
 * The performance-sensitive paths that warrant using the epoch(9) are
 * vlan_transmit and vlan_input.  Both have to check for the vlan interface's
 * existence using if_vlantrunk, and being in the network tx/rx paths the use
 * of an epoch(9) gives a measurable improvement in performance.
 *
 * The reason for having an sx(9) is mostly because there are still areas that
 * must be sleepable and also have safe concurrent access to a vlan interface.
 * Since the sx(9) exists, it is used by default in most paths unless sleeping
 * is not permitted, or if it is not clear whether sleeping is permitted.
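 *
 * For example, a control-path operation such as vlan_config() runs under
 * VLAN_XLOCK()/VLAN_XUNLOCK(), while the hot paths vlan_transmit() and
 * vlan_input() only assert that they run inside a network epoch section
 * (NET_EPOCH_ASSERT()) and never take the sx lock.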
 *
 */
#define	_VLAN_SX_ID ifv_sx

static struct sx _VLAN_SX_ID;

#define	VLAN_LOCKING_INIT() \
	sx_init(&_VLAN_SX_ID, "vlan_sx")

#define	VLAN_LOCKING_DESTROY() \
	sx_destroy(&_VLAN_SX_ID)

#define	VLAN_SLOCK()			sx_slock(&_VLAN_SX_ID)
#define	VLAN_SUNLOCK()			sx_sunlock(&_VLAN_SX_ID)
#define	VLAN_XLOCK()			sx_xlock(&_VLAN_SX_ID)
#define	VLAN_XUNLOCK()			sx_xunlock(&_VLAN_SX_ID)
#define	VLAN_SLOCK_ASSERT()		sx_assert(&_VLAN_SX_ID, SA_SLOCKED)
#define	VLAN_XLOCK_ASSERT()		sx_assert(&_VLAN_SX_ID, SA_XLOCKED)
#define	VLAN_SXLOCK_ASSERT()		sx_assert(&_VLAN_SX_ID, SA_LOCKED)

/*
 * We also have a per-trunk mutex that should be acquired when changing
 * its state.
 */
#define	TRUNK_LOCK_INIT(trunk)		mtx_init(&(trunk)->lock, vlanname, NULL, MTX_DEF)
#define	TRUNK_LOCK_DESTROY(trunk)	mtx_destroy(&(trunk)->lock)
#define	TRUNK_WLOCK(trunk)		mtx_lock(&(trunk)->lock)
#define	TRUNK_WUNLOCK(trunk)		mtx_unlock(&(trunk)->lock)
#define	TRUNK_WLOCK_ASSERT(trunk)	mtx_assert(&(trunk)->lock, MA_OWNED);

/*
 * The VLAN_ARRAY substitutes the dynamic hash with a static array
 * with 4096 entries.  In theory this can give a boost in processing,
 * however in practice it does not.  Probably this is because the array
 * is too big to fit into CPU cache.
 */
#ifndef VLAN_ARRAY
static	void vlan_inithash(struct ifvlantrunk *trunk);
static	void vlan_freehash(struct ifvlantrunk *trunk);
static	int vlan_inshash(struct ifvlantrunk *trunk, struct ifvlan *ifv);
static	int vlan_remhash(struct ifvlantrunk *trunk, struct ifvlan *ifv);
static	void vlan_growhash(struct ifvlantrunk *trunk, int howmuch);
static __inline struct ifvlan * vlan_gethash(struct ifvlantrunk *trunk,
	uint16_t vid);
#endif
static	void trunk_destroy(struct ifvlantrunk *trunk);

static	void vlan_init(void *foo);
static	void vlan_input(struct ifnet *ifp, struct mbuf *m);
static	int vlan_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr);
#if defined(KERN_TLS) || defined(RATELIMIT)
static	int vlan_snd_tag_alloc(struct ifnet *,
    union if_snd_tag_alloc_params *, struct m_snd_tag **);
static	int vlan_snd_tag_modify(struct m_snd_tag *,
    union if_snd_tag_modify_params *);
static	int vlan_snd_tag_query(struct m_snd_tag *,
    union if_snd_tag_query_params *);
static	void vlan_snd_tag_free(struct m_snd_tag *);
#endif
static	void vlan_qflush(struct ifnet *ifp);
static	int vlan_setflag(struct ifnet *ifp, int flag, int status,
    int (*func)(struct ifnet *, int));
static	int vlan_setflags(struct ifnet *ifp, int status);
static	int vlan_setmulti(struct ifnet *ifp);
static	int vlan_transmit(struct ifnet *ifp, struct mbuf *m);
static	int vlan_output(struct ifnet *ifp, struct mbuf *m,
    const struct sockaddr *dst, struct route *ro);
static	void vlan_unconfig(struct ifnet *ifp);
static	void vlan_unconfig_locked(struct ifnet *ifp, int departing);
static	int vlan_config(struct ifvlan *ifv, struct ifnet *p, uint16_t tag);
static	void vlan_link_state(struct ifnet *ifp);
static	void vlan_capabilities(struct ifvlan *ifv);
static	void vlan_trunk_capabilities(struct ifnet *ifp);

static	struct ifnet *vlan_clone_match_ethervid(const char *, int *);
static	int vlan_clone_match(struct if_clone *, const char *);
static	int vlan_clone_create(struct if_clone *, char *, size_t, caddr_t);
static	int vlan_clone_destroy(struct if_clone *, struct ifnet *);

static	void vlan_ifdetach(void *arg, struct ifnet *ifp);
static	void vlan_iflladdr(void *arg, struct ifnet *ifp);

static	void vlan_lladdr_fn(void *arg, int pending);

static struct if_clone *vlan_cloner;

#ifdef VIMAGE
VNET_DEFINE_STATIC(struct if_clone *, vlan_cloner);
#define	V_vlan_cloner	VNET(vlan_cloner)
#endif

static void
vlan_mc_free(struct epoch_context *ctx)
{
	struct vlan_mc_entry *mc = __containerof(ctx, struct vlan_mc_entry, mc_epoch_ctx);
	free(mc, M_VLAN);
}

#ifndef VLAN_ARRAY
#define	HASH(n, m)	((((n) >> 8) ^ ((n) >> 4) ^ (n)) & (m))

static void
vlan_inithash(struct ifvlantrunk *trunk)
{
	int i, n;

	/*
	 * The trunk must not be locked here since we call malloc(M_WAITOK).
	 * It is OK in case this function is called before the trunk struct
	 * gets hooked up and becomes visible from other threads.
	 */

	KASSERT(trunk->hwidth == 0 && trunk->hash == NULL,
	    ("%s: hash already initialized", __func__));

	trunk->hwidth = VLAN_DEF_HWIDTH;
	n = 1 << trunk->hwidth;
	trunk->hmask = n - 1;
	trunk->hash = malloc(sizeof(struct ifvlanhead) * n, M_VLAN, M_WAITOK);
	for (i = 0; i < n; i++)
		CK_SLIST_INIT(&trunk->hash[i]);
}

static void
vlan_freehash(struct ifvlantrunk *trunk)
{
#ifdef INVARIANTS
	int i;

	KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__));
	for (i = 0; i < (1 << trunk->hwidth); i++)
		KASSERT(CK_SLIST_EMPTY(&trunk->hash[i]),
		    ("%s: hash table not empty", __func__));
#endif
	free(trunk->hash, M_VLAN);
	trunk->hash = NULL;
	trunk->hwidth = trunk->hmask = 0;
}

static int
vlan_inshash(struct ifvlantrunk *trunk, struct ifvlan *ifv)
{
	int i, b;
	struct ifvlan *ifv2;

	VLAN_XLOCK_ASSERT();
	KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__));

	b = 1 << trunk->hwidth;
	i = HASH(ifv->ifv_vid, trunk->hmask);
	CK_SLIST_FOREACH(ifv2, &trunk->hash[i], ifv_list)
		if (ifv->ifv_vid == ifv2->ifv_vid)
			return (EEXIST);

	/*
	 * Grow the hash when the number of vlans exceeds half of the number of
	 * hash buckets squared. This will make the average linked-list length
	 * buckets/2.
	 */
	if (trunk->refcnt > (b * b) / 2) {
		vlan_growhash(trunk, 1);
		i = HASH(ifv->ifv_vid, trunk->hmask);
	}
	CK_SLIST_INSERT_HEAD(&trunk->hash[i], ifv, ifv_list);
	trunk->refcnt++;

	return (0);
}

static int
vlan_remhash(struct ifvlantrunk *trunk, struct ifvlan *ifv)
{
	int i, b;
	struct ifvlan *ifv2;

	VLAN_XLOCK_ASSERT();
	KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__));

	b = 1 << trunk->hwidth;
	i = HASH(ifv->ifv_vid, trunk->hmask);
	CK_SLIST_FOREACH(ifv2, &trunk->hash[i], ifv_list)
		if (ifv2 == ifv) {
			trunk->refcnt--;
			CK_SLIST_REMOVE(&trunk->hash[i], ifv2, ifvlan, ifv_list);
			if (trunk->refcnt < (b * b) / 2)
				vlan_growhash(trunk, -1);
			return (0);
		}

	panic("%s: vlan not found\n", __func__);
	return (ENOENT); /*NOTREACHED*/
}

/*
 * Grow the hash larger or smaller if memory permits.
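 *
 * With the default width (VLAN_DEF_HWIDTH == 4, i.e. 16 buckets) the first
 * grow happens once the trunk holds more than (16 * 16) / 2 == 128 vlans,
 * and the table is never shrunk below the default width.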
 */
static void
vlan_growhash(struct ifvlantrunk *trunk, int howmuch)
{
	struct ifvlan *ifv;
	struct ifvlanhead *hash2;
	int hwidth2, i, j, n, n2;

	VLAN_XLOCK_ASSERT();
	KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__));

	if (howmuch == 0) {
		/* Harmless yet obvious coding error */
		printf("%s: howmuch is 0\n", __func__);
		return;
	}

	hwidth2 = trunk->hwidth + howmuch;
	n = 1 << trunk->hwidth;
	n2 = 1 << hwidth2;
	/* Do not shrink the table below the default */
	if (hwidth2 < VLAN_DEF_HWIDTH)
		return;

	hash2 = malloc(sizeof(struct ifvlanhead) * n2, M_VLAN, M_WAITOK);
	if (hash2 == NULL) {
		printf("%s: out of memory -- hash size not changed\n",
		    __func__);
		return;		/* We can live with the old hash table */
	}
	for (j = 0; j < n2; j++)
		CK_SLIST_INIT(&hash2[j]);
	for (i = 0; i < n; i++)
		while ((ifv = CK_SLIST_FIRST(&trunk->hash[i])) != NULL) {
			CK_SLIST_REMOVE(&trunk->hash[i], ifv, ifvlan, ifv_list);
			j = HASH(ifv->ifv_vid, n2 - 1);
			CK_SLIST_INSERT_HEAD(&hash2[j], ifv, ifv_list);
		}
	NET_EPOCH_WAIT();
	free(trunk->hash, M_VLAN);
	trunk->hash = hash2;
	trunk->hwidth = hwidth2;
	trunk->hmask = n2 - 1;

	if (bootverbose)
		if_printf(trunk->parent,
		    "VLAN hash table resized from %d to %d buckets\n", n, n2);
}

static __inline struct ifvlan *
vlan_gethash(struct ifvlantrunk *trunk, uint16_t vid)
{
	struct ifvlan *ifv;

	NET_EPOCH_ASSERT();

	CK_SLIST_FOREACH(ifv, &trunk->hash[HASH(vid, trunk->hmask)], ifv_list)
		if (ifv->ifv_vid == vid)
			return (ifv);
	return (NULL);
}

#if 0
/* Debugging code to view the hashtables. */
static void
vlan_dumphash(struct ifvlantrunk *trunk)
{
	int i;
	struct ifvlan *ifv;

	for (i = 0; i < (1 << trunk->hwidth); i++) {
		printf("%d: ", i);
		CK_SLIST_FOREACH(ifv, &trunk->hash[i], ifv_list)
			printf("%s ", ifv->ifv_ifp->if_xname);
		printf("\n");
	}
}
#endif /* 0 */
#else

static __inline struct ifvlan *
vlan_gethash(struct ifvlantrunk *trunk, uint16_t vid)
{

	return trunk->vlans[vid];
}

static __inline int
vlan_inshash(struct ifvlantrunk *trunk, struct ifvlan *ifv)
{

	if (trunk->vlans[ifv->ifv_vid] != NULL)
		return EEXIST;
	trunk->vlans[ifv->ifv_vid] = ifv;
	trunk->refcnt++;

	return (0);
}

static __inline int
vlan_remhash(struct ifvlantrunk *trunk, struct ifvlan *ifv)
{

	trunk->vlans[ifv->ifv_vid] = NULL;
	trunk->refcnt--;

	return (0);
}

static __inline void
vlan_freehash(struct ifvlantrunk *trunk)
{
}

static __inline void
vlan_inithash(struct ifvlantrunk *trunk)
{
}

#endif /* !VLAN_ARRAY */

static void
trunk_destroy(struct ifvlantrunk *trunk)
{
	VLAN_XLOCK_ASSERT();

	vlan_freehash(trunk);
	trunk->parent->if_vlantrunk = NULL;
	TRUNK_LOCK_DESTROY(trunk);
	if_rele(trunk->parent);
	free(trunk, M_VLAN);
}

/*
 * Program our multicast filter.  What we're actually doing is
 * programming the multicast filter of the parent.  This has the
 * side effect of causing the parent interface to receive multicast
 * traffic that it doesn't really want, which ends up being discarded
 * later by the upper protocol layers.
 * Unfortunately, there's no way to avoid this: there really is only one
 * physical interface.
 */
static int
vlan_setmulti(struct ifnet *ifp)
{
	struct ifnet *ifp_p;
	struct ifmultiaddr *ifma;
	struct ifvlan *sc;
	struct vlan_mc_entry *mc;
	int error;

	VLAN_XLOCK_ASSERT();

	/* Find the parent. */
	sc = ifp->if_softc;
	ifp_p = PARENT(sc);

	CURVNET_SET_QUIET(ifp_p->if_vnet);

	/* First, remove any existing filter entries. */
	while ((mc = CK_SLIST_FIRST(&sc->vlan_mc_listhead)) != NULL) {
		CK_SLIST_REMOVE_HEAD(&sc->vlan_mc_listhead, mc_entries);
		(void)if_delmulti(ifp_p, (struct sockaddr *)&mc->mc_addr);
		epoch_call(net_epoch_preempt, &mc->mc_epoch_ctx, vlan_mc_free);
	}

	/* Now program new ones. */
	IF_ADDR_WLOCK(ifp);
	CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		mc = malloc(sizeof(struct vlan_mc_entry), M_VLAN, M_NOWAIT);
		if (mc == NULL) {
			IF_ADDR_WUNLOCK(ifp);
			return (ENOMEM);
		}
		bcopy(ifma->ifma_addr, &mc->mc_addr, ifma->ifma_addr->sa_len);
		mc->mc_addr.sdl_index = ifp_p->if_index;
		CK_SLIST_INSERT_HEAD(&sc->vlan_mc_listhead, mc, mc_entries);
	}
	IF_ADDR_WUNLOCK(ifp);
	CK_SLIST_FOREACH (mc, &sc->vlan_mc_listhead, mc_entries) {
		error = if_addmulti(ifp_p, (struct sockaddr *)&mc->mc_addr,
		    NULL);
		if (error)
			return (error);
	}

	CURVNET_RESTORE();
	return (0);
}

/*
 * A handler for parent interface link layer address changes.
 * If the parent interface link layer address is changed we
 * should also change it on all children vlans.
 */
static void
vlan_iflladdr(void *arg __unused, struct ifnet *ifp)
{
	struct epoch_tracker et;
	struct ifvlan *ifv;
	struct ifnet *ifv_ifp;
	struct ifvlantrunk *trunk;
	struct sockaddr_dl *sdl;

	/* Need the epoch since this is run on taskqueue_swi. */
	NET_EPOCH_ENTER(et);
	trunk = ifp->if_vlantrunk;
	if (trunk == NULL) {
		NET_EPOCH_EXIT(et);
		return;
	}

	/*
	 * OK, it's a trunk.  Loop over and change all vlans' lladdrs on it.
	 * We need an exclusive lock here to prevent concurrent SIOCSIFLLADDR
	 * ioctl calls on the parent garbling the lladdr of the child vlan.
	 */
	TRUNK_WLOCK(trunk);
	VLAN_FOREACH(ifv, trunk) {
		/*
		 * Copy the new lladdr into the ifv_ifp and enqueue a task
		 * to actually call if_setlladdr.  if_setlladdr needs to
		 * be deferred to a taskqueue because it will call into
		 * the if_vlan ioctl path and try to acquire the global
		 * lock.
		 */
		ifv_ifp = ifv->ifv_ifp;
		bcopy(IF_LLADDR(ifp), IF_LLADDR(ifv_ifp),
		    ifp->if_addrlen);
		sdl = (struct sockaddr_dl *)ifv_ifp->if_addr->ifa_addr;
		sdl->sdl_alen = ifp->if_addrlen;
		taskqueue_enqueue(taskqueue_thread, &ifv->lladdr_task);
	}
	TRUNK_WUNLOCK(trunk);
	NET_EPOCH_EXIT(et);
}

/*
 * A handler for network interface departure events.
 * Track departure of trunks here so that we don't access invalid
 * pointers or whatever if a trunk is ripped from under us, e.g.,
 * by ejecting its hot-plug card.  However, if an ifnet is simply
 * being renamed, then there's no need to tear down the state.
 */
static void
vlan_ifdetach(void *arg __unused, struct ifnet *ifp)
{
	struct ifvlan *ifv;
	struct ifvlantrunk *trunk;

	/* If the ifnet is just being renamed, don't do anything. */
	if (ifp->if_flags & IFF_RENAMING)
		return;
	VLAN_XLOCK();
	trunk = ifp->if_vlantrunk;
	if (trunk == NULL) {
		VLAN_XUNLOCK();
		return;
	}

	/*
	 * OK, it's a trunk.  Loop over and detach all vlans on it.
	 * Check trunk pointer after each vlan_unconfig() as it will
	 * free it and set to NULL after the last vlan was detached.
	 */
	VLAN_FOREACH_UNTIL_SAFE(ifv, ifp->if_vlantrunk,
	    ifp->if_vlantrunk == NULL)
		vlan_unconfig_locked(ifv->ifv_ifp, 1);

	/* Trunk should have been destroyed in vlan_unconfig(). */
	KASSERT(ifp->if_vlantrunk == NULL, ("%s: purge failed", __func__));
	VLAN_XUNLOCK();
}

/*
 * Return the trunk device for a virtual interface.
 */
static struct ifnet *
vlan_trunkdev(struct ifnet *ifp)
{
	struct ifvlan *ifv;

	NET_EPOCH_ASSERT();

	if (ifp->if_type != IFT_L2VLAN)
		return (NULL);

	ifv = ifp->if_softc;
	ifp = NULL;
	if (ifv->ifv_trunk)
		ifp = PARENT(ifv);
	return (ifp);
}

/*
 * Return the 12-bit VLAN VID for this interface, for use by external
 * components such as Infiniband.
 *
 * XXXRW: Note that the function name here is historical; it should be named
 * vlan_vid().
 */
static int
vlan_tag(struct ifnet *ifp, uint16_t *vidp)
{
	struct ifvlan *ifv;

	if (ifp->if_type != IFT_L2VLAN)
		return (EINVAL);
	ifv = ifp->if_softc;
	*vidp = ifv->ifv_vid;
	return (0);
}

static int
vlan_pcp(struct ifnet *ifp, uint16_t *pcpp)
{
	struct ifvlan *ifv;

	if (ifp->if_type != IFT_L2VLAN)
		return (EINVAL);
	ifv = ifp->if_softc;
	*pcpp = ifv->ifv_pcp;
	return (0);
}

/*
 * Return a driver specific cookie for this interface.  Synchronization
 * with setcookie must be provided by the driver.
 */
static void *
vlan_cookie(struct ifnet *ifp)
{
	struct ifvlan *ifv;

	if (ifp->if_type != IFT_L2VLAN)
		return (NULL);
	ifv = ifp->if_softc;
	return (ifv->ifv_cookie);
}

/*
 * Store a cookie in our softc that drivers can use to store driver
 * private per-instance data in.
 */
static int
vlan_setcookie(struct ifnet *ifp, void *cookie)
{
	struct ifvlan *ifv;

	if (ifp->if_type != IFT_L2VLAN)
		return (EINVAL);
	ifv = ifp->if_softc;
	ifv->ifv_cookie = cookie;
	return (0);
}

/*
 * Return the vlan device present at the specific VID.
 */
static struct ifnet *
vlan_devat(struct ifnet *ifp, uint16_t vid)
{
	struct ifvlantrunk *trunk;
	struct ifvlan *ifv;

	NET_EPOCH_ASSERT();

	trunk = ifp->if_vlantrunk;
	if (trunk == NULL)
		return (NULL);
	ifp = NULL;
	ifv = vlan_gethash(trunk, vid);
	if (ifv)
		ifp = ifv->ifv_ifp;
	return (ifp);
}

/*
 * Recalculate the cached VLAN tag exposed via the MIB.
 */
static void
vlan_tag_recalculate(struct ifvlan *ifv)
{

	ifv->ifv_tag = EVL_MAKETAG(ifv->ifv_vid, ifv->ifv_pcp, 0);
}

/*
 * VLAN support can be loaded as a module.  The only place in the
 * system that's intimately aware of this is ether_input.  We hook
 * into this code through vlan_input_p which is defined there and
 * set here.  No one else in the system should be aware of this so
 * we use an explicit reference here.
 */
extern	void (*vlan_input_p)(struct ifnet *, struct mbuf *);

/* For if_link_state_change() eyes only... */
extern	void (*vlan_link_state_p)(struct ifnet *);

static int
vlan_modevent(module_t mod, int type, void *data)
{

	switch (type) {
	case MOD_LOAD:
		ifdetach_tag = EVENTHANDLER_REGISTER(ifnet_departure_event,
		    vlan_ifdetach, NULL, EVENTHANDLER_PRI_ANY);
		if (ifdetach_tag == NULL)
			return (ENOMEM);
		iflladdr_tag = EVENTHANDLER_REGISTER(iflladdr_event,
		    vlan_iflladdr, NULL, EVENTHANDLER_PRI_ANY);
		if (iflladdr_tag == NULL)
			return (ENOMEM);
		VLAN_LOCKING_INIT();
		vlan_input_p = vlan_input;
		vlan_link_state_p = vlan_link_state;
		vlan_trunk_cap_p = vlan_trunk_capabilities;
		vlan_trunkdev_p = vlan_trunkdev;
		vlan_cookie_p = vlan_cookie;
		vlan_setcookie_p = vlan_setcookie;
		vlan_tag_p = vlan_tag;
		vlan_pcp_p = vlan_pcp;
		vlan_devat_p = vlan_devat;
#ifndef VIMAGE
		vlan_cloner = if_clone_advanced(vlanname, 0, vlan_clone_match,
		    vlan_clone_create, vlan_clone_destroy);
#endif
		if (bootverbose)
			printf("vlan: initialized, using "
#ifdef VLAN_ARRAY
			    "full-size arrays"
#else
			    "hash tables with chaining"
#endif
			    "\n");
		break;
	case MOD_UNLOAD:
#ifndef VIMAGE
		if_clone_detach(vlan_cloner);
#endif
		EVENTHANDLER_DEREGISTER(ifnet_departure_event, ifdetach_tag);
		EVENTHANDLER_DEREGISTER(iflladdr_event, iflladdr_tag);
		vlan_input_p = NULL;
		vlan_link_state_p = NULL;
		vlan_trunk_cap_p = NULL;
		vlan_trunkdev_p = NULL;
		vlan_tag_p = NULL;
		vlan_cookie_p = NULL;
		vlan_setcookie_p = NULL;
		vlan_devat_p = NULL;
		VLAN_LOCKING_DESTROY();
		if (bootverbose)
			printf("vlan: unloaded\n");
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (0);
}

static moduledata_t vlan_mod = {
	"if_vlan",
	vlan_modevent,
	0
};

DECLARE_MODULE(if_vlan, vlan_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
MODULE_VERSION(if_vlan, 3);

#ifdef VIMAGE
static void
vnet_vlan_init(const void *unused __unused)
{

	vlan_cloner = if_clone_advanced(vlanname, 0, vlan_clone_match,
	    vlan_clone_create, vlan_clone_destroy);
	V_vlan_cloner = vlan_cloner;
}
VNET_SYSINIT(vnet_vlan_init, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY,
    vnet_vlan_init, NULL);

static void
vnet_vlan_uninit(const void *unused __unused)
{

	if_clone_detach(V_vlan_cloner);
}
VNET_SYSUNINIT(vnet_vlan_uninit, SI_SUB_INIT_IF, SI_ORDER_FIRST,
    vnet_vlan_uninit, NULL);
#endif

/*
 * Check for <etherif>.<vlan> style interface names.
 */
static struct ifnet *
vlan_clone_match_ethervid(const char *name, int *vidp)
{
	char ifname[IFNAMSIZ];
	char *cp;
	struct ifnet *ifp;
	int vid;

	strlcpy(ifname, name, IFNAMSIZ);
	if ((cp = strchr(ifname, '.')) == NULL)
		return (NULL);
	*cp = '\0';
	if ((ifp = ifunit_ref(ifname)) == NULL)
		return (NULL);
	/* Parse VID.
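	 *
	 * For instance, a request to create "em0.42" reaches this point with
	 * the parent "em0" resolved, and the digit loop below turns the text
	 * after the dot into vid == 42.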
	 */
	if (*++cp == '\0') {
		if_rele(ifp);
		return (NULL);
	}
	vid = 0;
	for(; *cp >= '0' && *cp <= '9'; cp++)
		vid = (vid * 10) + (*cp - '0');
	if (*cp != '\0') {
		if_rele(ifp);
		return (NULL);
	}
	if (vidp != NULL)
		*vidp = vid;

	return (ifp);
}

static int
vlan_clone_match(struct if_clone *ifc, const char *name)
{
	const char *cp;

	if (vlan_clone_match_ethervid(name, NULL) != NULL)
		return (1);

	if (strncmp(vlanname, name, strlen(vlanname)) != 0)
		return (0);
	for (cp = name + 4; *cp != '\0'; cp++) {
		if (*cp < '0' || *cp > '9')
			return (0);
	}

	return (1);
}

static int
vlan_clone_create(struct if_clone *ifc, char *name, size_t len, caddr_t params)
{
	char *dp;
	int wildcard;
	int unit;
	int error;
	int vid;
	struct ifvlan *ifv;
	struct ifnet *ifp;
	struct ifnet *p;
	struct ifaddr *ifa;
	struct sockaddr_dl *sdl;
	struct vlanreq vlr;
	static const u_char eaddr[ETHER_ADDR_LEN];	/* 00:00:00:00:00:00 */

	/*
	 * There are 3 (ugh) ways to specify the cloned device:
	 * o pass a parameter block with the clone request.
	 * o specify parameters in the text of the clone device name
	 * o specify no parameters and get an unattached device that
	 *   must be configured separately.
	 * The first technique is preferred; the latter two are
	 * supported for backwards compatibility.
	 *
	 * XXXRW: Note historic use of the word "tag" here.  New ioctls may be
	 * called for.
	 */
	if (params) {
		error = copyin(params, &vlr, sizeof(vlr));
		if (error)
			return error;
		p = ifunit_ref(vlr.vlr_parent);
		if (p == NULL)
			return (ENXIO);
		error = ifc_name2unit(name, &unit);
		if (error != 0) {
			if_rele(p);
			return (error);
		}
		vid = vlr.vlr_tag;
		wildcard = (unit < 0);
	} else if ((p = vlan_clone_match_ethervid(name, &vid)) != NULL) {
		unit = -1;
		wildcard = 0;
	} else {
		p = NULL;
		error = ifc_name2unit(name, &unit);
		if (error != 0)
			return (error);

		wildcard = (unit < 0);
	}

	error = ifc_alloc_unit(ifc, &unit);
	if (error != 0) {
		if (p != NULL)
			if_rele(p);
		return (error);
	}

	/* In the wildcard case, we need to update the name. */
	if (wildcard) {
		for (dp = name; *dp != '\0'; dp++);
		if (snprintf(dp, len - (dp-name), "%d", unit) >
		    len - (dp-name) - 1) {
			panic("%s: interface name too long", __func__);
		}
	}

	ifv = malloc(sizeof(struct ifvlan), M_VLAN, M_WAITOK | M_ZERO);
	ifp = ifv->ifv_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		ifc_free_unit(ifc, unit);
		free(ifv, M_VLAN);
		if (p != NULL)
			if_rele(p);
		return (ENOSPC);
	}
	CK_SLIST_INIT(&ifv->vlan_mc_listhead);
	ifp->if_softc = ifv;
	/*
	 * Set the name manually rather than using if_initname because
	 * we don't conform to the default naming convention for interfaces.
	 */
	strlcpy(ifp->if_xname, name, IFNAMSIZ);
	ifp->if_dname = vlanname;
	ifp->if_dunit = unit;

	ifp->if_init = vlan_init;
	ifp->if_transmit = vlan_transmit;
	ifp->if_qflush = vlan_qflush;
	ifp->if_ioctl = vlan_ioctl;
#if defined(KERN_TLS) || defined(RATELIMIT)
	ifp->if_snd_tag_alloc = vlan_snd_tag_alloc;
	ifp->if_snd_tag_modify = vlan_snd_tag_modify;
	ifp->if_snd_tag_query = vlan_snd_tag_query;
	ifp->if_snd_tag_free = vlan_snd_tag_free;
#endif
	ifp->if_flags = VLAN_IFFLAGS;
	ether_ifattach(ifp, eaddr);
	/* Now undo some of the damage... */
	ifp->if_baudrate = 0;
	ifp->if_type = IFT_L2VLAN;
	ifp->if_hdrlen = ETHER_VLAN_ENCAP_LEN;
	ifa = ifp->if_addr;
	sdl = (struct sockaddr_dl *)ifa->ifa_addr;
	sdl->sdl_type = IFT_L2VLAN;

	if (p != NULL) {
		error = vlan_config(ifv, p, vid);
		if_rele(p);
		if (error != 0) {
			/*
			 * Since we've partially failed, we need to back
			 * out all the way, otherwise userland could get
			 * confused.  Thus, we destroy the interface.
			 */
			ether_ifdetach(ifp);
			vlan_unconfig(ifp);
			if_free(ifp);
			ifc_free_unit(ifc, unit);
			free(ifv, M_VLAN);

			return (error);
		}
	}

	return (0);
}

static int
vlan_clone_destroy(struct if_clone *ifc, struct ifnet *ifp)
{
	struct ifvlan *ifv = ifp->if_softc;
	int unit = ifp->if_dunit;

	ether_ifdetach(ifp);	/* first, remove it from system-wide lists */
	vlan_unconfig(ifp);	/* now it can be unconfigured and freed */
	/*
	 * We should have the only reference to the ifv now, so we can now
	 * drain any remaining lladdr task before freeing the ifnet and the
	 * ifvlan.
	 */
	taskqueue_drain(taskqueue_thread, &ifv->lladdr_task);
	NET_EPOCH_WAIT();
	if_free(ifp);
	free(ifv, M_VLAN);
	ifc_free_unit(ifc, unit);

	return (0);
}

/*
 * The ifp->if_init entry point for vlan(4) is a no-op.
 */
static void
vlan_init(void *foo __unused)
{
}

/*
 * The if_transmit method for vlan(4) interface.
 */
static int
vlan_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct ifvlan *ifv;
	struct ifnet *p;
	int error, len, mcast;

	NET_EPOCH_ASSERT();

	ifv = ifp->if_softc;
	if (TRUNK(ifv) == NULL) {
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		m_freem(m);
		return (ENETDOWN);
	}
	p = PARENT(ifv);
	len = m->m_pkthdr.len;
	mcast = (m->m_flags & (M_MCAST | M_BCAST)) ? 1 : 0;

	BPF_MTAP(ifp, m);

#if defined(KERN_TLS) || defined(RATELIMIT)
	if (m->m_pkthdr.csum_flags & CSUM_SND_TAG) {
		struct vlan_snd_tag *vst;
		struct m_snd_tag *mst;

		MPASS(m->m_pkthdr.snd_tag->ifp == ifp);
		mst = m->m_pkthdr.snd_tag;
		vst = mst_to_vst(mst);
		if (vst->tag->ifp != p) {
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			m_freem(m);
			return (EAGAIN);
		}

		m->m_pkthdr.snd_tag = m_snd_tag_ref(vst->tag);
		m_snd_tag_rele(mst);
	}
#endif

	/*
	 * Do not run parent's if_transmit() if the parent is not up,
	 * or parent's driver will cause a system crash.
	 */
	if (!UP_AND_RUNNING(p)) {
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		m_freem(m);
		return (ENETDOWN);
	}

	if (!ether_8021q_frame(&m, ifp, p, ifv->ifv_vid, ifv->ifv_pcp)) {
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		return (0);
	}

	/*
	 * Send it, precisely as ether_output() would have.
	 */
	error = (p->if_transmit)(p, m);
	if (error == 0) {
		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		if_inc_counter(ifp, IFCOUNTER_OBYTES, len);
		if_inc_counter(ifp, IFCOUNTER_OMCASTS, mcast);
	} else
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	return (error);
}

static int
vlan_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst,
    struct route *ro)
{
	struct ifvlan *ifv;
	struct ifnet *p;

	NET_EPOCH_ASSERT();

	ifv = ifp->if_softc;
	if (TRUNK(ifv) == NULL) {
		m_freem(m);
		return (ENETDOWN);
	}
	p = PARENT(ifv);
	return p->if_output(ifp, m, dst, ro);
}

/*
 * The ifp->if_qflush entry point for vlan(4) is a no-op.
 */
static void
vlan_qflush(struct ifnet *ifp __unused)
{
}

static void
vlan_input(struct ifnet *ifp, struct mbuf *m)
{
	struct ifvlantrunk *trunk;
	struct ifvlan *ifv;
	struct m_tag *mtag;
	uint16_t vid, tag;

	NET_EPOCH_ASSERT();

	trunk = ifp->if_vlantrunk;
	if (trunk == NULL) {
		m_freem(m);
		return;
	}

	if (m->m_flags & M_VLANTAG) {
		/*
		 * Packet is tagged, but m contains a normal
		 * Ethernet frame; the tag is stored out-of-band.
		 */
		tag = m->m_pkthdr.ether_vtag;
		m->m_flags &= ~M_VLANTAG;
	} else {
		struct ether_vlan_header *evl;

		/*
		 * Packet is tagged in-band as specified by 802.1q.
		 */
		switch (ifp->if_type) {
		case IFT_ETHER:
			if (m->m_len < sizeof(*evl) &&
			    (m = m_pullup(m, sizeof(*evl))) == NULL) {
				if_printf(ifp, "cannot pullup VLAN header\n");
				return;
			}
			evl = mtod(m, struct ether_vlan_header *);
			tag = ntohs(evl->evl_tag);

			/*
			 * Remove the 802.1q header by copying the Ethernet
			 * addresses over it and adjusting the beginning of
			 * the data in the mbuf.  The encapsulated Ethernet
			 * type field is already in place.
			 */
			bcopy((char *)evl, (char *)evl + ETHER_VLAN_ENCAP_LEN,
			    ETHER_HDR_LEN - ETHER_TYPE_LEN);
			m_adj(m, ETHER_VLAN_ENCAP_LEN);
			break;

		default:
#ifdef INVARIANTS
			panic("%s: %s has unsupported if_type %u",
			    __func__, ifp->if_xname, ifp->if_type);
#endif
			if_inc_counter(ifp, IFCOUNTER_NOPROTO, 1);
			m_freem(m);
			return;
		}
	}

	vid = EVL_VLANOFTAG(tag);

	ifv = vlan_gethash(trunk, vid);
	if (ifv == NULL || !UP_AND_RUNNING(ifv->ifv_ifp)) {
		if_inc_counter(ifp, IFCOUNTER_NOPROTO, 1);
		m_freem(m);
		return;
	}

	if (vlan_mtag_pcp) {
		/*
		 * While uncommon, it is possible that we will find a 802.1q
		 * packet encapsulated inside another packet that also had an
		 * 802.1q header.  For example, ethernet tunneled over IPSEC
		 * arriving over ethernet.  In that case, we replace the
		 * existing 802.1q PCP m_tag value.
		 */
		mtag = m_tag_locate(m, MTAG_8021Q, MTAG_8021Q_PCP_IN, NULL);
		if (mtag == NULL) {
			mtag = m_tag_alloc(MTAG_8021Q, MTAG_8021Q_PCP_IN,
			    sizeof(uint8_t), M_NOWAIT);
			if (mtag == NULL) {
				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
				m_freem(m);
				return;
			}
			m_tag_prepend(m, mtag);
		}
		*(uint8_t *)(mtag + 1) = EVL_PRIOFTAG(tag);
	}

	m->m_pkthdr.rcvif = ifv->ifv_ifp;
	if_inc_counter(ifv->ifv_ifp, IFCOUNTER_IPACKETS, 1);

	/* Pass it back through the parent's input routine. */
	(*ifv->ifv_ifp->if_input)(ifv->ifv_ifp, m);
}

static void
vlan_lladdr_fn(void *arg, int pending __unused)
{
	struct ifvlan *ifv;
	struct ifnet *ifp;

	ifv = (struct ifvlan *)arg;
	ifp = ifv->ifv_ifp;

	CURVNET_SET(ifp->if_vnet);

	/* The ifv_ifp already has the lladdr copied in. */
	if_setlladdr(ifp, IF_LLADDR(ifp), ifp->if_addrlen);

	CURVNET_RESTORE();
}

static int
vlan_config(struct ifvlan *ifv, struct ifnet *p, uint16_t vid)
{
	struct epoch_tracker et;
	struct ifvlantrunk *trunk;
	struct ifnet *ifp;
	int error = 0;

	/*
	 * We can handle non-ethernet hardware types as long as
	 * they handle the tagging and headers themselves.
	 */
	if (p->if_type != IFT_ETHER &&
	    (p->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
		return (EPROTONOSUPPORT);
	if ((p->if_flags & VLAN_IFFLAGS) != VLAN_IFFLAGS)
		return (EPROTONOSUPPORT);
	/*
	 * Don't let the caller set up a VLAN VID with
	 * anything except VLID bits.
	 * VID numbers 0x0 and 0xFFF are reserved.
	 */
	if (vid == 0 || vid == 0xFFF || (vid & ~EVL_VLID_MASK))
		return (EINVAL);
	if (ifv->ifv_trunk)
		return (EBUSY);

	VLAN_XLOCK();
	if (p->if_vlantrunk == NULL) {
		trunk = malloc(sizeof(struct ifvlantrunk),
		    M_VLAN, M_WAITOK | M_ZERO);
		vlan_inithash(trunk);
		TRUNK_LOCK_INIT(trunk);
		TRUNK_WLOCK(trunk);
		p->if_vlantrunk = trunk;
		trunk->parent = p;
		if_ref(trunk->parent);
		TRUNK_WUNLOCK(trunk);
	} else {
		trunk = p->if_vlantrunk;
	}

	ifv->ifv_vid = vid;	/* must set this before vlan_inshash() */
	ifv->ifv_pcp = 0;	/* Default: best effort delivery. */
	vlan_tag_recalculate(ifv);
	error = vlan_inshash(trunk, ifv);
	if (error)
		goto done;
	ifv->ifv_proto = ETHERTYPE_VLAN;
	ifv->ifv_encaplen = ETHER_VLAN_ENCAP_LEN;
	ifv->ifv_mintu = ETHERMIN;
	ifv->ifv_pflags = 0;
	ifv->ifv_capenable = -1;

	/*
	 * If the parent supports the VLAN_MTU capability,
	 * i.e. can Tx/Rx larger than ETHER_MAX_LEN frames,
	 * use it.
	 */
	if (p->if_capenable & IFCAP_VLAN_MTU) {
		/*
		 * No need to fudge the MTU since the parent can
		 * handle extended frames.
		 */
		ifv->ifv_mtufudge = 0;
	} else {
		/*
		 * Fudge the MTU by the encapsulation size.  This
		 * makes us incompatible with strictly compliant
		 * 802.1Q implementations, but allows us to use
		 * the feature with other NetBSD implementations,
		 * which might still be useful.
		 */
		ifv->ifv_mtufudge = ifv->ifv_encaplen;
	}

	ifv->ifv_trunk = trunk;
	ifp = ifv->ifv_ifp;
	/*
	 * Initialize fields from our parent.  This duplicates some
	 * work with ether_ifattach() but allows for non-ethernet
	 * interfaces to also work.
	 */
	ifp->if_mtu = p->if_mtu - ifv->ifv_mtufudge;
	ifp->if_baudrate = p->if_baudrate;
	ifp->if_input = p->if_input;
	ifp->if_resolvemulti = p->if_resolvemulti;
	ifp->if_addrlen = p->if_addrlen;
	ifp->if_broadcastaddr = p->if_broadcastaddr;
	ifp->if_pcp = ifv->ifv_pcp;

	/*
	 * We wrap the parent's if_output using vlan_output to ensure that it
	 * can't become stale.
	 */
	ifp->if_output = vlan_output;

	/*
	 * Copy only a selected subset of flags from the parent.
	 * Other flags are none of our business.
	 */
#define VLAN_COPY_FLAGS (IFF_SIMPLEX)
	ifp->if_flags &= ~VLAN_COPY_FLAGS;
	ifp->if_flags |= p->if_flags & VLAN_COPY_FLAGS;
#undef VLAN_COPY_FLAGS

	ifp->if_link_state = p->if_link_state;

	NET_EPOCH_ENTER(et);
	vlan_capabilities(ifv);
	NET_EPOCH_EXIT(et);

	/*
	 * Set up our interface address to reflect the underlying
	 * physical interface's.
	 */
	TASK_INIT(&ifv->lladdr_task, 0, vlan_lladdr_fn, ifv);
	((struct sockaddr_dl *)ifp->if_addr->ifa_addr)->sdl_alen =
	    p->if_addrlen;

	/*
	 * Do not schedule link address update if it was the same
	 * as previous parent's.  This helps avoid updating for each
	 * associated llentry.
	 */
	if (memcmp(IF_LLADDR(p), IF_LLADDR(ifp), p->if_addrlen) != 0) {
		bcopy(IF_LLADDR(p), IF_LLADDR(ifp), p->if_addrlen);
		taskqueue_enqueue(taskqueue_thread, &ifv->lladdr_task);
	}

	/* We are ready for operation now. */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;

	/* Update flags on the parent, if necessary. */
	vlan_setflags(ifp, 1);

	/*
	 * Configure multicast addresses that may already be
	 * joined on the vlan device.
	 */
	(void)vlan_setmulti(ifp);

done:
	if (error == 0)
		EVENTHANDLER_INVOKE(vlan_config, p, ifv->ifv_vid);
	VLAN_XUNLOCK();

	return (error);
}

static void
vlan_unconfig(struct ifnet *ifp)
{

	VLAN_XLOCK();
	vlan_unconfig_locked(ifp, 0);
	VLAN_XUNLOCK();
}

static void
vlan_unconfig_locked(struct ifnet *ifp, int departing)
{
	struct ifvlantrunk *trunk;
	struct vlan_mc_entry *mc;
	struct ifvlan *ifv;
	struct ifnet *parent;
	int error;

	VLAN_XLOCK_ASSERT();

	ifv = ifp->if_softc;
	trunk = ifv->ifv_trunk;
	parent = NULL;

	if (trunk != NULL) {
		parent = trunk->parent;

		/*
		 * Since the interface is being unconfigured, we need to
		 * empty the list of multicast groups that we may have joined
		 * while we were alive from the parent's list.
		 */
		while ((mc = CK_SLIST_FIRST(&ifv->vlan_mc_listhead)) != NULL) {
			/*
			 * If the parent interface is being detached,
			 * all its multicast addresses have already
			 * been removed.  Warn about errors if
			 * if_delmulti() does fail, but don't abort as
			 * all callers expect vlan destruction to
			 * succeed.
			 */
			if (!departing) {
				error = if_delmulti(parent,
				    (struct sockaddr *)&mc->mc_addr);
				if (error)
					if_printf(ifp,
					    "Failed to delete multicast address from parent: %d\n",
					    error);
			}
			CK_SLIST_REMOVE_HEAD(&ifv->vlan_mc_listhead, mc_entries);
			epoch_call(net_epoch_preempt, &mc->mc_epoch_ctx, vlan_mc_free);
		}

		vlan_setflags(ifp, 0); /* clear special flags on parent */

		vlan_remhash(trunk, ifv);
		ifv->ifv_trunk = NULL;

		/*
		 * Check if we were the last.
		 */
		if (trunk->refcnt == 0) {
			parent->if_vlantrunk = NULL;
			NET_EPOCH_WAIT();
			trunk_destroy(trunk);
		}
	}

	/* Disconnect from parent. */
	if (ifv->ifv_pflags)
		if_printf(ifp, "%s: ifv_pflags unclean\n", __func__);
	ifp->if_mtu = ETHERMTU;
	ifp->if_link_state = LINK_STATE_UNKNOWN;
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;

	/*
	 * Only dispatch an event if vlan was
	 * attached, otherwise there is nothing
	 * to clean up anyway.
	 */
	if (parent != NULL)
		EVENTHANDLER_INVOKE(vlan_unconfig, parent, ifv->ifv_vid);
}

/* Handle a reference counted flag that should be set on the parent as well */
static int
vlan_setflag(struct ifnet *ifp, int flag, int status,
    int (*func)(struct ifnet *, int))
{
	struct ifvlan *ifv;
	int error;

	VLAN_SXLOCK_ASSERT();

	ifv = ifp->if_softc;
	status = status ? (ifp->if_flags & flag) : 0;
	/* Now "status" contains the flag value or 0 */

	/*
	 * See if recorded parent's status is different from what
	 * we want it to be.  If it is, flip it.  We record parent's
	 * status in ifv_pflags so that we won't clear parent's flag
	 * we haven't set.  In fact, we don't clear or set parent's
	 * flags directly, but get or release references to them.
	 * That's why we can be sure that recorded flags still are
	 * in accord with actual parent's flags.
	 */
	if (status != (ifv->ifv_pflags & flag)) {
		error = (*func)(PARENT(ifv), status);
		if (error)
			return (error);
		ifv->ifv_pflags &= ~flag;
		ifv->ifv_pflags |= status;
	}
	return (0);
}

/*
 * Handle IFF_* flags that require certain changes on the parent:
 * if "status" is true, update parent's flags respective to our if_flags;
 * if "status" is false, forcibly clear the flags set on parent.
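 *
 * For example, vlan_config() calls vlan_setflags(ifp, 1) to acquire the
 * parent's IFF_PROMISC/IFF_ALLMULTI references as needed, and
 * vlan_unconfig_locked() calls vlan_setflags(ifp, 0) to release them again.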
 */
static int
vlan_setflags(struct ifnet *ifp, int status)
{
	int error, i;

	for (i = 0; vlan_pflags[i].flag; i++) {
		error = vlan_setflag(ifp, vlan_pflags[i].flag,
		    status, vlan_pflags[i].func);
		if (error)
			return (error);
	}
	return (0);
}

/* Inform all vlans that their parent has changed link state */
static void
vlan_link_state(struct ifnet *ifp)
{
	struct epoch_tracker et;
	struct ifvlantrunk *trunk;
	struct ifvlan *ifv;

	NET_EPOCH_ENTER(et);
	trunk = ifp->if_vlantrunk;
	if (trunk == NULL) {
		NET_EPOCH_EXIT(et);
		return;
	}

	TRUNK_WLOCK(trunk);
	VLAN_FOREACH(ifv, trunk) {
		ifv->ifv_ifp->if_baudrate = trunk->parent->if_baudrate;
		if_link_state_change(ifv->ifv_ifp,
		    trunk->parent->if_link_state);
	}
	TRUNK_WUNLOCK(trunk);
	NET_EPOCH_EXIT(et);
}

static void
vlan_capabilities(struct ifvlan *ifv)
{
	struct ifnet *p;
	struct ifnet *ifp;
	struct ifnet_hw_tsomax hw_tsomax;
	int cap = 0, ena = 0, mena;
	u_long hwa = 0;

	NET_EPOCH_ASSERT();
	VLAN_SXLOCK_ASSERT();

	p = PARENT(ifv);
	ifp = ifv->ifv_ifp;

	/* Mask parent interface enabled capabilities disabled by user. */
	mena = p->if_capenable & ifv->ifv_capenable;

	/*
	 * If the parent interface can do checksum offloading
	 * on VLANs, then propagate its hardware-assisted
	 * checksumming flags.  Also assert that checksum
	 * offloading requires hardware VLAN tagging.
	 */
	if (p->if_capabilities & IFCAP_VLAN_HWCSUM)
		cap |= p->if_capabilities & (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6);
	if (p->if_capenable & IFCAP_VLAN_HWCSUM &&
	    p->if_capenable & IFCAP_VLAN_HWTAGGING) {
		ena |= mena & (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6);
		if (ena & IFCAP_TXCSUM)
			hwa |= p->if_hwassist & (CSUM_IP | CSUM_TCP |
			    CSUM_UDP | CSUM_SCTP);
		if (ena & IFCAP_TXCSUM_IPV6)
			hwa |= p->if_hwassist & (CSUM_TCP_IPV6 |
			    CSUM_UDP_IPV6 | CSUM_SCTP_IPV6);
	}

	/*
	 * If the parent interface can do TSO on VLANs then
	 * propagate the hardware-assisted flag.  TSO on VLANs
	 * does not necessarily require hardware VLAN tagging.
	 */
	memset(&hw_tsomax, 0, sizeof(hw_tsomax));
	if_hw_tsomax_common(p, &hw_tsomax);
	if_hw_tsomax_update(ifp, &hw_tsomax);
	if (p->if_capabilities & IFCAP_VLAN_HWTSO)
		cap |= p->if_capabilities & IFCAP_TSO;
	if (p->if_capenable & IFCAP_VLAN_HWTSO) {
		ena |= mena & IFCAP_TSO;
		if (ena & IFCAP_TSO)
			hwa |= p->if_hwassist & CSUM_TSO;
	}

	/*
	 * If the parent interface can do LRO and checksum offloading on
	 * VLANs, then guess it may do LRO on VLANs.  A false positive here
	 * costs nothing, while a false negative may lead to some confusion.
	 */
	if (p->if_capabilities & IFCAP_VLAN_HWCSUM)
		cap |= p->if_capabilities & IFCAP_LRO;
	if (p->if_capenable & IFCAP_VLAN_HWCSUM)
		ena |= p->if_capenable & IFCAP_LRO;

	/*
	 * If the parent interface can offload TCP connections over VLANs then
	 * propagate its TOE capability to the VLAN interface.
	 *
	 * All TOE drivers in the tree today can deal with VLANs.  If this
	 * changes then IFCAP_VLAN_TOE should be promoted to a full capability
	 * with its own bit.
 */
#define	IFCAP_VLAN_TOE IFCAP_TOE
	if (p->if_capabilities & IFCAP_VLAN_TOE)
		cap |= p->if_capabilities & IFCAP_TOE;
	if (p->if_capenable & IFCAP_VLAN_TOE) {
		TOEDEV(ifp) = TOEDEV(p);
		ena |= mena & IFCAP_TOE;
	}

	/*
	 * If the parent interface supports dynamic link state, so does the
	 * VLAN interface.
	 */
	cap |= (p->if_capabilities & IFCAP_LINKSTATE);
	ena |= (mena & IFCAP_LINKSTATE);

#ifdef RATELIMIT
	/*
	 * If the parent interface supports ratelimiting, so does the
	 * VLAN interface.
	 */
	cap |= (p->if_capabilities & IFCAP_TXRTLMT);
	ena |= (mena & IFCAP_TXRTLMT);
#endif

	/*
	 * If the parent interface supports unmapped mbufs, so does
	 * the VLAN interface.  Note that this should be fine even for
	 * interfaces that don't support hardware tagging as headers
	 * are prepended in normal mbufs to unmapped mbufs holding
	 * payload data.
	 */
	cap |= (p->if_capabilities & IFCAP_NOMAP);
	ena |= (mena & IFCAP_NOMAP);

	/*
	 * If the parent interface can offload encryption and segmentation
	 * of TLS records over TCP, propagate its capability to the VLAN
	 * interface.
	 *
	 * All TLS drivers in the tree today can deal with VLANs.  If
	 * this ever changes, then a new IFCAP_VLAN_TXTLS can be
	 * defined.
	 */
	if (p->if_capabilities & IFCAP_TXTLS)
		cap |= p->if_capabilities & IFCAP_TXTLS;
	if (p->if_capenable & IFCAP_TXTLS)
		ena |= mena & IFCAP_TXTLS;

	ifp->if_capabilities = cap;
	ifp->if_capenable = ena;
	ifp->if_hwassist = hwa;
}

static void
vlan_trunk_capabilities(struct ifnet *ifp)
{
	struct epoch_tracker et;
	struct ifvlantrunk *trunk;
	struct ifvlan *ifv;

	VLAN_SLOCK();
	trunk = ifp->if_vlantrunk;
	if (trunk == NULL) {
		VLAN_SUNLOCK();
		return;
	}
	NET_EPOCH_ENTER(et);
	VLAN_FOREACH(ifv, trunk)
		vlan_capabilities(ifv);
	NET_EPOCH_EXIT(et);
	VLAN_SUNLOCK();
}

static int
vlan_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct ifnet *p;
	struct ifreq *ifr;
	struct ifaddr *ifa;
	struct ifvlan *ifv;
	struct ifvlantrunk *trunk;
	struct vlanreq vlr;
	int error = 0;

	ifr = (struct ifreq *)data;
	ifa = (struct ifaddr *) data;
	ifv = ifp->if_softc;

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(ifp, ifa);
#endif
		break;
	case SIOCGIFADDR:
		bcopy(IF_LLADDR(ifp), &ifr->ifr_addr.sa_data[0],
		    ifp->if_addrlen);
		break;
	case SIOCGIFMEDIA:
		VLAN_SLOCK();
		if (TRUNK(ifv) != NULL) {
			p = PARENT(ifv);
			if_ref(p);
			error = (*p->if_ioctl)(p, SIOCGIFMEDIA, data);
			if_rele(p);
			/* Limit the result to the parent's current config. */
			if (error == 0) {
				struct ifmediareq *ifmr;

				ifmr = (struct ifmediareq *)data;
				if (ifmr->ifm_count >= 1 && ifmr->ifm_ulist) {
					ifmr->ifm_count = 1;
					error = copyout(&ifmr->ifm_current,
					    ifmr->ifm_ulist,
					    sizeof(int));
				}
			}
		} else {
			error = EINVAL;
		}
		VLAN_SUNLOCK();
		break;

	case SIOCSIFMEDIA:
		error = EINVAL;
		break;

	case SIOCSIFMTU:
		/*
		 * Set the interface MTU.
		 */
		VLAN_SLOCK();
		trunk = TRUNK(ifv);
		if (trunk != NULL) {
			TRUNK_WLOCK(trunk);
			if (ifr->ifr_mtu >
			    (PARENT(ifv)->if_mtu - ifv->ifv_mtufudge) ||
			    ifr->ifr_mtu <
			    (ifv->ifv_mintu - ifv->ifv_mtufudge))
				error = EINVAL;
			else
				ifp->if_mtu = ifr->ifr_mtu;
			TRUNK_WUNLOCK(trunk);
		} else
			error = EINVAL;
		VLAN_SUNLOCK();
		break;

	case SIOCSETVLAN:
#ifdef VIMAGE
		/*
		 * XXXRW/XXXBZ: The goal in these checks is to allow a VLAN
		 * interface to be delegated to a jail without allowing the
		 * jail to change what underlying interface/VID it is
		 * associated with.  We are not entirely convinced that this
		 * is the right way to accomplish that policy goal.
		 */
		if (ifp->if_vnet != ifp->if_home_vnet) {
			error = EPERM;
			break;
		}
#endif
		error = copyin(ifr_data_get_ptr(ifr), &vlr, sizeof(vlr));
		if (error)
			break;
		if (vlr.vlr_parent[0] == '\0') {
			vlan_unconfig(ifp);
			break;
		}
		p = ifunit_ref(vlr.vlr_parent);
		if (p == NULL) {
			error = ENOENT;
			break;
		}
		error = vlan_config(ifv, p, vlr.vlr_tag);
		if_rele(p);
		break;

	case SIOCGETVLAN:
#ifdef VIMAGE
		if (ifp->if_vnet != ifp->if_home_vnet) {
			error = EPERM;
			break;
		}
#endif
		bzero(&vlr, sizeof(vlr));
		VLAN_SLOCK();
		if (TRUNK(ifv) != NULL) {
			strlcpy(vlr.vlr_parent, PARENT(ifv)->if_xname,
			    sizeof(vlr.vlr_parent));
			vlr.vlr_tag = ifv->ifv_vid;
		}
		VLAN_SUNLOCK();
		error = copyout(&vlr, ifr_data_get_ptr(ifr), sizeof(vlr));
		break;

	case SIOCSIFFLAGS:
		/*
		 * We should propagate selected flags to the parent,
		 * e.g., promiscuous mode.
		 */
		VLAN_XLOCK();
		if (TRUNK(ifv) != NULL)
			error = vlan_setflags(ifp, 1);
		VLAN_XUNLOCK();
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/*
		 * If we don't have a parent, just remember the membership for
		 * when we do.
		 *
		 * XXX We need the rmlock here to avoid sleeping while
		 * holding in6_multi_mtx.
		 */
		VLAN_XLOCK();
		trunk = TRUNK(ifv);
		if (trunk != NULL)
			error = vlan_setmulti(ifp);
		VLAN_XUNLOCK();

		break;
	case SIOCGVLANPCP:
#ifdef VIMAGE
		if (ifp->if_vnet != ifp->if_home_vnet) {
			error = EPERM;
			break;
		}
#endif
		ifr->ifr_vlan_pcp = ifv->ifv_pcp;
		break;

	case SIOCSVLANPCP:
#ifdef VIMAGE
		if (ifp->if_vnet != ifp->if_home_vnet) {
			error = EPERM;
			break;
		}
#endif
		error = priv_check(curthread, PRIV_NET_SETVLANPCP);
		if (error)
			break;
		if (ifr->ifr_vlan_pcp > 7) {
			error = EINVAL;
			break;
		}
		ifv->ifv_pcp = ifr->ifr_vlan_pcp;
		ifp->if_pcp = ifv->ifv_pcp;
		vlan_tag_recalculate(ifv);
		/* broadcast event about PCP change */
		EVENTHANDLER_INVOKE(ifnet_event, ifp, IFNET_EVENT_PCP);
		break;

	case SIOCSIFCAP:
		VLAN_SLOCK();
		ifv->ifv_capenable = ifr->ifr_reqcap;
		trunk = TRUNK(ifv);
		if (trunk != NULL) {
			struct epoch_tracker et;

			NET_EPOCH_ENTER(et);
			vlan_capabilities(ifv);
			NET_EPOCH_EXIT(et);
		}
		VLAN_SUNLOCK();
		break;

	default:
		error = EINVAL;
		break;
	}

	return (error);
}

#if defined(KERN_TLS) || defined(RATELIMIT)
static int
vlan_snd_tag_alloc(struct ifnet *ifp,
    union if_snd_tag_alloc_params *params,
    struct m_snd_tag **ppmt)
{
	struct epoch_tracker et;
	struct vlan_snd_tag *vst;
	struct ifvlan *ifv;
	struct ifnet *parent;
	int error;

	NET_EPOCH_ENTER(et);
	ifv = ifp->if_softc;
	if (ifv->ifv_trunk != NULL)
		parent = PARENT(ifv);
	else
		parent = NULL;
	if (parent == NULL || parent->if_snd_tag_alloc == NULL) {
		NET_EPOCH_EXIT(et);
		return (EOPNOTSUPP);
	}
	if_ref(parent);
	NET_EPOCH_EXIT(et);

	vst = malloc(sizeof(*vst), M_VLAN, M_NOWAIT);
	if (vst == NULL) {
		if_rele(parent);
		return (ENOMEM);
	}

	error = parent->if_snd_tag_alloc(parent, params, &vst->tag);
	if_rele(parent);
	if (error) {
		free(vst, M_VLAN);
		return (error);
	}

	m_snd_tag_init(&vst->com, ifp);

	*ppmt = &vst->com;
	return (0);
}

static int
vlan_snd_tag_modify(struct m_snd_tag *mst,
    union if_snd_tag_modify_params *params)
{
	struct vlan_snd_tag *vst;

	vst = mst_to_vst(mst);
	return (vst->tag->ifp->if_snd_tag_modify(vst->tag, params));
}

static int
vlan_snd_tag_query(struct m_snd_tag *mst,
    union if_snd_tag_query_params *params)
{
	struct vlan_snd_tag *vst;

	vst = mst_to_vst(mst);
	return (vst->tag->ifp->if_snd_tag_query(vst->tag, params));
}

static void
vlan_snd_tag_free(struct m_snd_tag *mst)
{
	struct vlan_snd_tag *vst;

	vst = mst_to_vst(mst);
	m_snd_tag_rele(vst->tag);
	free(vst, M_VLAN);
}
#endif