/*-
 * Copyright 1998 Massachusetts Institute of Technology
 * Copyright 2012 ADARA Networks, Inc.
 * Copyright 2017 Dell EMC Isilon
 *
 * Portions of this software were developed by Robert N. M. Watson under
 * contract to ADARA Networks, Inc.
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * if_vlan.c - pseudo-device driver for IEEE 802.1Q virtual LANs.
 * This is sort of sneaky in the implementation, since
 * we need to pretend to be enough of an Ethernet implementation
 * to make arp work.  The way we do this is by telling everyone
 * that we are an Ethernet, and then catch the packets that
 * ether_output() sends to us via if_transmit(), rewrite them for
 * use by the real outgoing interface, and ask it to send them.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_vlan.h"
#include "opt_ratelimit.h"

#include <sys/param.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rmlock.h>
#include <sys/priv.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/sx.h>
#include <sys/taskqueue.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_clone.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <net/vnet.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#define	VLAN_DEF_HWIDTH	4
#define	VLAN_IFFLAGS	(IFF_BROADCAST | IFF_MULTICAST)

#define	UP_AND_RUNNING(ifp) \
    ((ifp)->if_flags & IFF_UP && (ifp)->if_drv_flags & IFF_DRV_RUNNING)

LIST_HEAD(ifvlanhead, ifvlan);

struct ifvlantrunk {
	struct	ifnet	*parent;	/* parent interface of this trunk */
	struct	rmlock	lock;
#ifdef VLAN_ARRAY
#define	VLAN_ARRAY_SIZE	(EVL_VLID_MASK + 1)
	struct	ifvlan	*vlans[VLAN_ARRAY_SIZE]; /* static table */
#else
	struct	ifvlanhead *hash;	/* dynamic hash-list table */
	uint16_t	hmask;
	uint16_t	hwidth;
#endif
	int		refcnt;
};

/*
 * This macro provides a facility to iterate over every vlan on a trunk with
 * the assumption that none will be added/removed during iteration.
 */
#ifdef VLAN_ARRAY
#define VLAN_FOREACH(_ifv, _trunk) \
	size_t _i; \
	for (_i = 0; _i < VLAN_ARRAY_SIZE; _i++) \
		if (((_ifv) = (_trunk)->vlans[_i]) != NULL)
#else /* VLAN_ARRAY */
#define VLAN_FOREACH(_ifv, _trunk) \
	struct ifvlan *_next; \
	size_t _i; \
	for (_i = 0; _i < (1 << (_trunk)->hwidth); _i++) \
		LIST_FOREACH_SAFE((_ifv), &(_trunk)->hash[_i], ifv_list, _next)
#endif /* VLAN_ARRAY */

/*
 * This macro provides a facility to iterate over every vlan on a trunk while
 * also modifying the number of vlans on the trunk. The iteration continues
 * until some condition is met or there are no more vlans on the trunk.
 */
#ifdef VLAN_ARRAY
/* The VLAN_ARRAY case is simple -- just a for loop using the condition. */
#define VLAN_FOREACH_UNTIL_SAFE(_ifv, _trunk, _cond) \
	size_t _i; \
	for (_i = 0; !(_cond) && _i < VLAN_ARRAY_SIZE; _i++) \
		if (((_ifv) = (_trunk)->vlans[_i]))
#else /* VLAN_ARRAY */
/*
 * The hash table case is more complicated. We allow for the hash table to be
 * modified (i.e. vlans removed) while we are iterating over it. To allow for
 * this we must restart the iteration every time we "touch" something during
 * the iteration, since removal will resize the hash table and invalidate our
 * current position. If acting on the touched element causes the trunk to be
 * emptied, then iteration also stops.
 */
#define VLAN_FOREACH_UNTIL_SAFE(_ifv, _trunk, _cond) \
	size_t _i; \
	bool _touch = false; \
	for (_i = 0; \
	    !(_cond) && _i < (1 << (_trunk)->hwidth); \
	    _i = (_touch && ((_trunk) != NULL) ? 0 : _i + 1), _touch = false) \
	    if (((_ifv) = LIST_FIRST(&(_trunk)->hash[_i])) != NULL && \
		(_touch = true))
#endif /* VLAN_ARRAY */

struct vlan_mc_entry {
	struct sockaddr_dl		mc_addr;
	SLIST_ENTRY(vlan_mc_entry)	mc_entries;
};

struct	ifvlan {
	struct	ifvlantrunk *ifv_trunk;
	struct	ifnet *ifv_ifp;
#define	TRUNK(ifv)	((ifv)->ifv_trunk)
#define	PARENT(ifv)	((ifv)->ifv_trunk->parent)
	void	*ifv_cookie;
	int	ifv_pflags;	/* special flags we have set on parent */
	int	ifv_capenable;
	struct	ifv_linkmib {
		int	ifvm_encaplen;	/* encapsulation length */
		int	ifvm_mtufudge;	/* MTU fudged by this much */
		int	ifvm_mintu;	/* min transmission unit */
		uint16_t ifvm_proto;	/* encapsulation ethertype */
		uint16_t ifvm_tag;	/* tag to apply on packets leaving if */
		uint16_t ifvm_vid;	/* VLAN ID */
		uint8_t	ifvm_pcp;	/* Priority Code Point (PCP). */
	}	ifv_mib;
	struct task lladdr_task;
	SLIST_HEAD(, vlan_mc_entry) vlan_mc_listhead;
#ifndef VLAN_ARRAY
	LIST_ENTRY(ifvlan) ifv_list;
#endif
};
#define	ifv_proto	ifv_mib.ifvm_proto
#define	ifv_tag		ifv_mib.ifvm_tag
#define	ifv_vid		ifv_mib.ifvm_vid
#define	ifv_pcp		ifv_mib.ifvm_pcp
#define	ifv_encaplen	ifv_mib.ifvm_encaplen
#define	ifv_mtufudge	ifv_mib.ifvm_mtufudge
#define	ifv_mintu	ifv_mib.ifvm_mintu

/* Special flags we should propagate to parent. */
static struct {
	int flag;
	int (*func)(struct ifnet *, int);
} vlan_pflags[] = {
	{IFF_PROMISC, ifpromisc},
	{IFF_ALLMULTI, if_allmulti},
	{0, NULL}
};

SYSCTL_DECL(_net_link);
static SYSCTL_NODE(_net_link, IFT_L2VLAN, vlan, CTLFLAG_RW, 0,
    "IEEE 802.1Q VLAN");
static SYSCTL_NODE(_net_link_vlan, PF_LINK, link, CTLFLAG_RW, 0,
    "for consistency");

static VNET_DEFINE(int, soft_pad);
#define	V_soft_pad	VNET(soft_pad)
SYSCTL_INT(_net_link_vlan, OID_AUTO, soft_pad, CTLFLAG_RW | CTLFLAG_VNET,
    &VNET_NAME(soft_pad), 0, "pad short frames before tagging");

/*
 * For now, make preserving PCP via an mbuf tag optional, as it increases
 * per-packet memory allocations and frees.  In the future, it would be
 * preferable to reuse ether_vtag for this, or similar.
 */
static int vlan_mtag_pcp = 0;
SYSCTL_INT(_net_link_vlan, OID_AUTO, mtag_pcp, CTLFLAG_RW, &vlan_mtag_pcp, 0,
    "Retain VLAN PCP information as packets are passed up the stack");

static const char vlanname[] = "vlan";
static MALLOC_DEFINE(M_VLAN, vlanname, "802.1Q Virtual LAN Interface");

static eventhandler_tag ifdetach_tag;
static eventhandler_tag iflladdr_tag;

/*
 * if_vlan uses two module-level locks to allow concurrent modification of vlan
 * interfaces and (mostly) allow for vlans to be destroyed while they are being
 * used for tx/rx. To accomplish this in a way that has acceptable performance
 * and cooperation with other parts of the network stack there is a
 * non-sleepable rmlock(9) and an sx(9). Both locks are exclusively acquired
 * when destroying a vlan interface, i.e. when the if_vlantrunk field of struct
 * ifnet is de-allocated and NULL'd. Thus a reader holding either lock has a
 * guarantee that the struct ifvlantrunk references a valid vlan trunk.
 *
 * The performance-sensitive paths that warrant using the rmlock(9) are
 * vlan_transmit and vlan_input. Both have to check for the vlan interface's
 * existence using if_vlantrunk, and being in the network tx/rx paths the use
 * of an rmlock(9) gives a measurable improvement in performance.
 *
 * The reason for having an sx(9) is mostly because there are still areas that
 * must be sleepable and also have safe concurrent access to a vlan interface.
 * Since the sx(9) exists, it is used by default in most paths unless sleeping
 * is not permitted, or if it is not clear whether sleeping is permitted.
 *
 * Note that despite these protections, there is still an inherent race in the
 * destruction of vlans since there's no guarantee that the ifnet hasn't been
 * freed/reused when the tx/rx functions are called by the stack. This can only
 * be fixed by addressing ifnet's lifetime issues.
 */
#define	_VLAN_RM_ID ifv_rm_lock
#define	_VLAN_SX_ID ifv_sx

static struct rmlock _VLAN_RM_ID;
static struct sx _VLAN_SX_ID;

#define	VLAN_LOCKING_INIT() \
	rm_init(&_VLAN_RM_ID, "vlan_rm"); \
	sx_init(&_VLAN_SX_ID, "vlan_sx")

#define	VLAN_LOCKING_DESTROY() \
	rm_destroy(&_VLAN_RM_ID); \
	sx_destroy(&_VLAN_SX_ID)

#define	_VLAN_RM_TRACKER	_vlan_rm_tracker
#define	VLAN_RLOCK()		rm_rlock(&_VLAN_RM_ID, \
				    &_VLAN_RM_TRACKER)
#define	VLAN_RUNLOCK()		rm_runlock(&_VLAN_RM_ID, \
				    &_VLAN_RM_TRACKER)
#define	VLAN_WLOCK()		rm_wlock(&_VLAN_RM_ID)
#define	VLAN_WUNLOCK()		rm_wunlock(&_VLAN_RM_ID)
#define	VLAN_RLOCK_ASSERT()	rm_assert(&_VLAN_RM_ID, RA_RLOCKED)
#define	VLAN_WLOCK_ASSERT()	rm_assert(&_VLAN_RM_ID, RA_WLOCKED)
#define	VLAN_RWLOCK_ASSERT()	rm_assert(&_VLAN_RM_ID, RA_LOCKED)
#define	VLAN_LOCK_READER	struct rm_priotracker _VLAN_RM_TRACKER

#define	VLAN_SLOCK()		sx_slock(&_VLAN_SX_ID)
#define	VLAN_SUNLOCK()		sx_sunlock(&_VLAN_SX_ID)
#define	VLAN_XLOCK()		sx_xlock(&_VLAN_SX_ID)
#define	VLAN_XUNLOCK()		sx_xunlock(&_VLAN_SX_ID)
#define	VLAN_SLOCK_ASSERT()	sx_assert(&_VLAN_SX_ID, SA_SLOCKED)
#define	VLAN_XLOCK_ASSERT()	sx_assert(&_VLAN_SX_ID, SA_XLOCKED)
#define	VLAN_SXLOCK_ASSERT()	sx_assert(&_VLAN_SX_ID, SA_LOCKED)


/*
 * We also have a per-trunk rmlock(9), that is locked shared on packet
 * processing and exclusive when configuration is changed. Note: This should
 * only be acquired while there is a shared lock on either of the global locks
 * via VLAN_SLOCK or VLAN_RLOCK. Thus, an exclusive lock on the global locks
 * makes a call to TRUNK_RLOCK/TRUNK_WLOCK technically superfluous.
 */
#define	_TRUNK_RM_TRACKER		_trunk_rm_tracker
#define	TRUNK_LOCK_INIT(trunk)		rm_init(&(trunk)->lock, vlanname)
#define	TRUNK_LOCK_DESTROY(trunk)	rm_destroy(&(trunk)->lock)
#define	TRUNK_RLOCK(trunk)		rm_rlock(&(trunk)->lock, \
    &_TRUNK_RM_TRACKER)
#define	TRUNK_WLOCK(trunk)		rm_wlock(&(trunk)->lock)
#define	TRUNK_RUNLOCK(trunk)		rm_runlock(&(trunk)->lock, \
    &_TRUNK_RM_TRACKER)
#define	TRUNK_WUNLOCK(trunk)		rm_wunlock(&(trunk)->lock)
#define	TRUNK_RLOCK_ASSERT(trunk)	rm_assert(&(trunk)->lock, RA_RLOCKED)
#define	TRUNK_LOCK_ASSERT(trunk)	rm_assert(&(trunk)->lock, RA_LOCKED)
#define	TRUNK_WLOCK_ASSERT(trunk)	rm_assert(&(trunk)->lock, RA_WLOCKED)
#define	TRUNK_LOCK_READER	struct rm_priotracker _TRUNK_RM_TRACKER

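/*
 * Illustrative only, not part of the driver proper: a minimal sketch of the
 * read-side pattern described above, as used by lookup paths such as
 * vlan_devat() further down.  The global rmlock is taken first, the per-trunk
 * rmlock second, and both are dropped in reverse order.
 */
#if 0
static struct ifvlan *
vlan_lookup_example(struct ifnet *ifp, uint16_t vid)
{
	struct ifvlantrunk *trunk;
	struct ifvlan *ifv;
	VLAN_LOCK_READER;		/* declares the rm_priotracker */
	TRUNK_LOCK_READER;

	VLAN_RLOCK();			/* keeps if_vlantrunk from going away */
	trunk = ifp->if_vlantrunk;
	if (trunk == NULL) {
		VLAN_RUNLOCK();
		return (NULL);
	}
	TRUNK_RLOCK(trunk);		/* protects the per-trunk vid lookup */
	ifv = vlan_gethash(trunk, vid);
	TRUNK_RUNLOCK(trunk);
	VLAN_RUNLOCK();
	return (ifv);
}
#endif
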
/*
 * The VLAN_ARRAY substitutes the dynamic hash with a static array
 * with 4096 entries. In theory this can give a boost in processing,
 * however in practice it does not. Probably this is because the array
 * is too big to fit into CPU cache.
 */
#ifndef VLAN_ARRAY
static	void vlan_inithash(struct ifvlantrunk *trunk);
static	void vlan_freehash(struct ifvlantrunk *trunk);
static	int vlan_inshash(struct ifvlantrunk *trunk, struct ifvlan *ifv);
static	int vlan_remhash(struct ifvlantrunk *trunk, struct ifvlan *ifv);
static	void vlan_growhash(struct ifvlantrunk *trunk, int howmuch);
static __inline struct ifvlan * vlan_gethash(struct ifvlantrunk *trunk,
	uint16_t vid);
#endif
static	void trunk_destroy(struct ifvlantrunk *trunk);

static	void vlan_init(void *foo);
static	void vlan_input(struct ifnet *ifp, struct mbuf *m);
static	int vlan_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr);
#ifdef RATELIMIT
static	int vlan_snd_tag_alloc(struct ifnet *,
    union if_snd_tag_alloc_params *, struct m_snd_tag **);
#endif
static	void vlan_qflush(struct ifnet *ifp);
static	int vlan_setflag(struct ifnet *ifp, int flag, int status,
    int (*func)(struct ifnet *, int));
static	int vlan_setflags(struct ifnet *ifp, int status);
static	int vlan_setmulti(struct ifnet *ifp);
static	int vlan_transmit(struct ifnet *ifp, struct mbuf *m);
static	void vlan_unconfig(struct ifnet *ifp);
static	void vlan_unconfig_locked(struct ifnet *ifp, int departing);
static	int vlan_config(struct ifvlan *ifv, struct ifnet *p, uint16_t tag);
static	void vlan_link_state(struct ifnet *ifp);
static	void vlan_capabilities(struct ifvlan *ifv);
static	void vlan_trunk_capabilities(struct ifnet *ifp);

static	struct ifnet *vlan_clone_match_ethervid(const char *, int *);
static	int vlan_clone_match(struct if_clone *, const char *);
static	int vlan_clone_create(struct if_clone *, char *, size_t, caddr_t);
static	int vlan_clone_destroy(struct if_clone *, struct ifnet *);

static	void vlan_ifdetach(void *arg, struct ifnet *ifp);
static	void vlan_iflladdr(void *arg, struct ifnet *ifp);

static	void vlan_lladdr_fn(void *arg, int pending);

static struct if_clone *vlan_cloner;

#ifdef VIMAGE
static VNET_DEFINE(struct if_clone *, vlan_cloner);
#define	V_vlan_cloner	VNET(vlan_cloner)
#endif

#ifndef VLAN_ARRAY
#define HASH(n, m)	((((n) >> 8) ^ ((n) >> 4) ^ (n)) & (m))
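/*
 * Illustrative only: with the default hash width (VLAN_DEF_HWIDTH == 4) the
 * table has 16 buckets and hmask == 0xf, so for example VID 0x123 hashes as
 * (0x1 ^ 0x12 ^ 0x123) & 0xf == 0x130 & 0xf == 0, i.e. bucket 0.  Folding the
 * upper nibbles in before masking keeps VIDs that differ only in their high
 * bits from piling into the same bucket.
 */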

static void
vlan_inithash(struct ifvlantrunk *trunk)
{
	int i, n;

	/*
	 * The trunk must not be locked here since we call malloc(M_WAITOK).
	 * It is OK in case this function is called before the trunk struct
	 * gets hooked up and becomes visible from other threads.
	 */

	KASSERT(trunk->hwidth == 0 && trunk->hash == NULL,
	    ("%s: hash already initialized", __func__));

	trunk->hwidth = VLAN_DEF_HWIDTH;
	n = 1 << trunk->hwidth;
	trunk->hmask = n - 1;
	trunk->hash = malloc(sizeof(struct ifvlanhead) * n, M_VLAN, M_WAITOK);
	for (i = 0; i < n; i++)
		LIST_INIT(&trunk->hash[i]);
}

static void
vlan_freehash(struct ifvlantrunk *trunk)
{
#ifdef INVARIANTS
	int i;

	KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__));
	for (i = 0; i < (1 << trunk->hwidth); i++)
		KASSERT(LIST_EMPTY(&trunk->hash[i]),
		    ("%s: hash table not empty", __func__));
#endif
	free(trunk->hash, M_VLAN);
	trunk->hash = NULL;
	trunk->hwidth = trunk->hmask = 0;
}

static int
vlan_inshash(struct ifvlantrunk *trunk, struct ifvlan *ifv)
{
	int i, b;
	struct ifvlan *ifv2;

	TRUNK_WLOCK_ASSERT(trunk);
	KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__));

	b = 1 << trunk->hwidth;
	i = HASH(ifv->ifv_vid, trunk->hmask);
	LIST_FOREACH(ifv2, &trunk->hash[i], ifv_list)
		if (ifv->ifv_vid == ifv2->ifv_vid)
			return (EEXIST);

	/*
	 * Grow the hash when the number of vlans exceeds half of the number of
	 * hash buckets squared. This will make the average linked-list length
	 * buckets/2.
	 */
	if (trunk->refcnt > (b * b) / 2) {
		vlan_growhash(trunk, 1);
		i = HASH(ifv->ifv_vid, trunk->hmask);
	}
	LIST_INSERT_HEAD(&trunk->hash[i], ifv, ifv_list);
	trunk->refcnt++;

	return (0);
}

static int
vlan_remhash(struct ifvlantrunk *trunk, struct ifvlan *ifv)
{
	int i, b;
	struct ifvlan *ifv2;

	TRUNK_WLOCK_ASSERT(trunk);
	KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__));

	b = 1 << trunk->hwidth;
	i = HASH(ifv->ifv_vid, trunk->hmask);
	LIST_FOREACH(ifv2, &trunk->hash[i], ifv_list)
		if (ifv2 == ifv) {
			trunk->refcnt--;
			LIST_REMOVE(ifv2, ifv_list);
			if (trunk->refcnt < (b * b) / 2)
				vlan_growhash(trunk, -1);
			return (0);
		}

	panic("%s: vlan not found\n", __func__);
	return (ENOENT); /*NOTREACHED*/
}

/*
 * Grow the hash larger or smaller if memory permits.
 */
static void
vlan_growhash(struct ifvlantrunk *trunk, int howmuch)
{
	struct ifvlan *ifv;
	struct ifvlanhead *hash2;
	int hwidth2, i, j, n, n2;

	TRUNK_WLOCK_ASSERT(trunk);
	KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__));

	if (howmuch == 0) {
		/* Harmless yet obvious coding error */
		printf("%s: howmuch is 0\n", __func__);
		return;
	}

	hwidth2 = trunk->hwidth + howmuch;
	n = 1 << trunk->hwidth;
	n2 = 1 << hwidth2;
	/* Do not shrink the table below the default */
	if (hwidth2 < VLAN_DEF_HWIDTH)
		return;

	/* M_NOWAIT because we're called with trunk mutex held */
	hash2 = malloc(sizeof(struct ifvlanhead) * n2, M_VLAN, M_NOWAIT);
	if (hash2 == NULL) {
		printf("%s: out of memory -- hash size not changed\n",
		    __func__);
		return;		/* We can live with the old hash table */
	}
	for (j = 0; j < n2; j++)
		LIST_INIT(&hash2[j]);
	for (i = 0; i < n; i++)
		while ((ifv = LIST_FIRST(&trunk->hash[i])) != NULL) {
			LIST_REMOVE(ifv, ifv_list);
			j = HASH(ifv->ifv_vid, n2 - 1);
			LIST_INSERT_HEAD(&hash2[j], ifv, ifv_list);
		}
	free(trunk->hash, M_VLAN);
	trunk->hash = hash2;
	trunk->hwidth = hwidth2;
	trunk->hmask = n2 - 1;

	if (bootverbose)
		if_printf(trunk->parent,
		    "VLAN hash table resized from %d to %d buckets\n", n, n2);
}

static __inline struct ifvlan *
vlan_gethash(struct ifvlantrunk *trunk, uint16_t vid)
{
	struct ifvlan *ifv;

	TRUNK_RLOCK_ASSERT(trunk);

	LIST_FOREACH(ifv, &trunk->hash[HASH(vid, trunk->hmask)], ifv_list)
		if (ifv->ifv_vid == vid)
			return (ifv);
	return (NULL);
}

#if 0
/* Debugging code to view the hashtables. */
static void
vlan_dumphash(struct ifvlantrunk *trunk)
{
	int i;
	struct ifvlan *ifv;

	for (i = 0; i < (1 << trunk->hwidth); i++) {
		printf("%d: ", i);
		LIST_FOREACH(ifv, &trunk->hash[i], ifv_list)
			printf("%s ", ifv->ifv_ifp->if_xname);
		printf("\n");
	}
}
#endif /* 0 */
#else

static __inline struct ifvlan *
vlan_gethash(struct ifvlantrunk *trunk, uint16_t vid)
{

	return trunk->vlans[vid];
}

static __inline int
vlan_inshash(struct ifvlantrunk *trunk, struct ifvlan *ifv)
{

	if (trunk->vlans[ifv->ifv_vid] != NULL)
		return EEXIST;
	trunk->vlans[ifv->ifv_vid] = ifv;
	trunk->refcnt++;

	return (0);
}

static __inline int
vlan_remhash(struct ifvlantrunk *trunk, struct ifvlan *ifv)
{

	trunk->vlans[ifv->ifv_vid] = NULL;
	trunk->refcnt--;

	return (0);
}

static __inline void
vlan_freehash(struct ifvlantrunk *trunk)
{
}

static __inline void
vlan_inithash(struct ifvlantrunk *trunk)
{
}

#endif /* !VLAN_ARRAY */

static void
trunk_destroy(struct ifvlantrunk *trunk)
{
	VLAN_XLOCK_ASSERT();
	VLAN_WLOCK_ASSERT();

	vlan_freehash(trunk);
	trunk->parent->if_vlantrunk = NULL;
	TRUNK_LOCK_DESTROY(trunk);
	if_rele(trunk->parent);
	free(trunk, M_VLAN);
}

/*
 * Program our multicast filter. What we're actually doing is
 * programming the multicast filter of the parent. This has the
 * side effect of causing the parent interface to receive multicast
 * traffic that it doesn't really want, which ends up being discarded
 * later by the upper protocol layers.  Unfortunately, there's no way
 * to avoid this: there really is only one physical interface.
 */
static int
vlan_setmulti(struct ifnet *ifp)
{
	struct ifnet		*ifp_p;
	struct ifmultiaddr	*ifma;
	struct ifvlan		*sc;
	struct vlan_mc_entry	*mc;
	int			error;

	/*
	 * XXX This stupidly needs the rmlock to avoid sleeping while holding
	 * the in6_multi_mtx (see in6_mc_join_locked).
	 */
	VLAN_RWLOCK_ASSERT();

	/* Find the parent. */
	sc = ifp->if_softc;
	TRUNK_WLOCK_ASSERT(TRUNK(sc));
	ifp_p = PARENT(sc);

	CURVNET_SET_QUIET(ifp_p->if_vnet);

	/* First, remove any existing filter entries. */
	while ((mc = SLIST_FIRST(&sc->vlan_mc_listhead)) != NULL) {
		SLIST_REMOVE_HEAD(&sc->vlan_mc_listhead, mc_entries);
		(void)if_delmulti(ifp_p, (struct sockaddr *)&mc->mc_addr);
		free(mc, M_VLAN);
	}

	/* Now program new ones. */
	IF_ADDR_WLOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		mc = malloc(sizeof(struct vlan_mc_entry), M_VLAN, M_NOWAIT);
		if (mc == NULL) {
			IF_ADDR_WUNLOCK(ifp);
			return (ENOMEM);
		}
		bcopy(ifma->ifma_addr, &mc->mc_addr, ifma->ifma_addr->sa_len);
		mc->mc_addr.sdl_index = ifp_p->if_index;
		SLIST_INSERT_HEAD(&sc->vlan_mc_listhead, mc, mc_entries);
	}
	IF_ADDR_WUNLOCK(ifp);
	SLIST_FOREACH (mc, &sc->vlan_mc_listhead, mc_entries) {
		error = if_addmulti(ifp_p, (struct sockaddr *)&mc->mc_addr,
		    NULL);
		if (error)
			return (error);
	}

	CURVNET_RESTORE();
	return (0);
}

/*
 * A handler for parent interface link layer address changes.
 * If the parent interface link layer address is changed we
 * should also change it on all children vlans.
 */
static void
vlan_iflladdr(void *arg __unused, struct ifnet *ifp)
{
	struct ifvlan *ifv;
	struct ifnet *ifv_ifp;
	struct ifvlantrunk *trunk;
	struct sockaddr_dl *sdl;
	VLAN_LOCK_READER;

	/* Need the rmlock since this is run on taskqueue_swi. */
	VLAN_RLOCK();
	trunk = ifp->if_vlantrunk;
	if (trunk == NULL) {
		VLAN_RUNLOCK();
		return;
	}

	/*
	 * OK, it's a trunk.  Loop over and change all vlan's lladdrs on it.
	 * We need an exclusive lock here to prevent concurrent SIOCSIFLLADDR
	 * ioctl calls on the parent garbling the lladdr of the child vlan.
	 */
	TRUNK_WLOCK(trunk);
	VLAN_FOREACH(ifv, trunk) {
		/*
		 * Copy the new lladdr into the ifv_ifp, enqueue a task
		 * to actually call if_setlladdr.  if_setlladdr needs to
		 * be deferred to a taskqueue because it will call into
		 * the if_vlan ioctl path and try to acquire the global
		 * lock.
		 */
		ifv_ifp = ifv->ifv_ifp;
		bcopy(IF_LLADDR(ifp), IF_LLADDR(ifv_ifp),
		    ifp->if_addrlen);
		sdl = (struct sockaddr_dl *)ifv_ifp->if_addr->ifa_addr;
		sdl->sdl_alen = ifp->if_addrlen;
		taskqueue_enqueue(taskqueue_thread, &ifv->lladdr_task);
	}
	TRUNK_WUNLOCK(trunk);
	VLAN_RUNLOCK();
}

/*
 * A handler for network interface departure events.
 * Track departure of trunks here so that we don't access invalid
 * pointers or whatever if a trunk is ripped from under us, e.g.,
 * by ejecting its hot-plug card.  However, if an ifnet is simply
 * being renamed, then there's no need to tear down the state.
 */
static void
vlan_ifdetach(void *arg __unused, struct ifnet *ifp)
{
	struct ifvlan *ifv;
	struct ifvlantrunk *trunk;

	/* If the ifnet is just being renamed, don't do anything. */
	if (ifp->if_flags & IFF_RENAMING)
		return;
	VLAN_XLOCK();
	trunk = ifp->if_vlantrunk;
	if (trunk == NULL) {
		VLAN_XUNLOCK();
		return;
	}

	/*
	 * OK, it's a trunk.  Loop over and detach all vlan's on it.
	 * Check trunk pointer after each vlan_unconfig() as it will
	 * free it and set to NULL after the last vlan was detached.
	 */
	VLAN_FOREACH_UNTIL_SAFE(ifv, ifp->if_vlantrunk,
	    ifp->if_vlantrunk == NULL)
		vlan_unconfig_locked(ifv->ifv_ifp, 1);

	/* Trunk should have been destroyed in vlan_unconfig(). */
	KASSERT(ifp->if_vlantrunk == NULL, ("%s: purge failed", __func__));
	VLAN_XUNLOCK();
}

/*
 * Return the trunk device for a virtual interface.
 */
static struct ifnet *
vlan_trunkdev(struct ifnet *ifp)
{
	struct ifvlan *ifv;
	VLAN_LOCK_READER;

	if (ifp->if_type != IFT_L2VLAN)
		return (NULL);

	/* Not clear if callers are sleepable, so acquire the rmlock. */
	VLAN_RLOCK();
	ifv = ifp->if_softc;
	ifp = NULL;
	if (ifv->ifv_trunk)
		ifp = PARENT(ifv);
	VLAN_RUNLOCK();
	return (ifp);
}

/*
 * Return the 12-bit VLAN VID for this interface, for use by external
 * components such as Infiniband.
 *
 * XXXRW: Note that the function name here is historical; it should be named
 * vlan_vid().
 */
static int
vlan_tag(struct ifnet *ifp, uint16_t *vidp)
{
	struct ifvlan *ifv;

	if (ifp->if_type != IFT_L2VLAN)
		return (EINVAL);
	ifv = ifp->if_softc;
	*vidp = ifv->ifv_vid;
	return (0);
}

/*
 * Return a driver specific cookie for this interface.  Synchronization
 * with setcookie must be provided by the driver.
 */
static void *
vlan_cookie(struct ifnet *ifp)
{
	struct ifvlan *ifv;

	if (ifp->if_type != IFT_L2VLAN)
		return (NULL);
	ifv = ifp->if_softc;
	return (ifv->ifv_cookie);
}

/*
 * Store a cookie in our softc that drivers can use to store driver
 * private per-instance data in.
 */
static int
vlan_setcookie(struct ifnet *ifp, void *cookie)
{
	struct ifvlan *ifv;

	if (ifp->if_type != IFT_L2VLAN)
		return (EINVAL);
	ifv = ifp->if_softc;
	ifv->ifv_cookie = cookie;
	return (0);
}

/*
 * Return the vlan device present at the specific VID.
 */
static struct ifnet *
vlan_devat(struct ifnet *ifp, uint16_t vid)
{
	struct ifvlantrunk *trunk;
	struct ifvlan *ifv;
	VLAN_LOCK_READER;
	TRUNK_LOCK_READER;

	/* Not clear if callers are sleepable, so acquire the rmlock. */
	VLAN_RLOCK();
	trunk = ifp->if_vlantrunk;
	if (trunk == NULL) {
		VLAN_RUNLOCK();
		return (NULL);
	}
	ifp = NULL;
	TRUNK_RLOCK(trunk);
	ifv = vlan_gethash(trunk, vid);
	if (ifv)
		ifp = ifv->ifv_ifp;
	TRUNK_RUNLOCK(trunk);
	VLAN_RUNLOCK();
	return (ifp);
}

/*
 * Recalculate the cached VLAN tag exposed via the MIB.
 */
static void
vlan_tag_recalculate(struct ifvlan *ifv)
{

	ifv->ifv_tag = EVL_MAKETAG(ifv->ifv_vid, ifv->ifv_pcp, 0);
}

/*
 * VLAN support can be loaded as a module.  The only place in the
 * system that's intimately aware of this is ether_input.  We hook
 * into this code through vlan_input_p which is defined there and
 * set here.  No one else in the system should be aware of this so
 * we use an explicit reference here.
 */
extern	void (*vlan_input_p)(struct ifnet *, struct mbuf *);

/* For if_link_state_change() eyes only... */
extern	void (*vlan_link_state_p)(struct ifnet *);

static int
vlan_modevent(module_t mod, int type, void *data)
{

	switch (type) {
	case MOD_LOAD:
		ifdetach_tag = EVENTHANDLER_REGISTER(ifnet_departure_event,
		    vlan_ifdetach, NULL, EVENTHANDLER_PRI_ANY);
		if (ifdetach_tag == NULL)
			return (ENOMEM);
		iflladdr_tag = EVENTHANDLER_REGISTER(iflladdr_event,
		    vlan_iflladdr, NULL, EVENTHANDLER_PRI_ANY);
		if (iflladdr_tag == NULL)
			return (ENOMEM);
		VLAN_LOCKING_INIT();
		vlan_input_p = vlan_input;
		vlan_link_state_p = vlan_link_state;
		vlan_trunk_cap_p = vlan_trunk_capabilities;
		vlan_trunkdev_p = vlan_trunkdev;
		vlan_cookie_p = vlan_cookie;
		vlan_setcookie_p = vlan_setcookie;
		vlan_tag_p = vlan_tag;
		vlan_devat_p = vlan_devat;
#ifndef VIMAGE
		vlan_cloner = if_clone_advanced(vlanname, 0, vlan_clone_match,
		    vlan_clone_create, vlan_clone_destroy);
#endif
		if (bootverbose)
			printf("vlan: initialized, using "
#ifdef VLAN_ARRAY
			       "full-size arrays"
#else
			       "hash tables with chaining"
#endif

			       "\n");
		break;
	case MOD_UNLOAD:
#ifndef VIMAGE
		if_clone_detach(vlan_cloner);
#endif
		EVENTHANDLER_DEREGISTER(ifnet_departure_event, ifdetach_tag);
		EVENTHANDLER_DEREGISTER(iflladdr_event, iflladdr_tag);
		vlan_input_p = NULL;
		vlan_link_state_p = NULL;
		vlan_trunk_cap_p = NULL;
		vlan_trunkdev_p = NULL;
		vlan_tag_p = NULL;
		vlan_cookie_p = NULL;
		vlan_setcookie_p = NULL;
		vlan_devat_p = NULL;
		VLAN_LOCKING_DESTROY();
		if (bootverbose)
			printf("vlan: unloaded\n");
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (0);
}

static moduledata_t vlan_mod = {
	"if_vlan",
	vlan_modevent,
	0
};

DECLARE_MODULE(if_vlan, vlan_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
MODULE_VERSION(if_vlan, 3);

#ifdef VIMAGE
static void
vnet_vlan_init(const void *unused __unused)
{

	vlan_cloner = if_clone_advanced(vlanname, 0, vlan_clone_match,
	    vlan_clone_create, vlan_clone_destroy);
	V_vlan_cloner = vlan_cloner;
}
VNET_SYSINIT(vnet_vlan_init, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY,
    vnet_vlan_init, NULL);

static void
vnet_vlan_uninit(const void *unused __unused)
{

	if_clone_detach(V_vlan_cloner);
}
VNET_SYSUNINIT(vnet_vlan_uninit, SI_SUB_INIT_IF, SI_ORDER_FIRST,
    vnet_vlan_uninit, NULL);
#endif

/*
 * Check for <etherif>.<vlan> style interface names.
 */
static struct ifnet *
vlan_clone_match_ethervid(const char *name, int *vidp)
{
	char ifname[IFNAMSIZ];
	char *cp;
	struct ifnet *ifp;
	int vid;

	strlcpy(ifname, name, IFNAMSIZ);
	if ((cp = strchr(ifname, '.')) == NULL)
		return (NULL);
	*cp = '\0';
	if ((ifp = ifunit_ref(ifname)) == NULL)
		return (NULL);
	/* Parse VID. */
	if (*++cp == '\0') {
		if_rele(ifp);
		return (NULL);
	}
	vid = 0;
	for(; *cp >= '0' && *cp <= '9'; cp++)
		vid = (vid * 10) + (*cp - '0');
	if (*cp != '\0') {
		if_rele(ifp);
		return (NULL);
	}
	if (vidp != NULL)
		*vidp = vid;

	return (ifp);
}

static int
vlan_clone_match(struct if_clone *ifc, const char *name)
{
	const char *cp;

	if (vlan_clone_match_ethervid(name, NULL) != NULL)
		return (1);

	if (strncmp(vlanname, name, strlen(vlanname)) != 0)
		return (0);
	for (cp = name + 4; *cp != '\0'; cp++) {
		if (*cp < '0' || *cp > '9')
			return (0);
	}

	return (1);
}

static int
vlan_clone_create(struct if_clone *ifc, char *name, size_t len, caddr_t params)
{
	char *dp;
	int wildcard;
	int unit;
	int error;
	int vid;
	struct ifvlan *ifv;
	struct ifnet *ifp;
	struct ifnet *p;
	struct ifaddr *ifa;
	struct sockaddr_dl *sdl;
	struct vlanreq vlr;
	static const u_char eaddr[ETHER_ADDR_LEN];	/* 00:00:00:00:00:00 */

	/*
	 * There are 3 (ugh) ways to specify the cloned device:
	 * o pass a parameter block with the clone request.
	 * o specify parameters in the text of the clone device name
	 * o specify no parameters and get an unattached device that
	 *   must be configured separately.
	 * The first technique is preferred; the latter two are
	 * supported for backwards compatibility.
	 *
	 * XXXRW: Note historic use of the word "tag" here.  New ioctls may be
	 * called for.
	 */
	if (params) {
		error = copyin(params, &vlr, sizeof(vlr));
		if (error)
			return error;
		p = ifunit_ref(vlr.vlr_parent);
		if (p == NULL)
			return (ENXIO);
		error = ifc_name2unit(name, &unit);
		if (error != 0) {
			if_rele(p);
			return (error);
		}
		vid = vlr.vlr_tag;
		wildcard = (unit < 0);
	} else if ((p = vlan_clone_match_ethervid(name, &vid)) != NULL) {
		unit = -1;
		wildcard = 0;
	} else {
		p = NULL;
		error = ifc_name2unit(name, &unit);
		if (error != 0)
			return (error);

		wildcard = (unit < 0);
	}

	error = ifc_alloc_unit(ifc, &unit);
	if (error != 0) {
		if (p != NULL)
			if_rele(p);
		return (error);
	}

	/* In the wildcard case, we need to update the name. */
	if (wildcard) {
		for (dp = name; *dp != '\0'; dp++);
		if (snprintf(dp, len - (dp-name), "%d", unit) >
		    len - (dp-name) - 1) {
			panic("%s: interface name too long", __func__);
		}
	}

	ifv = malloc(sizeof(struct ifvlan), M_VLAN, M_WAITOK | M_ZERO);
	ifp = ifv->ifv_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		ifc_free_unit(ifc, unit);
		free(ifv, M_VLAN);
		if (p != NULL)
			if_rele(p);
		return (ENOSPC);
	}
	SLIST_INIT(&ifv->vlan_mc_listhead);
	ifp->if_softc = ifv;
	/*
	 * Set the name manually rather than using if_initname because
	 * we don't conform to the default naming convention for interfaces.
	 */
	strlcpy(ifp->if_xname, name, IFNAMSIZ);
	ifp->if_dname = vlanname;
	ifp->if_dunit = unit;
	/* NB: flags are not set here */
	ifp->if_linkmib = &ifv->ifv_mib;
	ifp->if_linkmiblen = sizeof(ifv->ifv_mib);
	/* NB: mtu is not set here */

	ifp->if_init = vlan_init;
	ifp->if_transmit = vlan_transmit;
	ifp->if_qflush = vlan_qflush;
	ifp->if_ioctl = vlan_ioctl;
#ifdef RATELIMIT
	ifp->if_snd_tag_alloc = vlan_snd_tag_alloc;
#endif
	ifp->if_flags = VLAN_IFFLAGS;
	ether_ifattach(ifp, eaddr);
	/* Now undo some of the damage... */
	ifp->if_baudrate = 0;
	ifp->if_type = IFT_L2VLAN;
	ifp->if_hdrlen = ETHER_VLAN_ENCAP_LEN;
	ifa = ifp->if_addr;
	sdl = (struct sockaddr_dl *)ifa->ifa_addr;
	sdl->sdl_type = IFT_L2VLAN;

	if (p != NULL) {
		error = vlan_config(ifv, p, vid);
		if_rele(p);
		if (error != 0) {
			/*
			 * Since we've partially failed, we need to back
			 * out all the way, otherwise userland could get
			 * confused.  Thus, we destroy the interface.
			 */
			ether_ifdetach(ifp);
			vlan_unconfig(ifp);
			if_free(ifp);
			ifc_free_unit(ifc, unit);
			free(ifv, M_VLAN);

			return (error);
		}
	}

	return (0);
}

static int
vlan_clone_destroy(struct if_clone *ifc, struct ifnet *ifp)
{
	struct ifvlan *ifv = ifp->if_softc;
	int unit = ifp->if_dunit;

	ether_ifdetach(ifp);	/* first, remove it from system-wide lists */
	vlan_unconfig(ifp);	/* now it can be unconfigured and freed */
	/*
	 * We should have the only reference to the ifv now, so we can now
	 * drain any remaining lladdr task before freeing the ifnet and the
	 * ifvlan.
	 */
	taskqueue_drain(taskqueue_thread, &ifv->lladdr_task);
	if_free(ifp);
	free(ifv, M_VLAN);
	ifc_free_unit(ifc, unit);

	return (0);
}

/*
 * The ifp->if_init entry point for vlan(4) is a no-op.
 */
static void
vlan_init(void *foo __unused)
{
}

/*
 * The if_transmit method for vlan(4) interface.
 */
static int
vlan_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct ifvlan *ifv;
	struct ifnet *p;
	struct m_tag *mtag;
	uint16_t tag;
	int error, len, mcast;
	VLAN_LOCK_READER;

	VLAN_RLOCK();
	ifv = ifp->if_softc;
	if (TRUNK(ifv) == NULL) {
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		VLAN_RUNLOCK();
		m_freem(m);
		return (ENETDOWN);
	}
	p = PARENT(ifv);
	len = m->m_pkthdr.len;
	mcast = (m->m_flags & (M_MCAST | M_BCAST)) ? 1 : 0;

	BPF_MTAP(ifp, m);

	/*
	 * Do not run parent's if_transmit() if the parent is not up,
	 * or parent's driver will cause a system crash.
	 */
	if (!UP_AND_RUNNING(p)) {
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		VLAN_RUNLOCK();
		m_freem(m);
		return (ENETDOWN);
	}

	/*
	 * Pad the frame to the minimum size allowed if told to.
	 * This option is in accord with IEEE Std 802.1Q, 2003 Ed.,
	 * paragraph C.4.4.3.b.  It can help to work around buggy
	 * bridges that violate paragraph C.4.4.3.a from the same
	 * document, i.e., fail to pad short frames after untagging.
	 * E.g., a tagged frame 66 bytes long (incl. FCS) is OK, but
	 * untagging it will produce a 62-byte frame, which is a runt
	 * and requires padding.  There are VLAN-enabled network
	 * devices that just discard such runts instead or mishandle
	 * them somehow.
	 */
	if (V_soft_pad && p->if_type == IFT_ETHER) {
		static char pad[8];	/* just zeros */
		int n;

		for (n = ETHERMIN + ETHER_HDR_LEN - m->m_pkthdr.len;
		     n > 0; n -= sizeof(pad))
			if (!m_append(m, min(n, sizeof(pad)), pad))
				break;

		if (n > 0) {
			if_printf(ifp, "cannot pad short frame\n");
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			VLAN_RUNLOCK();
			m_freem(m);
			return (0);
		}
	}

	/*
	 * If underlying interface can do VLAN tag insertion itself,
	 * just pass the packet along. However, we need some way to
	 * tell the interface where the packet came from so that it
	 * knows how to find the VLAN tag to use, so we attach a
	 * packet tag that holds it.
	 */
	if (vlan_mtag_pcp && (mtag = m_tag_locate(m, MTAG_8021Q,
	    MTAG_8021Q_PCP_OUT, NULL)) != NULL)
		tag = EVL_MAKETAG(ifv->ifv_vid, *(uint8_t *)(mtag + 1), 0);
	else
		tag = ifv->ifv_tag;
	if (p->if_capenable & IFCAP_VLAN_HWTAGGING) {
		m->m_pkthdr.ether_vtag = tag;
		m->m_flags |= M_VLANTAG;
	} else {
		m = ether_vlanencap(m, tag);
		if (m == NULL) {
			if_printf(ifp, "unable to prepend VLAN header\n");
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			VLAN_RUNLOCK();
			return (0);
		}
	}

	/*
	 * Send it, precisely as ether_output() would have.
	 */
	error = (p->if_transmit)(p, m);
	if (error == 0) {
		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		if_inc_counter(ifp, IFCOUNTER_OBYTES, len);
		if_inc_counter(ifp, IFCOUNTER_OMCASTS, mcast);
	} else
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	VLAN_RUNLOCK();
	return (error);
}

/*
 * The ifp->if_qflush entry point for vlan(4) is a no-op.
 */
static void
vlan_qflush(struct ifnet *ifp __unused)
{
}

static void
vlan_input(struct ifnet *ifp, struct mbuf *m)
{
	struct ifvlantrunk *trunk;
	struct ifvlan *ifv;
	VLAN_LOCK_READER;
	TRUNK_LOCK_READER;
	struct m_tag *mtag;
	uint16_t vid, tag;

	VLAN_RLOCK();
	trunk = ifp->if_vlantrunk;
	if (trunk == NULL) {
		VLAN_RUNLOCK();
		m_freem(m);
		return;
	}

	if (m->m_flags & M_VLANTAG) {
		/*
		 * Packet is tagged, but m contains a normal
		 * Ethernet frame; the tag is stored out-of-band.
		 */
		tag = m->m_pkthdr.ether_vtag;
		m->m_flags &= ~M_VLANTAG;
	} else {
		struct ether_vlan_header *evl;

		/*
		 * Packet is tagged in-band as specified by 802.1q.
		 */
		switch (ifp->if_type) {
		case IFT_ETHER:
			if (m->m_len < sizeof(*evl) &&
			    (m = m_pullup(m, sizeof(*evl))) == NULL) {
				if_printf(ifp, "cannot pullup VLAN header\n");
				VLAN_RUNLOCK();
				return;
			}
			evl = mtod(m, struct ether_vlan_header *);
			tag = ntohs(evl->evl_tag);

			/*
			 * Remove the 802.1q header by copying the Ethernet
			 * addresses over it and adjusting the beginning of
			 * the data in the mbuf.  The encapsulated Ethernet
			 * type field is already in place.
			 */
			bcopy((char *)evl, (char *)evl + ETHER_VLAN_ENCAP_LEN,
			      ETHER_HDR_LEN - ETHER_TYPE_LEN);
			m_adj(m, ETHER_VLAN_ENCAP_LEN);
			break;

		default:
#ifdef INVARIANTS
			panic("%s: %s has unsupported if_type %u",
			      __func__, ifp->if_xname, ifp->if_type);
#endif
			if_inc_counter(ifp, IFCOUNTER_NOPROTO, 1);
			VLAN_RUNLOCK();
			m_freem(m);
			return;
		}
	}

	vid = EVL_VLANOFTAG(tag);

	TRUNK_RLOCK(trunk);
	ifv = vlan_gethash(trunk, vid);
	if (ifv == NULL || !UP_AND_RUNNING(ifv->ifv_ifp)) {
		TRUNK_RUNLOCK(trunk);
		if_inc_counter(ifp, IFCOUNTER_NOPROTO, 1);
		VLAN_RUNLOCK();
		m_freem(m);
		return;
	}
	TRUNK_RUNLOCK(trunk);

	if (vlan_mtag_pcp) {
		/*
		 * While uncommon, it is possible that we will find a 802.1q
		 * packet encapsulated inside another packet that also had an
		 * 802.1q header.  For example, ethernet tunneled over IPSEC
		 * arriving over ethernet.  In that case, we replace the
		 * existing 802.1q PCP m_tag value.
		 */
		mtag = m_tag_locate(m, MTAG_8021Q, MTAG_8021Q_PCP_IN, NULL);
		if (mtag == NULL) {
			mtag = m_tag_alloc(MTAG_8021Q, MTAG_8021Q_PCP_IN,
			    sizeof(uint8_t), M_NOWAIT);
			if (mtag == NULL) {
				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
				VLAN_RUNLOCK();
				m_freem(m);
				return;
			}
			m_tag_prepend(m, mtag);
		}
		*(uint8_t *)(mtag + 1) = EVL_PRIOFTAG(tag);
	}

	m->m_pkthdr.rcvif = ifv->ifv_ifp;
	if_inc_counter(ifv->ifv_ifp, IFCOUNTER_IPACKETS, 1);
	VLAN_RUNLOCK();

	/* Pass it back through the parent's input routine. */
	(*ifv->ifv_ifp->if_input)(ifv->ifv_ifp, m);
}

static void
vlan_lladdr_fn(void *arg, int pending __unused)
{
	struct ifvlan *ifv;
	struct ifnet *ifp;

	ifv = (struct ifvlan *)arg;
	ifp = ifv->ifv_ifp;
	/* The ifv_ifp already has the lladdr copied in. */
	if_setlladdr(ifp, IF_LLADDR(ifp), ifp->if_addrlen);
}

static int
vlan_config(struct ifvlan *ifv, struct ifnet *p, uint16_t vid)
{
	struct ifvlantrunk *trunk;
	struct ifnet *ifp;
	int error = 0;

	/*
	 * We can handle non-ethernet hardware types as long as
	 * they handle the tagging and headers themselves.
	 */
	if (p->if_type != IFT_ETHER &&
	    (p->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
		return (EPROTONOSUPPORT);
	if ((p->if_flags & VLAN_IFFLAGS) != VLAN_IFFLAGS)
		return (EPROTONOSUPPORT);
	/*
	 * Don't let the caller set up a VLAN VID with
	 * anything except VLID bits.
	 * VID numbers 0x0 and 0xFFF are reserved.
	 */
	if (vid == 0 || vid == 0xFFF || (vid & ~EVL_VLID_MASK))
		return (EINVAL);
	if (ifv->ifv_trunk)
		return (EBUSY);

	/* Acquire rmlock after the branch so we can M_WAITOK. */
	VLAN_XLOCK();
	if (p->if_vlantrunk == NULL) {
		trunk = malloc(sizeof(struct ifvlantrunk),
		    M_VLAN, M_WAITOK | M_ZERO);
		vlan_inithash(trunk);
		TRUNK_LOCK_INIT(trunk);
		VLAN_WLOCK();
		TRUNK_WLOCK(trunk);
		p->if_vlantrunk = trunk;
		trunk->parent = p;
		if_ref(trunk->parent);
	} else {
		VLAN_WLOCK();
		trunk = p->if_vlantrunk;
		TRUNK_WLOCK(trunk);
	}

	ifv->ifv_vid = vid;	/* must set this before vlan_inshash() */
	ifv->ifv_pcp = 0;	/* Default: best effort delivery. */
	vlan_tag_recalculate(ifv);
	error = vlan_inshash(trunk, ifv);
	if (error)
		goto done;
	ifv->ifv_proto = ETHERTYPE_VLAN;
	ifv->ifv_encaplen = ETHER_VLAN_ENCAP_LEN;
	ifv->ifv_mintu = ETHERMIN;
	ifv->ifv_pflags = 0;
	ifv->ifv_capenable = -1;

	/*
	 * If the parent supports the VLAN_MTU capability,
	 * i.e. can Tx/Rx larger than ETHER_MAX_LEN frames,
	 * use it.
	 */
	if (p->if_capenable & IFCAP_VLAN_MTU) {
		/*
		 * No need to fudge the MTU since the parent can
		 * handle extended frames.
		 */
		ifv->ifv_mtufudge = 0;
	} else {
		/*
		 * Fudge the MTU by the encapsulation size.  This
		 * makes us incompatible with strictly compliant
		 * 802.1Q implementations, but allows us to use
		 * the feature with other NetBSD implementations,
		 * which might still be useful.
		 */
		ifv->ifv_mtufudge = ifv->ifv_encaplen;
	}

	ifv->ifv_trunk = trunk;
	ifp = ifv->ifv_ifp;
	/*
	 * Initialize fields from our parent.  This duplicates some
	 * work with ether_ifattach() but allows for non-ethernet
	 * interfaces to also work.
	 */
	ifp->if_mtu = p->if_mtu - ifv->ifv_mtufudge;
	ifp->if_baudrate = p->if_baudrate;
	ifp->if_output = p->if_output;
	ifp->if_input = p->if_input;
	ifp->if_resolvemulti = p->if_resolvemulti;
	ifp->if_addrlen = p->if_addrlen;
	ifp->if_broadcastaddr = p->if_broadcastaddr;

	/*
	 * Copy only a selected subset of flags from the parent.
	 * Other flags are none of our business.
	 */
#define VLAN_COPY_FLAGS (IFF_SIMPLEX)
	ifp->if_flags &= ~VLAN_COPY_FLAGS;
	ifp->if_flags |= p->if_flags & VLAN_COPY_FLAGS;
#undef VLAN_COPY_FLAGS

	ifp->if_link_state = p->if_link_state;

	vlan_capabilities(ifv);

	/*
	 * Set up our interface address to reflect the underlying
	 * physical interface's.
	 */
	bcopy(IF_LLADDR(p), IF_LLADDR(ifp), p->if_addrlen);
	((struct sockaddr_dl *)ifp->if_addr->ifa_addr)->sdl_alen =
	    p->if_addrlen;

	/*
	 * Configure multicast addresses that may already be
	 * joined on the vlan device.
	 */
	(void)vlan_setmulti(ifp);

	TASK_INIT(&ifv->lladdr_task, 0, vlan_lladdr_fn, ifv);

	/* We are ready for operation now. */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;

	/* Update flags on the parent, if necessary. */
	vlan_setflags(ifp, 1);
done:
	/*
	 * We need to drop the non-sleepable rmlock so that the underlying
	 * devices can sleep in their vlan_config hooks.
	 */
	TRUNK_WUNLOCK(trunk);
	VLAN_WUNLOCK();
	if (error == 0)
		EVENTHANDLER_INVOKE(vlan_config, p, ifv->ifv_vid);
	VLAN_XUNLOCK();

	return (error);
}

static void
vlan_unconfig(struct ifnet *ifp)
{

	VLAN_XLOCK();
	vlan_unconfig_locked(ifp, 0);
	VLAN_XUNLOCK();
}

static void
vlan_unconfig_locked(struct ifnet *ifp, int departing)
{
	struct ifvlantrunk *trunk;
	struct vlan_mc_entry *mc;
	struct ifvlan *ifv;
	struct ifnet *parent;
	int error;

	VLAN_XLOCK_ASSERT();

	ifv = ifp->if_softc;
	trunk = ifv->ifv_trunk;
	parent = NULL;

	if (trunk != NULL) {
		/*
		 * Both vlan_transmit and vlan_input rely on the trunk fields
		 * being NULL to determine whether to bail, so we need to get
		 * an exclusive lock here to prevent them from using bad
		 * ifvlans.
		 */
		VLAN_WLOCK();
		parent = trunk->parent;

		/*
		 * Since the interface is being unconfigured, we need to
		 * empty the list of multicast groups that we may have joined
		 * while we were alive from the parent's list.
		 */
		while ((mc = SLIST_FIRST(&ifv->vlan_mc_listhead)) != NULL) {
			/*
			 * If the parent interface is being detached,
			 * all its multicast addresses have already
			 * been removed.  Warn about errors if
			 * if_delmulti() does fail, but don't abort as
			 * all callers expect vlan destruction to
			 * succeed.
			 */
			if (!departing) {
				error = if_delmulti(parent,
				    (struct sockaddr *)&mc->mc_addr);
				if (error)
					if_printf(ifp,
		    "Failed to delete multicast address from parent: %d\n",
					    error);
			}
			SLIST_REMOVE_HEAD(&ifv->vlan_mc_listhead, mc_entries);
			free(mc, M_VLAN);
		}

		vlan_setflags(ifp, 0); /* clear special flags on parent */

		/*
		 * The trunk lock isn't actually required here, but
		 * vlan_remhash expects it.
		 */
		TRUNK_WLOCK(trunk);
		vlan_remhash(trunk, ifv);
		TRUNK_WUNLOCK(trunk);
		ifv->ifv_trunk = NULL;

		/*
		 * Check if we were the last.
		 */
		if (trunk->refcnt == 0) {
			parent->if_vlantrunk = NULL;
			trunk_destroy(trunk);
		}
		VLAN_WUNLOCK();
	}

	/* Disconnect from parent. */
	if (ifv->ifv_pflags)
		if_printf(ifp, "%s: ifv_pflags unclean\n", __func__);
	ifp->if_mtu = ETHERMTU;
	ifp->if_link_state = LINK_STATE_UNKNOWN;
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;

	/*
	 * Only dispatch an event if vlan was
	 * attached, otherwise there is nothing
	 * to cleanup anyway.
	 */
	if (parent != NULL)
		EVENTHANDLER_INVOKE(vlan_unconfig, parent, ifv->ifv_vid);
}

/* Handle a reference counted flag that should be set on the parent as well */
static int
vlan_setflag(struct ifnet *ifp, int flag, int status,
	     int (*func)(struct ifnet *, int))
{
	struct ifvlan *ifv;
	int error;

	VLAN_SXLOCK_ASSERT();

	ifv = ifp->if_softc;
	status = status ? (ifp->if_flags & flag) : 0;
	/* Now "status" contains the flag value or 0 */

	/*
	 * See if recorded parent's status is different from what
	 * we want it to be.  If it is, flip it.  We record parent's
	 * status in ifv_pflags so that we won't clear parent's flag
	 * we haven't set.  In fact, we don't clear or set parent's
	 * flags directly, but get or release references to them.
	 * That's why we can be sure that recorded flags still are
	 * in accord with actual parent's flags.
	 */
	if (status != (ifv->ifv_pflags & flag)) {
		error = (*func)(PARENT(ifv), status);
		if (error)
			return (error);
		ifv->ifv_pflags &= ~flag;
		ifv->ifv_pflags |= status;
	}
	return (0);
}

/*
 * Handle IFF_* flags that require certain changes on the parent:
 * if "status" is true, update parent's flags respective to our if_flags;
 * if "status" is false, forcibly clear the flags set on parent.
 */
static int
vlan_setflags(struct ifnet *ifp, int status)
{
	int error, i;

	for (i = 0; vlan_pflags[i].flag; i++) {
		error = vlan_setflag(ifp, vlan_pflags[i].flag,
				     status, vlan_pflags[i].func);
		if (error)
			return (error);
	}
	return (0);
}

/* Inform all vlans that their parent has changed link state */
static void
vlan_link_state(struct ifnet *ifp)
{
	struct ifvlantrunk *trunk;
	struct ifvlan *ifv;
	VLAN_LOCK_READER;

	/* Called from a taskqueue_swi task, so we cannot sleep. */
	VLAN_RLOCK();
	trunk = ifp->if_vlantrunk;
	if (trunk == NULL) {
		VLAN_RUNLOCK();
		return;
	}

	TRUNK_WLOCK(trunk);
	VLAN_FOREACH(ifv, trunk) {
		ifv->ifv_ifp->if_baudrate = trunk->parent->if_baudrate;
		if_link_state_change(ifv->ifv_ifp,
		    trunk->parent->if_link_state);
	}
	TRUNK_WUNLOCK(trunk);
	VLAN_RUNLOCK();
}

static void
vlan_capabilities(struct ifvlan *ifv)
{
	struct ifnet *p;
	struct ifnet *ifp;
	struct ifnet_hw_tsomax hw_tsomax;
	int cap = 0, ena = 0, mena;
	u_long hwa = 0;

	VLAN_SXLOCK_ASSERT();
	TRUNK_WLOCK_ASSERT(TRUNK(ifv));
	p = PARENT(ifv);
	ifp = ifv->ifv_ifp;

	/* Mask parent interface enabled capabilities disabled by user. */
	mena = p->if_capenable & ifv->ifv_capenable;

	/*
	 * If the parent interface can do checksum offloading
	 * on VLANs, then propagate its hardware-assisted
	 * checksumming flags. Also assert that checksum
	 * offloading requires hardware VLAN tagging.
	 */
	if (p->if_capabilities & IFCAP_VLAN_HWCSUM)
		cap |= p->if_capabilities & (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6);
	if (p->if_capenable & IFCAP_VLAN_HWCSUM &&
	    p->if_capenable & IFCAP_VLAN_HWTAGGING) {
		ena |= mena & (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6);
		if (ena & IFCAP_TXCSUM)
			hwa |= p->if_hwassist & (CSUM_IP | CSUM_TCP |
			    CSUM_UDP | CSUM_SCTP);
		if (ena & IFCAP_TXCSUM_IPV6)
			hwa |= p->if_hwassist & (CSUM_TCP_IPV6 |
			    CSUM_UDP_IPV6 | CSUM_SCTP_IPV6);
	}

	/*
	 * If the parent interface can do TSO on VLANs then
	 * propagate the hardware-assisted flag. TSO on VLANs
	 * does not necessarily require hardware VLAN tagging.
	 */
	memset(&hw_tsomax, 0, sizeof(hw_tsomax));
	if_hw_tsomax_common(p, &hw_tsomax);
	if_hw_tsomax_update(ifp, &hw_tsomax);
	if (p->if_capabilities & IFCAP_VLAN_HWTSO)
		cap |= p->if_capabilities & IFCAP_TSO;
	if (p->if_capenable & IFCAP_VLAN_HWTSO) {
		ena |= mena & IFCAP_TSO;
		if (ena & IFCAP_TSO)
			hwa |= p->if_hwassist & CSUM_TSO;
	}

	/*
	 * If the parent interface can do LRO and checksum offloading on
	 * VLANs, then guess it may do LRO on VLANs.  A false positive here
	 * costs nothing, while a false negative may lead to some confusion.
	 */
	if (p->if_capabilities & IFCAP_VLAN_HWCSUM)
		cap |= p->if_capabilities & IFCAP_LRO;
	if (p->if_capenable & IFCAP_VLAN_HWCSUM)
		ena |= p->if_capenable & IFCAP_LRO;

	/*
	 * If the parent interface can offload TCP connections over VLANs then
	 * propagate its TOE capability to the VLAN interface.
	 *
	 * All TOE drivers in the tree today can deal with VLANs.  If this
	 * changes then IFCAP_VLAN_TOE should be promoted to a full capability
	 * with its own bit.
	 */
#define	IFCAP_VLAN_TOE IFCAP_TOE
	if (p->if_capabilities & IFCAP_VLAN_TOE)
		cap |= p->if_capabilities & IFCAP_TOE;
	if (p->if_capenable & IFCAP_VLAN_TOE) {
		TOEDEV(ifp) = TOEDEV(p);
		ena |= mena & IFCAP_TOE;
	}

	/*
	 * If the parent interface supports dynamic link state, so does the
	 * VLAN interface.
	 */
	cap |= (p->if_capabilities & IFCAP_LINKSTATE);
	ena |= (mena & IFCAP_LINKSTATE);

#ifdef RATELIMIT
	/*
	 * If the parent interface supports ratelimiting, so does the
	 * VLAN interface.
	 */
	cap |= (p->if_capabilities & IFCAP_TXRTLMT);
	ena |= (mena & IFCAP_TXRTLMT);
#endif

	ifp->if_capabilities = cap;
	ifp->if_capenable = ena;
	ifp->if_hwassist = hwa;
}

static void
vlan_trunk_capabilities(struct ifnet *ifp)
{
	struct ifvlantrunk *trunk;
	struct ifvlan *ifv;

	VLAN_SLOCK();
	trunk = ifp->if_vlantrunk;
	if (trunk == NULL) {
		VLAN_SUNLOCK();
		return;
	}
	TRUNK_WLOCK(trunk);
	VLAN_FOREACH(ifv, trunk) {
		vlan_capabilities(ifv);
	}
	TRUNK_WUNLOCK(trunk);
	VLAN_SUNLOCK();
}

static int
vlan_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct ifnet *p;
	struct ifreq *ifr;
	struct ifaddr *ifa;
	struct ifvlan *ifv;
	struct ifvlantrunk *trunk;
	struct vlanreq vlr;
	int error = 0;
	VLAN_LOCK_READER;

	ifr = (struct ifreq *)data;
	ifa = (struct ifaddr *) data;
	ifv = ifp->if_softc;

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(ifp, ifa);
#endif
		break;
	case SIOCGIFADDR:
		{
			struct sockaddr *sa;

			sa = (struct sockaddr *)&ifr->ifr_data;
			bcopy(IF_LLADDR(ifp), sa->sa_data, ifp->if_addrlen);
		}
		break;
	case SIOCGIFMEDIA:
		VLAN_SLOCK();
		if (TRUNK(ifv) != NULL) {
			p = PARENT(ifv);
			if_ref(p);
			error = (*p->if_ioctl)(p, SIOCGIFMEDIA, data);
			if_rele(p);
			/* Limit the result to the parent's current config. */
			if (error == 0) {
				struct ifmediareq *ifmr;

				ifmr = (struct ifmediareq *)data;
				if (ifmr->ifm_count >= 1 && ifmr->ifm_ulist) {
					ifmr->ifm_count = 1;
					error = copyout(&ifmr->ifm_current,
						ifmr->ifm_ulist,
						sizeof(int));
				}
			}
		} else {
			error = EINVAL;
		}
		VLAN_SUNLOCK();
		break;

	case SIOCSIFMEDIA:
		error = EINVAL;
		break;

	case SIOCSIFMTU:
		/*
		 * Set the interface MTU.
		 */
		VLAN_SLOCK();
		trunk = TRUNK(ifv);
		if (trunk != NULL) {
			TRUNK_WLOCK(trunk);
			if (ifr->ifr_mtu >
			     (PARENT(ifv)->if_mtu - ifv->ifv_mtufudge) ||
			    ifr->ifr_mtu <
			     (ifv->ifv_mintu - ifv->ifv_mtufudge))
				error = EINVAL;
			else
				ifp->if_mtu = ifr->ifr_mtu;
			TRUNK_WUNLOCK(trunk);
		} else
			error = EINVAL;
		VLAN_SUNLOCK();
		break;

	case SIOCSETVLAN:
#ifdef VIMAGE
		/*
		 * XXXRW/XXXBZ: The goal in these checks is to allow a VLAN
		 * interface to be delegated to a jail without allowing the
		 * jail to change what underlying interface/VID it is
		 * associated with.  We are not entirely convinced that this
		 * is the right way to accomplish that policy goal.
		 */
		if (ifp->if_vnet != ifp->if_home_vnet) {
			error = EPERM;
			break;
		}
#endif
		error = copyin(ifr->ifr_data, &vlr, sizeof(vlr));
		if (error)
			break;
		if (vlr.vlr_parent[0] == '\0') {
			vlan_unconfig(ifp);
			break;
		}
		p = ifunit_ref(vlr.vlr_parent);
		if (p == NULL) {
			error = ENOENT;
			break;
		}
		error = vlan_config(ifv, p, vlr.vlr_tag);
		if_rele(p);
		break;

	case SIOCGETVLAN:
#ifdef VIMAGE
		if (ifp->if_vnet != ifp->if_home_vnet) {
			error = EPERM;
			break;
		}
#endif
		bzero(&vlr, sizeof(vlr));
		VLAN_SLOCK();
		if (TRUNK(ifv) != NULL) {
			strlcpy(vlr.vlr_parent, PARENT(ifv)->if_xname,
			    sizeof(vlr.vlr_parent));
			vlr.vlr_tag = ifv->ifv_vid;
		}
		VLAN_SUNLOCK();
		error = copyout(&vlr, ifr->ifr_data, sizeof(vlr));
		break;

	case SIOCSIFFLAGS:
		/*
		 * We should propagate selected flags to the parent,
		 * e.g., promiscuous mode.
		 */
		VLAN_XLOCK();
		if (TRUNK(ifv) != NULL)
			error = vlan_setflags(ifp, 1);
		VLAN_XUNLOCK();
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/*
		 * If we don't have a parent, just remember the membership for
		 * when we do.
		 *
		 * XXX We need the rmlock here to avoid sleeping while
		 * holding in6_multi_mtx.
		 */
		VLAN_RLOCK();
		trunk = TRUNK(ifv);
		if (trunk != NULL) {
			TRUNK_WLOCK(trunk);
			error = vlan_setmulti(ifp);
			TRUNK_WUNLOCK(trunk);
		}
		VLAN_RUNLOCK();
		break;

	case SIOCGVLANPCP:
#ifdef VIMAGE
		if (ifp->if_vnet != ifp->if_home_vnet) {
			error = EPERM;
			break;
		}
#endif
		ifr->ifr_vlan_pcp = ifv->ifv_pcp;
		break;

	case SIOCSVLANPCP:
#ifdef VIMAGE
		if (ifp->if_vnet != ifp->if_home_vnet) {
			error = EPERM;
			break;
		}
#endif
		error = priv_check(curthread, PRIV_NET_SETVLANPCP);
		if (error)
			break;
		if (ifr->ifr_vlan_pcp > 7) {
			error = EINVAL;
			break;
		}
		ifv->ifv_pcp = ifr->ifr_vlan_pcp;
		vlan_tag_recalculate(ifv);
		break;

	case SIOCSIFCAP:
		VLAN_SLOCK();
		ifv->ifv_capenable = ifr->ifr_reqcap;
		trunk = TRUNK(ifv);
		if (trunk != NULL) {
			TRUNK_WLOCK(trunk);
			vlan_capabilities(ifv);
			TRUNK_WUNLOCK(trunk);
		}
		VLAN_SUNLOCK();
		break;

	default:
		error = EINVAL;
		break;
	}

	return (error);
}

#ifdef RATELIMIT
static int
vlan_snd_tag_alloc(struct ifnet *ifp,
    union if_snd_tag_alloc_params *params,
    struct m_snd_tag **ppmt)
{

	/* get trunk device */
	ifp = vlan_trunkdev(ifp);
	if (ifp == NULL || (ifp->if_capenable & IFCAP_TXRTLMT) == 0)
		return (EOPNOTSUPP);
	/* forward allocation request */
	return (ifp->if_snd_tag_alloc(ifp, params, ppmt));
}
#endif
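
/*
 * Illustrative only, not compiled into the driver: a minimal sketch of how
 * another kernel component is expected to consume the hooks published in
 * vlan_modevent() above.  The function pointers are NULL whenever if_vlan is
 * not loaded, so callers must check them before use; their extern
 * declarations are assumed to come from net/if_vlan_var.h.
 */
#if 0
static struct ifnet *
example_find_vlan(struct ifnet *parent, uint16_t vid)
{

	/* No vlan support loaded: nothing to find on this trunk. */
	if (vlan_devat_p == NULL)
		return (NULL);
	return ((*vlan_devat_p)(parent, vid));
}
#endif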