1 /*- 2 * Copyright 1998 Massachusetts Institute of Technology 3 * Copyright 2012 ADARA Networks, Inc. 4 * Copyright 2017 Dell EMC Isilon 5 * 6 * Portions of this software were developed by Robert N. M. Watson under 7 * contract to ADARA Networks, Inc. 8 * 9 * Permission to use, copy, modify, and distribute this software and 10 * its documentation for any purpose and without fee is hereby 11 * granted, provided that both the above copyright notice and this 12 * permission notice appear in all copies, that both the above 13 * copyright notice and this permission notice appear in all 14 * supporting documentation, and that the name of M.I.T. not be used 15 * in advertising or publicity pertaining to distribution of the 16 * software without specific, written prior permission. M.I.T. makes 17 * no representations about the suitability of this software for any 18 * purpose. It is provided "as is" without express or implied 19 * warranty. 20 * 21 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS 22 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE, 23 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 24 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT 25 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF 28 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 29 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 */ 34 35 /* 36 * if_vlan.c - pseudo-device driver for IEEE 802.1Q virtual LANs. 37 * This is sort of sneaky in the implementation, since 38 * we need to pretend to be enough of an Ethernet implementation 39 * to make arp work. The way we do this is by telling everyone 40 * that we are an Ethernet, and then catch the packets that 41 * ether_output() sends to us via if_transmit(), rewrite them for 42 * use by the real outgoing interface, and ask it to send them. 
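 * On the receive side, ether_input() hands tagged frames to us through
 * the vlan_input_p hook; vlan_input() strips the 802.1Q encapsulation,
 * looks up the child interface by VID, sets it as the receiving
 * interface and passes the frame back up the input path.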
43 */ 44 45 #include <sys/cdefs.h> 46 #include "opt_inet.h" 47 #include "opt_inet6.h" 48 #include "opt_kern_tls.h" 49 #include "opt_vlan.h" 50 #include "opt_ratelimit.h" 51 52 #include <sys/param.h> 53 #include <sys/eventhandler.h> 54 #include <sys/kernel.h> 55 #include <sys/lock.h> 56 #include <sys/malloc.h> 57 #include <sys/mbuf.h> 58 #include <sys/module.h> 59 #include <sys/rmlock.h> 60 #include <sys/priv.h> 61 #include <sys/queue.h> 62 #include <sys/socket.h> 63 #include <sys/sockio.h> 64 #include <sys/sysctl.h> 65 #include <sys/systm.h> 66 #include <sys/sx.h> 67 #include <sys/taskqueue.h> 68 69 #include <net/bpf.h> 70 #include <net/ethernet.h> 71 #include <net/if.h> 72 #include <net/if_var.h> 73 #include <net/if_private.h> 74 #include <net/if_clone.h> 75 #include <net/if_dl.h> 76 #include <net/if_types.h> 77 #include <net/if_vlan_var.h> 78 #include <net/route.h> 79 #include <net/vnet.h> 80 81 #ifdef INET 82 #include <netinet/in.h> 83 #include <netinet/if_ether.h> 84 #endif 85 86 #include <netlink/netlink.h> 87 #include <netlink/netlink_ctl.h> 88 #include <netlink/netlink_route.h> 89 #include <netlink/route/route_var.h> 90 91 #define VLAN_DEF_HWIDTH 4 92 #define VLAN_IFFLAGS (IFF_BROADCAST | IFF_MULTICAST) 93 94 #define UP_AND_RUNNING(ifp) \ 95 ((ifp)->if_flags & IFF_UP && (ifp)->if_drv_flags & IFF_DRV_RUNNING) 96 97 CK_SLIST_HEAD(ifvlanhead, ifvlan); 98 99 struct ifvlantrunk { 100 struct ifnet *parent; /* parent interface of this trunk */ 101 struct mtx lock; 102 #ifdef VLAN_ARRAY 103 #define VLAN_ARRAY_SIZE (EVL_VLID_MASK + 1) 104 struct ifvlan *vlans[VLAN_ARRAY_SIZE]; /* static table */ 105 #else 106 struct ifvlanhead *hash; /* dynamic hash-list table */ 107 uint16_t hmask; 108 uint16_t hwidth; 109 #endif 110 int refcnt; 111 }; 112 113 #if defined(KERN_TLS) || defined(RATELIMIT) 114 struct vlan_snd_tag { 115 struct m_snd_tag com; 116 struct m_snd_tag *tag; 117 }; 118 119 static inline struct vlan_snd_tag * 120 mst_to_vst(struct m_snd_tag *mst) 121 { 122 123 return (__containerof(mst, struct vlan_snd_tag, com)); 124 } 125 #endif 126 127 /* 128 * This macro provides a facility to iterate over every vlan on a trunk with 129 * the assumption that none will be added/removed during iteration. 130 */ 131 #ifdef VLAN_ARRAY 132 #define VLAN_FOREACH(_ifv, _trunk) \ 133 size_t _i; \ 134 for (_i = 0; _i < VLAN_ARRAY_SIZE; _i++) \ 135 if (((_ifv) = (_trunk)->vlans[_i]) != NULL) 136 #else /* VLAN_ARRAY */ 137 #define VLAN_FOREACH(_ifv, _trunk) \ 138 struct ifvlan *_next; \ 139 size_t _i; \ 140 for (_i = 0; _i < (1 << (_trunk)->hwidth); _i++) \ 141 CK_SLIST_FOREACH_SAFE((_ifv), &(_trunk)->hash[_i], ifv_list, _next) 142 #endif /* VLAN_ARRAY */ 143 144 /* 145 * This macro provides a facility to iterate over every vlan on a trunk while 146 * also modifying the number of vlans on the trunk. The iteration continues 147 * until some condition is met or there are no more vlans on the trunk. 148 */ 149 #ifdef VLAN_ARRAY 150 /* The VLAN_ARRAY case is simple -- just a for loop using the condition. */ 151 #define VLAN_FOREACH_UNTIL_SAFE(_ifv, _trunk, _cond) \ 152 size_t _i; \ 153 for (_i = 0; !(_cond) && _i < VLAN_ARRAY_SIZE; _i++) \ 154 if (((_ifv) = (_trunk)->vlans[_i])) 155 #else /* VLAN_ARRAY */ 156 /* 157 * The hash table case is more complicated. We allow for the hash table to be 158 * modified (i.e. vlans removed) while we are iterating over it. 
To allow for 159 * this we must restart the iteration every time we "touch" something during 160 * the iteration, since removal will resize the hash table and invalidate our 161 * current position. If acting on the touched element causes the trunk to be 162 * emptied, then iteration also stops. 163 */ 164 #define VLAN_FOREACH_UNTIL_SAFE(_ifv, _trunk, _cond) \ 165 size_t _i; \ 166 bool _touch = false; \ 167 for (_i = 0; \ 168 !(_cond) && _i < (1 << (_trunk)->hwidth); \ 169 _i = (_touch && ((_trunk) != NULL) ? 0 : _i + 1), _touch = false) \ 170 if (((_ifv) = CK_SLIST_FIRST(&(_trunk)->hash[_i])) != NULL && \ 171 (_touch = true)) 172 #endif /* VLAN_ARRAY */ 173 174 struct vlan_mc_entry { 175 struct sockaddr_dl mc_addr; 176 CK_SLIST_ENTRY(vlan_mc_entry) mc_entries; 177 struct epoch_context mc_epoch_ctx; 178 }; 179 180 struct ifvlan { 181 struct ifvlantrunk *ifv_trunk; 182 struct ifnet *ifv_ifp; 183 #define TRUNK(ifv) ((ifv)->ifv_trunk) 184 #define PARENT(ifv) (TRUNK(ifv)->parent) 185 void *ifv_cookie; 186 int ifv_pflags; /* special flags we have set on parent */ 187 int ifv_capenable; 188 int ifv_encaplen; /* encapsulation length */ 189 int ifv_mtufudge; /* MTU fudged by this much */ 190 int ifv_mintu; /* min transmission unit */ 191 struct ether_8021q_tag ifv_qtag; 192 #define ifv_proto ifv_qtag.proto 193 #define ifv_vid ifv_qtag.vid 194 #define ifv_pcp ifv_qtag.pcp 195 struct task lladdr_task; 196 CK_SLIST_HEAD(, vlan_mc_entry) vlan_mc_listhead; 197 #ifndef VLAN_ARRAY 198 CK_SLIST_ENTRY(ifvlan) ifv_list; 199 #endif 200 }; 201 202 /* Special flags we should propagate to parent. */ 203 static struct { 204 int flag; 205 int (*func)(struct ifnet *, int); 206 } vlan_pflags[] = { 207 {IFF_PROMISC, ifpromisc}, 208 {IFF_ALLMULTI, if_allmulti}, 209 {0, NULL} 210 }; 211 212 VNET_DECLARE(int, vlan_mtag_pcp); 213 #define V_vlan_mtag_pcp VNET(vlan_mtag_pcp) 214 215 static const char vlanname[] = "vlan"; 216 static MALLOC_DEFINE(M_VLAN, vlanname, "802.1Q Virtual LAN Interface"); 217 218 static eventhandler_tag ifdetach_tag; 219 static eventhandler_tag iflladdr_tag; 220 static eventhandler_tag ifevent_tag; 221 222 /* 223 * if_vlan uses two module-level synchronizations primitives to allow concurrent 224 * modification of vlan interfaces and (mostly) allow for vlans to be destroyed 225 * while they are being used for tx/rx. To accomplish this in a way that has 226 * acceptable performance and cooperation with other parts of the network stack 227 * there is a non-sleepable epoch(9) and an sx(9). 228 * 229 * The performance-sensitive paths that warrant using the epoch(9) are 230 * vlan_transmit and vlan_input. Both have to check for the vlan interface's 231 * existence using if_vlantrunk, and being in the network tx/rx paths the use 232 * of an epoch(9) gives a measureable improvement in performance. 233 * 234 * The reason for having an sx(9) is mostly because there are still areas that 235 * must be sleepable and also have safe concurrent access to a vlan interface. 236 * Since the sx(9) exists, it is used by default in most paths unless sleeping 237 * is not permitted, or if it is not clear whether sleeping is permitted. 
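 * As a rough map of the resulting discipline: vlan_transmit() and
 * vlan_input() run entirely under the net epoch, configuration paths
 * such as vlan_config(), vlan_unconfig_locked() and vlan_setmulti()
 * assert the exclusive sx, and read-only paths such as
 * vlan_clone_dump_nl() take the shared sx.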
238 * 239 */ 240 #define _VLAN_SX_ID ifv_sx 241 242 static struct sx _VLAN_SX_ID; 243 244 #define VLAN_LOCKING_INIT() \ 245 sx_init_flags(&_VLAN_SX_ID, "vlan_sx", SX_RECURSE) 246 247 #define VLAN_LOCKING_DESTROY() \ 248 sx_destroy(&_VLAN_SX_ID) 249 250 #define VLAN_SLOCK() sx_slock(&_VLAN_SX_ID) 251 #define VLAN_SUNLOCK() sx_sunlock(&_VLAN_SX_ID) 252 #define VLAN_XLOCK() sx_xlock(&_VLAN_SX_ID) 253 #define VLAN_XUNLOCK() sx_xunlock(&_VLAN_SX_ID) 254 #define VLAN_SLOCK_ASSERT() sx_assert(&_VLAN_SX_ID, SA_SLOCKED) 255 #define VLAN_XLOCK_ASSERT() sx_assert(&_VLAN_SX_ID, SA_XLOCKED) 256 #define VLAN_SXLOCK_ASSERT() sx_assert(&_VLAN_SX_ID, SA_LOCKED) 257 258 /* 259 * We also have a per-trunk mutex that should be acquired when changing 260 * its state. 261 */ 262 #define TRUNK_LOCK_INIT(trunk) mtx_init(&(trunk)->lock, vlanname, NULL, MTX_DEF) 263 #define TRUNK_LOCK_DESTROY(trunk) mtx_destroy(&(trunk)->lock) 264 #define TRUNK_WLOCK(trunk) mtx_lock(&(trunk)->lock) 265 #define TRUNK_WUNLOCK(trunk) mtx_unlock(&(trunk)->lock) 266 #define TRUNK_WLOCK_ASSERT(trunk) mtx_assert(&(trunk)->lock, MA_OWNED); 267 268 /* 269 * The VLAN_ARRAY substitutes the dynamic hash with a static array 270 * with 4096 entries. In theory this can give a boost in processing, 271 * however in practice it does not. Probably this is because the array 272 * is too big to fit into CPU cache. 273 */ 274 #ifndef VLAN_ARRAY 275 static void vlan_inithash(struct ifvlantrunk *trunk); 276 static void vlan_freehash(struct ifvlantrunk *trunk); 277 static int vlan_inshash(struct ifvlantrunk *trunk, struct ifvlan *ifv); 278 static int vlan_remhash(struct ifvlantrunk *trunk, struct ifvlan *ifv); 279 static void vlan_growhash(struct ifvlantrunk *trunk, int howmuch); 280 static __inline struct ifvlan * vlan_gethash(struct ifvlantrunk *trunk, 281 uint16_t vid); 282 #endif 283 static void trunk_destroy(struct ifvlantrunk *trunk); 284 285 static void vlan_init(void *foo); 286 static void vlan_input(struct ifnet *ifp, struct mbuf *m); 287 static int vlan_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr); 288 #if defined(KERN_TLS) || defined(RATELIMIT) 289 static int vlan_snd_tag_alloc(struct ifnet *, 290 union if_snd_tag_alloc_params *, struct m_snd_tag **); 291 static int vlan_snd_tag_modify(struct m_snd_tag *, 292 union if_snd_tag_modify_params *); 293 static int vlan_snd_tag_query(struct m_snd_tag *, 294 union if_snd_tag_query_params *); 295 static void vlan_snd_tag_free(struct m_snd_tag *); 296 static struct m_snd_tag *vlan_next_snd_tag(struct m_snd_tag *); 297 static void vlan_ratelimit_query(struct ifnet *, 298 struct if_ratelimit_query_results *); 299 #endif 300 static void vlan_qflush(struct ifnet *ifp); 301 static int vlan_setflag(struct ifnet *ifp, int flag, int status, 302 int (*func)(struct ifnet *, int)); 303 static int vlan_setflags(struct ifnet *ifp, int status); 304 static int vlan_setmulti(struct ifnet *ifp); 305 static int vlan_transmit(struct ifnet *ifp, struct mbuf *m); 306 #ifdef ALTQ 307 static void vlan_altq_start(struct ifnet *ifp); 308 static int vlan_altq_transmit(struct ifnet *ifp, struct mbuf *m); 309 #endif 310 static int vlan_output(struct ifnet *ifp, struct mbuf *m, 311 const struct sockaddr *dst, struct route *ro); 312 static void vlan_unconfig(struct ifnet *ifp); 313 static void vlan_unconfig_locked(struct ifnet *ifp, int departing); 314 static int vlan_config(struct ifvlan *ifv, struct ifnet *p, uint16_t tag, 315 uint16_t proto); 316 static void vlan_link_state(struct ifnet *ifp); 317 static void 
vlan_capabilities(struct ifvlan *ifv); 318 static void vlan_trunk_capabilities(struct ifnet *ifp); 319 320 static struct ifnet *vlan_clone_match_ethervid(const char *, int *); 321 static int vlan_clone_match(struct if_clone *, const char *); 322 static int vlan_clone_create(struct if_clone *, char *, size_t, 323 struct ifc_data *, struct ifnet **); 324 static int vlan_clone_destroy(struct if_clone *, struct ifnet *, uint32_t); 325 326 static int vlan_clone_create_nl(struct if_clone *ifc, char *name, size_t len, 327 struct ifc_data_nl *ifd); 328 static int vlan_clone_modify_nl(struct ifnet *ifp, struct ifc_data_nl *ifd); 329 static void vlan_clone_dump_nl(struct ifnet *ifp, struct nl_writer *nw); 330 331 static void vlan_ifdetach(void *arg, struct ifnet *ifp); 332 static void vlan_iflladdr(void *arg, struct ifnet *ifp); 333 static void vlan_ifevent(void *arg, struct ifnet *ifp, int event); 334 335 static void vlan_lladdr_fn(void *arg, int pending); 336 337 static struct if_clone *vlan_cloner; 338 339 #ifdef VIMAGE 340 VNET_DEFINE_STATIC(struct if_clone *, vlan_cloner); 341 #define V_vlan_cloner VNET(vlan_cloner) 342 #endif 343 344 #ifdef RATELIMIT 345 static const struct if_snd_tag_sw vlan_snd_tag_ul_sw = { 346 .snd_tag_modify = vlan_snd_tag_modify, 347 .snd_tag_query = vlan_snd_tag_query, 348 .snd_tag_free = vlan_snd_tag_free, 349 .next_snd_tag = vlan_next_snd_tag, 350 .type = IF_SND_TAG_TYPE_UNLIMITED 351 }; 352 353 static const struct if_snd_tag_sw vlan_snd_tag_rl_sw = { 354 .snd_tag_modify = vlan_snd_tag_modify, 355 .snd_tag_query = vlan_snd_tag_query, 356 .snd_tag_free = vlan_snd_tag_free, 357 .next_snd_tag = vlan_next_snd_tag, 358 .type = IF_SND_TAG_TYPE_RATE_LIMIT 359 }; 360 #endif 361 362 #ifdef KERN_TLS 363 static const struct if_snd_tag_sw vlan_snd_tag_tls_sw = { 364 .snd_tag_modify = vlan_snd_tag_modify, 365 .snd_tag_query = vlan_snd_tag_query, 366 .snd_tag_free = vlan_snd_tag_free, 367 .next_snd_tag = vlan_next_snd_tag, 368 .type = IF_SND_TAG_TYPE_TLS 369 }; 370 371 #ifdef RATELIMIT 372 static const struct if_snd_tag_sw vlan_snd_tag_tls_rl_sw = { 373 .snd_tag_modify = vlan_snd_tag_modify, 374 .snd_tag_query = vlan_snd_tag_query, 375 .snd_tag_free = vlan_snd_tag_free, 376 .next_snd_tag = vlan_next_snd_tag, 377 .type = IF_SND_TAG_TYPE_TLS_RATE_LIMIT 378 }; 379 #endif 380 #endif 381 382 static void 383 vlan_mc_free(struct epoch_context *ctx) 384 { 385 struct vlan_mc_entry *mc = __containerof(ctx, struct vlan_mc_entry, mc_epoch_ctx); 386 free(mc, M_VLAN); 387 } 388 389 #ifndef VLAN_ARRAY 390 #define HASH(n, m) ((((n) >> 8) ^ ((n) >> 4) ^ (n)) & (m)) 391 392 static void 393 vlan_inithash(struct ifvlantrunk *trunk) 394 { 395 int i, n; 396 397 /* 398 * The trunk must not be locked here since we call malloc(M_WAITOK). 399 * It is OK in case this function is called before the trunk struct 400 * gets hooked up and becomes visible from other threads. 
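 * The initial table has 1 << VLAN_DEF_HWIDTH (currently 16) buckets.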
401 */ 402 403 KASSERT(trunk->hwidth == 0 && trunk->hash == NULL, 404 ("%s: hash already initialized", __func__)); 405 406 trunk->hwidth = VLAN_DEF_HWIDTH; 407 n = 1 << trunk->hwidth; 408 trunk->hmask = n - 1; 409 trunk->hash = malloc(sizeof(struct ifvlanhead) * n, M_VLAN, M_WAITOK); 410 for (i = 0; i < n; i++) 411 CK_SLIST_INIT(&trunk->hash[i]); 412 } 413 414 static void 415 vlan_freehash(struct ifvlantrunk *trunk) 416 { 417 #ifdef INVARIANTS 418 int i; 419 420 KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__)); 421 for (i = 0; i < (1 << trunk->hwidth); i++) 422 KASSERT(CK_SLIST_EMPTY(&trunk->hash[i]), 423 ("%s: hash table not empty", __func__)); 424 #endif 425 free(trunk->hash, M_VLAN); 426 trunk->hash = NULL; 427 trunk->hwidth = trunk->hmask = 0; 428 } 429 430 static int 431 vlan_inshash(struct ifvlantrunk *trunk, struct ifvlan *ifv) 432 { 433 int i, b; 434 struct ifvlan *ifv2; 435 436 VLAN_XLOCK_ASSERT(); 437 KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__)); 438 439 b = 1 << trunk->hwidth; 440 i = HASH(ifv->ifv_vid, trunk->hmask); 441 CK_SLIST_FOREACH(ifv2, &trunk->hash[i], ifv_list) 442 if (ifv->ifv_vid == ifv2->ifv_vid) 443 return (EEXIST); 444 445 /* 446 * Grow the hash when the number of vlans exceeds half of the number of 447 * hash buckets squared. This will make the average linked-list length 448 * buckets/2. 449 */ 450 if (trunk->refcnt > (b * b) / 2) { 451 vlan_growhash(trunk, 1); 452 i = HASH(ifv->ifv_vid, trunk->hmask); 453 } 454 CK_SLIST_INSERT_HEAD(&trunk->hash[i], ifv, ifv_list); 455 trunk->refcnt++; 456 457 return (0); 458 } 459 460 static int 461 vlan_remhash(struct ifvlantrunk *trunk, struct ifvlan *ifv) 462 { 463 int i, b; 464 struct ifvlan *ifv2; 465 466 VLAN_XLOCK_ASSERT(); 467 KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__)); 468 469 b = 1 << (trunk->hwidth - 1); 470 i = HASH(ifv->ifv_vid, trunk->hmask); 471 CK_SLIST_FOREACH(ifv2, &trunk->hash[i], ifv_list) 472 if (ifv2 == ifv) { 473 trunk->refcnt--; 474 CK_SLIST_REMOVE(&trunk->hash[i], ifv2, ifvlan, ifv_list); 475 if (trunk->refcnt < (b * b) / 2) 476 vlan_growhash(trunk, -1); 477 return (0); 478 } 479 480 panic("%s: vlan not found\n", __func__); 481 return (ENOENT); /*NOTREACHED*/ 482 } 483 484 /* 485 * Grow the hash larger or smaller if memory permits. 
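 * A howmuch of 1 doubles the number of buckets and -1 halves it; the
 * table is never shrunk below the 1 << VLAN_DEF_HWIDTH default.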
486 */ 487 static void 488 vlan_growhash(struct ifvlantrunk *trunk, int howmuch) 489 { 490 struct ifvlan *ifv; 491 struct ifvlanhead *hash2; 492 int hwidth2, i, j, n, n2; 493 494 VLAN_XLOCK_ASSERT(); 495 KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__)); 496 497 if (howmuch == 0) { 498 /* Harmless yet obvious coding error */ 499 printf("%s: howmuch is 0\n", __func__); 500 return; 501 } 502 503 hwidth2 = trunk->hwidth + howmuch; 504 n = 1 << trunk->hwidth; 505 n2 = 1 << hwidth2; 506 /* Do not shrink the table below the default */ 507 if (hwidth2 < VLAN_DEF_HWIDTH) 508 return; 509 510 hash2 = malloc(sizeof(struct ifvlanhead) * n2, M_VLAN, M_WAITOK); 511 if (hash2 == NULL) { 512 printf("%s: out of memory -- hash size not changed\n", 513 __func__); 514 return; /* We can live with the old hash table */ 515 } 516 for (j = 0; j < n2; j++) 517 CK_SLIST_INIT(&hash2[j]); 518 for (i = 0; i < n; i++) 519 while ((ifv = CK_SLIST_FIRST(&trunk->hash[i])) != NULL) { 520 CK_SLIST_REMOVE(&trunk->hash[i], ifv, ifvlan, ifv_list); 521 j = HASH(ifv->ifv_vid, n2 - 1); 522 CK_SLIST_INSERT_HEAD(&hash2[j], ifv, ifv_list); 523 } 524 NET_EPOCH_WAIT(); 525 free(trunk->hash, M_VLAN); 526 trunk->hash = hash2; 527 trunk->hwidth = hwidth2; 528 trunk->hmask = n2 - 1; 529 530 if (bootverbose) 531 if_printf(trunk->parent, 532 "VLAN hash table resized from %d to %d buckets\n", n, n2); 533 } 534 535 static __inline struct ifvlan * 536 vlan_gethash(struct ifvlantrunk *trunk, uint16_t vid) 537 { 538 struct ifvlan *ifv; 539 540 NET_EPOCH_ASSERT(); 541 542 CK_SLIST_FOREACH(ifv, &trunk->hash[HASH(vid, trunk->hmask)], ifv_list) 543 if (ifv->ifv_vid == vid) 544 return (ifv); 545 return (NULL); 546 } 547 548 #if 0 549 /* Debugging code to view the hashtables. */ 550 static void 551 vlan_dumphash(struct ifvlantrunk *trunk) 552 { 553 int i; 554 struct ifvlan *ifv; 555 556 for (i = 0; i < (1 << trunk->hwidth); i++) { 557 printf("%d: ", i); 558 CK_SLIST_FOREACH(ifv, &trunk->hash[i], ifv_list) 559 printf("%s ", ifv->ifv_ifp->if_xname); 560 printf("\n"); 561 } 562 } 563 #endif /* 0 */ 564 #else 565 566 static __inline struct ifvlan * 567 vlan_gethash(struct ifvlantrunk *trunk, uint16_t vid) 568 { 569 570 return trunk->vlans[vid]; 571 } 572 573 static __inline int 574 vlan_inshash(struct ifvlantrunk *trunk, struct ifvlan *ifv) 575 { 576 577 if (trunk->vlans[ifv->ifv_vid] != NULL) 578 return EEXIST; 579 trunk->vlans[ifv->ifv_vid] = ifv; 580 trunk->refcnt++; 581 582 return (0); 583 } 584 585 static __inline int 586 vlan_remhash(struct ifvlantrunk *trunk, struct ifvlan *ifv) 587 { 588 589 trunk->vlans[ifv->ifv_vid] = NULL; 590 trunk->refcnt--; 591 592 return (0); 593 } 594 595 static __inline void 596 vlan_freehash(struct ifvlantrunk *trunk) 597 { 598 } 599 600 static __inline void 601 vlan_inithash(struct ifvlantrunk *trunk) 602 { 603 } 604 605 #endif /* !VLAN_ARRAY */ 606 607 static void 608 trunk_destroy(struct ifvlantrunk *trunk) 609 { 610 VLAN_XLOCK_ASSERT(); 611 612 vlan_freehash(trunk); 613 trunk->parent->if_vlantrunk = NULL; 614 TRUNK_LOCK_DESTROY(trunk); 615 if_rele(trunk->parent); 616 free(trunk, M_VLAN); 617 } 618 619 /* 620 * Program our multicast filter. What we're actually doing is 621 * programming the multicast filter of the parent. This has the 622 * side effect of causing the parent interface to receive multicast 623 * traffic that it doesn't really want, which ends up being discarded 624 * later by the upper protocol layers. 
Unfortunately, there's no way 625 * to avoid this: there really is only one physical interface. 626 */ 627 static int 628 vlan_setmulti(struct ifnet *ifp) 629 { 630 struct ifnet *ifp_p; 631 struct ifmultiaddr *ifma; 632 struct ifvlan *sc; 633 struct vlan_mc_entry *mc; 634 int error; 635 636 VLAN_XLOCK_ASSERT(); 637 638 /* Find the parent. */ 639 sc = ifp->if_softc; 640 ifp_p = PARENT(sc); 641 642 CURVNET_SET_QUIET(ifp_p->if_vnet); 643 644 /* First, remove any existing filter entries. */ 645 while ((mc = CK_SLIST_FIRST(&sc->vlan_mc_listhead)) != NULL) { 646 CK_SLIST_REMOVE_HEAD(&sc->vlan_mc_listhead, mc_entries); 647 (void)if_delmulti(ifp_p, (struct sockaddr *)&mc->mc_addr); 648 NET_EPOCH_CALL(vlan_mc_free, &mc->mc_epoch_ctx); 649 } 650 651 /* Now program new ones. */ 652 IF_ADDR_WLOCK(ifp); 653 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 654 if (ifma->ifma_addr->sa_family != AF_LINK) 655 continue; 656 mc = malloc(sizeof(struct vlan_mc_entry), M_VLAN, M_NOWAIT); 657 if (mc == NULL) { 658 IF_ADDR_WUNLOCK(ifp); 659 CURVNET_RESTORE(); 660 return (ENOMEM); 661 } 662 bcopy(ifma->ifma_addr, &mc->mc_addr, ifma->ifma_addr->sa_len); 663 mc->mc_addr.sdl_index = ifp_p->if_index; 664 CK_SLIST_INSERT_HEAD(&sc->vlan_mc_listhead, mc, mc_entries); 665 } 666 IF_ADDR_WUNLOCK(ifp); 667 CK_SLIST_FOREACH (mc, &sc->vlan_mc_listhead, mc_entries) { 668 error = if_addmulti(ifp_p, (struct sockaddr *)&mc->mc_addr, 669 NULL); 670 if (error) { 671 CURVNET_RESTORE(); 672 return (error); 673 } 674 } 675 676 CURVNET_RESTORE(); 677 return (0); 678 } 679 680 /* 681 * A handler for interface ifnet events. 682 */ 683 static void 684 vlan_ifevent(void *arg __unused, struct ifnet *ifp, int event) 685 { 686 struct epoch_tracker et; 687 struct ifvlan *ifv; 688 struct ifvlantrunk *trunk; 689 690 if (event != IFNET_EVENT_UPDATE_BAUDRATE) 691 return; 692 693 NET_EPOCH_ENTER(et); 694 trunk = ifp->if_vlantrunk; 695 if (trunk == NULL) { 696 NET_EPOCH_EXIT(et); 697 return; 698 } 699 700 TRUNK_WLOCK(trunk); 701 VLAN_FOREACH(ifv, trunk) { 702 ifv->ifv_ifp->if_baudrate = ifp->if_baudrate; 703 } 704 TRUNK_WUNLOCK(trunk); 705 NET_EPOCH_EXIT(et); 706 } 707 708 /* 709 * A handler for parent interface link layer address changes. 710 * If the parent interface link layer address is changed we 711 * should also change it on all children vlans. 712 */ 713 static void 714 vlan_iflladdr(void *arg __unused, struct ifnet *ifp) 715 { 716 struct epoch_tracker et; 717 struct ifvlan *ifv; 718 struct ifnet *ifv_ifp; 719 struct ifvlantrunk *trunk; 720 struct sockaddr_dl *sdl; 721 722 /* Need the epoch since this is run on taskqueue_swi. */ 723 NET_EPOCH_ENTER(et); 724 trunk = ifp->if_vlantrunk; 725 if (trunk == NULL) { 726 NET_EPOCH_EXIT(et); 727 return; 728 } 729 730 /* 731 * OK, it's a trunk. Loop over and change all vlan's lladdrs on it. 732 * We need an exclusive lock here to prevent concurrent SIOCSIFLLADDR 733 * ioctl calls on the parent garbling the lladdr of the child vlan. 734 */ 735 TRUNK_WLOCK(trunk); 736 VLAN_FOREACH(ifv, trunk) { 737 /* 738 * Copy new new lladdr into the ifv_ifp, enqueue a task 739 * to actually call if_setlladdr. if_setlladdr needs to 740 * be deferred to a taskqueue because it will call into 741 * the if_vlan ioctl path and try to acquire the global 742 * lock. 
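 * The deferred work is vlan_lladdr_fn(), initialized on lladdr_task
 * in vlan_config() and run from taskqueue_thread.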
743 */ 744 ifv_ifp = ifv->ifv_ifp; 745 bcopy(IF_LLADDR(ifp), IF_LLADDR(ifv_ifp), 746 ifp->if_addrlen); 747 sdl = (struct sockaddr_dl *)ifv_ifp->if_addr->ifa_addr; 748 sdl->sdl_alen = ifp->if_addrlen; 749 taskqueue_enqueue(taskqueue_thread, &ifv->lladdr_task); 750 } 751 TRUNK_WUNLOCK(trunk); 752 NET_EPOCH_EXIT(et); 753 } 754 755 /* 756 * A handler for network interface departure events. 757 * Track departure of trunks here so that we don't access invalid 758 * pointers or whatever if a trunk is ripped from under us, e.g., 759 * by ejecting its hot-plug card. However, if an ifnet is simply 760 * being renamed, then there's no need to tear down the state. 761 */ 762 static void 763 vlan_ifdetach(void *arg __unused, struct ifnet *ifp) 764 { 765 struct ifvlan *ifv; 766 struct ifvlantrunk *trunk; 767 768 /* If the ifnet is just being renamed, don't do anything. */ 769 if (ifp->if_flags & IFF_RENAMING) 770 return; 771 VLAN_XLOCK(); 772 trunk = ifp->if_vlantrunk; 773 if (trunk == NULL) { 774 VLAN_XUNLOCK(); 775 return; 776 } 777 778 /* 779 * OK, it's a trunk. Loop over and detach all vlan's on it. 780 * Check trunk pointer after each vlan_unconfig() as it will 781 * free it and set to NULL after the last vlan was detached. 782 */ 783 VLAN_FOREACH_UNTIL_SAFE(ifv, ifp->if_vlantrunk, 784 ifp->if_vlantrunk == NULL) 785 vlan_unconfig_locked(ifv->ifv_ifp, 1); 786 787 /* Trunk should have been destroyed in vlan_unconfig(). */ 788 KASSERT(ifp->if_vlantrunk == NULL, ("%s: purge failed", __func__)); 789 VLAN_XUNLOCK(); 790 } 791 792 /* 793 * Return the trunk device for a virtual interface. 794 */ 795 static struct ifnet * 796 vlan_trunkdev(struct ifnet *ifp) 797 { 798 struct ifvlan *ifv; 799 800 NET_EPOCH_ASSERT(); 801 802 if (ifp->if_type != IFT_L2VLAN) 803 return (NULL); 804 805 ifv = ifp->if_softc; 806 ifp = NULL; 807 if (ifv->ifv_trunk) 808 ifp = PARENT(ifv); 809 return (ifp); 810 } 811 812 /* 813 * Return the 12-bit VLAN VID for this interface, for use by external 814 * components such as Infiniband. 815 * 816 * XXXRW: Note that the function name here is historical; it should be named 817 * vlan_vid(). 818 */ 819 static int 820 vlan_tag(struct ifnet *ifp, uint16_t *vidp) 821 { 822 struct ifvlan *ifv; 823 824 if (ifp->if_type != IFT_L2VLAN) 825 return (EINVAL); 826 ifv = ifp->if_softc; 827 *vidp = ifv->ifv_vid; 828 return (0); 829 } 830 831 static int 832 vlan_pcp(struct ifnet *ifp, uint16_t *pcpp) 833 { 834 struct ifvlan *ifv; 835 836 if (ifp->if_type != IFT_L2VLAN) 837 return (EINVAL); 838 ifv = ifp->if_softc; 839 *pcpp = ifv->ifv_pcp; 840 return (0); 841 } 842 843 /* 844 * Return a driver specific cookie for this interface. Synchronization 845 * with setcookie must be provided by the driver. 846 */ 847 static void * 848 vlan_cookie(struct ifnet *ifp) 849 { 850 struct ifvlan *ifv; 851 852 if (ifp->if_type != IFT_L2VLAN) 853 return (NULL); 854 ifv = ifp->if_softc; 855 return (ifv->ifv_cookie); 856 } 857 858 /* 859 * Store a cookie in our softc that drivers can use to store driver 860 * private per-instance data in. 861 */ 862 static int 863 vlan_setcookie(struct ifnet *ifp, void *cookie) 864 { 865 struct ifvlan *ifv; 866 867 if (ifp->if_type != IFT_L2VLAN) 868 return (EINVAL); 869 ifv = ifp->if_softc; 870 ifv->ifv_cookie = cookie; 871 return (0); 872 } 873 874 /* 875 * Return the vlan device present at the specific VID. 
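 * Returns NULL if the interface has no vlan trunk or no vlan is
 * configured at that VID.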
876 */ 877 static struct ifnet * 878 vlan_devat(struct ifnet *ifp, uint16_t vid) 879 { 880 struct ifvlantrunk *trunk; 881 struct ifvlan *ifv; 882 883 NET_EPOCH_ASSERT(); 884 885 trunk = ifp->if_vlantrunk; 886 if (trunk == NULL) 887 return (NULL); 888 ifp = NULL; 889 ifv = vlan_gethash(trunk, vid); 890 if (ifv) 891 ifp = ifv->ifv_ifp; 892 return (ifp); 893 } 894 895 /* 896 * VLAN support can be loaded as a module. The only place in the 897 * system that's intimately aware of this is ether_input. We hook 898 * into this code through vlan_input_p which is defined there and 899 * set here. No one else in the system should be aware of this so 900 * we use an explicit reference here. 901 */ 902 extern void (*vlan_input_p)(struct ifnet *, struct mbuf *); 903 904 /* For if_link_state_change() eyes only... */ 905 extern void (*vlan_link_state_p)(struct ifnet *); 906 907 static struct if_clone_addreq_v2 vlan_addreq = { 908 .version = 2, 909 .match_f = vlan_clone_match, 910 .create_f = vlan_clone_create, 911 .destroy_f = vlan_clone_destroy, 912 .create_nl_f = vlan_clone_create_nl, 913 .modify_nl_f = vlan_clone_modify_nl, 914 .dump_nl_f = vlan_clone_dump_nl, 915 }; 916 917 static int 918 vlan_modevent(module_t mod, int type, void *data) 919 { 920 921 switch (type) { 922 case MOD_LOAD: 923 ifdetach_tag = EVENTHANDLER_REGISTER(ifnet_departure_event, 924 vlan_ifdetach, NULL, EVENTHANDLER_PRI_ANY); 925 if (ifdetach_tag == NULL) 926 return (ENOMEM); 927 iflladdr_tag = EVENTHANDLER_REGISTER(iflladdr_event, 928 vlan_iflladdr, NULL, EVENTHANDLER_PRI_ANY); 929 if (iflladdr_tag == NULL) 930 return (ENOMEM); 931 ifevent_tag = EVENTHANDLER_REGISTER(ifnet_event, 932 vlan_ifevent, NULL, EVENTHANDLER_PRI_ANY); 933 if (ifevent_tag == NULL) 934 return (ENOMEM); 935 VLAN_LOCKING_INIT(); 936 vlan_input_p = vlan_input; 937 vlan_link_state_p = vlan_link_state; 938 vlan_trunk_cap_p = vlan_trunk_capabilities; 939 vlan_trunkdev_p = vlan_trunkdev; 940 vlan_cookie_p = vlan_cookie; 941 vlan_setcookie_p = vlan_setcookie; 942 vlan_tag_p = vlan_tag; 943 vlan_pcp_p = vlan_pcp; 944 vlan_devat_p = vlan_devat; 945 #ifndef VIMAGE 946 vlan_cloner = ifc_attach_cloner(vlanname, (struct if_clone_addreq *)&vlan_addreq); 947 #endif 948 if (bootverbose) 949 printf("vlan: initialized, using " 950 #ifdef VLAN_ARRAY 951 "full-size arrays" 952 #else 953 "hash tables with chaining" 954 #endif 955 956 "\n"); 957 break; 958 case MOD_UNLOAD: 959 #ifndef VIMAGE 960 ifc_detach_cloner(vlan_cloner); 961 #endif 962 EVENTHANDLER_DEREGISTER(ifnet_departure_event, ifdetach_tag); 963 EVENTHANDLER_DEREGISTER(iflladdr_event, iflladdr_tag); 964 EVENTHANDLER_DEREGISTER(ifnet_event, ifevent_tag); 965 vlan_input_p = NULL; 966 vlan_link_state_p = NULL; 967 vlan_trunk_cap_p = NULL; 968 vlan_trunkdev_p = NULL; 969 vlan_tag_p = NULL; 970 vlan_cookie_p = NULL; 971 vlan_setcookie_p = NULL; 972 vlan_devat_p = NULL; 973 VLAN_LOCKING_DESTROY(); 974 if (bootverbose) 975 printf("vlan: unloaded\n"); 976 break; 977 default: 978 return (EOPNOTSUPP); 979 } 980 return (0); 981 } 982 983 static moduledata_t vlan_mod = { 984 "if_vlan", 985 vlan_modevent, 986 0 987 }; 988 989 DECLARE_MODULE(if_vlan, vlan_mod, SI_SUB_PSEUDO, SI_ORDER_ANY); 990 MODULE_VERSION(if_vlan, 3); 991 992 #ifdef VIMAGE 993 static void 994 vnet_vlan_init(const void *unused __unused) 995 { 996 vlan_cloner = ifc_attach_cloner(vlanname, (struct if_clone_addreq *)&vlan_addreq); 997 V_vlan_cloner = vlan_cloner; 998 } 999 VNET_SYSINIT(vnet_vlan_init, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY, 1000 vnet_vlan_init, 
NULL); 1001 1002 static void 1003 vnet_vlan_uninit(const void *unused __unused) 1004 { 1005 1006 ifc_detach_cloner(V_vlan_cloner); 1007 } 1008 VNET_SYSUNINIT(vnet_vlan_uninit, SI_SUB_INIT_IF, SI_ORDER_ANY, 1009 vnet_vlan_uninit, NULL); 1010 #endif 1011 1012 /* 1013 * Check for <etherif>.<vlan>[.<vlan> ...] style interface names. 1014 */ 1015 static struct ifnet * 1016 vlan_clone_match_ethervid(const char *name, int *vidp) 1017 { 1018 char ifname[IFNAMSIZ]; 1019 char *cp; 1020 struct ifnet *ifp; 1021 int vid; 1022 1023 strlcpy(ifname, name, IFNAMSIZ); 1024 if ((cp = strrchr(ifname, '.')) == NULL) 1025 return (NULL); 1026 *cp = '\0'; 1027 if ((ifp = ifunit_ref(ifname)) == NULL) 1028 return (NULL); 1029 /* Parse VID. */ 1030 if (*++cp == '\0') { 1031 if_rele(ifp); 1032 return (NULL); 1033 } 1034 vid = 0; 1035 for(; *cp >= '0' && *cp <= '9'; cp++) 1036 vid = (vid * 10) + (*cp - '0'); 1037 if (*cp != '\0') { 1038 if_rele(ifp); 1039 return (NULL); 1040 } 1041 if (vidp != NULL) 1042 *vidp = vid; 1043 1044 return (ifp); 1045 } 1046 1047 static int 1048 vlan_clone_match(struct if_clone *ifc, const char *name) 1049 { 1050 struct ifnet *ifp; 1051 const char *cp; 1052 1053 ifp = vlan_clone_match_ethervid(name, NULL); 1054 if (ifp != NULL) { 1055 if_rele(ifp); 1056 return (1); 1057 } 1058 1059 if (strncmp(vlanname, name, strlen(vlanname)) != 0) 1060 return (0); 1061 for (cp = name + 4; *cp != '\0'; cp++) { 1062 if (*cp < '0' || *cp > '9') 1063 return (0); 1064 } 1065 1066 return (1); 1067 } 1068 1069 static int 1070 vlan_clone_create(struct if_clone *ifc, char *name, size_t len, 1071 struct ifc_data *ifd, struct ifnet **ifpp) 1072 { 1073 char *dp; 1074 bool wildcard = false; 1075 bool subinterface = false; 1076 int unit; 1077 int error; 1078 int vid = 0; 1079 uint16_t proto = ETHERTYPE_VLAN; 1080 struct ifvlan *ifv; 1081 struct ifnet *ifp; 1082 struct ifnet *p = NULL; 1083 struct ifaddr *ifa; 1084 struct sockaddr_dl *sdl; 1085 struct vlanreq vlr; 1086 static const u_char eaddr[ETHER_ADDR_LEN]; /* 00:00:00:00:00:00 */ 1087 1088 1089 /* 1090 * There are three ways to specify the cloned device: 1091 * o pass a parameter block with the clone request. 1092 * o specify parameters in the text of the clone device name 1093 * o specify no parameters and get an unattached device that 1094 * must be configured separately. 1095 * The first technique is preferred; the latter two are supported 1096 * for backwards compatibility. 1097 * 1098 * XXXRW: Note historic use of the word "tag" here. New ioctls may be 1099 * called for. 1100 */ 1101 1102 if (ifd->params != NULL) { 1103 error = ifc_copyin(ifd, &vlr, sizeof(vlr)); 1104 if (error) 1105 return error; 1106 vid = vlr.vlr_tag; 1107 proto = vlr.vlr_proto; 1108 if (proto == 0) 1109 proto = ETHERTYPE_VLAN; 1110 p = ifunit_ref(vlr.vlr_parent); 1111 if (p == NULL) 1112 return (ENXIO); 1113 } 1114 1115 if ((error = ifc_name2unit(name, &unit)) == 0) { 1116 1117 /* 1118 * vlanX interface. 
Set wildcard to true if the unit number 1119 * is not fixed (-1) 1120 */ 1121 wildcard = (unit < 0); 1122 } else { 1123 struct ifnet *p_tmp = vlan_clone_match_ethervid(name, &vid); 1124 if (p_tmp != NULL) { 1125 error = 0; 1126 subinterface = true; 1127 unit = IF_DUNIT_NONE; 1128 wildcard = false; 1129 if (p != NULL) { 1130 if_rele(p_tmp); 1131 if (p != p_tmp) 1132 error = EINVAL; 1133 } else 1134 p = p_tmp; 1135 } else 1136 error = ENXIO; 1137 } 1138 1139 if (error != 0) { 1140 if (p != NULL) 1141 if_rele(p); 1142 return (error); 1143 } 1144 1145 if (!subinterface) { 1146 /* vlanX interface, mark X as busy or allocate new unit # */ 1147 error = ifc_alloc_unit(ifc, &unit); 1148 if (error != 0) { 1149 if (p != NULL) 1150 if_rele(p); 1151 return (error); 1152 } 1153 } 1154 1155 /* In the wildcard case, we need to update the name. */ 1156 if (wildcard) { 1157 for (dp = name; *dp != '\0'; dp++); 1158 if (snprintf(dp, len - (dp-name), "%d", unit) > 1159 len - (dp-name) - 1) { 1160 panic("%s: interface name too long", __func__); 1161 } 1162 } 1163 1164 ifv = malloc(sizeof(struct ifvlan), M_VLAN, M_WAITOK | M_ZERO); 1165 ifp = ifv->ifv_ifp = if_alloc(IFT_ETHER); 1166 if (ifp == NULL) { 1167 if (!subinterface) 1168 ifc_free_unit(ifc, unit); 1169 free(ifv, M_VLAN); 1170 if (p != NULL) 1171 if_rele(p); 1172 return (ENOSPC); 1173 } 1174 CK_SLIST_INIT(&ifv->vlan_mc_listhead); 1175 ifp->if_softc = ifv; 1176 /* 1177 * Set the name manually rather than using if_initname because 1178 * we don't conform to the default naming convention for interfaces. 1179 */ 1180 strlcpy(ifp->if_xname, name, IFNAMSIZ); 1181 ifp->if_dname = vlanname; 1182 ifp->if_dunit = unit; 1183 1184 ifp->if_init = vlan_init; 1185 #ifdef ALTQ 1186 ifp->if_start = vlan_altq_start; 1187 ifp->if_transmit = vlan_altq_transmit; 1188 IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); 1189 ifp->if_snd.ifq_drv_maxlen = 0; 1190 IFQ_SET_READY(&ifp->if_snd); 1191 #else 1192 ifp->if_transmit = vlan_transmit; 1193 #endif 1194 ifp->if_qflush = vlan_qflush; 1195 ifp->if_ioctl = vlan_ioctl; 1196 #if defined(KERN_TLS) || defined(RATELIMIT) 1197 ifp->if_snd_tag_alloc = vlan_snd_tag_alloc; 1198 ifp->if_ratelimit_query = vlan_ratelimit_query; 1199 #endif 1200 ifp->if_flags = VLAN_IFFLAGS; 1201 ether_ifattach(ifp, eaddr); 1202 /* Now undo some of the damage... */ 1203 ifp->if_baudrate = 0; 1204 ifp->if_type = IFT_L2VLAN; 1205 ifp->if_hdrlen = ETHER_VLAN_ENCAP_LEN; 1206 ifa = ifp->if_addr; 1207 sdl = (struct sockaddr_dl *)ifa->ifa_addr; 1208 sdl->sdl_type = IFT_L2VLAN; 1209 1210 if (p != NULL) { 1211 error = vlan_config(ifv, p, vid, proto); 1212 if_rele(p); 1213 if (error != 0) { 1214 /* 1215 * Since we've partially failed, we need to back 1216 * out all the way, otherwise userland could get 1217 * confused. Thus, we destroy the interface. 
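 * The teardown order below matches vlan_clone_destroy():
 * ether_ifdetach() first, then vlan_unconfig(), then if_free().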
1218 */ 1219 ether_ifdetach(ifp); 1220 vlan_unconfig(ifp); 1221 if_free(ifp); 1222 if (!subinterface) 1223 ifc_free_unit(ifc, unit); 1224 free(ifv, M_VLAN); 1225 1226 return (error); 1227 } 1228 } 1229 *ifpp = ifp; 1230 1231 return (0); 1232 } 1233 1234 /* 1235 * 1236 * Parsers of IFLA_INFO_DATA inside IFLA_LINKINFO of RTM_NEWLINK 1237 * {{nla_len=8, nla_type=IFLA_LINK}, 2}, 1238 * {{nla_len=12, nla_type=IFLA_IFNAME}, "xvlan22"}, 1239 * {{nla_len=24, nla_type=IFLA_LINKINFO}, 1240 * [ 1241 * {{nla_len=8, nla_type=IFLA_INFO_KIND}, "vlan"...}, 1242 * {{nla_len=12, nla_type=IFLA_INFO_DATA}, "\x06\x00\x01\x00\x16\x00\x00\x00"}]} 1243 */ 1244 1245 struct nl_parsed_vlan { 1246 uint16_t vlan_id; 1247 uint16_t vlan_proto; 1248 struct ifla_vlan_flags vlan_flags; 1249 }; 1250 1251 #define _OUT(_field) offsetof(struct nl_parsed_vlan, _field) 1252 static const struct nlattr_parser nla_p_vlan[] = { 1253 { .type = IFLA_VLAN_ID, .off = _OUT(vlan_id), .cb = nlattr_get_uint16 }, 1254 { .type = IFLA_VLAN_FLAGS, .off = _OUT(vlan_flags), .cb = nlattr_get_nla }, 1255 { .type = IFLA_VLAN_PROTOCOL, .off = _OUT(vlan_proto), .cb = nlattr_get_uint16 }, 1256 }; 1257 #undef _OUT 1258 NL_DECLARE_ATTR_PARSER(vlan_parser, nla_p_vlan); 1259 1260 static int 1261 vlan_clone_create_nl(struct if_clone *ifc, char *name, size_t len, 1262 struct ifc_data_nl *ifd) 1263 { 1264 struct epoch_tracker et; 1265 struct ifnet *ifp_parent; 1266 struct nl_pstate *npt = ifd->npt; 1267 struct nl_parsed_link *lattrs = ifd->lattrs; 1268 int error; 1269 1270 /* 1271 * lattrs.ifla_ifname is the new interface name 1272 * lattrs.ifi_index contains parent interface index 1273 * lattrs.ifla_idata contains un-parsed vlan data 1274 */ 1275 struct nl_parsed_vlan attrs = { 1276 .vlan_id = 0xFEFE, 1277 .vlan_proto = ETHERTYPE_VLAN 1278 }; 1279 1280 if (lattrs->ifla_idata == NULL) { 1281 nlmsg_report_err_msg(npt, "vlan id is required, guessing not supported"); 1282 return (ENOTSUP); 1283 } 1284 1285 error = nl_parse_nested(lattrs->ifla_idata, &vlan_parser, npt, &attrs); 1286 if (error != 0) 1287 return (error); 1288 if (attrs.vlan_id > 4095) { 1289 nlmsg_report_err_msg(npt, "Invalid VID: %d", attrs.vlan_id); 1290 return (EINVAL); 1291 } 1292 if (attrs.vlan_proto != ETHERTYPE_VLAN && attrs.vlan_proto != ETHERTYPE_QINQ) { 1293 nlmsg_report_err_msg(npt, "Unsupported ethertype: 0x%04X", attrs.vlan_proto); 1294 return (ENOTSUP); 1295 } 1296 1297 struct vlanreq params = { 1298 .vlr_tag = attrs.vlan_id, 1299 .vlr_proto = attrs.vlan_proto, 1300 }; 1301 struct ifc_data ifd_new = { .flags = IFC_F_SYSSPACE, .unit = ifd->unit, .params = ¶ms }; 1302 1303 NET_EPOCH_ENTER(et); 1304 ifp_parent = ifnet_byindex(lattrs->ifi_index); 1305 if (ifp_parent != NULL) 1306 strlcpy(params.vlr_parent, if_name(ifp_parent), sizeof(params.vlr_parent)); 1307 NET_EPOCH_EXIT(et); 1308 1309 if (ifp_parent == NULL) { 1310 nlmsg_report_err_msg(npt, "unable to find parent interface %u", lattrs->ifi_index); 1311 return (ENOENT); 1312 } 1313 1314 error = vlan_clone_create(ifc, name, len, &ifd_new, &ifd->ifp); 1315 1316 return (error); 1317 } 1318 1319 static int 1320 vlan_clone_modify_nl(struct ifnet *ifp, struct ifc_data_nl *ifd) 1321 { 1322 struct nl_parsed_link *lattrs = ifd->lattrs; 1323 1324 if ((lattrs->ifla_idata != NULL) && ((ifd->flags & IFC_F_CREATE) == 0)) { 1325 struct epoch_tracker et; 1326 struct nl_parsed_vlan attrs = { 1327 .vlan_proto = ETHERTYPE_VLAN, 1328 }; 1329 int error; 1330 1331 error = nl_parse_nested(lattrs->ifla_idata, &vlan_parser, ifd->npt, &attrs); 1332 if (error != 
0) 1333 return (error); 1334 1335 NET_EPOCH_ENTER(et); 1336 struct ifnet *ifp_parent = ifnet_byindex_ref(lattrs->ifla_link); 1337 NET_EPOCH_EXIT(et); 1338 1339 if (ifp_parent == NULL) { 1340 nlmsg_report_err_msg(ifd->npt, "unable to find parent interface %u", 1341 lattrs->ifla_link); 1342 return (ENOENT); 1343 } 1344 1345 struct ifvlan *ifv = ifp->if_softc; 1346 error = vlan_config(ifv, ifp_parent, attrs.vlan_id, attrs.vlan_proto); 1347 1348 if_rele(ifp_parent); 1349 if (error != 0) 1350 return (error); 1351 } 1352 1353 return (nl_modify_ifp_generic(ifp, ifd->lattrs, ifd->bm, ifd->npt)); 1354 } 1355 1356 /* 1357 * {{nla_len=24, nla_type=IFLA_LINKINFO}, 1358 * [ 1359 * {{nla_len=8, nla_type=IFLA_INFO_KIND}, "vlan"...}, 1360 * {{nla_len=12, nla_type=IFLA_INFO_DATA}, "\x06\x00\x01\x00\x16\x00\x00\x00"}]} 1361 */ 1362 static void 1363 vlan_clone_dump_nl(struct ifnet *ifp, struct nl_writer *nw) 1364 { 1365 uint32_t parent_index = 0; 1366 uint16_t vlan_id = 0; 1367 uint16_t vlan_proto = 0; 1368 1369 VLAN_SLOCK(); 1370 struct ifvlan *ifv = ifp->if_softc; 1371 if (TRUNK(ifv) != NULL) 1372 parent_index = PARENT(ifv)->if_index; 1373 vlan_id = ifv->ifv_vid; 1374 vlan_proto = ifv->ifv_proto; 1375 VLAN_SUNLOCK(); 1376 1377 if (parent_index != 0) 1378 nlattr_add_u32(nw, IFLA_LINK, parent_index); 1379 1380 int off = nlattr_add_nested(nw, IFLA_LINKINFO); 1381 if (off != 0) { 1382 nlattr_add_string(nw, IFLA_INFO_KIND, "vlan"); 1383 int off2 = nlattr_add_nested(nw, IFLA_INFO_DATA); 1384 if (off2 != 0) { 1385 nlattr_add_u16(nw, IFLA_VLAN_ID, vlan_id); 1386 nlattr_add_u16(nw, IFLA_VLAN_PROTOCOL, vlan_proto); 1387 nlattr_set_len(nw, off2); 1388 } 1389 nlattr_set_len(nw, off); 1390 } 1391 } 1392 1393 static int 1394 vlan_clone_destroy(struct if_clone *ifc, struct ifnet *ifp, uint32_t flags) 1395 { 1396 struct ifvlan *ifv = ifp->if_softc; 1397 int unit = ifp->if_dunit; 1398 1399 if (ifp->if_vlantrunk) 1400 return (EBUSY); 1401 1402 #ifdef ALTQ 1403 IFQ_PURGE(&ifp->if_snd); 1404 #endif 1405 ether_ifdetach(ifp); /* first, remove it from system-wide lists */ 1406 vlan_unconfig(ifp); /* now it can be unconfigured and freed */ 1407 /* 1408 * We should have the only reference to the ifv now, so we can now 1409 * drain any remaining lladdr task before freeing the ifnet and the 1410 * ifvlan. 1411 */ 1412 taskqueue_drain(taskqueue_thread, &ifv->lladdr_task); 1413 NET_EPOCH_WAIT(); 1414 if_free(ifp); 1415 free(ifv, M_VLAN); 1416 if (unit != IF_DUNIT_NONE) 1417 ifc_free_unit(ifc, unit); 1418 1419 return (0); 1420 } 1421 1422 /* 1423 * The ifp->if_init entry point for vlan(4) is a no-op. 1424 */ 1425 static void 1426 vlan_init(void *foo __unused) 1427 { 1428 } 1429 1430 /* 1431 * The if_transmit method for vlan(4) interface. 1432 */ 1433 static int 1434 vlan_transmit(struct ifnet *ifp, struct mbuf *m) 1435 { 1436 struct ifvlan *ifv; 1437 struct ifnet *p; 1438 int error, len, mcast; 1439 1440 NET_EPOCH_ASSERT(); 1441 1442 ifv = ifp->if_softc; 1443 if (TRUNK(ifv) == NULL) { 1444 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 1445 m_freem(m); 1446 return (ENETDOWN); 1447 } 1448 p = PARENT(ifv); 1449 len = m->m_pkthdr.len; 1450 mcast = (m->m_flags & (M_MCAST | M_BCAST)) ? 
1 : 0; 1451 1452 BPF_MTAP(ifp, m); 1453 1454 #if defined(KERN_TLS) || defined(RATELIMIT) 1455 if (m->m_pkthdr.csum_flags & CSUM_SND_TAG) { 1456 struct vlan_snd_tag *vst; 1457 struct m_snd_tag *mst; 1458 1459 MPASS(m->m_pkthdr.snd_tag->ifp == ifp); 1460 mst = m->m_pkthdr.snd_tag; 1461 vst = mst_to_vst(mst); 1462 if (vst->tag->ifp != p) { 1463 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 1464 m_freem(m); 1465 return (EAGAIN); 1466 } 1467 1468 m->m_pkthdr.snd_tag = m_snd_tag_ref(vst->tag); 1469 m_snd_tag_rele(mst); 1470 } 1471 #endif 1472 1473 /* 1474 * Do not run parent's if_transmit() if the parent is not up, 1475 * or parent's driver will cause a system crash. 1476 */ 1477 if (!UP_AND_RUNNING(p)) { 1478 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 1479 m_freem(m); 1480 return (ENETDOWN); 1481 } 1482 1483 if (!ether_8021q_frame(&m, ifp, p, &ifv->ifv_qtag)) { 1484 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 1485 return (0); 1486 } 1487 1488 /* 1489 * Send it, precisely as ether_output() would have. 1490 */ 1491 error = (p->if_transmit)(p, m); 1492 if (error == 0) { 1493 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); 1494 if_inc_counter(ifp, IFCOUNTER_OBYTES, len); 1495 if_inc_counter(ifp, IFCOUNTER_OMCASTS, mcast); 1496 } else 1497 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 1498 return (error); 1499 } 1500 1501 static int 1502 vlan_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst, 1503 struct route *ro) 1504 { 1505 struct ifvlan *ifv; 1506 struct ifnet *p; 1507 1508 NET_EPOCH_ASSERT(); 1509 1510 /* 1511 * Find the first non-VLAN parent interface. 1512 */ 1513 ifv = ifp->if_softc; 1514 do { 1515 if (TRUNK(ifv) == NULL) { 1516 m_freem(m); 1517 return (ENETDOWN); 1518 } 1519 p = PARENT(ifv); 1520 ifv = p->if_softc; 1521 } while (p->if_type == IFT_L2VLAN); 1522 1523 return p->if_output(ifp, m, dst, ro); 1524 } 1525 1526 #ifdef ALTQ 1527 static void 1528 vlan_altq_start(if_t ifp) 1529 { 1530 struct ifaltq *ifq = &ifp->if_snd; 1531 struct mbuf *m; 1532 1533 IFQ_LOCK(ifq); 1534 IFQ_DEQUEUE_NOLOCK(ifq, m); 1535 while (m != NULL) { 1536 vlan_transmit(ifp, m); 1537 IFQ_DEQUEUE_NOLOCK(ifq, m); 1538 } 1539 IFQ_UNLOCK(ifq); 1540 } 1541 1542 static int 1543 vlan_altq_transmit(if_t ifp, struct mbuf *m) 1544 { 1545 int err; 1546 1547 if (ALTQ_IS_ENABLED(&ifp->if_snd)) { 1548 IFQ_ENQUEUE(&ifp->if_snd, m, err); 1549 if (err == 0) 1550 vlan_altq_start(ifp); 1551 } else 1552 err = vlan_transmit(ifp, m); 1553 1554 return (err); 1555 } 1556 #endif /* ALTQ */ 1557 1558 /* 1559 * The ifp->if_qflush entry point for vlan(4) is a no-op. 1560 */ 1561 static void 1562 vlan_qflush(struct ifnet *ifp __unused) 1563 { 1564 } 1565 1566 static void 1567 vlan_input(struct ifnet *ifp, struct mbuf *m) 1568 { 1569 struct ifvlantrunk *trunk; 1570 struct ifvlan *ifv; 1571 struct m_tag *mtag; 1572 uint16_t vid, tag; 1573 1574 NET_EPOCH_ASSERT(); 1575 1576 trunk = ifp->if_vlantrunk; 1577 if (trunk == NULL) { 1578 m_freem(m); 1579 return; 1580 } 1581 1582 if (m->m_flags & M_VLANTAG) { 1583 /* 1584 * Packet is tagged, but m contains a normal 1585 * Ethernet frame; the tag is stored out-of-band. 1586 */ 1587 tag = m->m_pkthdr.ether_vtag; 1588 m->m_flags &= ~M_VLANTAG; 1589 } else { 1590 struct ether_vlan_header *evl; 1591 1592 /* 1593 * Packet is tagged in-band as specified by 802.1q. 
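 * The frame starts with an ether_vlan_header: destination and source
 * addresses, the 802.1Q TPID, the 16-bit TCI (PCP, DEI and 12-bit
 * VID) and then the encapsulated ethertype; evl_tag below carries
 * the TCI in network byte order.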
1594 */ 1595 switch (ifp->if_type) { 1596 case IFT_ETHER: 1597 if (m->m_len < sizeof(*evl) && 1598 (m = m_pullup(m, sizeof(*evl))) == NULL) { 1599 if_printf(ifp, "cannot pullup VLAN header\n"); 1600 return; 1601 } 1602 evl = mtod(m, struct ether_vlan_header *); 1603 tag = ntohs(evl->evl_tag); 1604 1605 /* 1606 * Remove the 802.1q header by copying the Ethernet 1607 * addresses over it and adjusting the beginning of 1608 * the data in the mbuf. The encapsulated Ethernet 1609 * type field is already in place. 1610 */ 1611 bcopy((char *)evl, (char *)evl + ETHER_VLAN_ENCAP_LEN, 1612 ETHER_HDR_LEN - ETHER_TYPE_LEN); 1613 m_adj(m, ETHER_VLAN_ENCAP_LEN); 1614 break; 1615 1616 default: 1617 #ifdef INVARIANTS 1618 panic("%s: %s has unsupported if_type %u", 1619 __func__, ifp->if_xname, ifp->if_type); 1620 #endif 1621 if_inc_counter(ifp, IFCOUNTER_NOPROTO, 1); 1622 m_freem(m); 1623 return; 1624 } 1625 } 1626 1627 vid = EVL_VLANOFTAG(tag); 1628 1629 ifv = vlan_gethash(trunk, vid); 1630 if (ifv == NULL || !UP_AND_RUNNING(ifv->ifv_ifp)) { 1631 if_inc_counter(ifp, IFCOUNTER_NOPROTO, 1); 1632 m_freem(m); 1633 return; 1634 } 1635 1636 if (V_vlan_mtag_pcp) { 1637 /* 1638 * While uncommon, it is possible that we will find a 802.1q 1639 * packet encapsulated inside another packet that also had an 1640 * 802.1q header. For example, ethernet tunneled over IPSEC 1641 * arriving over ethernet. In that case, we replace the 1642 * existing 802.1q PCP m_tag value. 1643 */ 1644 mtag = m_tag_locate(m, MTAG_8021Q, MTAG_8021Q_PCP_IN, NULL); 1645 if (mtag == NULL) { 1646 mtag = m_tag_alloc(MTAG_8021Q, MTAG_8021Q_PCP_IN, 1647 sizeof(uint8_t), M_NOWAIT); 1648 if (mtag == NULL) { 1649 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); 1650 m_freem(m); 1651 return; 1652 } 1653 m_tag_prepend(m, mtag); 1654 } 1655 *(uint8_t *)(mtag + 1) = EVL_PRIOFTAG(tag); 1656 } 1657 1658 m->m_pkthdr.rcvif = ifv->ifv_ifp; 1659 if_inc_counter(ifv->ifv_ifp, IFCOUNTER_IPACKETS, 1); 1660 1661 /* Pass it back through the parent's input routine. */ 1662 (*ifv->ifv_ifp->if_input)(ifv->ifv_ifp, m); 1663 } 1664 1665 static void 1666 vlan_lladdr_fn(void *arg, int pending __unused) 1667 { 1668 struct ifvlan *ifv; 1669 struct ifnet *ifp; 1670 1671 ifv = (struct ifvlan *)arg; 1672 ifp = ifv->ifv_ifp; 1673 1674 CURVNET_SET(ifp->if_vnet); 1675 1676 /* The ifv_ifp already has the lladdr copied in. */ 1677 if_setlladdr(ifp, IF_LLADDR(ifp), ifp->if_addrlen); 1678 1679 CURVNET_RESTORE(); 1680 } 1681 1682 static int 1683 vlan_config(struct ifvlan *ifv, struct ifnet *p, uint16_t vid, 1684 uint16_t proto) 1685 { 1686 struct epoch_tracker et; 1687 struct ifvlantrunk *trunk; 1688 struct ifnet *ifp; 1689 int error = 0; 1690 1691 /* 1692 * We can handle non-ethernet hardware types as long as 1693 * they handle the tagging and headers themselves. 1694 */ 1695 if (p->if_type != IFT_ETHER && 1696 p->if_type != IFT_L2VLAN && 1697 (p->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) 1698 return (EPROTONOSUPPORT); 1699 if ((p->if_flags & VLAN_IFFLAGS) != VLAN_IFFLAGS) 1700 return (EPROTONOSUPPORT); 1701 /* 1702 * Don't let the caller set up a VLAN VID with 1703 * anything except VLID bits. 1704 * VID numbers 0x0 and 0xFFF are reserved. 
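 * That leaves 1-4094 (0x001-0xFFE) as configurable VIDs: 0 denotes a
 * priority-tagged frame and 0xFFF is reserved by the standard.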
1705 */ 1706 if (vid == 0 || vid == 0xFFF || (vid & ~EVL_VLID_MASK)) 1707 return (EINVAL); 1708 if (ifv->ifv_trunk) { 1709 trunk = ifv->ifv_trunk; 1710 if (trunk->parent != p) 1711 return (EBUSY); 1712 1713 VLAN_XLOCK(); 1714 1715 ifv->ifv_proto = proto; 1716 1717 if (ifv->ifv_vid != vid) { 1718 /* Re-hash */ 1719 vlan_remhash(trunk, ifv); 1720 ifv->ifv_vid = vid; 1721 error = vlan_inshash(trunk, ifv); 1722 } 1723 /* Will unlock */ 1724 goto done; 1725 } 1726 1727 VLAN_XLOCK(); 1728 if (p->if_vlantrunk == NULL) { 1729 trunk = malloc(sizeof(struct ifvlantrunk), 1730 M_VLAN, M_WAITOK | M_ZERO); 1731 vlan_inithash(trunk); 1732 TRUNK_LOCK_INIT(trunk); 1733 TRUNK_WLOCK(trunk); 1734 p->if_vlantrunk = trunk; 1735 trunk->parent = p; 1736 if_ref(trunk->parent); 1737 TRUNK_WUNLOCK(trunk); 1738 } else { 1739 trunk = p->if_vlantrunk; 1740 } 1741 1742 ifv->ifv_vid = vid; /* must set this before vlan_inshash() */ 1743 ifv->ifv_pcp = 0; /* Default: best effort delivery. */ 1744 error = vlan_inshash(trunk, ifv); 1745 if (error) 1746 goto done; 1747 ifv->ifv_proto = proto; 1748 ifv->ifv_encaplen = ETHER_VLAN_ENCAP_LEN; 1749 ifv->ifv_mintu = ETHERMIN; 1750 ifv->ifv_pflags = 0; 1751 ifv->ifv_capenable = -1; 1752 1753 /* 1754 * If the parent supports the VLAN_MTU capability, 1755 * i.e. can Tx/Rx larger than ETHER_MAX_LEN frames, 1756 * use it. 1757 */ 1758 if (p->if_capenable & IFCAP_VLAN_MTU) { 1759 /* 1760 * No need to fudge the MTU since the parent can 1761 * handle extended frames. 1762 */ 1763 ifv->ifv_mtufudge = 0; 1764 } else { 1765 /* 1766 * Fudge the MTU by the encapsulation size. This 1767 * makes us incompatible with strictly compliant 1768 * 802.1Q implementations, but allows us to use 1769 * the feature with other NetBSD implementations, 1770 * which might still be useful. 1771 */ 1772 ifv->ifv_mtufudge = ifv->ifv_encaplen; 1773 } 1774 1775 ifv->ifv_trunk = trunk; 1776 ifp = ifv->ifv_ifp; 1777 /* 1778 * Initialize fields from our parent. This duplicates some 1779 * work with ether_ifattach() but allows for non-ethernet 1780 * interfaces to also work. 1781 */ 1782 ifp->if_mtu = p->if_mtu - ifv->ifv_mtufudge; 1783 ifp->if_baudrate = p->if_baudrate; 1784 ifp->if_input = p->if_input; 1785 ifp->if_resolvemulti = p->if_resolvemulti; 1786 ifp->if_addrlen = p->if_addrlen; 1787 ifp->if_broadcastaddr = p->if_broadcastaddr; 1788 ifp->if_pcp = ifv->ifv_pcp; 1789 1790 /* 1791 * We wrap the parent's if_output using vlan_output to ensure that it 1792 * can't become stale. 1793 */ 1794 ifp->if_output = vlan_output; 1795 1796 /* 1797 * Copy only a selected subset of flags from the parent. 1798 * Other flags are none of our business. 1799 */ 1800 #define VLAN_COPY_FLAGS (IFF_SIMPLEX) 1801 ifp->if_flags &= ~VLAN_COPY_FLAGS; 1802 ifp->if_flags |= p->if_flags & VLAN_COPY_FLAGS; 1803 #undef VLAN_COPY_FLAGS 1804 1805 ifp->if_link_state = p->if_link_state; 1806 1807 NET_EPOCH_ENTER(et); 1808 vlan_capabilities(ifv); 1809 NET_EPOCH_EXIT(et); 1810 1811 /* 1812 * Set up our interface address to reflect the underlying 1813 * physical interface's. 1814 */ 1815 TASK_INIT(&ifv->lladdr_task, 0, vlan_lladdr_fn, ifv); 1816 ((struct sockaddr_dl *)ifp->if_addr->ifa_addr)->sdl_alen = 1817 p->if_addrlen; 1818 1819 /* 1820 * Do not schedule link address update if it was the same 1821 * as previous parent's. This helps avoid updating for each 1822 * associated llentry. 
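 * When the addresses do differ, the copy below happens immediately
 * and the actual if_setlladdr() call is deferred to lladdr_task,
 * as in vlan_iflladdr() above.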
1823 */ 1824 if (memcmp(IF_LLADDR(p), IF_LLADDR(ifp), p->if_addrlen) != 0) { 1825 bcopy(IF_LLADDR(p), IF_LLADDR(ifp), p->if_addrlen); 1826 taskqueue_enqueue(taskqueue_thread, &ifv->lladdr_task); 1827 } 1828 1829 /* We are ready for operation now. */ 1830 ifp->if_drv_flags |= IFF_DRV_RUNNING; 1831 1832 /* Update flags on the parent, if necessary. */ 1833 vlan_setflags(ifp, 1); 1834 1835 /* 1836 * Configure multicast addresses that may already be 1837 * joined on the vlan device. 1838 */ 1839 (void)vlan_setmulti(ifp); 1840 1841 done: 1842 if (error == 0) 1843 EVENTHANDLER_INVOKE(vlan_config, p, ifv->ifv_vid); 1844 VLAN_XUNLOCK(); 1845 1846 return (error); 1847 } 1848 1849 static void 1850 vlan_unconfig(struct ifnet *ifp) 1851 { 1852 1853 VLAN_XLOCK(); 1854 vlan_unconfig_locked(ifp, 0); 1855 VLAN_XUNLOCK(); 1856 } 1857 1858 static void 1859 vlan_unconfig_locked(struct ifnet *ifp, int departing) 1860 { 1861 struct ifvlantrunk *trunk; 1862 struct vlan_mc_entry *mc; 1863 struct ifvlan *ifv; 1864 struct ifnet *parent; 1865 int error; 1866 1867 VLAN_XLOCK_ASSERT(); 1868 1869 ifv = ifp->if_softc; 1870 trunk = ifv->ifv_trunk; 1871 parent = NULL; 1872 1873 if (trunk != NULL) { 1874 parent = trunk->parent; 1875 1876 /* 1877 * Since the interface is being unconfigured, we need to 1878 * empty the list of multicast groups that we may have joined 1879 * while we were alive from the parent's list. 1880 */ 1881 while ((mc = CK_SLIST_FIRST(&ifv->vlan_mc_listhead)) != NULL) { 1882 /* 1883 * If the parent interface is being detached, 1884 * all its multicast addresses have already 1885 * been removed. Warn about errors if 1886 * if_delmulti() does fail, but don't abort as 1887 * all callers expect vlan destruction to 1888 * succeed. 1889 */ 1890 if (!departing) { 1891 error = if_delmulti(parent, 1892 (struct sockaddr *)&mc->mc_addr); 1893 if (error) 1894 if_printf(ifp, 1895 "Failed to delete multicast address from parent: %d\n", 1896 error); 1897 } 1898 CK_SLIST_REMOVE_HEAD(&ifv->vlan_mc_listhead, mc_entries); 1899 NET_EPOCH_CALL(vlan_mc_free, &mc->mc_epoch_ctx); 1900 } 1901 1902 vlan_setflags(ifp, 0); /* clear special flags on parent */ 1903 1904 vlan_remhash(trunk, ifv); 1905 ifv->ifv_trunk = NULL; 1906 1907 /* 1908 * Check if we were the last. 1909 */ 1910 if (trunk->refcnt == 0) { 1911 parent->if_vlantrunk = NULL; 1912 NET_EPOCH_WAIT(); 1913 trunk_destroy(trunk); 1914 } 1915 } 1916 1917 /* Disconnect from parent. */ 1918 if (ifv->ifv_pflags) 1919 if_printf(ifp, "%s: ifv_pflags unclean\n", __func__); 1920 ifp->if_mtu = ETHERMTU; 1921 ifp->if_link_state = LINK_STATE_UNKNOWN; 1922 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1923 1924 /* 1925 * Only dispatch an event if vlan was 1926 * attached, otherwise there is nothing 1927 * to cleanup anyway. 1928 */ 1929 if (parent != NULL) 1930 EVENTHANDLER_INVOKE(vlan_unconfig, parent, ifv->ifv_vid); 1931 } 1932 1933 /* Handle a reference counted flag that should be set on the parent as well */ 1934 static int 1935 vlan_setflag(struct ifnet *ifp, int flag, int status, 1936 int (*func)(struct ifnet *, int)) 1937 { 1938 struct ifvlan *ifv; 1939 int error; 1940 1941 VLAN_SXLOCK_ASSERT(); 1942 1943 ifv = ifp->if_softc; 1944 status = status ? (ifp->if_flags & flag) : 0; 1945 /* Now "status" contains the flag value or 0 */ 1946 1947 /* 1948 * See if recorded parent's status is different from what 1949 * we want it to be. If it is, flip it. We record parent's 1950 * status in ifv_pflags so that we won't clear parent's flag 1951 * we haven't set. 
In fact, we don't clear or set parent's 1952 * flags directly, but get or release references to them. 1953 * That's why we can be sure that the recorded flags are still 1954 * in accord with the parent's actual flags. 1955 */ 1956 if (status != (ifv->ifv_pflags & flag)) { 1957 error = (*func)(PARENT(ifv), status); 1958 if (error) 1959 return (error); 1960 ifv->ifv_pflags &= ~flag; 1961 ifv->ifv_pflags |= status; 1962 } 1963 return (0); 1964 } 1965 1966 /* 1967 * Handle IFF_* flags that require certain changes on the parent: 1968 * if "status" is true, update the parent's flags to match our if_flags; 1969 * if "status" is false, forcibly clear the flags we have set on the parent. 1970 */ 1971 static int 1972 vlan_setflags(struct ifnet *ifp, int status) 1973 { 1974 int error, i; 1975 1976 for (i = 0; vlan_pflags[i].flag; i++) { 1977 error = vlan_setflag(ifp, vlan_pflags[i].flag, 1978 status, vlan_pflags[i].func); 1979 if (error) 1980 return (error); 1981 } 1982 return (0); 1983 } 1984 1985 /* Inform all vlans that their parent has changed link state */ 1986 static void 1987 vlan_link_state(struct ifnet *ifp) 1988 { 1989 struct epoch_tracker et; 1990 struct ifvlantrunk *trunk; 1991 struct ifvlan *ifv; 1992 1993 NET_EPOCH_ENTER(et); 1994 trunk = ifp->if_vlantrunk; 1995 if (trunk == NULL) { 1996 NET_EPOCH_EXIT(et); 1997 return; 1998 } 1999 2000 TRUNK_WLOCK(trunk); 2001 VLAN_FOREACH(ifv, trunk) { 2002 ifv->ifv_ifp->if_baudrate = trunk->parent->if_baudrate; 2003 if_link_state_change(ifv->ifv_ifp, 2004 trunk->parent->if_link_state); 2005 } 2006 TRUNK_WUNLOCK(trunk); 2007 NET_EPOCH_EXIT(et); 2008 } 2009 /* Recompute the vlan's capabilities, enabled capabilities and if_hwassist flags from its parent. */ 2010 static void 2011 vlan_capabilities(struct ifvlan *ifv) 2012 { 2013 struct ifnet *p; 2014 struct ifnet *ifp; 2015 struct ifnet_hw_tsomax hw_tsomax; 2016 int cap = 0, ena = 0, mena; 2017 u_long hwa = 0; 2018 2019 NET_EPOCH_ASSERT(); 2020 VLAN_SXLOCK_ASSERT(); 2021 2022 p = PARENT(ifv); 2023 ifp = ifv->ifv_ifp; 2024 2025 /* Mask the parent's enabled capabilities with those enabled by the user on the vlan. */ 2026 mena = p->if_capenable & ifv->ifv_capenable; 2027 2028 /* 2029 * If the parent interface can do checksum offloading 2030 * on VLANs, then propagate its hardware-assisted 2031 * checksumming flags. Also assert that checksum 2032 * offloading requires hardware VLAN tagging. 2033 */ 2034 if (p->if_capabilities & IFCAP_VLAN_HWCSUM) 2035 cap |= p->if_capabilities & (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6); 2036 if (p->if_capenable & IFCAP_VLAN_HWCSUM && 2037 p->if_capenable & IFCAP_VLAN_HWTAGGING) { 2038 ena |= mena & (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6); 2039 if (ena & IFCAP_TXCSUM) 2040 hwa |= p->if_hwassist & (CSUM_IP | CSUM_TCP | 2041 CSUM_UDP | CSUM_SCTP); 2042 if (ena & IFCAP_TXCSUM_IPV6) 2043 hwa |= p->if_hwassist & (CSUM_TCP_IPV6 | 2044 CSUM_UDP_IPV6 | CSUM_SCTP_IPV6); 2045 } 2046 2047 /* 2048 * If the parent interface can do TSO on VLANs then 2049 * propagate the hardware-assisted flag. TSO on VLANs 2050 * does not necessarily require hardware VLAN tagging. 2051 */ 2052 memset(&hw_tsomax, 0, sizeof(hw_tsomax)); 2053 if_hw_tsomax_common(p, &hw_tsomax); 2054 if_hw_tsomax_update(ifp, &hw_tsomax); 2055 if (p->if_capabilities & IFCAP_VLAN_HWTSO) 2056 cap |= p->if_capabilities & IFCAP_TSO; 2057 if (p->if_capenable & IFCAP_VLAN_HWTSO) { 2058 ena |= mena & IFCAP_TSO; 2059 if (ena & IFCAP_TSO) 2060 hwa |= p->if_hwassist & CSUM_TSO; 2061 } 2062 2063 /* 2064 * If the parent interface can do LRO and checksum offloading on 2065 * VLANs, then guess that it may do LRO on VLANs as well.
A false positive here 2066 * costs nothing, while a false negative may lead to confusion. 2067 */ 2068 if (p->if_capabilities & IFCAP_VLAN_HWCSUM) 2069 cap |= p->if_capabilities & IFCAP_LRO; 2070 if (p->if_capenable & IFCAP_VLAN_HWCSUM) 2071 ena |= mena & IFCAP_LRO; 2072 2073 /* 2074 * If the parent interface can offload TCP connections over VLANs then 2075 * propagate its TOE capability to the VLAN interface. 2076 * 2077 * All TOE drivers in the tree today can deal with VLANs. If this 2078 * changes then IFCAP_VLAN_TOE should be promoted to a full capability 2079 * with its own bit. 2080 */ 2081 #define IFCAP_VLAN_TOE IFCAP_TOE 2082 if (p->if_capabilities & IFCAP_VLAN_TOE) 2083 cap |= p->if_capabilities & IFCAP_TOE; 2084 if (p->if_capenable & IFCAP_VLAN_TOE) { 2085 SETTOEDEV(ifp, TOEDEV(p)); 2086 ena |= mena & IFCAP_TOE; 2087 } 2088 2089 /* 2090 * If the parent interface supports dynamic link state, so does the 2091 * VLAN interface. 2092 */ 2093 cap |= (p->if_capabilities & IFCAP_LINKSTATE); 2094 ena |= (mena & IFCAP_LINKSTATE); 2095 2096 #ifdef RATELIMIT 2097 /* 2098 * If the parent interface supports rate limiting, so does the 2099 * VLAN interface. 2100 */ 2101 cap |= (p->if_capabilities & IFCAP_TXRTLMT); 2102 ena |= (mena & IFCAP_TXRTLMT); 2103 #endif 2104 2105 /* 2106 * If the parent interface supports unmapped mbufs, so does 2107 * the VLAN interface. Note that this should be fine even for 2108 * interfaces that don't support hardware tagging, as headers 2109 * are prepended in normal mbufs to the unmapped mbufs holding 2110 * payload data. 2111 */ 2112 cap |= (p->if_capabilities & IFCAP_MEXTPG); 2113 ena |= (mena & IFCAP_MEXTPG); 2114 2115 /* 2116 * If the parent interface can offload encryption and segmentation 2117 * of TLS records over TCP, propagate its capability to the VLAN 2118 * interface. 2119 * 2120 * All TLS drivers in the tree today can deal with VLANs. If 2121 * this ever changes, then a new IFCAP_VLAN_TXTLS can be 2122 * defined.
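 * (IFCAP_TXTLS is plain NIC TLS transmit offload; IFCAP_TXTLS_RTLMT is TLS transmit offload on hardware rate-limited connections.)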
2123 */ 2124 if (p->if_capabilities & (IFCAP_TXTLS | IFCAP_TXTLS_RTLMT)) 2125 cap |= p->if_capabilities & (IFCAP_TXTLS | IFCAP_TXTLS_RTLMT); 2126 if (p->if_capenable & (IFCAP_TXTLS | IFCAP_TXTLS_RTLMT)) 2127 ena |= mena & (IFCAP_TXTLS | IFCAP_TXTLS_RTLMT); 2128 2129 ifp->if_capabilities = cap; 2130 ifp->if_capenable = ena; 2131 ifp->if_hwassist = hwa; 2132 } 2133 2134 static void 2135 vlan_trunk_capabilities(struct ifnet *ifp) 2136 { 2137 struct epoch_tracker et; 2138 struct ifvlantrunk *trunk; 2139 struct ifvlan *ifv; 2140 2141 VLAN_SLOCK(); 2142 trunk = ifp->if_vlantrunk; 2143 if (trunk == NULL) { 2144 VLAN_SUNLOCK(); 2145 return; 2146 } 2147 NET_EPOCH_ENTER(et); 2148 VLAN_FOREACH(ifv, trunk) 2149 vlan_capabilities(ifv); 2150 NET_EPOCH_EXIT(et); 2151 VLAN_SUNLOCK(); 2152 } 2153 2154 static int 2155 vlan_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 2156 { 2157 struct ifnet *p; 2158 struct ifreq *ifr; 2159 #ifdef INET 2160 struct ifaddr *ifa; 2161 #endif 2162 struct ifvlan *ifv; 2163 struct ifvlantrunk *trunk; 2164 struct vlanreq vlr; 2165 int error = 0, oldmtu; 2166 2167 ifr = (struct ifreq *)data; 2168 #ifdef INET 2169 ifa = (struct ifaddr *) data; 2170 #endif 2171 ifv = ifp->if_softc; 2172 2173 switch (cmd) { 2174 case SIOCSIFADDR: 2175 ifp->if_flags |= IFF_UP; 2176 #ifdef INET 2177 if (ifa->ifa_addr->sa_family == AF_INET) 2178 arp_ifinit(ifp, ifa); 2179 #endif 2180 break; 2181 case SIOCGIFADDR: 2182 bcopy(IF_LLADDR(ifp), &ifr->ifr_addr.sa_data[0], 2183 ifp->if_addrlen); 2184 break; 2185 case SIOCGIFMEDIA: 2186 VLAN_SLOCK(); 2187 if (TRUNK(ifv) != NULL) { 2188 p = PARENT(ifv); 2189 if_ref(p); 2190 error = (*p->if_ioctl)(p, SIOCGIFMEDIA, data); 2191 if_rele(p); 2192 /* Limit the result to the parent's current config. */ 2193 if (error == 0) { 2194 struct ifmediareq *ifmr; 2195 2196 ifmr = (struct ifmediareq *)data; 2197 if (ifmr->ifm_count >= 1 && ifmr->ifm_ulist) { 2198 ifmr->ifm_count = 1; 2199 error = copyout(&ifmr->ifm_current, 2200 ifmr->ifm_ulist, 2201 sizeof(int)); 2202 } 2203 } 2204 } else { 2205 error = EINVAL; 2206 } 2207 VLAN_SUNLOCK(); 2208 break; 2209 2210 case SIOCSIFMEDIA: 2211 error = EINVAL; 2212 break; 2213 2214 case SIOCSIFMTU: 2215 /* 2216 * Set the interface MTU. 2217 */ 2218 VLAN_SLOCK(); 2219 trunk = TRUNK(ifv); 2220 if (trunk != NULL) { 2221 TRUNK_WLOCK(trunk); 2222 if (ifr->ifr_mtu > 2223 (PARENT(ifv)->if_mtu - ifv->ifv_mtufudge) || 2224 ifr->ifr_mtu < 2225 (ifv->ifv_mintu - ifv->ifv_mtufudge)) 2226 error = EINVAL; 2227 else 2228 ifp->if_mtu = ifr->ifr_mtu; 2229 TRUNK_WUNLOCK(trunk); 2230 } else 2231 error = EINVAL; 2232 VLAN_SUNLOCK(); 2233 break; 2234 2235 case SIOCSETVLAN: 2236 #ifdef VIMAGE 2237 /* 2238 * XXXRW/XXXBZ: The goal in these checks is to allow a VLAN 2239 * interface to be delegated to a jail without allowing the 2240 * jail to change what underlying interface/VID it is 2241 * associated with. We are not entirely convinced that this 2242 * is the right way to accomplish that policy goal. 
2243 */ 2244 if (ifp->if_vnet != ifp->if_home_vnet) { 2245 error = EPERM; 2246 break; 2247 } 2248 #endif 2249 error = copyin(ifr_data_get_ptr(ifr), &vlr, sizeof(vlr)); 2250 if (error) 2251 break; 2252 if (vlr.vlr_parent[0] == '\0') { 2253 vlan_unconfig(ifp); 2254 break; 2255 } 2256 p = ifunit_ref(vlr.vlr_parent); 2257 if (p == NULL) { 2258 error = ENOENT; 2259 break; 2260 } 2261 if (vlr.vlr_proto == 0) 2262 vlr.vlr_proto = ETHERTYPE_VLAN; 2263 oldmtu = ifp->if_mtu; 2264 error = vlan_config(ifv, p, vlr.vlr_tag, vlr.vlr_proto); 2265 if_rele(p); 2266 2267 /* 2268 * The VLAN MTU may change during addition of the vlandev. 2269 * If it did, notify the network layers of the change. 2270 */ 2271 if (ifp->if_mtu != oldmtu) 2272 if_notifymtu(ifp); 2273 break; 2274 2275 case SIOCGETVLAN: 2276 #ifdef VIMAGE 2277 if (ifp->if_vnet != ifp->if_home_vnet) { 2278 error = EPERM; 2279 break; 2280 } 2281 #endif 2282 bzero(&vlr, sizeof(vlr)); 2283 VLAN_SLOCK(); 2284 if (TRUNK(ifv) != NULL) { 2285 strlcpy(vlr.vlr_parent, PARENT(ifv)->if_xname, 2286 sizeof(vlr.vlr_parent)); 2287 vlr.vlr_tag = ifv->ifv_vid; 2288 vlr.vlr_proto = ifv->ifv_proto; 2289 } 2290 VLAN_SUNLOCK(); 2291 error = copyout(&vlr, ifr_data_get_ptr(ifr), sizeof(vlr)); 2292 break; 2293 2294 case SIOCSIFFLAGS: 2295 /* 2296 * We should propagate selected flags to the parent, 2297 * e.g., promiscuous mode. 2298 */ 2299 VLAN_SLOCK(); 2300 if (TRUNK(ifv) != NULL) 2301 error = vlan_setflags(ifp, 1); 2302 VLAN_SUNLOCK(); 2303 break; 2304 2305 case SIOCADDMULTI: 2306 case SIOCDELMULTI: 2307 /* 2308 * If we don't have a parent, just remember the membership for 2309 * when we do. 2310 * 2311 * XXX We need the rmlock here to avoid sleeping while 2312 * holding in6_multi_mtx. 2313 */ 2314 VLAN_XLOCK(); 2315 trunk = TRUNK(ifv); 2316 if (trunk != NULL) 2317 error = vlan_setmulti(ifp); 2318 VLAN_XUNLOCK(); 2319 2320 break; 2321 case SIOCGVLANPCP: 2322 #ifdef VIMAGE 2323 if (ifp->if_vnet != ifp->if_home_vnet) { 2324 error = EPERM; 2325 break; 2326 } 2327 #endif 2328 ifr->ifr_vlan_pcp = ifv->ifv_pcp; 2329 break; 2330 2331 case SIOCSVLANPCP: 2332 #ifdef VIMAGE 2333 if (ifp->if_vnet != ifp->if_home_vnet) { 2334 error = EPERM; 2335 break; 2336 } 2337 #endif 2338 error = priv_check(curthread, PRIV_NET_SETVLANPCP); 2339 if (error) 2340 break; 2341 if (ifr->ifr_vlan_pcp > VLAN_PCP_MAX) { 2342 error = EINVAL; 2343 break; 2344 } 2345 ifv->ifv_pcp = ifr->ifr_vlan_pcp; 2346 ifp->if_pcp = ifv->ifv_pcp; 2347 /* Broadcast an event about the PCP change. */ 2348 EVENTHANDLER_INVOKE(ifnet_event, ifp, IFNET_EVENT_PCP); 2349 break; 2350 2351 case SIOCSIFCAP: 2352 VLAN_SLOCK(); 2353 ifv->ifv_capenable = ifr->ifr_reqcap; 2354 trunk = TRUNK(ifv); 2355 if (trunk != NULL) { 2356 struct epoch_tracker et; 2357 2358 NET_EPOCH_ENTER(et); 2359 vlan_capabilities(ifv); 2360 NET_EPOCH_EXIT(et); 2361 } 2362 VLAN_SUNLOCK(); 2363 break; 2364 2365 default: 2366 error = EINVAL; 2367 break; 2368 } 2369 2370 return (error); 2371 } 2372 2373 #if defined(KERN_TLS) || defined(RATELIMIT) /* Allocate a send tag from the parent interface, wrapping it for the vlan where needed. */ 2374 static int 2375 vlan_snd_tag_alloc(struct ifnet *ifp, 2376 union if_snd_tag_alloc_params *params, 2377 struct m_snd_tag **ppmt) 2378 { 2379 struct epoch_tracker et; 2380 const struct if_snd_tag_sw *sw; 2381 struct vlan_snd_tag *vst; 2382 struct ifvlan *ifv; 2383 struct ifnet *parent; 2384 struct m_snd_tag *mst; 2385 int error; 2386 2387 NET_EPOCH_ENTER(et); 2388 ifv = ifp->if_softc; 2389 2390 switch (params->hdr.type) { 2391 #ifdef RATELIMIT 2392 case IF_SND_TAG_TYPE_UNLIMITED: 2393 sw = &vlan_snd_tag_ul_sw; 2394 break; 2395 case
IF_SND_TAG_TYPE_RATE_LIMIT: 2396 sw = &vlan_snd_tag_rl_sw; 2397 break; 2398 #endif 2399 #ifdef KERN_TLS 2400 case IF_SND_TAG_TYPE_TLS: 2401 sw = &vlan_snd_tag_tls_sw; 2402 break; 2403 case IF_SND_TAG_TYPE_TLS_RX: 2404 sw = NULL; 2405 if (params->tls_rx.vlan_id != 0) 2406 goto failure; 2407 params->tls_rx.vlan_id = ifv->ifv_vid; 2408 break; 2409 #ifdef RATELIMIT 2410 case IF_SND_TAG_TYPE_TLS_RATE_LIMIT: 2411 sw = &vlan_snd_tag_tls_rl_sw; 2412 break; 2413 #endif 2414 #endif 2415 default: 2416 goto failure; 2417 } 2418 2419 if (ifv->ifv_trunk != NULL) 2420 parent = PARENT(ifv); 2421 else 2422 parent = NULL; 2423 if (parent == NULL) 2424 goto failure; 2425 if_ref(parent); 2426 NET_EPOCH_EXIT(et); 2427 2428 if (sw != NULL) { 2429 vst = malloc(sizeof(*vst), M_VLAN, M_NOWAIT); 2430 if (vst == NULL) { 2431 if_rele(parent); 2432 return (ENOMEM); 2433 } 2434 } else 2435 vst = NULL; 2436 2437 error = m_snd_tag_alloc(parent, params, &mst); 2438 if_rele(parent); 2439 if (error) { 2440 free(vst, M_VLAN); 2441 return (error); 2442 } 2443 2444 if (sw != NULL) { 2445 m_snd_tag_init(&vst->com, ifp, sw); 2446 vst->tag = mst; 2447 2448 *ppmt = &vst->com; 2449 } else 2450 *ppmt = mst; 2451 2452 return (0); 2453 failure: 2454 NET_EPOCH_EXIT(et); 2455 return (EOPNOTSUPP); 2456 } 2457 2458 static struct m_snd_tag * 2459 vlan_next_snd_tag(struct m_snd_tag *mst) 2460 { 2461 struct vlan_snd_tag *vst; 2462 2463 vst = mst_to_vst(mst); 2464 return (vst->tag); 2465 } 2466 2467 static int 2468 vlan_snd_tag_modify(struct m_snd_tag *mst, 2469 union if_snd_tag_modify_params *params) 2470 { 2471 struct vlan_snd_tag *vst; 2472 2473 vst = mst_to_vst(mst); 2474 return (vst->tag->sw->snd_tag_modify(vst->tag, params)); 2475 } 2476 2477 static int 2478 vlan_snd_tag_query(struct m_snd_tag *mst, 2479 union if_snd_tag_query_params *params) 2480 { 2481 struct vlan_snd_tag *vst; 2482 2483 vst = mst_to_vst(mst); 2484 return (vst->tag->sw->snd_tag_query(vst->tag, params)); 2485 } 2486 2487 static void 2488 vlan_snd_tag_free(struct m_snd_tag *mst) 2489 { 2490 struct vlan_snd_tag *vst; 2491 2492 vst = mst_to_vst(mst); 2493 m_snd_tag_rele(vst->tag); 2494 free(vst, M_VLAN); 2495 } 2496 2497 static void 2498 vlan_ratelimit_query(struct ifnet *ifp __unused, struct if_ratelimit_query_results *q) 2499 { 2500 /* 2501 * For vlan, we have an indirect 2502 * interface. The caller needs to 2503 * get a ratelimit tag on the actual 2504 * interface the flow will go on. 2505 */ 2506 q->rate_table = NULL; 2507 q->flags = RT_IS_INDIRECT; 2508 q->max_flows = 0; 2509 q->number_of_rates = 0; 2510 } 2511 2512 #endif 2513
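/*
 * Example (illustrative userland sketch, not part of this driver): the
 * SIOCSETVLAN handler in vlan_ioctl() above expects a struct vlanreq
 * passed through ifr_data of an ioctl issued on the vlan interface
 * itself.  The names "vlan0" and "em0" and the tag 42 are placeholders,
 * and the sketch assumes vlan0 has already been created (e.g. via
 * "ifconfig vlan0 create").  A vlr_proto of 0 is defaulted to
 * ETHERTYPE_VLAN by the handler; SIOCGETVLAN returns the same structure
 * filled in with the current parent, tag and protocol.
 *
 *	#include <sys/types.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/sockio.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *	#include <net/ethernet.h>
 *	#include <net/if_vlan_var.h>
 *	#include <err.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	static void
 *	vlan_attach_parent(void)
 *	{
 *		struct ifreq ifr;
 *		struct vlanreq vreq;
 *		int s;
 *
 *		if ((s = socket(AF_INET, SOCK_DGRAM, 0)) == -1)
 *			err(1, "socket");
 *		memset(&ifr, 0, sizeof(ifr));
 *		memset(&vreq, 0, sizeof(vreq));
 *		strlcpy(ifr.ifr_name, "vlan0", sizeof(ifr.ifr_name));
 *		strlcpy(vreq.vlr_parent, "em0", sizeof(vreq.vlr_parent));
 *		vreq.vlr_tag = 42;
 *		vreq.vlr_proto = ETHERTYPE_VLAN;
 *		ifr.ifr_data = (caddr_t)&vreq;
 *		if (ioctl(s, SIOCSETVLAN, &ifr) == -1)
 *			err(1, "SIOCSETVLAN");
 *		close(s);
 *	}
 */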