1 /*- 2 * Copyright 1998 Massachusetts Institute of Technology 3 * Copyright 2012 ADARA Networks, Inc. 4 * Copyright 2017 Dell EMC Isilon 5 * 6 * Portions of this software were developed by Robert N. M. Watson under 7 * contract to ADARA Networks, Inc. 8 * 9 * Permission to use, copy, modify, and distribute this software and 10 * its documentation for any purpose and without fee is hereby 11 * granted, provided that both the above copyright notice and this 12 * permission notice appear in all copies, that both the above 13 * copyright notice and this permission notice appear in all 14 * supporting documentation, and that the name of M.I.T. not be used 15 * in advertising or publicity pertaining to distribution of the 16 * software without specific, written prior permission. M.I.T. makes 17 * no representations about the suitability of this software for any 18 * purpose. It is provided "as is" without express or implied 19 * warranty. 20 * 21 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS 22 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE, 23 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 24 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT 25 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF 28 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 29 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 */ 34 35 /* 36 * if_vlan.c - pseudo-device driver for IEEE 802.1Q virtual LANs. 37 * This is sort of sneaky in the implementation, since 38 * we need to pretend to be enough of an Ethernet implementation 39 * to make arp work. The way we do this is by telling everyone 40 * that we are an Ethernet, and then catch the packets that 41 * ether_output() sends to us via if_transmit(), rewrite them for 42 * use by the real outgoing interface, and ask it to send them. 
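 *
 * As an illustrative sketch (not a literal transcript of the code below),
 * the transmit side reduces to:
 *
 *	vlan_transmit(vlan_ifp, m)
 *	    -> ether_8021q_frame(&m, vlan_ifp, parent, &ifv->ifv_qtag)
 *	    -> parent->if_transmit(parent, m)
 *
 * where ether_8021q_frame() either inserts the 802.1Q header in-band or
 * leaves the tag in m->m_pkthdr.ether_vtag for parents with hardware
 * tagging enabled.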
43 */ 44 45 #include "opt_inet.h" 46 #include "opt_inet6.h" 47 #include "opt_ipsec.h" 48 #include "opt_kern_tls.h" 49 #include "opt_vlan.h" 50 #include "opt_ratelimit.h" 51 52 #include <sys/param.h> 53 #include <sys/eventhandler.h> 54 #include <sys/kernel.h> 55 #include <sys/lock.h> 56 #include <sys/malloc.h> 57 #include <sys/mbuf.h> 58 #include <sys/module.h> 59 #include <sys/rmlock.h> 60 #include <sys/priv.h> 61 #include <sys/queue.h> 62 #include <sys/socket.h> 63 #include <sys/sockio.h> 64 #include <sys/sysctl.h> 65 #include <sys/systm.h> 66 #include <sys/sx.h> 67 #include <sys/taskqueue.h> 68 69 #include <net/bpf.h> 70 #include <net/ethernet.h> 71 #include <net/if.h> 72 #include <net/if_var.h> 73 #include <net/if_private.h> 74 #include <net/if_clone.h> 75 #include <net/if_dl.h> 76 #include <net/if_types.h> 77 #include <net/if_vlan_var.h> 78 #include <net/route.h> 79 #include <net/vnet.h> 80 81 #ifdef INET 82 #include <netinet/in.h> 83 #include <netinet/if_ether.h> 84 #endif 85 86 #include <netlink/netlink.h> 87 #include <netlink/netlink_ctl.h> 88 #include <netlink/netlink_route.h> 89 #include <netlink/route/route_var.h> 90 91 #define VLAN_DEF_HWIDTH 4 92 #define VLAN_IFFLAGS (IFF_BROADCAST | IFF_MULTICAST) 93 94 #define UP_AND_RUNNING(ifp) \ 95 ((ifp)->if_flags & IFF_UP && (ifp)->if_drv_flags & IFF_DRV_RUNNING) 96 97 CK_SLIST_HEAD(ifvlanhead, ifvlan); 98 99 struct ifvlantrunk { 100 struct ifnet *parent; /* parent interface of this trunk */ 101 struct mtx lock; 102 #ifdef VLAN_ARRAY 103 #define VLAN_ARRAY_SIZE (EVL_VLID_MASK + 1) 104 struct ifvlan *vlans[VLAN_ARRAY_SIZE]; /* static table */ 105 #else 106 struct ifvlanhead *hash; /* dynamic hash-list table */ 107 uint16_t hmask; 108 uint16_t hwidth; 109 #endif 110 int refcnt; 111 }; 112 113 #if defined(KERN_TLS) || defined(RATELIMIT) 114 struct vlan_snd_tag { 115 struct m_snd_tag com; 116 struct m_snd_tag *tag; 117 }; 118 119 static inline struct vlan_snd_tag * 120 mst_to_vst(struct m_snd_tag *mst) 121 { 122 123 return (__containerof(mst, struct vlan_snd_tag, com)); 124 } 125 #endif 126 127 /* 128 * This macro provides a facility to iterate over every vlan on a trunk with 129 * the assumption that none will be added/removed during iteration. 130 */ 131 #ifdef VLAN_ARRAY 132 #define VLAN_FOREACH(_ifv, _trunk) \ 133 size_t _i; \ 134 for (_i = 0; _i < VLAN_ARRAY_SIZE; _i++) \ 135 if (((_ifv) = (_trunk)->vlans[_i]) != NULL) 136 #else /* VLAN_ARRAY */ 137 #define VLAN_FOREACH(_ifv, _trunk) \ 138 struct ifvlan *_next; \ 139 size_t _i; \ 140 for (_i = 0; _i < (1 << (_trunk)->hwidth); _i++) \ 141 CK_SLIST_FOREACH_SAFE((_ifv), &(_trunk)->hash[_i], ifv_list, _next) 142 #endif /* VLAN_ARRAY */ 143 144 /* 145 * This macro provides a facility to iterate over every vlan on a trunk while 146 * also modifying the number of vlans on the trunk. The iteration continues 147 * until some condition is met or there are no more vlans on the trunk. 148 */ 149 #ifdef VLAN_ARRAY 150 /* The VLAN_ARRAY case is simple -- just a for loop using the condition. */ 151 #define VLAN_FOREACH_UNTIL_SAFE(_ifv, _trunk, _cond) \ 152 size_t _i; \ 153 for (_i = 0; !(_cond) && _i < VLAN_ARRAY_SIZE; _i++) \ 154 if (((_ifv) = (_trunk)->vlans[_i])) 155 #else /* VLAN_ARRAY */ 156 /* 157 * The hash table case is more complicated. We allow for the hash table to be 158 * modified (i.e. vlans removed) while we are iterating over it. 
To allow for
 * this we must restart the iteration every time we "touch" something during
 * the iteration, since removal will resize the hash table and invalidate our
 * current position.  If acting on the touched element causes the trunk to be
 * emptied, then iteration also stops.
 */
#define	VLAN_FOREACH_UNTIL_SAFE(_ifv, _trunk, _cond) \
	size_t _i; \
	bool _touch = false; \
	for (_i = 0; \
	    !(_cond) && _i < (1 << (_trunk)->hwidth); \
	    _i = (_touch && ((_trunk) != NULL) ? 0 : _i + 1), _touch = false) \
		if (((_ifv) = CK_SLIST_FIRST(&(_trunk)->hash[_i])) != NULL && \
		    (_touch = true))
#endif /* VLAN_ARRAY */

struct vlan_mc_entry {
	struct sockaddr_dl		mc_addr;
	CK_SLIST_ENTRY(vlan_mc_entry)	mc_entries;
	struct epoch_context		mc_epoch_ctx;
};

struct ifvlan {
	struct	ifvlantrunk *ifv_trunk;
	struct	ifnet *ifv_ifp;
#define	TRUNK(ifv)	((ifv)->ifv_trunk)
#define	PARENT(ifv)	(TRUNK(ifv)->parent)
	void	*ifv_cookie;
	int	ifv_pflags;	/* special flags we have set on parent */
	int	ifv_capenable;
	int	ifv_capenable2;
	int	ifv_encaplen;	/* encapsulation length */
	int	ifv_mtufudge;	/* MTU fudged by this much */
	int	ifv_mintu;	/* min transmission unit */
	struct	ether_8021q_tag ifv_qtag;
#define	ifv_proto	ifv_qtag.proto
#define	ifv_vid		ifv_qtag.vid
#define	ifv_pcp		ifv_qtag.pcp
	struct task lladdr_task;
	CK_SLIST_HEAD(, vlan_mc_entry) vlan_mc_listhead;
#ifndef VLAN_ARRAY
	CK_SLIST_ENTRY(ifvlan) ifv_list;
#endif
};

/* Special flags we should propagate to parent. */
static struct {
	int flag;
	int (*func)(struct ifnet *, int);
} vlan_pflags[] = {
	{IFF_PROMISC, ifpromisc},
	{IFF_ALLMULTI, if_allmulti},
	{0, NULL}
};

VNET_DECLARE(int, vlan_mtag_pcp);
#define	V_vlan_mtag_pcp	VNET(vlan_mtag_pcp)

static const char vlanname[] = "vlan";
static MALLOC_DEFINE(M_VLAN, vlanname, "802.1Q Virtual LAN Interface");

static eventhandler_tag ifdetach_tag;
static eventhandler_tag iflladdr_tag;
static eventhandler_tag ifevent_tag;

/*
 * if_vlan uses two module-level synchronization primitives to allow concurrent
 * modification of vlan interfaces and (mostly) allow for vlans to be destroyed
 * while they are being used for tx/rx.  To accomplish this in a way that has
 * acceptable performance and cooperation with other parts of the network stack
 * there is a non-sleepable epoch(9) and an sx(9).
 *
 * The performance-sensitive paths that warrant using the epoch(9) are
 * vlan_transmit and vlan_input.  Both have to check for the vlan interface's
 * existence using if_vlantrunk, and being in the network tx/rx paths the use
 * of an epoch(9) gives a measurable improvement in performance.
 *
 * The reason for having an sx(9) is mostly because there are still areas that
 * must be sleepable and also have safe concurrent access to a vlan interface.
 * Since the sx(9) exists, it is used by default in most paths unless sleeping
 * is not permitted, or if it is not clear whether sleeping is permitted.
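 *
 * A minimal sketch of the resulting usage pattern (simplified; the real
 * paths below add counters and error handling):
 *
 *	tx/rx fast path:			configuration path:
 *		NET_EPOCH_ENTER(et);			VLAN_XLOCK();
 *		trunk = ifp->if_vlantrunk;		... may sleep ...
 *		... lookup and transmit ...		VLAN_XUNLOCK();
 *		NET_EPOCH_EXIT(et);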
239 * 240 */ 241 #define _VLAN_SX_ID ifv_sx 242 243 static struct sx _VLAN_SX_ID; 244 245 #define VLAN_LOCKING_INIT() \ 246 sx_init_flags(&_VLAN_SX_ID, "vlan_sx", SX_RECURSE) 247 248 #define VLAN_LOCKING_DESTROY() \ 249 sx_destroy(&_VLAN_SX_ID) 250 251 #define VLAN_SLOCK() sx_slock(&_VLAN_SX_ID) 252 #define VLAN_SUNLOCK() sx_sunlock(&_VLAN_SX_ID) 253 #define VLAN_XLOCK() sx_xlock(&_VLAN_SX_ID) 254 #define VLAN_XUNLOCK() sx_xunlock(&_VLAN_SX_ID) 255 #define VLAN_SLOCK_ASSERT() sx_assert(&_VLAN_SX_ID, SA_SLOCKED) 256 #define VLAN_XLOCK_ASSERT() sx_assert(&_VLAN_SX_ID, SA_XLOCKED) 257 #define VLAN_SXLOCK_ASSERT() sx_assert(&_VLAN_SX_ID, SA_LOCKED) 258 259 /* 260 * We also have a per-trunk mutex that should be acquired when changing 261 * its state. 262 */ 263 #define TRUNK_LOCK_INIT(trunk) mtx_init(&(trunk)->lock, vlanname, NULL, MTX_DEF) 264 #define TRUNK_LOCK_DESTROY(trunk) mtx_destroy(&(trunk)->lock) 265 #define TRUNK_WLOCK(trunk) mtx_lock(&(trunk)->lock) 266 #define TRUNK_WUNLOCK(trunk) mtx_unlock(&(trunk)->lock) 267 #define TRUNK_WLOCK_ASSERT(trunk) mtx_assert(&(trunk)->lock, MA_OWNED); 268 269 /* 270 * The VLAN_ARRAY substitutes the dynamic hash with a static array 271 * with 4096 entries. In theory this can give a boost in processing, 272 * however in practice it does not. Probably this is because the array 273 * is too big to fit into CPU cache. 274 */ 275 #ifndef VLAN_ARRAY 276 static void vlan_inithash(struct ifvlantrunk *trunk); 277 static void vlan_freehash(struct ifvlantrunk *trunk); 278 static int vlan_inshash(struct ifvlantrunk *trunk, struct ifvlan *ifv); 279 static int vlan_remhash(struct ifvlantrunk *trunk, struct ifvlan *ifv); 280 static void vlan_growhash(struct ifvlantrunk *trunk, int howmuch); 281 static __inline struct ifvlan * vlan_gethash(struct ifvlantrunk *trunk, 282 uint16_t vid); 283 #endif 284 static void trunk_destroy(struct ifvlantrunk *trunk); 285 286 static void vlan_init(void *foo); 287 static void vlan_input(struct ifnet *ifp, struct mbuf *m); 288 static int vlan_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr); 289 #if defined(KERN_TLS) || defined(RATELIMIT) 290 static int vlan_snd_tag_alloc(struct ifnet *, 291 union if_snd_tag_alloc_params *, struct m_snd_tag **); 292 static int vlan_snd_tag_modify(struct m_snd_tag *, 293 union if_snd_tag_modify_params *); 294 static int vlan_snd_tag_query(struct m_snd_tag *, 295 union if_snd_tag_query_params *); 296 static void vlan_snd_tag_free(struct m_snd_tag *); 297 static struct m_snd_tag *vlan_next_snd_tag(struct m_snd_tag *); 298 static void vlan_ratelimit_query(struct ifnet *, 299 struct if_ratelimit_query_results *); 300 #endif 301 static void vlan_qflush(struct ifnet *ifp); 302 static int vlan_setflag(struct ifnet *ifp, int flag, int status, 303 int (*func)(struct ifnet *, int)); 304 static int vlan_setflags(struct ifnet *ifp, int status); 305 static int vlan_setmulti(struct ifnet *ifp); 306 static int vlan_transmit(struct ifnet *ifp, struct mbuf *m); 307 #ifdef ALTQ 308 static void vlan_altq_start(struct ifnet *ifp); 309 static int vlan_altq_transmit(struct ifnet *ifp, struct mbuf *m); 310 #endif 311 static int vlan_output(struct ifnet *ifp, struct mbuf *m, 312 const struct sockaddr *dst, struct route *ro); 313 static void vlan_unconfig(struct ifnet *ifp); 314 static void vlan_unconfig_locked(struct ifnet *ifp, int departing); 315 static int vlan_config(struct ifvlan *ifv, struct ifnet *p, uint16_t tag, 316 uint16_t proto); 317 static void vlan_link_state(struct ifnet *ifp); 318 static void 
vlan_capabilities(struct ifvlan *ifv); 319 static void vlan_trunk_capabilities(struct ifnet *ifp); 320 321 static struct ifnet *vlan_clone_match_ethervid(const char *, int *); 322 static int vlan_clone_match(struct if_clone *, const char *); 323 static int vlan_clone_create(struct if_clone *, char *, size_t, 324 struct ifc_data *, struct ifnet **); 325 static int vlan_clone_destroy(struct if_clone *, struct ifnet *, uint32_t); 326 327 static int vlan_clone_create_nl(struct if_clone *ifc, char *name, size_t len, 328 struct ifc_data_nl *ifd); 329 static int vlan_clone_modify_nl(struct ifnet *ifp, struct ifc_data_nl *ifd); 330 static void vlan_clone_dump_nl(struct ifnet *ifp, struct nl_writer *nw); 331 332 static void vlan_ifdetach(void *arg, struct ifnet *ifp); 333 static void vlan_iflladdr(void *arg, struct ifnet *ifp); 334 static void vlan_ifevent(void *arg, struct ifnet *ifp, int event); 335 336 static void vlan_lladdr_fn(void *arg, int pending); 337 338 static struct if_clone *vlan_cloner; 339 340 #ifdef VIMAGE 341 VNET_DEFINE_STATIC(struct if_clone *, vlan_cloner); 342 #define V_vlan_cloner VNET(vlan_cloner) 343 #endif 344 345 #ifdef RATELIMIT 346 static const struct if_snd_tag_sw vlan_snd_tag_ul_sw = { 347 .snd_tag_modify = vlan_snd_tag_modify, 348 .snd_tag_query = vlan_snd_tag_query, 349 .snd_tag_free = vlan_snd_tag_free, 350 .next_snd_tag = vlan_next_snd_tag, 351 .type = IF_SND_TAG_TYPE_UNLIMITED 352 }; 353 354 static const struct if_snd_tag_sw vlan_snd_tag_rl_sw = { 355 .snd_tag_modify = vlan_snd_tag_modify, 356 .snd_tag_query = vlan_snd_tag_query, 357 .snd_tag_free = vlan_snd_tag_free, 358 .next_snd_tag = vlan_next_snd_tag, 359 .type = IF_SND_TAG_TYPE_RATE_LIMIT 360 }; 361 #endif 362 363 #ifdef KERN_TLS 364 static const struct if_snd_tag_sw vlan_snd_tag_tls_sw = { 365 .snd_tag_modify = vlan_snd_tag_modify, 366 .snd_tag_query = vlan_snd_tag_query, 367 .snd_tag_free = vlan_snd_tag_free, 368 .next_snd_tag = vlan_next_snd_tag, 369 .type = IF_SND_TAG_TYPE_TLS 370 }; 371 372 #ifdef RATELIMIT 373 static const struct if_snd_tag_sw vlan_snd_tag_tls_rl_sw = { 374 .snd_tag_modify = vlan_snd_tag_modify, 375 .snd_tag_query = vlan_snd_tag_query, 376 .snd_tag_free = vlan_snd_tag_free, 377 .next_snd_tag = vlan_next_snd_tag, 378 .type = IF_SND_TAG_TYPE_TLS_RATE_LIMIT 379 }; 380 #endif 381 #endif 382 383 static void 384 vlan_mc_free(struct epoch_context *ctx) 385 { 386 struct vlan_mc_entry *mc = __containerof(ctx, struct vlan_mc_entry, mc_epoch_ctx); 387 free(mc, M_VLAN); 388 } 389 390 #ifndef VLAN_ARRAY 391 #define HASH(n, m) ((((n) >> 8) ^ ((n) >> 4) ^ (n)) & (m)) 392 393 static void 394 vlan_inithash(struct ifvlantrunk *trunk) 395 { 396 int i, n; 397 398 /* 399 * The trunk must not be locked here since we call malloc(M_WAITOK). 400 * It is OK in case this function is called before the trunk struct 401 * gets hooked up and becomes visible from other threads. 
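 *
 * For illustration, that is what vlan_config() below does (sketch, error
 * handling omitted):
 *
 *	trunk = malloc(sizeof(*trunk), M_VLAN, M_WAITOK | M_ZERO);
 *	vlan_inithash(trunk);		/* trunk not yet reachable */
 *	...
 *	p->if_vlantrunk = trunk;	/* published only after init */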
402 */ 403 404 KASSERT(trunk->hwidth == 0 && trunk->hash == NULL, 405 ("%s: hash already initialized", __func__)); 406 407 trunk->hwidth = VLAN_DEF_HWIDTH; 408 n = 1 << trunk->hwidth; 409 trunk->hmask = n - 1; 410 trunk->hash = malloc(sizeof(struct ifvlanhead) * n, M_VLAN, M_WAITOK); 411 for (i = 0; i < n; i++) 412 CK_SLIST_INIT(&trunk->hash[i]); 413 } 414 415 static void 416 vlan_freehash(struct ifvlantrunk *trunk) 417 { 418 #ifdef INVARIANTS 419 int i; 420 421 KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__)); 422 for (i = 0; i < (1 << trunk->hwidth); i++) 423 KASSERT(CK_SLIST_EMPTY(&trunk->hash[i]), 424 ("%s: hash table not empty", __func__)); 425 #endif 426 free(trunk->hash, M_VLAN); 427 trunk->hash = NULL; 428 trunk->hwidth = trunk->hmask = 0; 429 } 430 431 static int 432 vlan_inshash(struct ifvlantrunk *trunk, struct ifvlan *ifv) 433 { 434 int i, b; 435 struct ifvlan *ifv2; 436 437 VLAN_XLOCK_ASSERT(); 438 KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__)); 439 440 b = 1 << trunk->hwidth; 441 i = HASH(ifv->ifv_vid, trunk->hmask); 442 CK_SLIST_FOREACH(ifv2, &trunk->hash[i], ifv_list) 443 if (ifv->ifv_vid == ifv2->ifv_vid) 444 return (EEXIST); 445 446 /* 447 * Grow the hash when the number of vlans exceeds half of the number of 448 * hash buckets squared. This will make the average linked-list length 449 * buckets/2. 450 */ 451 if (trunk->refcnt > (b * b) / 2) { 452 vlan_growhash(trunk, 1); 453 i = HASH(ifv->ifv_vid, trunk->hmask); 454 } 455 CK_SLIST_INSERT_HEAD(&trunk->hash[i], ifv, ifv_list); 456 trunk->refcnt++; 457 458 return (0); 459 } 460 461 static int 462 vlan_remhash(struct ifvlantrunk *trunk, struct ifvlan *ifv) 463 { 464 int i, b; 465 struct ifvlan *ifv2; 466 467 VLAN_XLOCK_ASSERT(); 468 KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__)); 469 470 b = 1 << (trunk->hwidth - 1); 471 i = HASH(ifv->ifv_vid, trunk->hmask); 472 CK_SLIST_FOREACH(ifv2, &trunk->hash[i], ifv_list) 473 if (ifv2 == ifv) { 474 trunk->refcnt--; 475 CK_SLIST_REMOVE(&trunk->hash[i], ifv2, ifvlan, ifv_list); 476 if (trunk->refcnt < (b * b) / 2) 477 vlan_growhash(trunk, -1); 478 return (0); 479 } 480 481 panic("%s: vlan not found\n", __func__); 482 return (ENOENT); /*NOTREACHED*/ 483 } 484 485 /* 486 * Grow the hash larger or smaller if memory permits. 
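 *
 * A worked example with the defaults: hwidth 4 gives 16 buckets and
 * hmask 0xf, so HASH(0x123, 0xf) = ((0x1 ^ 0x12 ^ 0x123) & 0xf) = 0.
 * Growing by one (howmuch == 1) doubles the table to 32 buckets and
 * rehashes every entry with the new mask, 0x1f.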
487 */ 488 static void 489 vlan_growhash(struct ifvlantrunk *trunk, int howmuch) 490 { 491 struct ifvlan *ifv; 492 struct ifvlanhead *hash2; 493 int hwidth2, i, j, n, n2; 494 495 VLAN_XLOCK_ASSERT(); 496 KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__)); 497 498 if (howmuch == 0) { 499 /* Harmless yet obvious coding error */ 500 printf("%s: howmuch is 0\n", __func__); 501 return; 502 } 503 504 hwidth2 = trunk->hwidth + howmuch; 505 n = 1 << trunk->hwidth; 506 n2 = 1 << hwidth2; 507 /* Do not shrink the table below the default */ 508 if (hwidth2 < VLAN_DEF_HWIDTH) 509 return; 510 511 hash2 = malloc(sizeof(struct ifvlanhead) * n2, M_VLAN, M_WAITOK); 512 for (j = 0; j < n2; j++) 513 CK_SLIST_INIT(&hash2[j]); 514 for (i = 0; i < n; i++) 515 while ((ifv = CK_SLIST_FIRST(&trunk->hash[i])) != NULL) { 516 CK_SLIST_REMOVE(&trunk->hash[i], ifv, ifvlan, ifv_list); 517 j = HASH(ifv->ifv_vid, n2 - 1); 518 CK_SLIST_INSERT_HEAD(&hash2[j], ifv, ifv_list); 519 } 520 NET_EPOCH_WAIT(); 521 free(trunk->hash, M_VLAN); 522 trunk->hash = hash2; 523 trunk->hwidth = hwidth2; 524 trunk->hmask = n2 - 1; 525 526 if (bootverbose) 527 if_printf(trunk->parent, 528 "VLAN hash table resized from %d to %d buckets\n", n, n2); 529 } 530 531 static __inline struct ifvlan * 532 vlan_gethash(struct ifvlantrunk *trunk, uint16_t vid) 533 { 534 struct ifvlan *ifv; 535 536 NET_EPOCH_ASSERT(); 537 538 CK_SLIST_FOREACH(ifv, &trunk->hash[HASH(vid, trunk->hmask)], ifv_list) 539 if (ifv->ifv_vid == vid) 540 return (ifv); 541 return (NULL); 542 } 543 544 #if 0 545 /* Debugging code to view the hashtables. */ 546 static void 547 vlan_dumphash(struct ifvlantrunk *trunk) 548 { 549 int i; 550 struct ifvlan *ifv; 551 552 for (i = 0; i < (1 << trunk->hwidth); i++) { 553 printf("%d: ", i); 554 CK_SLIST_FOREACH(ifv, &trunk->hash[i], ifv_list) 555 printf("%s ", ifv->ifv_ifp->if_xname); 556 printf("\n"); 557 } 558 } 559 #endif /* 0 */ 560 #else 561 562 static __inline struct ifvlan * 563 vlan_gethash(struct ifvlantrunk *trunk, uint16_t vid) 564 { 565 566 return trunk->vlans[vid]; 567 } 568 569 static __inline int 570 vlan_inshash(struct ifvlantrunk *trunk, struct ifvlan *ifv) 571 { 572 573 if (trunk->vlans[ifv->ifv_vid] != NULL) 574 return EEXIST; 575 trunk->vlans[ifv->ifv_vid] = ifv; 576 trunk->refcnt++; 577 578 return (0); 579 } 580 581 static __inline int 582 vlan_remhash(struct ifvlantrunk *trunk, struct ifvlan *ifv) 583 { 584 585 trunk->vlans[ifv->ifv_vid] = NULL; 586 trunk->refcnt--; 587 588 return (0); 589 } 590 591 static __inline void 592 vlan_freehash(struct ifvlantrunk *trunk) 593 { 594 } 595 596 static __inline void 597 vlan_inithash(struct ifvlantrunk *trunk) 598 { 599 } 600 601 #endif /* !VLAN_ARRAY */ 602 603 static void 604 trunk_destroy(struct ifvlantrunk *trunk) 605 { 606 VLAN_XLOCK_ASSERT(); 607 608 vlan_freehash(trunk); 609 trunk->parent->if_vlantrunk = NULL; 610 TRUNK_LOCK_DESTROY(trunk); 611 if_rele(trunk->parent); 612 free(trunk, M_VLAN); 613 } 614 615 /* 616 * Program our multicast filter. What we're actually doing is 617 * programming the multicast filter of the parent. This has the 618 * side effect of causing the parent interface to receive multicast 619 * traffic that it doesn't really want, which ends up being discarded 620 * later by the upper protocol layers. Unfortunately, there's no way 621 * to avoid this: there really is only one physical interface. 
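 *
 * Sketch of the rebuild done below (simplified; locking and error
 * handling omitted):
 *
 *	foreach mc on sc->vlan_mc_listhead:	if_delmulti(parent, mc)
 *	foreach AF_LINK ifma on the vlan ifp:	copy into a new mc entry
 *	foreach new mc entry:			if_addmulti(parent, mc)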
622 */ 623 static int 624 vlan_setmulti(struct ifnet *ifp) 625 { 626 struct ifnet *ifp_p; 627 struct ifmultiaddr *ifma; 628 struct ifvlan *sc; 629 struct vlan_mc_entry *mc; 630 int error; 631 632 VLAN_XLOCK_ASSERT(); 633 634 /* Find the parent. */ 635 sc = ifp->if_softc; 636 ifp_p = PARENT(sc); 637 638 CURVNET_SET_QUIET(ifp_p->if_vnet); 639 640 /* First, remove any existing filter entries. */ 641 while ((mc = CK_SLIST_FIRST(&sc->vlan_mc_listhead)) != NULL) { 642 CK_SLIST_REMOVE_HEAD(&sc->vlan_mc_listhead, mc_entries); 643 (void)if_delmulti(ifp_p, (struct sockaddr *)&mc->mc_addr); 644 NET_EPOCH_CALL(vlan_mc_free, &mc->mc_epoch_ctx); 645 } 646 647 /* Now program new ones. */ 648 IF_ADDR_WLOCK(ifp); 649 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 650 if (ifma->ifma_addr->sa_family != AF_LINK) 651 continue; 652 mc = malloc(sizeof(struct vlan_mc_entry), M_VLAN, M_NOWAIT); 653 if (mc == NULL) { 654 IF_ADDR_WUNLOCK(ifp); 655 CURVNET_RESTORE(); 656 return (ENOMEM); 657 } 658 bcopy(ifma->ifma_addr, &mc->mc_addr, ifma->ifma_addr->sa_len); 659 mc->mc_addr.sdl_index = ifp_p->if_index; 660 CK_SLIST_INSERT_HEAD(&sc->vlan_mc_listhead, mc, mc_entries); 661 } 662 IF_ADDR_WUNLOCK(ifp); 663 CK_SLIST_FOREACH (mc, &sc->vlan_mc_listhead, mc_entries) { 664 error = if_addmulti(ifp_p, (struct sockaddr *)&mc->mc_addr, 665 NULL); 666 if (error) { 667 CURVNET_RESTORE(); 668 return (error); 669 } 670 } 671 672 CURVNET_RESTORE(); 673 return (0); 674 } 675 676 /* 677 * A handler for interface ifnet events. 678 */ 679 static void 680 vlan_ifevent(void *arg __unused, struct ifnet *ifp, int event) 681 { 682 struct epoch_tracker et; 683 struct ifvlan *ifv; 684 struct ifvlantrunk *trunk; 685 686 if (event != IFNET_EVENT_UPDATE_BAUDRATE) 687 return; 688 689 NET_EPOCH_ENTER(et); 690 trunk = ifp->if_vlantrunk; 691 if (trunk == NULL) { 692 NET_EPOCH_EXIT(et); 693 return; 694 } 695 696 TRUNK_WLOCK(trunk); 697 VLAN_FOREACH(ifv, trunk) { 698 ifv->ifv_ifp->if_baudrate = ifp->if_baudrate; 699 } 700 TRUNK_WUNLOCK(trunk); 701 NET_EPOCH_EXIT(et); 702 } 703 704 /* 705 * A handler for parent interface link layer address changes. 706 * If the parent interface link layer address is changed we 707 * should also change it on all children vlans. 708 */ 709 static void 710 vlan_iflladdr(void *arg __unused, struct ifnet *ifp) 711 { 712 struct epoch_tracker et; 713 struct ifvlan *ifv; 714 struct ifnet *ifv_ifp; 715 struct ifvlantrunk *trunk; 716 struct sockaddr_dl *sdl; 717 718 /* Need the epoch since this is run on taskqueue_swi. */ 719 NET_EPOCH_ENTER(et); 720 trunk = ifp->if_vlantrunk; 721 if (trunk == NULL) { 722 NET_EPOCH_EXIT(et); 723 return; 724 } 725 726 /* 727 * OK, it's a trunk. Loop over and change all vlan's lladdrs on it. 728 * We need an exclusive lock here to prevent concurrent SIOCSIFLLADDR 729 * ioctl calls on the parent garbling the lladdr of the child vlan. 730 */ 731 TRUNK_WLOCK(trunk); 732 VLAN_FOREACH(ifv, trunk) { 733 /* 734 * Copy new new lladdr into the ifv_ifp, enqueue a task 735 * to actually call if_setlladdr. if_setlladdr needs to 736 * be deferred to a taskqueue because it will call into 737 * the if_vlan ioctl path and try to acquire the global 738 * lock. 
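		 *
		 * Roughly (see vlan_lladdr_fn() below):
		 *
		 *	vlan_iflladdr()			taskqueue_thread
		 *	  copy lladdr, enqueue task -->	  vlan_lladdr_fn()
		 *					    if_setlladdr(ifv_ifp, ...)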
739 */ 740 ifv_ifp = ifv->ifv_ifp; 741 bcopy(IF_LLADDR(ifp), IF_LLADDR(ifv_ifp), 742 ifp->if_addrlen); 743 sdl = (struct sockaddr_dl *)ifv_ifp->if_addr->ifa_addr; 744 sdl->sdl_alen = ifp->if_addrlen; 745 taskqueue_enqueue(taskqueue_thread, &ifv->lladdr_task); 746 } 747 TRUNK_WUNLOCK(trunk); 748 NET_EPOCH_EXIT(et); 749 } 750 751 /* 752 * A handler for network interface departure events. 753 * Track departure of trunks here so that we don't access invalid 754 * pointers or whatever if a trunk is ripped from under us, e.g., 755 * by ejecting its hot-plug card. However, if an ifnet is simply 756 * being renamed, then there's no need to tear down the state. 757 */ 758 static void 759 vlan_ifdetach(void *arg __unused, struct ifnet *ifp) 760 { 761 struct ifvlan *ifv; 762 struct ifvlantrunk *trunk; 763 764 /* If the ifnet is just being renamed, don't do anything. */ 765 if (ifp->if_flags & IFF_RENAMING) 766 return; 767 VLAN_XLOCK(); 768 trunk = ifp->if_vlantrunk; 769 if (trunk == NULL) { 770 VLAN_XUNLOCK(); 771 return; 772 } 773 774 /* 775 * OK, it's a trunk. Loop over and detach all vlan's on it. 776 * Check trunk pointer after each vlan_unconfig() as it will 777 * free it and set to NULL after the last vlan was detached. 778 */ 779 VLAN_FOREACH_UNTIL_SAFE(ifv, ifp->if_vlantrunk, 780 ifp->if_vlantrunk == NULL) 781 vlan_unconfig_locked(ifv->ifv_ifp, 1); 782 783 /* Trunk should have been destroyed in vlan_unconfig(). */ 784 KASSERT(ifp->if_vlantrunk == NULL, ("%s: purge failed", __func__)); 785 VLAN_XUNLOCK(); 786 } 787 788 /* 789 * Return the trunk device for a virtual interface. 790 */ 791 static struct ifnet * 792 vlan_trunkdev(struct ifnet *ifp) 793 { 794 struct ifvlan *ifv; 795 796 NET_EPOCH_ASSERT(); 797 798 if (ifp->if_type != IFT_L2VLAN) 799 return (NULL); 800 801 ifv = ifp->if_softc; 802 ifp = NULL; 803 if (ifv->ifv_trunk) 804 ifp = PARENT(ifv); 805 return (ifp); 806 } 807 808 /* 809 * Return the 12-bit VLAN VID for this interface, for use by external 810 * components such as Infiniband. 811 * 812 * XXXRW: Note that the function name here is historical; it should be named 813 * vlan_vid(). 814 */ 815 static int 816 vlan_tag(struct ifnet *ifp, uint16_t *vidp) 817 { 818 struct ifvlan *ifv; 819 820 if (ifp->if_type != IFT_L2VLAN) 821 return (EINVAL); 822 ifv = ifp->if_softc; 823 *vidp = ifv->ifv_vid; 824 return (0); 825 } 826 827 static int 828 vlan_pcp(struct ifnet *ifp, uint16_t *pcpp) 829 { 830 struct ifvlan *ifv; 831 832 if (ifp->if_type != IFT_L2VLAN) 833 return (EINVAL); 834 ifv = ifp->if_softc; 835 *pcpp = ifv->ifv_pcp; 836 return (0); 837 } 838 839 /* 840 * Return a driver specific cookie for this interface. Synchronization 841 * with setcookie must be provided by the driver. 842 */ 843 static void * 844 vlan_cookie(struct ifnet *ifp) 845 { 846 struct ifvlan *ifv; 847 848 if (ifp->if_type != IFT_L2VLAN) 849 return (NULL); 850 ifv = ifp->if_softc; 851 return (ifv->ifv_cookie); 852 } 853 854 /* 855 * Store a cookie in our softc that drivers can use to store driver 856 * private per-instance data in. 857 */ 858 static int 859 vlan_setcookie(struct ifnet *ifp, void *cookie) 860 { 861 struct ifvlan *ifv; 862 863 if (ifp->if_type != IFT_L2VLAN) 864 return (EINVAL); 865 ifv = ifp->if_softc; 866 ifv->ifv_cookie = cookie; 867 return (0); 868 } 869 870 /* 871 * Return the vlan device present at the specific VID. 
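 *
 * A hypothetical caller (e.g. a driver or if_bridge), inside an epoch
 * section, could reach this through the vlan_devat_p hook installed in
 * vlan_modevent():
 *
 *	vifp = (*vlan_devat_p)(parent_ifp, 100);
 *	if (vifp != NULL)
 *		... frame belongs to that vlan interface ...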
872 */ 873 static struct ifnet * 874 vlan_devat(struct ifnet *ifp, uint16_t vid) 875 { 876 struct ifvlantrunk *trunk; 877 struct ifvlan *ifv; 878 879 NET_EPOCH_ASSERT(); 880 881 trunk = ifp->if_vlantrunk; 882 if (trunk == NULL) 883 return (NULL); 884 ifp = NULL; 885 ifv = vlan_gethash(trunk, vid); 886 if (ifv) 887 ifp = ifv->ifv_ifp; 888 return (ifp); 889 } 890 891 /* For if_link_state_change() eyes only... */ 892 extern void (*vlan_link_state_p)(struct ifnet *); 893 894 static struct if_clone_addreq_v2 vlan_addreq = { 895 .version = 2, 896 .match_f = vlan_clone_match, 897 .create_f = vlan_clone_create, 898 .destroy_f = vlan_clone_destroy, 899 .create_nl_f = vlan_clone_create_nl, 900 .modify_nl_f = vlan_clone_modify_nl, 901 .dump_nl_f = vlan_clone_dump_nl, 902 }; 903 904 static int 905 vlan_modevent(module_t mod, int type, void *data) 906 { 907 908 switch (type) { 909 case MOD_LOAD: 910 ifdetach_tag = EVENTHANDLER_REGISTER(ifnet_departure_event, 911 vlan_ifdetach, NULL, EVENTHANDLER_PRI_ANY); 912 if (ifdetach_tag == NULL) 913 return (ENOMEM); 914 iflladdr_tag = EVENTHANDLER_REGISTER(iflladdr_event, 915 vlan_iflladdr, NULL, EVENTHANDLER_PRI_ANY); 916 if (iflladdr_tag == NULL) 917 return (ENOMEM); 918 ifevent_tag = EVENTHANDLER_REGISTER(ifnet_event, 919 vlan_ifevent, NULL, EVENTHANDLER_PRI_ANY); 920 if (ifevent_tag == NULL) 921 return (ENOMEM); 922 VLAN_LOCKING_INIT(); 923 vlan_input_p = vlan_input; 924 vlan_link_state_p = vlan_link_state; 925 vlan_trunk_cap_p = vlan_trunk_capabilities; 926 vlan_trunkdev_p = vlan_trunkdev; 927 vlan_cookie_p = vlan_cookie; 928 vlan_setcookie_p = vlan_setcookie; 929 vlan_tag_p = vlan_tag; 930 vlan_pcp_p = vlan_pcp; 931 vlan_devat_p = vlan_devat; 932 #ifndef VIMAGE 933 vlan_cloner = ifc_attach_cloner(vlanname, (struct if_clone_addreq *)&vlan_addreq); 934 #endif 935 if (bootverbose) 936 printf("vlan: initialized, using " 937 #ifdef VLAN_ARRAY 938 "full-size arrays" 939 #else 940 "hash tables with chaining" 941 #endif 942 943 "\n"); 944 break; 945 case MOD_UNLOAD: 946 #ifndef VIMAGE 947 ifc_detach_cloner(vlan_cloner); 948 #endif 949 EVENTHANDLER_DEREGISTER(ifnet_departure_event, ifdetach_tag); 950 EVENTHANDLER_DEREGISTER(iflladdr_event, iflladdr_tag); 951 EVENTHANDLER_DEREGISTER(ifnet_event, ifevent_tag); 952 vlan_input_p = NULL; 953 vlan_link_state_p = NULL; 954 vlan_trunk_cap_p = NULL; 955 vlan_trunkdev_p = NULL; 956 vlan_tag_p = NULL; 957 vlan_cookie_p = NULL; 958 vlan_setcookie_p = NULL; 959 vlan_devat_p = NULL; 960 VLAN_LOCKING_DESTROY(); 961 if (bootverbose) 962 printf("vlan: unloaded\n"); 963 break; 964 default: 965 return (EOPNOTSUPP); 966 } 967 return (0); 968 } 969 970 static moduledata_t vlan_mod = { 971 "if_vlan", 972 vlan_modevent, 973 0 974 }; 975 976 DECLARE_MODULE(if_vlan, vlan_mod, SI_SUB_PSEUDO, SI_ORDER_ANY); 977 MODULE_VERSION(if_vlan, 3); 978 979 #ifdef VIMAGE 980 static void 981 vnet_vlan_init(const void *unused __unused) 982 { 983 vlan_cloner = ifc_attach_cloner(vlanname, (struct if_clone_addreq *)&vlan_addreq); 984 V_vlan_cloner = vlan_cloner; 985 } 986 VNET_SYSINIT(vnet_vlan_init, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY, 987 vnet_vlan_init, NULL); 988 989 static void 990 vnet_vlan_uninit(const void *unused __unused) 991 { 992 993 ifc_detach_cloner(V_vlan_cloner); 994 } 995 VNET_SYSUNINIT(vnet_vlan_uninit, SI_SUB_INIT_IF, SI_ORDER_ANY, 996 vnet_vlan_uninit, NULL); 997 #endif 998 999 /* 1000 * Check for <etherif>.<vlan>[.<vlan> ...] style interface names. 
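 *
 * For example, "em0.100" names VID 100 on parent "em0"; because the name
 * is split at the last '.', "em0.100.200" names VID 200 whose parent is
 * the existing vlan interface "em0.100" (a stacked vlan).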
1001 */ 1002 static struct ifnet * 1003 vlan_clone_match_ethervid(const char *name, int *vidp) 1004 { 1005 char ifname[IFNAMSIZ]; 1006 char *cp; 1007 struct ifnet *ifp; 1008 int vid; 1009 1010 strlcpy(ifname, name, IFNAMSIZ); 1011 if ((cp = strrchr(ifname, '.')) == NULL) 1012 return (NULL); 1013 *cp = '\0'; 1014 if ((ifp = ifunit_ref(ifname)) == NULL) 1015 return (NULL); 1016 /* Parse VID. */ 1017 if (*++cp == '\0') { 1018 if_rele(ifp); 1019 return (NULL); 1020 } 1021 vid = 0; 1022 for(; *cp >= '0' && *cp <= '9'; cp++) 1023 vid = (vid * 10) + (*cp - '0'); 1024 if (*cp != '\0') { 1025 if_rele(ifp); 1026 return (NULL); 1027 } 1028 if (vidp != NULL) 1029 *vidp = vid; 1030 1031 return (ifp); 1032 } 1033 1034 static int 1035 vlan_clone_match(struct if_clone *ifc, const char *name) 1036 { 1037 struct ifnet *ifp; 1038 const char *cp; 1039 1040 ifp = vlan_clone_match_ethervid(name, NULL); 1041 if (ifp != NULL) { 1042 if_rele(ifp); 1043 return (1); 1044 } 1045 1046 if (strncmp(vlanname, name, strlen(vlanname)) != 0) 1047 return (0); 1048 for (cp = name + 4; *cp != '\0'; cp++) { 1049 if (*cp < '0' || *cp > '9') 1050 return (0); 1051 } 1052 1053 return (1); 1054 } 1055 1056 static int 1057 vlan_clone_create(struct if_clone *ifc, char *name, size_t len, 1058 struct ifc_data *ifd, struct ifnet **ifpp) 1059 { 1060 char *dp; 1061 bool wildcard = false; 1062 bool subinterface = false; 1063 int unit; 1064 int error; 1065 int vid = 0; 1066 uint16_t proto = ETHERTYPE_VLAN; 1067 struct ifvlan *ifv; 1068 struct ifnet *ifp; 1069 struct ifnet *p = NULL; 1070 struct ifaddr *ifa; 1071 struct sockaddr_dl *sdl; 1072 struct vlanreq vlr; 1073 static const u_char eaddr[ETHER_ADDR_LEN]; /* 00:00:00:00:00:00 */ 1074 1075 1076 /* 1077 * There are three ways to specify the cloned device: 1078 * o pass a parameter block with the clone request. 1079 * o specify parameters in the text of the clone device name 1080 * o specify no parameters and get an unattached device that 1081 * must be configured separately. 1082 * The first technique is preferred; the latter two are supported 1083 * for backwards compatibility. 1084 * 1085 * XXXRW: Note historic use of the word "tag" here. New ioctls may be 1086 * called for. 1087 */ 1088 1089 if (ifd->params != NULL) { 1090 error = ifc_copyin(ifd, &vlr, sizeof(vlr)); 1091 if (error) 1092 return error; 1093 vid = vlr.vlr_tag; 1094 proto = vlr.vlr_proto; 1095 if (proto == 0) 1096 proto = ETHERTYPE_VLAN; 1097 p = ifunit_ref(vlr.vlr_parent); 1098 if (p == NULL) 1099 return (ENXIO); 1100 } 1101 1102 if ((error = ifc_name2unit(name, &unit)) == 0) { 1103 1104 /* 1105 * vlanX interface. Set wildcard to true if the unit number 1106 * is not fixed (-1) 1107 */ 1108 wildcard = (unit < 0); 1109 } else { 1110 struct ifnet *p_tmp = vlan_clone_match_ethervid(name, &vid); 1111 if (p_tmp != NULL) { 1112 error = 0; 1113 subinterface = true; 1114 unit = IF_DUNIT_NONE; 1115 wildcard = false; 1116 if (p != NULL) { 1117 if_rele(p_tmp); 1118 if (p != p_tmp) 1119 error = EINVAL; 1120 } else 1121 p = p_tmp; 1122 } else 1123 error = ENXIO; 1124 } 1125 1126 if (error != 0) { 1127 if (p != NULL) 1128 if_rele(p); 1129 return (error); 1130 } 1131 1132 if (!subinterface) { 1133 /* vlanX interface, mark X as busy or allocate new unit # */ 1134 error = ifc_alloc_unit(ifc, &unit); 1135 if (error != 0) { 1136 if (p != NULL) 1137 if_rele(p); 1138 return (error); 1139 } 1140 } 1141 1142 /* In the wildcard case, we need to update the name. 
*/ 1143 if (wildcard) { 1144 for (dp = name; *dp != '\0'; dp++); 1145 if (snprintf(dp, len - (dp-name), "%d", unit) > 1146 len - (dp-name) - 1) { 1147 panic("%s: interface name too long", __func__); 1148 } 1149 } 1150 1151 ifv = malloc(sizeof(struct ifvlan), M_VLAN, M_WAITOK | M_ZERO); 1152 ifp = ifv->ifv_ifp = if_alloc(IFT_ETHER); 1153 CK_SLIST_INIT(&ifv->vlan_mc_listhead); 1154 ifp->if_softc = ifv; 1155 /* 1156 * Set the name manually rather than using if_initname because 1157 * we don't conform to the default naming convention for interfaces. 1158 */ 1159 strlcpy(ifp->if_xname, name, IFNAMSIZ); 1160 ifp->if_dname = vlanname; 1161 ifp->if_dunit = unit; 1162 1163 ifp->if_init = vlan_init; 1164 #ifdef ALTQ 1165 ifp->if_start = vlan_altq_start; 1166 ifp->if_transmit = vlan_altq_transmit; 1167 IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); 1168 ifp->if_snd.ifq_drv_maxlen = 0; 1169 IFQ_SET_READY(&ifp->if_snd); 1170 #else 1171 ifp->if_transmit = vlan_transmit; 1172 #endif 1173 ifp->if_qflush = vlan_qflush; 1174 ifp->if_ioctl = vlan_ioctl; 1175 #if defined(KERN_TLS) || defined(RATELIMIT) 1176 ifp->if_snd_tag_alloc = vlan_snd_tag_alloc; 1177 ifp->if_ratelimit_query = vlan_ratelimit_query; 1178 #endif 1179 ifp->if_flags = VLAN_IFFLAGS; 1180 ifp->if_type = IFT_L2VLAN; 1181 ether_ifattach(ifp, eaddr); 1182 /* Now undo some of the damage... */ 1183 ifp->if_baudrate = 0; 1184 ifp->if_hdrlen = ETHER_VLAN_ENCAP_LEN; 1185 ifa = ifp->if_addr; 1186 sdl = (struct sockaddr_dl *)ifa->ifa_addr; 1187 sdl->sdl_type = IFT_L2VLAN; 1188 1189 if (p != NULL) { 1190 error = vlan_config(ifv, p, vid, proto); 1191 if_rele(p); 1192 if (error != 0) { 1193 /* 1194 * Since we've partially failed, we need to back 1195 * out all the way, otherwise userland could get 1196 * confused. Thus, we destroy the interface. 
1197 */ 1198 ether_ifdetach(ifp); 1199 vlan_unconfig(ifp); 1200 if_free(ifp); 1201 if (!subinterface) 1202 ifc_free_unit(ifc, unit); 1203 free(ifv, M_VLAN); 1204 1205 return (error); 1206 } 1207 } 1208 *ifpp = ifp; 1209 1210 return (0); 1211 } 1212 1213 /* 1214 * 1215 * Parsers of IFLA_INFO_DATA inside IFLA_LINKINFO of RTM_NEWLINK 1216 * {{nla_len=8, nla_type=IFLA_LINK}, 2}, 1217 * {{nla_len=12, nla_type=IFLA_IFNAME}, "xvlan22"}, 1218 * {{nla_len=24, nla_type=IFLA_LINKINFO}, 1219 * [ 1220 * {{nla_len=8, nla_type=IFLA_INFO_KIND}, "vlan"...}, 1221 * {{nla_len=12, nla_type=IFLA_INFO_DATA}, "\x06\x00\x01\x00\x16\x00\x00\x00"}]} 1222 */ 1223 1224 struct nl_parsed_vlan { 1225 uint16_t vlan_id; 1226 uint16_t vlan_proto; 1227 struct ifla_vlan_flags vlan_flags; 1228 }; 1229 1230 #define _OUT(_field) offsetof(struct nl_parsed_vlan, _field) 1231 static const struct nlattr_parser nla_p_vlan[] = { 1232 { .type = IFLA_VLAN_ID, .off = _OUT(vlan_id), .cb = nlattr_get_uint16 }, 1233 { .type = IFLA_VLAN_FLAGS, .off = _OUT(vlan_flags), .cb = nlattr_get_nla }, 1234 { .type = IFLA_VLAN_PROTOCOL, .off = _OUT(vlan_proto), .cb = nlattr_get_uint16 }, 1235 }; 1236 #undef _OUT 1237 NL_DECLARE_ATTR_PARSER(vlan_parser, nla_p_vlan); 1238 1239 static int 1240 vlan_clone_create_nl(struct if_clone *ifc, char *name, size_t len, 1241 struct ifc_data_nl *ifd) 1242 { 1243 struct epoch_tracker et; 1244 struct ifnet *ifp_parent; 1245 struct nl_pstate *npt = ifd->npt; 1246 struct nl_parsed_link *lattrs = ifd->lattrs; 1247 int error; 1248 1249 /* 1250 * lattrs.ifla_ifname is the new interface name 1251 * lattrs.ifi_index contains parent interface index 1252 * lattrs.ifla_idata contains un-parsed vlan data 1253 */ 1254 struct nl_parsed_vlan attrs = { 1255 .vlan_id = 0xFEFE, 1256 .vlan_proto = ETHERTYPE_VLAN 1257 }; 1258 1259 if (lattrs->ifla_idata == NULL) { 1260 nlmsg_report_err_msg(npt, "vlan id is required, guessing not supported"); 1261 return (ENOTSUP); 1262 } 1263 1264 error = nl_parse_nested(lattrs->ifla_idata, &vlan_parser, npt, &attrs); 1265 if (error != 0) 1266 return (error); 1267 if (attrs.vlan_id > DOT1Q_VID_MAX) { 1268 nlmsg_report_err_msg(npt, "Invalid VID: %d", attrs.vlan_id); 1269 return (EINVAL); 1270 } 1271 if (attrs.vlan_proto != ETHERTYPE_VLAN && attrs.vlan_proto != ETHERTYPE_QINQ) { 1272 nlmsg_report_err_msg(npt, "Unsupported ethertype: 0x%04X", attrs.vlan_proto); 1273 return (ENOTSUP); 1274 } 1275 1276 struct vlanreq params = { 1277 .vlr_tag = attrs.vlan_id, 1278 .vlr_proto = attrs.vlan_proto, 1279 }; 1280 struct ifc_data ifd_new = { .flags = IFC_F_SYSSPACE, .unit = ifd->unit, .params = ¶ms }; 1281 1282 NET_EPOCH_ENTER(et); 1283 ifp_parent = ifnet_byindex(lattrs->ifi_index); 1284 if (ifp_parent != NULL) 1285 strlcpy(params.vlr_parent, if_name(ifp_parent), sizeof(params.vlr_parent)); 1286 NET_EPOCH_EXIT(et); 1287 1288 if (ifp_parent == NULL) { 1289 nlmsg_report_err_msg(npt, "unable to find parent interface %u", lattrs->ifi_index); 1290 return (ENOENT); 1291 } 1292 1293 error = vlan_clone_create(ifc, name, len, &ifd_new, &ifd->ifp); 1294 1295 return (error); 1296 } 1297 1298 static int 1299 vlan_clone_modify_nl(struct ifnet *ifp, struct ifc_data_nl *ifd) 1300 { 1301 struct nl_parsed_link *lattrs = ifd->lattrs; 1302 1303 if ((lattrs->ifla_idata != NULL) && ((ifd->flags & IFC_F_CREATE) == 0)) { 1304 struct epoch_tracker et; 1305 struct nl_parsed_vlan attrs = { 1306 .vlan_proto = ETHERTYPE_VLAN, 1307 }; 1308 int error; 1309 1310 error = nl_parse_nested(lattrs->ifla_idata, &vlan_parser, ifd->npt, &attrs); 1311 if 
 (error != 0)
			return (error);

		NET_EPOCH_ENTER(et);
		struct ifnet *ifp_parent = ifnet_byindex_ref(lattrs->ifla_link);
		NET_EPOCH_EXIT(et);

		if (ifp_parent == NULL) {
			nlmsg_report_err_msg(ifd->npt, "unable to find parent interface %u",
			    lattrs->ifla_link);
			return (ENOENT);
		}

		struct ifvlan *ifv = ifp->if_softc;
		error = vlan_config(ifv, ifp_parent, attrs.vlan_id, attrs.vlan_proto);

		if_rele(ifp_parent);
		if (error != 0)
			return (error);
	}

	return (nl_modify_ifp_generic(ifp, ifd->lattrs, ifd->bm, ifd->npt));
}

/*
 * {{nla_len=24, nla_type=IFLA_LINKINFO},
 *  [
 *   {{nla_len=8, nla_type=IFLA_INFO_KIND}, "vlan"...},
 *   {{nla_len=12, nla_type=IFLA_INFO_DATA}, "\x06\x00\x01\x00\x16\x00\x00\x00"}]}
 */
static void
vlan_clone_dump_nl(struct ifnet *ifp, struct nl_writer *nw)
{
	struct ifvlan *ifv;
	uint32_t parent_index = 0;
	uint16_t vlan_id = 0;
	uint16_t vlan_proto = 0;

	VLAN_SLOCK();
	if (__predict_false((ifv = ifp->if_softc) == NULL)) {
		/*
		 * XXXGL: the interface already went through if_dead().  This
		 * check is to be removed when we get better interface removal.
		 */
		VLAN_SUNLOCK();
		return;
	}
	if (TRUNK(ifv) != NULL)
		parent_index = PARENT(ifv)->if_index;
	vlan_id = ifv->ifv_vid;
	vlan_proto = ifv->ifv_proto;
	VLAN_SUNLOCK();

	if (parent_index != 0)
		nlattr_add_u32(nw, IFLA_LINK, parent_index);

	int off = nlattr_add_nested(nw, IFLA_LINKINFO);
	if (off != 0) {
		nlattr_add_string(nw, IFLA_INFO_KIND, "vlan");
		int off2 = nlattr_add_nested(nw, IFLA_INFO_DATA);
		if (off2 != 0) {
			nlattr_add_u16(nw, IFLA_VLAN_ID, vlan_id);
			nlattr_add_u16(nw, IFLA_VLAN_PROTOCOL, vlan_proto);
			nlattr_set_len(nw, off2);
		}
		nlattr_set_len(nw, off);
	}
}

static int
vlan_clone_destroy(struct if_clone *ifc, struct ifnet *ifp, uint32_t flags)
{
	struct ifvlan *ifv = ifp->if_softc;
	int unit = ifp->if_dunit;

	if (ifp->if_vlantrunk)
		return (EBUSY);

#ifdef ALTQ
	IFQ_PURGE(&ifp->if_snd);
#endif
	ether_ifdetach(ifp);	/* first, remove it from system-wide lists */
	vlan_unconfig(ifp);	/* now it can be unconfigured and freed */
	/*
	 * We should have the only reference to the ifv now, so we can now
	 * drain any remaining lladdr task before freeing the ifnet and the
	 * ifvlan.
	 */
	taskqueue_drain(taskqueue_thread, &ifv->lladdr_task);
	NET_EPOCH_WAIT();
	ifp->if_softc = NULL;
	if_free(ifp);
	free(ifv, M_VLAN);
	if (unit != IF_DUNIT_NONE)
		ifc_free_unit(ifc, unit);

	return (0);
}

/*
 * The ifp->if_init entry point for vlan(4) is a no-op.
 */
static void
vlan_init(void *foo __unused)
{
}

/*
 * The if_transmit method for the vlan(4) interface.
 */
static int
vlan_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct ifvlan *ifv;
	struct ifnet *p;
	int error, len, mcast;

	NET_EPOCH_ASSERT();

	ifv = ifp->if_softc;
	if (TRUNK(ifv) == NULL) {
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		m_freem(m);
		return (ENETDOWN);
	}
	p = PARENT(ifv);
	len = m->m_pkthdr.len;
	mcast = (m->m_flags & (M_MCAST | M_BCAST)) ?
1 : 0; 1438 1439 BPF_MTAP(ifp, m); 1440 1441 #if defined(KERN_TLS) || defined(RATELIMIT) 1442 if (m->m_pkthdr.csum_flags & CSUM_SND_TAG) { 1443 struct vlan_snd_tag *vst; 1444 struct m_snd_tag *mst; 1445 1446 MPASS(m->m_pkthdr.snd_tag->ifp == ifp); 1447 mst = m->m_pkthdr.snd_tag; 1448 vst = mst_to_vst(mst); 1449 if (vst->tag->ifp != p) { 1450 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 1451 m_freem(m); 1452 return (EAGAIN); 1453 } 1454 1455 m->m_pkthdr.snd_tag = m_snd_tag_ref(vst->tag); 1456 m_snd_tag_rele(mst); 1457 } 1458 #endif 1459 1460 /* 1461 * Do not run parent's if_transmit() if the parent is not up, 1462 * or parent's driver will cause a system crash. 1463 */ 1464 if (!UP_AND_RUNNING(p)) { 1465 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 1466 m_freem(m); 1467 return (ENETDOWN); 1468 } 1469 1470 if (!ether_8021q_frame(&m, ifp, p, &ifv->ifv_qtag)) { 1471 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 1472 return (0); 1473 } 1474 1475 /* 1476 * Send it, precisely as ether_output() would have. 1477 */ 1478 error = (p->if_transmit)(p, m); 1479 if (error == 0) { 1480 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); 1481 if_inc_counter(ifp, IFCOUNTER_OBYTES, len); 1482 if_inc_counter(ifp, IFCOUNTER_OMCASTS, mcast); 1483 } else 1484 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 1485 return (error); 1486 } 1487 1488 static int 1489 vlan_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst, 1490 struct route *ro) 1491 { 1492 struct ifvlan *ifv; 1493 struct ifnet *p; 1494 1495 NET_EPOCH_ASSERT(); 1496 1497 /* 1498 * Find the first non-VLAN parent interface. 1499 */ 1500 ifv = ifp->if_softc; 1501 do { 1502 if (TRUNK(ifv) == NULL) { 1503 m_freem(m); 1504 return (ENETDOWN); 1505 } 1506 p = PARENT(ifv); 1507 ifv = p->if_softc; 1508 } while (p->if_type == IFT_L2VLAN); 1509 1510 return p->if_output(ifp, m, dst, ro); 1511 } 1512 1513 #ifdef ALTQ 1514 static void 1515 vlan_altq_start(if_t ifp) 1516 { 1517 struct ifaltq *ifq = &ifp->if_snd; 1518 struct mbuf *m; 1519 1520 IFQ_LOCK(ifq); 1521 IFQ_DEQUEUE_NOLOCK(ifq, m); 1522 while (m != NULL) { 1523 vlan_transmit(ifp, m); 1524 IFQ_DEQUEUE_NOLOCK(ifq, m); 1525 } 1526 IFQ_UNLOCK(ifq); 1527 } 1528 1529 static int 1530 vlan_altq_transmit(if_t ifp, struct mbuf *m) 1531 { 1532 int err; 1533 1534 if (ALTQ_IS_ENABLED(&ifp->if_snd)) { 1535 IFQ_ENQUEUE(&ifp->if_snd, m, err); 1536 if (err == 0) 1537 vlan_altq_start(ifp); 1538 } else 1539 err = vlan_transmit(ifp, m); 1540 1541 return (err); 1542 } 1543 #endif /* ALTQ */ 1544 1545 /* 1546 * The ifp->if_qflush entry point for vlan(4) is a no-op. 1547 */ 1548 static void 1549 vlan_qflush(struct ifnet *ifp __unused) 1550 { 1551 } 1552 1553 static void 1554 vlan_input(struct ifnet *ifp, struct mbuf *m) 1555 { 1556 struct ifvlantrunk *trunk; 1557 struct ifvlan *ifv; 1558 struct m_tag *mtag; 1559 uint16_t vid, tag; 1560 1561 NET_EPOCH_ASSERT(); 1562 1563 trunk = ifp->if_vlantrunk; 1564 if (trunk == NULL) { 1565 m_freem(m); 1566 return; 1567 } 1568 1569 if (m->m_flags & M_VLANTAG) { 1570 /* 1571 * Packet is tagged, but m contains a normal 1572 * Ethernet frame; the tag is stored out-of-band. 1573 */ 1574 tag = m->m_pkthdr.ether_vtag; 1575 m->m_flags &= ~M_VLANTAG; 1576 } else { 1577 struct ether_vlan_header *evl; 1578 1579 /* 1580 * Packet is tagged in-band as specified by 802.1q. 
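			 *
			 * Illustrative frame layout on an Ethernet parent:
			 *
			 *	dst[6] src[6] tpid[2] (e.g. 0x8100) tci[2] type[2] payload...
			 *
			 * The code below copies dst/src forward over the
			 * 4-byte encapsulation and m_adj()s it away, leaving
			 * an ordinary Ethernet frame for reinjection.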
1581 */ 1582 switch (ifp->if_type) { 1583 case IFT_ETHER: 1584 if (m->m_len < sizeof(*evl) && 1585 (m = m_pullup(m, sizeof(*evl))) == NULL) { 1586 if_printf(ifp, "cannot pullup VLAN header\n"); 1587 return; 1588 } 1589 evl = mtod(m, struct ether_vlan_header *); 1590 tag = ntohs(evl->evl_tag); 1591 1592 /* 1593 * Remove the 802.1q header by copying the Ethernet 1594 * addresses over it and adjusting the beginning of 1595 * the data in the mbuf. The encapsulated Ethernet 1596 * type field is already in place. 1597 */ 1598 bcopy((char *)evl, (char *)evl + ETHER_VLAN_ENCAP_LEN, 1599 ETHER_HDR_LEN - ETHER_TYPE_LEN); 1600 m_adj(m, ETHER_VLAN_ENCAP_LEN); 1601 break; 1602 1603 default: 1604 #ifdef INVARIANTS 1605 panic("%s: %s has unsupported if_type %u", 1606 __func__, ifp->if_xname, ifp->if_type); 1607 #endif 1608 if_inc_counter(ifp, IFCOUNTER_NOPROTO, 1); 1609 m_freem(m); 1610 return; 1611 } 1612 } 1613 1614 vid = EVL_VLANOFTAG(tag); 1615 1616 ifv = vlan_gethash(trunk, vid); 1617 if (ifv == NULL || !UP_AND_RUNNING(ifv->ifv_ifp)) { 1618 if_inc_counter(ifp, IFCOUNTER_NOPROTO, 1); 1619 m_freem(m); 1620 return; 1621 } 1622 1623 if (V_vlan_mtag_pcp) { 1624 /* 1625 * While uncommon, it is possible that we will find a 802.1q 1626 * packet encapsulated inside another packet that also had an 1627 * 802.1q header. For example, ethernet tunneled over IPSEC 1628 * arriving over ethernet. In that case, we replace the 1629 * existing 802.1q PCP m_tag value. 1630 */ 1631 mtag = m_tag_locate(m, MTAG_8021Q, MTAG_8021Q_PCP_IN, NULL); 1632 if (mtag == NULL) { 1633 mtag = m_tag_alloc(MTAG_8021Q, MTAG_8021Q_PCP_IN, 1634 sizeof(uint8_t), M_NOWAIT); 1635 if (mtag == NULL) { 1636 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); 1637 m_freem(m); 1638 return; 1639 } 1640 m_tag_prepend(m, mtag); 1641 } 1642 *(uint8_t *)(mtag + 1) = EVL_PRIOFTAG(tag); 1643 } 1644 1645 m->m_pkthdr.rcvif = ifv->ifv_ifp; 1646 if_inc_counter(ifv->ifv_ifp, IFCOUNTER_IPACKETS, 1); 1647 1648 /* Pass it back through the parent's input routine. */ 1649 (*ifv->ifv_ifp->if_input)(ifv->ifv_ifp, m); 1650 } 1651 1652 static void 1653 vlan_lladdr_fn(void *arg, int pending __unused) 1654 { 1655 struct ifvlan *ifv; 1656 struct ifnet *ifp; 1657 1658 ifv = (struct ifvlan *)arg; 1659 ifp = ifv->ifv_ifp; 1660 1661 CURVNET_SET(ifp->if_vnet); 1662 1663 /* The ifv_ifp already has the lladdr copied in. */ 1664 if_setlladdr(ifp, IF_LLADDR(ifp), ifp->if_addrlen); 1665 1666 CURVNET_RESTORE(); 1667 } 1668 1669 static int 1670 vlan_config(struct ifvlan *ifv, struct ifnet *p, uint16_t vid, 1671 uint16_t proto) 1672 { 1673 struct epoch_tracker et; 1674 struct ifvlantrunk *trunk; 1675 struct ifnet *ifp; 1676 int error = 0; 1677 1678 /* 1679 * We can handle non-ethernet hardware types as long as 1680 * they handle the tagging and headers themselves. 1681 */ 1682 if (p->if_type != IFT_ETHER && 1683 p->if_type != IFT_L2VLAN && 1684 p->if_type != IFT_BRIDGE && 1685 (p->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) 1686 return (EPROTONOSUPPORT); 1687 if ((p->if_flags & VLAN_IFFLAGS) != VLAN_IFFLAGS) 1688 return (EPROTONOSUPPORT); 1689 /* 1690 * Don't let the caller set up a VLAN VID with 1691 * anything except VLID bits. 1692 * VID numbers 0x0 and 0xFFF are reserved. 
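	 * (In IEEE 802.1Q, VID 0 means "priority-tagged, no VLAN" and 0xFFF
	 * is reserved, which is why both are rejected below.)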
1693 */ 1694 if (vid == 0 || vid == 0xFFF || (vid & ~EVL_VLID_MASK)) 1695 return (EINVAL); 1696 if (ifv->ifv_trunk) { 1697 trunk = ifv->ifv_trunk; 1698 if (trunk->parent != p) 1699 return (EBUSY); 1700 1701 VLAN_XLOCK(); 1702 1703 ifv->ifv_proto = proto; 1704 1705 if (ifv->ifv_vid != vid) { 1706 int oldvid = ifv->ifv_vid; 1707 1708 /* Re-hash */ 1709 vlan_remhash(trunk, ifv); 1710 ifv->ifv_vid = vid; 1711 error = vlan_inshash(trunk, ifv); 1712 if (error) { 1713 int ret __diagused; 1714 1715 ifv->ifv_vid = oldvid; 1716 /* Re-insert back where we found it. */ 1717 ret = vlan_inshash(trunk, ifv); 1718 MPASS(ret == 0); 1719 } 1720 } 1721 /* Will unlock */ 1722 goto done; 1723 } 1724 1725 VLAN_XLOCK(); 1726 if (p->if_vlantrunk == NULL) { 1727 trunk = malloc(sizeof(struct ifvlantrunk), 1728 M_VLAN, M_WAITOK | M_ZERO); 1729 vlan_inithash(trunk); 1730 TRUNK_LOCK_INIT(trunk); 1731 TRUNK_WLOCK(trunk); 1732 p->if_vlantrunk = trunk; 1733 trunk->parent = p; 1734 if_ref(trunk->parent); 1735 TRUNK_WUNLOCK(trunk); 1736 } else { 1737 trunk = p->if_vlantrunk; 1738 } 1739 1740 ifv->ifv_vid = vid; /* must set this before vlan_inshash() */ 1741 ifv->ifv_pcp = 0; /* Default: best effort delivery. */ 1742 error = vlan_inshash(trunk, ifv); 1743 if (error) 1744 goto done; 1745 ifv->ifv_proto = proto; 1746 ifv->ifv_encaplen = ETHER_VLAN_ENCAP_LEN; 1747 ifv->ifv_mintu = ETHERMIN; 1748 ifv->ifv_pflags = 0; 1749 ifv->ifv_capenable = -1; 1750 ifv->ifv_capenable2 = -1; 1751 1752 /* 1753 * If the parent supports the VLAN_MTU capability, 1754 * i.e. can Tx/Rx larger than ETHER_MAX_LEN frames, 1755 * use it. 1756 */ 1757 if (p->if_capenable & IFCAP_VLAN_MTU) { 1758 /* 1759 * No need to fudge the MTU since the parent can 1760 * handle extended frames. 1761 */ 1762 ifv->ifv_mtufudge = 0; 1763 } else { 1764 /* 1765 * Fudge the MTU by the encapsulation size. This 1766 * makes us incompatible with strictly compliant 1767 * 802.1Q implementations, but allows us to use 1768 * the feature with other NetBSD implementations, 1769 * which might still be useful. 1770 */ 1771 ifv->ifv_mtufudge = ifv->ifv_encaplen; 1772 } 1773 1774 ifv->ifv_trunk = trunk; 1775 ifp = ifv->ifv_ifp; 1776 /* 1777 * Initialize fields from our parent. This duplicates some 1778 * work with ether_ifattach() but allows for non-ethernet 1779 * interfaces to also work. 1780 */ 1781 ifp->if_mtu = p->if_mtu - ifv->ifv_mtufudge; 1782 ifp->if_baudrate = p->if_baudrate; 1783 ifp->if_input = p->if_input; 1784 ifp->if_resolvemulti = p->if_resolvemulti; 1785 ifp->if_addrlen = p->if_addrlen; 1786 ifp->if_broadcastaddr = p->if_broadcastaddr; 1787 ifp->if_pcp = ifv->ifv_pcp; 1788 1789 /* 1790 * We wrap the parent's if_output using vlan_output to ensure that it 1791 * can't become stale. 1792 */ 1793 ifp->if_output = vlan_output; 1794 1795 /* 1796 * Copy only a selected subset of flags from the parent. 1797 * Other flags are none of our business. 1798 */ 1799 #define VLAN_COPY_FLAGS (IFF_SIMPLEX) 1800 ifp->if_flags &= ~VLAN_COPY_FLAGS; 1801 ifp->if_flags |= p->if_flags & VLAN_COPY_FLAGS; 1802 #undef VLAN_COPY_FLAGS 1803 1804 ifp->if_link_state = p->if_link_state; 1805 1806 NET_EPOCH_ENTER(et); 1807 vlan_capabilities(ifv); 1808 NET_EPOCH_EXIT(et); 1809 1810 /* 1811 * Set up our interface address to reflect the underlying 1812 * physical interface's. 
1813 */ 1814 TASK_INIT(&ifv->lladdr_task, 0, vlan_lladdr_fn, ifv); 1815 ((struct sockaddr_dl *)ifp->if_addr->ifa_addr)->sdl_alen = 1816 p->if_addrlen; 1817 1818 /* 1819 * Do not schedule link address update if it was the same 1820 * as previous parent's. This helps avoid updating for each 1821 * associated llentry. 1822 */ 1823 if (memcmp(IF_LLADDR(p), IF_LLADDR(ifp), p->if_addrlen) != 0) { 1824 bcopy(IF_LLADDR(p), IF_LLADDR(ifp), p->if_addrlen); 1825 taskqueue_enqueue(taskqueue_thread, &ifv->lladdr_task); 1826 } 1827 1828 /* We are ready for operation now. */ 1829 ifp->if_drv_flags |= IFF_DRV_RUNNING; 1830 1831 /* Update flags on the parent, if necessary. */ 1832 vlan_setflags(ifp, 1); 1833 1834 /* 1835 * Configure multicast addresses that may already be 1836 * joined on the vlan device. 1837 */ 1838 (void)vlan_setmulti(ifp); 1839 1840 done: 1841 if (error == 0) 1842 EVENTHANDLER_INVOKE(vlan_config, p, ifv->ifv_vid); 1843 VLAN_XUNLOCK(); 1844 1845 return (error); 1846 } 1847 1848 static void 1849 vlan_unconfig(struct ifnet *ifp) 1850 { 1851 1852 VLAN_XLOCK(); 1853 vlan_unconfig_locked(ifp, 0); 1854 VLAN_XUNLOCK(); 1855 } 1856 1857 static void 1858 vlan_unconfig_locked(struct ifnet *ifp, int departing) 1859 { 1860 struct ifvlantrunk *trunk; 1861 struct vlan_mc_entry *mc; 1862 struct ifvlan *ifv; 1863 struct ifnet *parent; 1864 int error; 1865 1866 VLAN_XLOCK_ASSERT(); 1867 1868 ifv = ifp->if_softc; 1869 trunk = ifv->ifv_trunk; 1870 parent = NULL; 1871 1872 if (trunk != NULL) { 1873 parent = trunk->parent; 1874 1875 /* 1876 * Since the interface is being unconfigured, we need to 1877 * empty the list of multicast groups that we may have joined 1878 * while we were alive from the parent's list. 1879 */ 1880 while ((mc = CK_SLIST_FIRST(&ifv->vlan_mc_listhead)) != NULL) { 1881 /* 1882 * If the parent interface is being detached, 1883 * all its multicast addresses have already 1884 * been removed. Warn about errors if 1885 * if_delmulti() does fail, but don't abort as 1886 * all callers expect vlan destruction to 1887 * succeed. 1888 */ 1889 if (!departing) { 1890 error = if_delmulti(parent, 1891 (struct sockaddr *)&mc->mc_addr); 1892 if (error) 1893 if_printf(ifp, 1894 "Failed to delete multicast address from parent: %d\n", 1895 error); 1896 } 1897 CK_SLIST_REMOVE_HEAD(&ifv->vlan_mc_listhead, mc_entries); 1898 NET_EPOCH_CALL(vlan_mc_free, &mc->mc_epoch_ctx); 1899 } 1900 1901 vlan_setflags(ifp, 0); /* clear special flags on parent */ 1902 1903 vlan_remhash(trunk, ifv); 1904 ifv->ifv_trunk = NULL; 1905 1906 /* 1907 * Check if we were the last. 1908 */ 1909 if (trunk->refcnt == 0) { 1910 parent->if_vlantrunk = NULL; 1911 NET_EPOCH_WAIT(); 1912 trunk_destroy(trunk); 1913 } 1914 } 1915 1916 /* Disconnect from parent. */ 1917 if (ifv->ifv_pflags) 1918 if_printf(ifp, "%s: ifv_pflags unclean\n", __func__); 1919 ifp->if_mtu = ETHERMTU; 1920 ifp->if_link_state = LINK_STATE_UNKNOWN; 1921 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1922 1923 /* 1924 * Only dispatch an event if vlan was 1925 * attached, otherwise there is nothing 1926 * to cleanup anyway. 1927 */ 1928 if (parent != NULL) 1929 EVENTHANDLER_INVOKE(vlan_unconfig, parent, ifv->ifv_vid); 1930 } 1931 1932 /* Handle a reference counted flag that should be set on the parent as well */ 1933 static int 1934 vlan_setflag(struct ifnet *ifp, int flag, int status, 1935 int (*func)(struct ifnet *, int)) 1936 { 1937 struct ifvlan *ifv; 1938 int error; 1939 1940 VLAN_SXLOCK_ASSERT(); 1941 1942 ifv = ifp->if_softc; 1943 status = status ? 
(ifp->if_flags & flag) : 0; 1944 /* Now "status" contains the flag value or 0 */ 1945 1946 /* 1947 * See if recorded parent's status is different from what 1948 * we want it to be. If it is, flip it. We record parent's 1949 * status in ifv_pflags so that we won't clear parent's flag 1950 * we haven't set. In fact, we don't clear or set parent's 1951 * flags directly, but get or release references to them. 1952 * That's why we can be sure that recorded flags still are 1953 * in accord with actual parent's flags. 1954 */ 1955 if (status != (ifv->ifv_pflags & flag)) { 1956 error = (*func)(PARENT(ifv), status); 1957 if (error) 1958 return (error); 1959 ifv->ifv_pflags &= ~flag; 1960 ifv->ifv_pflags |= status; 1961 } 1962 return (0); 1963 } 1964 1965 /* 1966 * Handle IFF_* flags that require certain changes on the parent: 1967 * if "status" is true, update parent's flags respective to our if_flags; 1968 * if "status" is false, forcedly clear the flags set on parent. 1969 */ 1970 static int 1971 vlan_setflags(struct ifnet *ifp, int status) 1972 { 1973 int error, i; 1974 1975 for (i = 0; vlan_pflags[i].flag; i++) { 1976 error = vlan_setflag(ifp, vlan_pflags[i].flag, 1977 status, vlan_pflags[i].func); 1978 if (error) 1979 return (error); 1980 } 1981 return (0); 1982 } 1983 1984 /* Inform all vlans that their parent has changed link state */ 1985 static void 1986 vlan_link_state(struct ifnet *ifp) 1987 { 1988 struct epoch_tracker et; 1989 struct ifvlantrunk *trunk; 1990 struct ifvlan *ifv; 1991 1992 NET_EPOCH_ENTER(et); 1993 trunk = ifp->if_vlantrunk; 1994 if (trunk == NULL) { 1995 NET_EPOCH_EXIT(et); 1996 return; 1997 } 1998 1999 TRUNK_WLOCK(trunk); 2000 VLAN_FOREACH(ifv, trunk) { 2001 ifv->ifv_ifp->if_baudrate = trunk->parent->if_baudrate; 2002 if_link_state_change(ifv->ifv_ifp, 2003 trunk->parent->if_link_state); 2004 } 2005 TRUNK_WUNLOCK(trunk); 2006 NET_EPOCH_EXIT(et); 2007 } 2008 2009 #ifdef IPSEC_OFFLOAD 2010 #define VLAN_IPSEC_METHOD(exp) \ 2011 if_t p; \ 2012 struct ifvlan *ifv; \ 2013 int error; \ 2014 \ 2015 ifv = ifp->if_softc; \ 2016 VLAN_SLOCK(); \ 2017 if (TRUNK(ifv) != NULL) { \ 2018 p = PARENT(ifv); \ 2019 if_ref(p); \ 2020 error = p->if_ipsec_accel_m->exp; \ 2021 if_rele(p); \ 2022 } else { \ 2023 error = ENXIO; \ 2024 } \ 2025 VLAN_SUNLOCK(); \ 2026 return (error); 2027 2028 2029 static int 2030 vlan_if_spdadd(if_t ifp, void *sp, void *inp, void **priv) 2031 { 2032 VLAN_IPSEC_METHOD(if_spdadd(ifp, sp, inp, priv)); 2033 } 2034 2035 static int 2036 vlan_if_spddel(if_t ifp, void *sp, void *priv) 2037 { 2038 VLAN_IPSEC_METHOD(if_spddel(ifp, sp, priv)); 2039 } 2040 2041 static int 2042 vlan_if_sa_newkey(if_t ifp, void *sav, u_int drv_spi, void **privp) 2043 { 2044 VLAN_IPSEC_METHOD(if_sa_newkey(ifp, sav, drv_spi, privp)); 2045 } 2046 2047 static int 2048 vlan_if_sa_deinstall(if_t ifp, u_int drv_spi, void *priv) 2049 { 2050 VLAN_IPSEC_METHOD(if_sa_deinstall(ifp, drv_spi, priv)); 2051 } 2052 2053 static int 2054 vlan_if_sa_cnt(if_t ifp, void *sa, uint32_t drv_spi, void *priv, 2055 struct seclifetime *lt) 2056 { 2057 VLAN_IPSEC_METHOD(if_sa_cnt(ifp, sa, drv_spi, priv, lt)); 2058 } 2059 2060 static int 2061 vlan_if_ipsec_hwassist(if_t ifp, void *sav, u_int drv_spi,void *priv) 2062 { 2063 if_t trunk; 2064 2065 NET_EPOCH_ASSERT(); 2066 trunk = vlan_trunkdev(ifp); 2067 if (trunk == NULL) 2068 return (0); 2069 return (trunk->if_ipsec_accel_m->if_hwassist(trunk, sav, 2070 drv_spi, priv)); 2071 } 2072 2073 static const struct if_ipsec_accel_methods vlan_if_ipsec_accel_methods = { 2074 
.if_spdadd = vlan_if_spdadd, 2075 .if_spddel = vlan_if_spddel, 2076 .if_sa_newkey = vlan_if_sa_newkey, 2077 .if_sa_deinstall = vlan_if_sa_deinstall, 2078 .if_sa_cnt = vlan_if_sa_cnt, 2079 .if_hwassist = vlan_if_ipsec_hwassist, 2080 }; 2081 2082 #undef VLAN_IPSEC_METHOD 2083 #endif /* IPSEC_OFFLOAD */ 2084 2085 static void 2086 vlan_capabilities(struct ifvlan *ifv) 2087 { 2088 struct ifnet *p; 2089 struct ifnet *ifp; 2090 struct ifnet_hw_tsomax hw_tsomax; 2091 int cap = 0, ena = 0, mena, cap2 = 0, ena2 = 0; 2092 int mena2 __unused; 2093 u_long hwa = 0; 2094 2095 NET_EPOCH_ASSERT(); 2096 VLAN_SXLOCK_ASSERT(); 2097 2098 p = PARENT(ifv); 2099 ifp = ifv->ifv_ifp; 2100 2101 /* Mask parent interface enabled capabilities disabled by user. */ 2102 mena = p->if_capenable & ifv->ifv_capenable; 2103 mena2 = p->if_capenable2 & ifv->ifv_capenable2; 2104 2105 /* 2106 * If the parent interface can do checksum offloading 2107 * on VLANs, then propagate its hardware-assisted 2108 * checksumming flags. Also assert that checksum 2109 * offloading requires hardware VLAN tagging. 2110 */ 2111 if (p->if_capabilities & IFCAP_VLAN_HWCSUM) 2112 cap |= p->if_capabilities & (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6); 2113 if (p->if_capenable & IFCAP_VLAN_HWCSUM && 2114 p->if_capenable & IFCAP_VLAN_HWTAGGING) { 2115 ena |= mena & (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6); 2116 if (ena & IFCAP_TXCSUM) 2117 hwa |= p->if_hwassist & (CSUM_IP | CSUM_TCP | 2118 CSUM_UDP | CSUM_SCTP); 2119 if (ena & IFCAP_TXCSUM_IPV6) 2120 hwa |= p->if_hwassist & (CSUM_TCP_IPV6 | 2121 CSUM_UDP_IPV6 | CSUM_SCTP_IPV6); 2122 } 2123 2124 /* 2125 * If the parent interface can do TSO on VLANs then 2126 * propagate the hardware-assisted flag. TSO on VLANs 2127 * does not necessarily require hardware VLAN tagging. 2128 */ 2129 memset(&hw_tsomax, 0, sizeof(hw_tsomax)); 2130 if_hw_tsomax_common(p, &hw_tsomax); 2131 if_hw_tsomax_update(ifp, &hw_tsomax); 2132 if (p->if_capabilities & IFCAP_VLAN_HWTSO) 2133 cap |= p->if_capabilities & IFCAP_TSO; 2134 if (p->if_capenable & IFCAP_VLAN_HWTSO) { 2135 ena |= mena & IFCAP_TSO; 2136 if (ena & IFCAP_TSO) 2137 hwa |= p->if_hwassist & CSUM_TSO; 2138 } 2139 2140 /* 2141 * If the parent interface can do LRO and checksum offloading on 2142 * VLANs, then guess it may do LRO on VLANs. False positive here 2143 * cost nothing, while false negative may lead to some confusions. 2144 */ 2145 if (p->if_capabilities & IFCAP_VLAN_HWCSUM) 2146 cap |= p->if_capabilities & IFCAP_LRO; 2147 if (p->if_capenable & IFCAP_VLAN_HWCSUM) 2148 ena |= mena & IFCAP_LRO; 2149 2150 /* 2151 * If the parent interface can offload TCP connections over VLANs then 2152 * propagate its TOE capability to the VLAN interface. 2153 * 2154 * All TOE drivers in the tree today can deal with VLANs. If this 2155 * changes then IFCAP_VLAN_TOE should be promoted to a full capability 2156 * with its own bit. 2157 */ 2158 #define IFCAP_VLAN_TOE IFCAP_TOE 2159 if (p->if_capabilities & IFCAP_VLAN_TOE) 2160 cap |= p->if_capabilities & IFCAP_TOE; 2161 if (p->if_capenable & IFCAP_VLAN_TOE) { 2162 SETTOEDEV(ifp, TOEDEV(p)); 2163 ena |= mena & IFCAP_TOE; 2164 } 2165 2166 /* 2167 * If the parent interface supports dynamic link state, so does the 2168 * VLAN interface. 2169 */ 2170 cap |= (p->if_capabilities & IFCAP_LINKSTATE); 2171 ena |= (mena & IFCAP_LINKSTATE); 2172 2173 #ifdef RATELIMIT 2174 /* 2175 * If the parent interface supports ratelimiting, so does the 2176 * VLAN interface. 
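	 * Rate-limit send tags are still allocated on the parent
	 * interface; see vlan_snd_tag_alloc() and vlan_ratelimit_query()
	 * below, which reports the vlan as an indirect (RT_IS_INDIRECT)
	 * interface.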
2177 */ 2178 cap |= (p->if_capabilities & IFCAP_TXRTLMT); 2179 ena |= (mena & IFCAP_TXRTLMT); 2180 #endif 2181 2182 /* 2183 * If the parent interface supports unmapped mbufs, so does 2184 * the VLAN interface. Note that this should be fine even for 2185 * interfaces that don't support hardware tagging as headers 2186 * are prepended in normal mbufs to unmapped mbufs holding 2187 * payload data. 2188 */ 2189 cap |= (p->if_capabilities & IFCAP_MEXTPG); 2190 ena |= (mena & IFCAP_MEXTPG); 2191 2192 /* 2193 * If the parent interface can offload encryption and segmentation 2194 * of TLS records over TCP, propagate it's capability to the VLAN 2195 * interface. 2196 * 2197 * All TLS drivers in the tree today can deal with VLANs. If 2198 * this ever changes, then a new IFCAP_VLAN_TXTLS can be 2199 * defined. 2200 */ 2201 if (p->if_capabilities & (IFCAP_TXTLS | IFCAP_TXTLS_RTLMT)) 2202 cap |= p->if_capabilities & (IFCAP_TXTLS | IFCAP_TXTLS_RTLMT); 2203 if (p->if_capenable & (IFCAP_TXTLS | IFCAP_TXTLS_RTLMT)) 2204 ena |= mena & (IFCAP_TXTLS | IFCAP_TXTLS_RTLMT); 2205 2206 ifp->if_capabilities = cap; 2207 ifp->if_capenable = ena; 2208 ifp->if_hwassist = hwa; 2209 2210 #ifdef IPSEC_OFFLOAD 2211 cap2 |= p->if_capabilities2 & IFCAP2_BIT(IFCAP2_IPSEC_OFFLOAD); 2212 ena2 |= mena2 & IFCAP2_BIT(IFCAP2_IPSEC_OFFLOAD); 2213 ifp->if_ipsec_accel_m = &vlan_if_ipsec_accel_methods; 2214 #endif 2215 2216 ifp->if_capabilities2 = cap2; 2217 ifp->if_capenable2 = ena2; 2218 } 2219 2220 static void 2221 vlan_trunk_capabilities(struct ifnet *ifp) 2222 { 2223 struct epoch_tracker et; 2224 struct ifvlantrunk *trunk; 2225 struct ifvlan *ifv; 2226 2227 VLAN_SLOCK(); 2228 trunk = ifp->if_vlantrunk; 2229 if (trunk == NULL) { 2230 VLAN_SUNLOCK(); 2231 return; 2232 } 2233 NET_EPOCH_ENTER(et); 2234 VLAN_FOREACH(ifv, trunk) 2235 vlan_capabilities(ifv); 2236 NET_EPOCH_EXIT(et); 2237 VLAN_SUNLOCK(); 2238 } 2239 2240 static int 2241 vlan_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 2242 { 2243 struct ifnet *p; 2244 struct ifreq *ifr; 2245 #ifdef INET 2246 struct ifaddr *ifa; 2247 #endif 2248 struct ifvlan *ifv; 2249 struct ifvlantrunk *trunk; 2250 struct vlanreq vlr; 2251 int error = 0, oldmtu; 2252 2253 ifr = (struct ifreq *)data; 2254 #ifdef INET 2255 ifa = (struct ifaddr *) data; 2256 #endif 2257 ifv = ifp->if_softc; 2258 2259 switch (cmd) { 2260 case SIOCSIFADDR: 2261 ifp->if_flags |= IFF_UP; 2262 #ifdef INET 2263 if (ifa->ifa_addr->sa_family == AF_INET) 2264 arp_ifinit(ifp, ifa); 2265 #endif 2266 break; 2267 case SIOCGIFADDR: 2268 bcopy(IF_LLADDR(ifp), &ifr->ifr_addr.sa_data[0], 2269 ifp->if_addrlen); 2270 break; 2271 case SIOCGIFMEDIA: 2272 VLAN_SLOCK(); 2273 if (TRUNK(ifv) != NULL) { 2274 p = PARENT(ifv); 2275 if_ref(p); 2276 error = (*p->if_ioctl)(p, SIOCGIFMEDIA, data); 2277 if_rele(p); 2278 /* Limit the result to the parent's current config. */ 2279 if (error == 0) { 2280 struct ifmediareq *ifmr; 2281 2282 ifmr = (struct ifmediareq *)data; 2283 if (ifmr->ifm_count >= 1 && ifmr->ifm_ulist) { 2284 ifmr->ifm_count = 1; 2285 error = copyout(&ifmr->ifm_current, 2286 ifmr->ifm_ulist, 2287 sizeof(int)); 2288 } 2289 } 2290 } else { 2291 error = EINVAL; 2292 } 2293 VLAN_SUNLOCK(); 2294 break; 2295 2296 case SIOCSIFMEDIA: 2297 error = EINVAL; 2298 break; 2299 2300 case SIOCSIFMTU: 2301 /* 2302 * Set the interface MTU. 
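		 * The requested MTU must leave room for the encapsulation
		 * overhead recorded in ifv_mtufudge: it may not exceed the
		 * parent's MTU less that fudge, nor drop below ifv_mintu
		 * less the same amount.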
2303 */ 2304 VLAN_SLOCK(); 2305 trunk = TRUNK(ifv); 2306 if (trunk != NULL) { 2307 TRUNK_WLOCK(trunk); 2308 if (ifr->ifr_mtu > 2309 (PARENT(ifv)->if_mtu - ifv->ifv_mtufudge) || 2310 ifr->ifr_mtu < 2311 (ifv->ifv_mintu - ifv->ifv_mtufudge)) 2312 error = EINVAL; 2313 else 2314 ifp->if_mtu = ifr->ifr_mtu; 2315 TRUNK_WUNLOCK(trunk); 2316 } else 2317 error = EINVAL; 2318 VLAN_SUNLOCK(); 2319 break; 2320 2321 case SIOCSETVLAN: 2322 #ifdef VIMAGE 2323 /* 2324 * XXXRW/XXXBZ: The goal in these checks is to allow a VLAN 2325 * interface to be delegated to a jail without allowing the 2326 * jail to change what underlying interface/VID it is 2327 * associated with. We are not entirely convinced that this 2328 * is the right way to accomplish that policy goal. 2329 */ 2330 if (ifp->if_vnet != ifp->if_home_vnet) { 2331 error = EPERM; 2332 break; 2333 } 2334 #endif 2335 error = copyin(ifr_data_get_ptr(ifr), &vlr, sizeof(vlr)); 2336 if (error) 2337 break; 2338 if (vlr.vlr_parent[0] == '\0') { 2339 vlan_unconfig(ifp); 2340 break; 2341 } 2342 p = ifunit_ref(vlr.vlr_parent); 2343 if (p == NULL) { 2344 error = ENOENT; 2345 break; 2346 } 2347 2348 /* 2349 * If the ifp is in a bridge, do not allow setting the device 2350 * to a bridge; this prevents having a bridge SVI as a bridge 2351 * member (which is not permitted). 2352 */ 2353 if (ifp->if_bridge != NULL && p->if_type == IFT_BRIDGE) { 2354 if_rele(p); 2355 error = EINVAL; 2356 break; 2357 } 2358 2359 if (vlr.vlr_proto == 0) 2360 vlr.vlr_proto = ETHERTYPE_VLAN; 2361 oldmtu = ifp->if_mtu; 2362 error = vlan_config(ifv, p, vlr.vlr_tag, vlr.vlr_proto); 2363 if_rele(p); 2364 2365 /* 2366 * VLAN MTU may change during addition of the vlandev. 2367 * If it did, do network layer specific procedure. 2368 */ 2369 if (ifp->if_mtu != oldmtu) 2370 if_notifymtu(ifp); 2371 break; 2372 2373 case SIOCGETVLAN: 2374 #ifdef VIMAGE 2375 if (ifp->if_vnet != ifp->if_home_vnet) { 2376 error = EPERM; 2377 break; 2378 } 2379 #endif 2380 bzero(&vlr, sizeof(vlr)); 2381 VLAN_SLOCK(); 2382 if (TRUNK(ifv) != NULL) { 2383 strlcpy(vlr.vlr_parent, PARENT(ifv)->if_xname, 2384 sizeof(vlr.vlr_parent)); 2385 vlr.vlr_tag = ifv->ifv_vid; 2386 vlr.vlr_proto = ifv->ifv_proto; 2387 } 2388 VLAN_SUNLOCK(); 2389 error = copyout(&vlr, ifr_data_get_ptr(ifr), sizeof(vlr)); 2390 break; 2391 2392 case SIOCSIFFLAGS: 2393 /* 2394 * We should propagate selected flags to the parent, 2395 * e.g., promiscuous mode. 2396 */ 2397 VLAN_SLOCK(); 2398 if (TRUNK(ifv) != NULL) 2399 error = vlan_setflags(ifp, 1); 2400 VLAN_SUNLOCK(); 2401 break; 2402 2403 case SIOCADDMULTI: 2404 case SIOCDELMULTI: 2405 /* 2406 * If we don't have a parent, just remember the membership for 2407 * when we do. 2408 * 2409 * XXX We need the rmlock here to avoid sleeping while 2410 * holding in6_multi_mtx. 
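		 *
		 * With a trunk attached, vlan_setmulti() below reprograms
		 * the parent's multicast filter from the addresses recorded
		 * on the vlan interface.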
2411 */ 2412 VLAN_XLOCK(); 2413 trunk = TRUNK(ifv); 2414 if (trunk != NULL) 2415 error = vlan_setmulti(ifp); 2416 VLAN_XUNLOCK(); 2417 2418 break; 2419 case SIOCGVLANPCP: 2420 #ifdef VIMAGE 2421 if (ifp->if_vnet != ifp->if_home_vnet) { 2422 error = EPERM; 2423 break; 2424 } 2425 #endif 2426 ifr->ifr_vlan_pcp = ifv->ifv_pcp; 2427 break; 2428 2429 case SIOCSVLANPCP: 2430 #ifdef VIMAGE 2431 if (ifp->if_vnet != ifp->if_home_vnet) { 2432 error = EPERM; 2433 break; 2434 } 2435 #endif 2436 error = priv_check(curthread, PRIV_NET_SETVLANPCP); 2437 if (error) 2438 break; 2439 if (ifr->ifr_vlan_pcp > VLAN_PCP_MAX) { 2440 error = EINVAL; 2441 break; 2442 } 2443 ifv->ifv_pcp = ifr->ifr_vlan_pcp; 2444 ifp->if_pcp = ifv->ifv_pcp; 2445 /* broadcast event about PCP change */ 2446 EVENTHANDLER_INVOKE(ifnet_event, ifp, IFNET_EVENT_PCP); 2447 break; 2448 2449 case SIOCSIFCAP: 2450 VLAN_SLOCK(); 2451 ifv->ifv_capenable = ifr->ifr_reqcap; 2452 trunk = TRUNK(ifv); 2453 if (trunk != NULL) { 2454 struct epoch_tracker et; 2455 2456 NET_EPOCH_ENTER(et); 2457 vlan_capabilities(ifv); 2458 NET_EPOCH_EXIT(et); 2459 } 2460 VLAN_SUNLOCK(); 2461 break; 2462 2463 default: 2464 error = EINVAL; 2465 break; 2466 } 2467 2468 return (error); 2469 } 2470 2471 #if defined(KERN_TLS) || defined(RATELIMIT) 2472 static int 2473 vlan_snd_tag_alloc(struct ifnet *ifp, 2474 union if_snd_tag_alloc_params *params, 2475 struct m_snd_tag **ppmt) 2476 { 2477 struct epoch_tracker et; 2478 const struct if_snd_tag_sw *sw; 2479 struct vlan_snd_tag *vst; 2480 struct ifvlan *ifv; 2481 struct ifnet *parent; 2482 struct m_snd_tag *mst; 2483 int error; 2484 2485 NET_EPOCH_ENTER(et); 2486 ifv = ifp->if_softc; 2487 2488 switch (params->hdr.type) { 2489 #ifdef RATELIMIT 2490 case IF_SND_TAG_TYPE_UNLIMITED: 2491 sw = &vlan_snd_tag_ul_sw; 2492 break; 2493 case IF_SND_TAG_TYPE_RATE_LIMIT: 2494 sw = &vlan_snd_tag_rl_sw; 2495 break; 2496 #endif 2497 #ifdef KERN_TLS 2498 case IF_SND_TAG_TYPE_TLS: 2499 sw = &vlan_snd_tag_tls_sw; 2500 break; 2501 case IF_SND_TAG_TYPE_TLS_RX: 2502 sw = NULL; 2503 if (params->tls_rx.vlan_id != 0) 2504 goto failure; 2505 params->tls_rx.vlan_id = ifv->ifv_vid; 2506 break; 2507 #ifdef RATELIMIT 2508 case IF_SND_TAG_TYPE_TLS_RATE_LIMIT: 2509 sw = &vlan_snd_tag_tls_rl_sw; 2510 break; 2511 #endif 2512 #endif 2513 default: 2514 goto failure; 2515 } 2516 2517 if (ifv->ifv_trunk != NULL) 2518 parent = PARENT(ifv); 2519 else 2520 parent = NULL; 2521 if (parent == NULL) 2522 goto failure; 2523 if_ref(parent); 2524 NET_EPOCH_EXIT(et); 2525 2526 if (sw != NULL) { 2527 vst = malloc(sizeof(*vst), M_VLAN, M_NOWAIT); 2528 if (vst == NULL) { 2529 if_rele(parent); 2530 return (ENOMEM); 2531 } 2532 } else 2533 vst = NULL; 2534 2535 error = m_snd_tag_alloc(parent, params, &mst); 2536 if_rele(parent); 2537 if (error) { 2538 free(vst, M_VLAN); 2539 return (error); 2540 } 2541 2542 if (sw != NULL) { 2543 m_snd_tag_init(&vst->com, ifp, sw); 2544 vst->tag = mst; 2545 2546 *ppmt = &vst->com; 2547 } else 2548 *ppmt = mst; 2549 2550 return (0); 2551 failure: 2552 NET_EPOCH_EXIT(et); 2553 return (EOPNOTSUPP); 2554 } 2555 2556 static struct m_snd_tag * 2557 vlan_next_snd_tag(struct m_snd_tag *mst) 2558 { 2559 struct vlan_snd_tag *vst; 2560 2561 vst = mst_to_vst(mst); 2562 return (vst->tag); 2563 } 2564 2565 static int 2566 vlan_snd_tag_modify(struct m_snd_tag *mst, 2567 union if_snd_tag_modify_params *params) 2568 { 2569 struct vlan_snd_tag *vst; 2570 2571 vst = mst_to_vst(mst); 2572 return (vst->tag->sw->snd_tag_modify(vst->tag, params)); 2573 } 2574 2575 
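/*
 * Illustrative sketch only (not part of this driver): the wrapping is
 * transparent to consumers.  A caller allocates a send tag on the vlan
 * ifnet as usual and receives vst->com, while the real parent tag stays
 * in vst->tag.  The params field names below are assumptions recalled
 * from if_var.h and are shown purely for illustration:
 *
 *	union if_snd_tag_alloc_params params = {
 *		.rate_limit.hdr.type = IF_SND_TAG_TYPE_RATE_LIMIT,
 *		.rate_limit.max_rate = 100 * 1000 * 1000,
 *	};
 *	struct m_snd_tag *mst;
 *	int error;
 *
 *	error = m_snd_tag_alloc(vlan_ifp, &params, &mst);
 *
 * m_snd_tag_alloc() resolves to vlan_snd_tag_alloc() above; subsequent
 * modify, query and free requests on the returned tag are forwarded to
 * the parent's tag by the wrappers around this comment.
 */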
static int 2576 vlan_snd_tag_query(struct m_snd_tag *mst, 2577 union if_snd_tag_query_params *params) 2578 { 2579 struct vlan_snd_tag *vst; 2580 2581 vst = mst_to_vst(mst); 2582 return (vst->tag->sw->snd_tag_query(vst->tag, params)); 2583 } 2584 2585 static void 2586 vlan_snd_tag_free(struct m_snd_tag *mst) 2587 { 2588 struct vlan_snd_tag *vst; 2589 2590 vst = mst_to_vst(mst); 2591 m_snd_tag_rele(vst->tag); 2592 free(vst, M_VLAN); 2593 } 2594 2595 static void 2596 vlan_ratelimit_query(struct ifnet *ifp __unused, struct if_ratelimit_query_results *q) 2597 { 2598 /* 2599 * For vlan, we have an indirect 2600 * interface. The caller needs to 2601 * get a ratelimit tag on the actual 2602 * interface the flow will go on. 2603 */ 2604 q->rate_table = NULL; 2605 q->flags = RT_IS_INDIRECT; 2606 q->max_flows = 0; 2607 q->number_of_rates = 0; 2608 } 2609 2610 #endif 2611
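/*
 * Userland sketch (illustration only; not part of this file): the
 * SIOCSETVLAN/SIOCGETVLAN handlers above exchange a struct vlanreq
 * through ifr_data.  Assuming a vlan interface "vlan0" already exists
 * and s is an open socket (e.g. socket(AF_INET, SOCK_DGRAM, 0)),
 * configuration looks roughly like:
 *
 *	struct vlanreq vlr = { 0 };
 *	struct ifreq ifr = { 0 };
 *
 *	strlcpy(vlr.vlr_parent, "em0", sizeof(vlr.vlr_parent));
 *	vlr.vlr_tag = 100;
 *	strlcpy(ifr.ifr_name, "vlan0", sizeof(ifr.ifr_name));
 *	ifr.ifr_data = (caddr_t)&vlr;
 *	if (ioctl(s, SIOCSETVLAN, &ifr) == -1)
 *		err(1, "SIOCSETVLAN");
 *
 * An empty vlr_parent detaches the vlan from its parent, and a
 * vlr_proto of zero defaults to ETHERTYPE_VLAN, as handled above.
 */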