/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1980, 1986, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)if.c	8.5 (Berkeley) 1/9/95
 * $FreeBSD$
 */

#include "opt_compat.h"
#include "opt_inet6.h"
#include "opt_inet.h"

#include <sys/param.h>
#include <sys/types.h>
#include <sys/conf.h>
#include <sys/malloc.h>
#include <sys/sbuf.h>
#include <sys/bus.h>
#include <sys/mbuf.h>
#include <sys/systm.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/refcount.h>
#include <sys/module.h>
#include <sys/rwlock.h>
#include <sys/sockio.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/taskqueue.h>
#include <sys/domain.h>
#include <sys/jail.h>
#include <sys/priv.h>

#include <machine/stdarg.h>
#include <vm/uma.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_clone.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/if_media.h>
#include <net/if_vlan_var.h>
#include <net/radix.h>
#include <net/route.h>
#include <net/vnet.h>

#if defined(INET) || defined(INET6)
#include <net/ethernet.h>
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_carp.h>
#ifdef INET
#include <netinet/if_ether.h>
#endif /* INET */
#ifdef INET6
#include <netinet6/in6_var.h>
#include <netinet6/in6_ifattach.h>
#endif /* INET6 */
#endif /* INET || INET6 */

#include <security/mac/mac_framework.h>

#ifdef COMPAT_FREEBSD32
#include <sys/mount.h>
#include <compat/freebsd32/freebsd32.h>

struct ifreq_buffer32 {
	uint32_t	length;		/* (size_t) */
	uint32_t	buffer;		/* (void *) */
};
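
/*
 * 32-bit counterpart of struct ifreq_buffer: the size_t and pointer
 * members are represented as 32-bit quantities so that the structure
 * matches the layout used by 32-bit processes issuing these ioctls.
 */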

/*
 * Interface request structure used for socket
 * ioctl's.  All interface ioctl's must have parameter
 * definitions which begin with ifr_name.  The
 * remainder may be interface specific.
 */
struct ifreq32 {
	char	ifr_name[IFNAMSIZ];	/* if name, e.g. "en0" */
	union {
		struct sockaddr	ifru_addr;
		struct sockaddr	ifru_dstaddr;
		struct sockaddr	ifru_broadaddr;
		struct ifreq_buffer32 ifru_buffer;
		short		ifru_flags[2];
		short		ifru_index;
		int		ifru_jid;
		int		ifru_metric;
		int		ifru_mtu;
		int		ifru_phys;
		int		ifru_media;
		uint32_t	ifru_data;
		int		ifru_cap[2];
		u_int		ifru_fib;
		u_char		ifru_vlan_pcp;
	} ifr_ifru;
};
CTASSERT(sizeof(struct ifreq) == sizeof(struct ifreq32));
CTASSERT(__offsetof(struct ifreq, ifr_ifru) ==
    __offsetof(struct ifreq32, ifr_ifru));
#endif

union ifreq_union {
	struct ifreq	ifr;
#ifdef COMPAT_FREEBSD32
	struct ifreq32	ifr32;
#endif
};

SYSCTL_NODE(_net, PF_LINK, link, CTLFLAG_RW, 0, "Link layers");
SYSCTL_NODE(_net_link, 0, generic, CTLFLAG_RW, 0, "Generic link-management");

SYSCTL_INT(_net_link, OID_AUTO, ifqmaxlen, CTLFLAG_RDTUN,
    &ifqmaxlen, 0, "max send queue size");

/* Log link state change events */
static int log_link_state_change = 1;

SYSCTL_INT(_net_link, OID_AUTO, log_link_state_change, CTLFLAG_RW,
	&log_link_state_change, 0,
	"log interface link state change events");

/* Log promiscuous mode change events */
static int log_promisc_mode_change = 1;

SYSCTL_INT(_net_link, OID_AUTO, log_promisc_mode_change, CTLFLAG_RDTUN,
	&log_promisc_mode_change, 1,
	"log promiscuous mode change events");

/* Interface description */
static unsigned int ifdescr_maxlen = 1024;
SYSCTL_UINT(_net, OID_AUTO, ifdescr_maxlen, CTLFLAG_RW,
	&ifdescr_maxlen, 0,
	"administrative maximum length for interface description");

static MALLOC_DEFINE(M_IFDESCR, "ifdescr", "ifnet descriptions");

/* global sx for non-critical path ifdescr */
static struct sx ifdescr_sx;
SX_SYSINIT(ifdescr_sx, &ifdescr_sx, "ifnet descr");

void	(*bridge_linkstate_p)(struct ifnet *ifp);
void	(*ng_ether_link_state_p)(struct ifnet *ifp, int state);
void	(*lagg_linkstate_p)(struct ifnet *ifp, int state);
/* These are external hooks for CARP. */
void	(*carp_linkstate_p)(struct ifnet *ifp);
void	(*carp_demote_adj_p)(int, char *);
int	(*carp_master_p)(struct ifaddr *);
#if defined(INET) || defined(INET6)
int	(*carp_forus_p)(struct ifnet *ifp, u_char *dhost);
int	(*carp_output_p)(struct ifnet *ifp, struct mbuf *m,
    const struct sockaddr *sa);
int	(*carp_ioctl_p)(struct ifreq *, u_long, struct thread *);
int	(*carp_attach_p)(struct ifaddr *, int);
void	(*carp_detach_p)(struct ifaddr *, bool);
#endif
#ifdef INET
int	(*carp_iamatch_p)(struct ifaddr *, uint8_t **);
#endif
#ifdef INET6
struct ifaddr *(*carp_iamatch6_p)(struct ifnet *ifp, struct in6_addr *taddr6);
caddr_t	(*carp_macmatch6_p)(struct ifnet *ifp, struct mbuf *m,
    const struct in6_addr *taddr);
#endif

struct mbuf *(*tbr_dequeue_ptr)(struct ifaltq *, int) = NULL;

/*
 * XXX: Style; these should be sorted alphabetically, and unprototyped
 * static functions should be prototyped.  Currently they are sorted by
 * declaration order.
 */
static void	if_attachdomain(void *);
static void	if_attachdomain1(struct ifnet *);
static int	ifconf(u_long, caddr_t);
static void	if_freemulti(struct ifmultiaddr *);
static void	if_grow(void);
static void	if_input_default(struct ifnet *, struct mbuf *);
static int	if_requestencap_default(struct ifnet *, struct if_encap_req *);
static void	if_route(struct ifnet *, int flag, int fam);
static int	if_setflag(struct ifnet *, int, int, int *, int);
static int	if_transmit(struct ifnet *ifp, struct mbuf *m);
static void	if_unroute(struct ifnet *, int flag, int fam);
static void	link_rtrequest(int, struct rtentry *, struct rt_addrinfo *);
static int	ifhwioctl(u_long, struct ifnet *, caddr_t, struct thread *);
static int	if_delmulti_locked(struct ifnet *, struct ifmultiaddr *, int);
static void	do_link_state_change(void *, int);
static int	if_getgroup(struct ifgroupreq *, struct ifnet *);
static int	if_getgroupmembers(struct ifgroupreq *);
static void	if_delgroups(struct ifnet *);
static void	if_attach_internal(struct ifnet *, int, struct if_clone *);
static int	if_detach_internal(struct ifnet *, int, struct if_clone **);
#ifdef VIMAGE
static void	if_vmove(struct ifnet *, struct vnet *);
#endif

#ifdef INET6
/*
 * XXX: declared here to avoid including many inet6-related files;
 * should be more generalized?
 */
extern void	nd6_setmtu(struct ifnet *);
#endif

/* ipsec helper hooks */
VNET_DEFINE(struct hhook_head *, ipsec_hhh_in[HHOOK_IPSEC_COUNT]);
VNET_DEFINE(struct hhook_head *, ipsec_hhh_out[HHOOK_IPSEC_COUNT]);

VNET_DEFINE(int, if_index);
int	ifqmaxlen = IFQ_MAXLEN;
VNET_DEFINE(struct ifnethead, ifnet);	/* depend on static init XXX */
VNET_DEFINE(struct ifgrouphead, ifg_head);

static VNET_DEFINE(int, if_indexlim) = 8;

/* Table of ifnet by index. */
VNET_DEFINE(struct ifnet **, ifindex_table);

#define	V_if_indexlim		VNET(if_indexlim)
#define	V_ifindex_table		VNET(ifindex_table)

/*
 * The global network interface list (V_ifnet) and related state (such as
 * if_index, if_indexlim, and ifindex_table) are protected by an sxlock and
 * an rwlock.  Either may be acquired shared to stabilize the list, but both
 * must be acquired writable to modify the list.  This model allows us to
 * both stabilize the interface list during interrupt thread processing, and
 * also to stabilize it over long-running ioctls, without introducing
 * priority inversions and deadlocks.
 */
struct rwlock ifnet_rwlock;
RW_SYSINIT_FLAGS(ifnet_rw, &ifnet_rwlock, "ifnet_rw", RW_RECURSE);
struct sx ifnet_sxlock;
SX_SYSINIT_FLAGS(ifnet_sx, &ifnet_sxlock, "ifnet_sx", SX_RECURSE);

/*
 * The allocation of network interfaces is a rather non-atomic affair; we
 * need to select an index before we are ready to expose the interface for
 * use, so we will use this pointer value to indicate reservation.
 */
#define	IFNET_HOLD	(void *)(uintptr_t)(-1)

static	if_com_alloc_t *if_com_alloc[256];
static	if_com_free_t *if_com_free[256];

static MALLOC_DEFINE(M_IFNET, "ifnet", "interface internals");
MALLOC_DEFINE(M_IFADDR, "ifaddr", "interface address");
MALLOC_DEFINE(M_IFMADDR, "ether_multi", "link-level multicast address");

struct ifnet *
ifnet_byindex_locked(u_short idx)
{

	if (idx > V_if_index)
		return (NULL);
	if (V_ifindex_table[idx] == IFNET_HOLD)
		return (NULL);
	return (V_ifindex_table[idx]);
}

struct ifnet *
ifnet_byindex(u_short idx)
{
	struct ifnet *ifp;

	IFNET_RLOCK_NOSLEEP();
	ifp = ifnet_byindex_locked(idx);
	IFNET_RUNLOCK_NOSLEEP();
	return (ifp);
}

struct ifnet *
ifnet_byindex_ref(u_short idx)
{
	struct ifnet *ifp;

	IFNET_RLOCK_NOSLEEP();
	ifp = ifnet_byindex_locked(idx);
	if (ifp == NULL || (ifp->if_flags & IFF_DYING)) {
		IFNET_RUNLOCK_NOSLEEP();
		return (NULL);
	}
	if_ref(ifp);
	IFNET_RUNLOCK_NOSLEEP();
	return (ifp);
}

/*
 * Allocate an ifindex array entry, growing the table if necessary;
 * return the newly allocated index.
 */
static u_short
ifindex_alloc(void)
{
	u_short idx;

	IFNET_WLOCK_ASSERT();
retry:
	/*
	 * Try to find an empty slot below V_if_index.  If we fail, take the
	 * next slot.
	 */
	for (idx = 1; idx <= V_if_index; idx++) {
		if (V_ifindex_table[idx] == NULL)
			break;
	}

	/* Catch if_index overflow. */
	if (idx >= V_if_indexlim) {
		if_grow();
		goto retry;
	}
	if (idx > V_if_index)
		V_if_index = idx;
	return (idx);
}

static void
ifindex_free_locked(u_short idx)
{

	IFNET_WLOCK_ASSERT();

	V_ifindex_table[idx] = NULL;
	while (V_if_index > 0 &&
	    V_ifindex_table[V_if_index] == NULL)
		V_if_index--;
}

static void
ifindex_free(u_short idx)
{

	IFNET_WLOCK();
	ifindex_free_locked(idx);
	IFNET_WUNLOCK();
}

static void
ifnet_setbyindex_locked(u_short idx, struct ifnet *ifp)
{

	IFNET_WLOCK_ASSERT();

	V_ifindex_table[idx] = ifp;
}

static void
ifnet_setbyindex(u_short idx, struct ifnet *ifp)
{

	IFNET_WLOCK();
	ifnet_setbyindex_locked(idx, ifp);
	IFNET_WUNLOCK();
}

struct ifaddr *
ifaddr_byindex(u_short idx)
{
	struct ifnet *ifp;
	struct ifaddr *ifa = NULL;

	IFNET_RLOCK_NOSLEEP();
	ifp = ifnet_byindex_locked(idx);
	if (ifp != NULL && (ifa = ifp->if_addr) != NULL)
		ifa_ref(ifa);
	IFNET_RUNLOCK_NOSLEEP();
	return (ifa);
}
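
/*
 * Note on reference management for the lookups above: ifnet_byindex()
 * returns an unreferenced pointer that is only stable for as long as the
 * caller can otherwise prevent the interface from being detached, whereas
 * ifnet_byindex_ref() and ifaddr_byindex() return referenced objects that
 * must be released with if_rele() and ifa_free(), respectively.
 */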
/*
 * Network interface utility routines.
 *
 * Routines with ifa_ifwith* names take sockaddr *'s as
 * parameters.
 */

static void
vnet_if_init(const void *unused __unused)
{

	TAILQ_INIT(&V_ifnet);
	TAILQ_INIT(&V_ifg_head);
	IFNET_WLOCK();
	if_grow();				/* create initial table */
	IFNET_WUNLOCK();
	vnet_if_clone_init();
}
VNET_SYSINIT(vnet_if_init, SI_SUB_INIT_IF, SI_ORDER_SECOND, vnet_if_init,
    NULL);

#ifdef VIMAGE
static void
vnet_if_uninit(const void *unused __unused)
{

	VNET_ASSERT(TAILQ_EMPTY(&V_ifnet), ("%s:%d tailq &V_ifnet=%p "
	    "not empty", __func__, __LINE__, &V_ifnet));
	VNET_ASSERT(TAILQ_EMPTY(&V_ifg_head), ("%s:%d tailq &V_ifg_head=%p "
	    "not empty", __func__, __LINE__, &V_ifg_head));

	free((caddr_t)V_ifindex_table, M_IFNET);
}
VNET_SYSUNINIT(vnet_if_uninit, SI_SUB_INIT_IF, SI_ORDER_FIRST,
    vnet_if_uninit, NULL);

static void
vnet_if_return(const void *unused __unused)
{
	struct ifnet *ifp, *nifp;

	/* Return all inherited interfaces to their parent vnets. */
	TAILQ_FOREACH_SAFE(ifp, &V_ifnet, if_link, nifp) {
		if (ifp->if_home_vnet != ifp->if_vnet)
			if_vmove(ifp, ifp->if_home_vnet);
	}
}
VNET_SYSUNINIT(vnet_if_return, SI_SUB_VNET_DONE, SI_ORDER_ANY,
    vnet_if_return, NULL);
#endif

static void
if_grow(void)
{
	int oldlim;
	u_int n;
	struct ifnet **e;

	IFNET_WLOCK_ASSERT();
	oldlim = V_if_indexlim;
	IFNET_WUNLOCK();
	n = (oldlim << 1) * sizeof(*e);
	e = malloc(n, M_IFNET, M_WAITOK | M_ZERO);
	IFNET_WLOCK();
	if (V_if_indexlim != oldlim) {
		free(e, M_IFNET);
		return;
	}
	if (V_ifindex_table != NULL) {
		memcpy((caddr_t)e, (caddr_t)V_ifindex_table, n/2);
		free((caddr_t)V_ifindex_table, M_IFNET);
	}
	V_if_indexlim <<= 1;
	V_ifindex_table = e;
}

/*
 * Allocate a struct ifnet and an index for an interface.  A layer 2
 * common structure will also be allocated if an allocation routine is
 * registered for the passed type.
 */
struct ifnet *
if_alloc(u_char type)
{
	struct ifnet *ifp;
	u_short idx;

	ifp = malloc(sizeof(struct ifnet), M_IFNET, M_WAITOK|M_ZERO);
	IFNET_WLOCK();
	idx = ifindex_alloc();
	ifnet_setbyindex_locked(idx, IFNET_HOLD);
	IFNET_WUNLOCK();
	ifp->if_index = idx;
	ifp->if_type = type;
	ifp->if_alloctype = type;
#ifdef VIMAGE
	ifp->if_vnet = curvnet;
#endif
	if (if_com_alloc[type] != NULL) {
		ifp->if_l2com = if_com_alloc[type](type, ifp);
		if (ifp->if_l2com == NULL) {
			free(ifp, M_IFNET);
			ifindex_free(idx);
			return (NULL);
		}
	}

	IF_ADDR_LOCK_INIT(ifp);
	TASK_INIT(&ifp->if_linktask, 0, do_link_state_change, ifp);
	ifp->if_afdata_initialized = 0;
	IF_AFDATA_LOCK_INIT(ifp);
	TAILQ_INIT(&ifp->if_addrhead);
	TAILQ_INIT(&ifp->if_multiaddrs);
	TAILQ_INIT(&ifp->if_groups);
#ifdef MAC
	mac_ifnet_init(ifp);
#endif
	ifq_init(&ifp->if_snd, ifp);

	refcount_init(&ifp->if_refcount, 1);	/* Index reference. */
	for (int i = 0; i < IFCOUNTERS; i++)
		ifp->if_counters[i] = counter_u64_alloc(M_WAITOK);
	ifp->if_get_counter = if_get_counter_default;
	ifp->if_pcp = IFNET_PCP_NONE;
	ifnet_setbyindex(ifp->if_index, ifp);
	return (ifp);
}
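
/*
 * Illustrative sketch (not part of the original comments): a typical
 * Ethernet driver obtains its ifnet from if_alloc() and completes the
 * attach via ether_ifattach(), which in turn calls if_attach() below.
 * The foo_* names are hypothetical driver routines.
 *
 *	ifp = if_alloc(IFT_ETHER);
 *	ifp->if_softc = sc;
 *	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
 *	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
 *	ifp->if_init = foo_init;
 *	ifp->if_ioctl = foo_ioctl;
 *	ifp->if_transmit = foo_transmit;
 *	ifp->if_qflush = foo_qflush;
 *	ether_ifattach(ifp, sc->foo_enaddr);
 */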
/*
 * Do the actual work of freeing a struct ifnet, and layer 2 common
 * structure.  This call is made when the last reference to an
 * interface is released.
 */
static void
if_free_internal(struct ifnet *ifp)
{

	KASSERT((ifp->if_flags & IFF_DYING),
	    ("if_free_internal: interface not dying"));

	if (if_com_free[ifp->if_alloctype] != NULL)
		if_com_free[ifp->if_alloctype](ifp->if_l2com,
		    ifp->if_alloctype);

#ifdef MAC
	mac_ifnet_destroy(ifp);
#endif /* MAC */
	if (ifp->if_description != NULL)
		free(ifp->if_description, M_IFDESCR);
	IF_AFDATA_DESTROY(ifp);
	IF_ADDR_LOCK_DESTROY(ifp);
	ifq_delete(&ifp->if_snd);

	for (int i = 0; i < IFCOUNTERS; i++)
		counter_u64_free(ifp->if_counters[i]);

	free(ifp, M_IFNET);
}

/*
 * Deregister an interface and free the associated storage.
 */
void
if_free(struct ifnet *ifp)
{

	ifp->if_flags |= IFF_DYING;			/* XXX: Locking */

	CURVNET_SET_QUIET(ifp->if_vnet);
	IFNET_WLOCK();
	KASSERT(ifp == ifnet_byindex_locked(ifp->if_index),
	    ("%s: freeing unallocated ifnet", ifp->if_xname));

	ifindex_free_locked(ifp->if_index);
	IFNET_WUNLOCK();

	if (refcount_release(&ifp->if_refcount))
		if_free_internal(ifp);
	CURVNET_RESTORE();
}

/*
 * Interfaces to keep an ifnet type-stable despite the possibility of the
 * driver calling if_free().  If there are additional references, we defer
 * freeing the underlying data structure.
 */
void
if_ref(struct ifnet *ifp)
{

	/* We don't assert the ifnet list lock here, but arguably should. */
	refcount_acquire(&ifp->if_refcount);
}

void
if_rele(struct ifnet *ifp)
{

	if (!refcount_release(&ifp->if_refcount))
		return;
	if_free_internal(ifp);
}

void
ifq_init(struct ifaltq *ifq, struct ifnet *ifp)
{

	mtx_init(&ifq->ifq_mtx, ifp->if_xname, "if send queue", MTX_DEF);

	if (ifq->ifq_maxlen == 0)
		ifq->ifq_maxlen = ifqmaxlen;

	ifq->altq_type = 0;
	ifq->altq_disc = NULL;
	ifq->altq_flags &= ALTQF_CANTCHANGE;
	ifq->altq_tbr = NULL;
	ifq->altq_ifp = ifp;
}

void
ifq_delete(struct ifaltq *ifq)
{
	mtx_destroy(&ifq->ifq_mtx);
}

/*
 * Perform generic interface initialization tasks and attach the interface
 * to the list of "active" interfaces.  If the vmove flag is set on entry
 * to if_attach_internal(), perform only a limited subset of initialization
 * tasks, given that we are moving an ifnet that has already been fully
 * initialized from one vnet to another.
 *
 * Note that if_detach_internal() removes group membership unconditionally
 * even when the vmove flag is set, and if_attach_internal() adds only
 * IFG_ALL.  Thus, when if_vmove() is applied to a cloned interface, group
 * membership is lost, even though a cloned interface always joins a group
 * whose name is ifc->ifc_name.  To recover this after if_detach_internal()
 * and if_attach_internal(), the cloner should be specified to
 * if_attach_internal() via ifc.  If it is non-NULL, if_attach_internal()
 * attempts to join a group whose name is ifc->ifc_name.
 *
 * XXX:
 *  - The decision to return void and thus require this function to
 *    succeed is questionable.
 *  - We should probably do more sanity checking.  For instance we don't
 *    do anything to ensure if_xname is unique or non-empty.
 */
void
if_attach(struct ifnet *ifp)
{

	if_attach_internal(ifp, 0, NULL);
}

/*
 * Compute the least common TSO limit.
 */
void
if_hw_tsomax_common(if_t ifp, struct ifnet_hw_tsomax *pmax)
{
	/*
	 * 1) If there is no limit currently, take the limit from
	 * the network adapter.
	 *
	 * 2) If the network adapter has a limit below the current
	 * limit, apply it.
	 */
	if (pmax->tsomaxbytes == 0 || (ifp->if_hw_tsomax != 0 &&
	    ifp->if_hw_tsomax < pmax->tsomaxbytes)) {
		pmax->tsomaxbytes = ifp->if_hw_tsomax;
	}
	if (pmax->tsomaxsegcount == 0 || (ifp->if_hw_tsomaxsegcount != 0 &&
	    ifp->if_hw_tsomaxsegcount < pmax->tsomaxsegcount)) {
		pmax->tsomaxsegcount = ifp->if_hw_tsomaxsegcount;
	}
	if (pmax->tsomaxsegsize == 0 || (ifp->if_hw_tsomaxsegsize != 0 &&
	    ifp->if_hw_tsomaxsegsize < pmax->tsomaxsegsize)) {
		pmax->tsomaxsegsize = ifp->if_hw_tsomaxsegsize;
	}
}

/*
 * Update the TSO limit of a network adapter.
 *
 * Returns zero if no change.  Else non-zero.
 */
int
if_hw_tsomax_update(if_t ifp, struct ifnet_hw_tsomax *pmax)
{
	int retval = 0;

	if (ifp->if_hw_tsomax != pmax->tsomaxbytes) {
		ifp->if_hw_tsomax = pmax->tsomaxbytes;
		retval++;
	}
	if (ifp->if_hw_tsomaxsegsize != pmax->tsomaxsegsize) {
		ifp->if_hw_tsomaxsegsize = pmax->tsomaxsegsize;
		retval++;
	}
	if (ifp->if_hw_tsomaxsegcount != pmax->tsomaxsegcount) {
		ifp->if_hw_tsomaxsegcount = pmax->tsomaxsegcount;
		retval++;
	}
	return (retval);
}

static void
if_attach_internal(struct ifnet *ifp, int vmove, struct if_clone *ifc)
{
	unsigned socksize, ifasize;
	int namelen, masklen;
	struct sockaddr_dl *sdl;
	struct ifaddr *ifa;

	if (ifp->if_index == 0 || ifp != ifnet_byindex(ifp->if_index))
		panic ("%s: BUG: if_attach called without if_alloc'd input()\n",
		    ifp->if_xname);

#ifdef VIMAGE
	ifp->if_vnet = curvnet;
	if (ifp->if_home_vnet == NULL)
		ifp->if_home_vnet = curvnet;
#endif

	if_addgroup(ifp, IFG_ALL);

	/* Restore group membership for cloned interfaces. */
	if (vmove && ifc != NULL)
		if_clone_addgroup(ifp, ifc);

	getmicrotime(&ifp->if_lastchange);
	ifp->if_epoch = time_uptime;

	KASSERT((ifp->if_transmit == NULL && ifp->if_qflush == NULL) ||
	    (ifp->if_transmit != NULL && ifp->if_qflush != NULL),
	    ("transmit and qflush must both either be set or both be NULL"));
	if (ifp->if_transmit == NULL) {
		ifp->if_transmit = if_transmit;
		ifp->if_qflush = if_qflush;
	}
	if (ifp->if_input == NULL)
		ifp->if_input = if_input_default;

	if (ifp->if_requestencap == NULL)
		ifp->if_requestencap = if_requestencap_default;

	if (!vmove) {
#ifdef MAC
		mac_ifnet_create(ifp);
#endif

		/*
		 * Create a Link Level name for this device.
		 */
		namelen = strlen(ifp->if_xname);
		/*
		 * Always save enough space for any possible name so we
		 * can do a rename in place later.
		 */
		masklen = offsetof(struct sockaddr_dl, sdl_data[0]) + IFNAMSIZ;
		socksize = masklen + ifp->if_addrlen;
		if (socksize < sizeof(*sdl))
			socksize = sizeof(*sdl);
		socksize = roundup2(socksize, sizeof(long));
		ifasize = sizeof(*ifa) + 2 * socksize;
		ifa = ifa_alloc(ifasize, M_WAITOK);
		sdl = (struct sockaddr_dl *)(ifa + 1);
		sdl->sdl_len = socksize;
		sdl->sdl_family = AF_LINK;
		bcopy(ifp->if_xname, sdl->sdl_data, namelen);
		sdl->sdl_nlen = namelen;
		sdl->sdl_index = ifp->if_index;
		sdl->sdl_type = ifp->if_type;
		ifp->if_addr = ifa;
		ifa->ifa_ifp = ifp;
		ifa->ifa_rtrequest = link_rtrequest;
		ifa->ifa_addr = (struct sockaddr *)sdl;
		sdl = (struct sockaddr_dl *)(socksize + (caddr_t)sdl);
		ifa->ifa_netmask = (struct sockaddr *)sdl;
		sdl->sdl_len = masklen;
		while (namelen != 0)
			sdl->sdl_data[--namelen] = 0xff;
		TAILQ_INSERT_HEAD(&ifp->if_addrhead, ifa, ifa_link);
		/* Reliably crash if used uninitialized. */
		ifp->if_broadcastaddr = NULL;

		if (ifp->if_type == IFT_ETHER) {
			ifp->if_hw_addr = malloc(ifp->if_addrlen, M_IFADDR,
			    M_WAITOK | M_ZERO);
		}

#if defined(INET) || defined(INET6)
		/* Use defaults for TSO, if nothing is set */
		if (ifp->if_hw_tsomax == 0 &&
		    ifp->if_hw_tsomaxsegcount == 0 &&
		    ifp->if_hw_tsomaxsegsize == 0) {
			/*
			 * The TSO defaults need to be such that an
			 * NFS mbuf list of 35 mbufs totalling just
			 * below 64K works and that a chain of mbufs
			 * can be defragged into at most 32 segments:
			 */
			ifp->if_hw_tsomax = min(IP_MAXPACKET, (32 * MCLBYTES) -
			    (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN));
			ifp->if_hw_tsomaxsegcount = 35;
			ifp->if_hw_tsomaxsegsize = 2048;	/* 2K */

			/* XXX some drivers set IFCAP_TSO after ethernet attach */
			if (ifp->if_capabilities & IFCAP_TSO) {
				if_printf(ifp, "Using defaults for TSO: %u/%u/%u\n",
				    ifp->if_hw_tsomax,
				    ifp->if_hw_tsomaxsegcount,
				    ifp->if_hw_tsomaxsegsize);
			}
		}
#endif
	}
#ifdef VIMAGE
	else {
		/*
		 * Update the interface index in the link layer address
		 * of the interface.
		 */
		for (ifa = ifp->if_addr; ifa != NULL;
		    ifa = TAILQ_NEXT(ifa, ifa_link)) {
			if (ifa->ifa_addr->sa_family == AF_LINK) {
				sdl = (struct sockaddr_dl *)ifa->ifa_addr;
				sdl->sdl_index = ifp->if_index;
			}
		}
	}
#endif

	IFNET_WLOCK();
	TAILQ_INSERT_TAIL(&V_ifnet, ifp, if_link);
#ifdef VIMAGE
	curvnet->vnet_ifcnt++;
#endif
	IFNET_WUNLOCK();

	if (domain_init_status >= 2)
		if_attachdomain1(ifp);

	EVENTHANDLER_INVOKE(ifnet_arrival_event, ifp);
	if (IS_DEFAULT_VNET(curvnet))
		devctl_notify("IFNET", ifp->if_xname, "ATTACH", NULL);

	/* Announce the interface. */
	rt_ifannouncemsg(ifp, IFAN_ARRIVAL);
}

static void
if_attachdomain(void *dummy)
{
	struct ifnet *ifp;

	TAILQ_FOREACH(ifp, &V_ifnet, if_link)
		if_attachdomain1(ifp);
}
SYSINIT(domainifattach, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_SECOND,
    if_attachdomain, NULL);

static void
if_attachdomain1(struct ifnet *ifp)
{
	struct domain *dp;

	/*
	 * Since dp->dom_ifattach calls malloc() with M_WAITOK, we
	 * cannot fully lock the ifp->if_afdata initialization.
	 */
	IF_AFDATA_LOCK(ifp);
	if (ifp->if_afdata_initialized >= domain_init_status) {
		IF_AFDATA_UNLOCK(ifp);
		log(LOG_WARNING, "%s called more than once on %s\n",
		    __func__, ifp->if_xname);
		return;
	}
	ifp->if_afdata_initialized = domain_init_status;
	IF_AFDATA_UNLOCK(ifp);

	/* address family dependent data region */
	bzero(ifp->if_afdata, sizeof(ifp->if_afdata));
	for (dp = domains; dp; dp = dp->dom_next) {
		if (dp->dom_ifattach)
			ifp->if_afdata[dp->dom_family] =
			    (*dp->dom_ifattach)(ifp);
	}
}

/*
 * Remove any unicast or broadcast network addresses from an interface.
 */
void
if_purgeaddrs(struct ifnet *ifp)
{
	struct ifaddr *ifa, *next;

	/* XXX cannot hold IF_ADDR_WLOCK over called functions. */
	TAILQ_FOREACH_SAFE(ifa, &ifp->if_addrhead, ifa_link, next) {
		if (ifa->ifa_addr->sa_family == AF_LINK)
			continue;
#ifdef INET
		/* XXX: Ugly!! ad hoc just for INET */
		if (ifa->ifa_addr->sa_family == AF_INET) {
			struct ifaliasreq ifr;

			bzero(&ifr, sizeof(ifr));
			ifr.ifra_addr = *ifa->ifa_addr;
			if (ifa->ifa_dstaddr)
				ifr.ifra_broadaddr = *ifa->ifa_dstaddr;
			if (in_control(NULL, SIOCDIFADDR, (caddr_t)&ifr, ifp,
			    NULL) == 0)
				continue;
		}
#endif /* INET */
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6) {
			in6_purgeaddr(ifa);
			/* ifp_addrhead is already updated */
			continue;
		}
#endif /* INET6 */
		IF_ADDR_WLOCK(ifp);
		TAILQ_REMOVE(&ifp->if_addrhead, ifa, ifa_link);
		IF_ADDR_WUNLOCK(ifp);
		ifa_free(ifa);
	}
}

/*
 * Remove any multicast network addresses from an interface when an ifnet
 * is going away.
 */
static void
if_purgemaddrs(struct ifnet *ifp)
{
	struct ifmultiaddr *ifma;
	struct ifmultiaddr *next;

	IF_ADDR_WLOCK(ifp);
	TAILQ_FOREACH_SAFE(ifma, &ifp->if_multiaddrs, ifma_link, next)
		if_delmulti_locked(ifp, ifma, 1);
	IF_ADDR_WUNLOCK(ifp);
}

/*
 * Detach an interface, removing it from the list of "active" interfaces.
 * If the vmove flag is set on entry to if_detach_internal(), perform only a
 * limited subset of cleanup tasks, given that we are moving an ifnet from
 * one vnet to another, where it must be fully operational.
 *
 * XXXRW: There are some significant questions about event ordering, and
 * how to prevent things from starting to use the interface during detach.
 */
void
if_detach(struct ifnet *ifp)
{

	CURVNET_SET_QUIET(ifp->if_vnet);
	if_detach_internal(ifp, 0, NULL);
	CURVNET_RESTORE();
}

/*
 * The vmove flag, if set, indicates that we are called from a callpath
 * that is moving an interface to a different vnet instance.
 *
 * The shutdown flag, if set, indicates that we are called in the
 * process of shutting down a vnet instance.  Currently only the
 * vnet_if_return SYSUNINIT function sets it.  Note: we can be called
 * on a vnet instance shutdown without this flag being set, e.g., when
 * the cloned interfaces are destroyed as the first thing during teardown.
 */
static int
if_detach_internal(struct ifnet *ifp, int vmove, struct if_clone **ifcp)
{
	struct ifaddr *ifa;
	int i;
	struct domain *dp;
	struct ifnet *iter;
	int found = 0;
#ifdef VIMAGE
	int shutdown;

	shutdown = (ifp->if_vnet->vnet_state > SI_SUB_VNET &&
	    ifp->if_vnet->vnet_state < SI_SUB_VNET_DONE) ? 1 : 0;
#endif
	IFNET_WLOCK();
	TAILQ_FOREACH(iter, &V_ifnet, if_link)
		if (iter == ifp) {
			TAILQ_REMOVE(&V_ifnet, ifp, if_link);
			found = 1;
			break;
		}
	IFNET_WUNLOCK();
	if (!found) {
		/*
		 * While we would want to panic here, we cannot
		 * guarantee that the interface is indeed still on
		 * the list given we don't hold locks all the way.
		 */
		return (ENOENT);
#if 0
		if (vmove)
			panic("%s: ifp=%p not on the ifnet tailq %p",
			    __func__, ifp, &V_ifnet);
		else
			return;	/* XXX this should panic as well? */
#endif
	}

	/*
	 * At this point we know the interface still was on the ifnet list
	 * and we removed it so we are in a stable state.
	 */
#ifdef VIMAGE
	curvnet->vnet_ifcnt--;
#endif

	/*
	 * In any case (destroy or vmove) detach us from the groups
	 * and remove/wait for pending events on the taskq.
	 * XXX-BZ in theory an interface could still enqueue a taskq change?
	 */
	if_delgroups(ifp);

	taskqueue_drain(taskqueue_swi, &ifp->if_linktask);

	/*
	 * Check if this is a cloned interface or not.  This must be done
	 * even if we are shutting down, as an if_vmove_reclaim() would
	 * move the ifp and if_clone_addgroup() would otherwise end up with
	 * a corrupted string from a gibberish pointer.
	 */
	if (vmove && ifcp != NULL)
		*ifcp = if_clone_findifc(ifp);

	if_down(ifp);

#ifdef VIMAGE
	/*
	 * On VNET shutdown abort here as the stack teardown will do all
	 * the work top-down for us.
	 */
	if (shutdown) {
		/*
		 * In case of a vmove we are done here without error.
		 * If we would signal an error it would lead to the same
		 * abort as if we did not find the ifnet anymore.
		 * if_detach() calls us in void context and does not care
		 * about an early abort notification, so life is splendid :)
		 */
		goto finish_vnet_shutdown;
	}
#endif

	/*
	 * At this point we are not tearing down a VNET and are either
	 * going to destroy or vmove the interface and have to cleanup
	 * accordingly.
	 */

	/*
	 * Remove routes and flush queues.
	 */
#ifdef ALTQ
	if (ALTQ_IS_ENABLED(&ifp->if_snd))
		altq_disable(&ifp->if_snd);
	if (ALTQ_IS_ATTACHED(&ifp->if_snd))
		altq_detach(&ifp->if_snd);
#endif

	if_purgeaddrs(ifp);

#ifdef INET
	in_ifdetach(ifp);
#endif

#ifdef INET6
	/*
	 * Remove all IPv6 kernel structs related to ifp.  This should be done
	 * before removing routing entries below, since IPv6 interface direct
	 * routes are expected to be removed by the IPv6-specific kernel API.
	 * Otherwise, the kernel will detect some inconsistency and complain
	 * about it.
	 */
	in6_ifdetach(ifp);
#endif
	if_purgemaddrs(ifp);

	/* Announce that the interface is gone. */
	rt_ifannouncemsg(ifp, IFAN_DEPARTURE);
	EVENTHANDLER_INVOKE(ifnet_departure_event, ifp);
	if (IS_DEFAULT_VNET(curvnet))
		devctl_notify("IFNET", ifp->if_xname, "DETACH", NULL);

	if (!vmove) {
		/*
		 * Prevent further calls into the device driver via ifnet.
		 */
		if_dead(ifp);

		/*
		 * Remove link ifaddr pointer and maybe decrement if_index.
		 * Clean up all addresses.
		 */
		free(ifp->if_hw_addr, M_IFADDR);
		ifp->if_hw_addr = NULL;
		ifp->if_addr = NULL;

		/* We can now free link ifaddr. */
		IF_ADDR_WLOCK(ifp);
		if (!TAILQ_EMPTY(&ifp->if_addrhead)) {
			ifa = TAILQ_FIRST(&ifp->if_addrhead);
			TAILQ_REMOVE(&ifp->if_addrhead, ifa, ifa_link);
			IF_ADDR_WUNLOCK(ifp);
			ifa_free(ifa);
		} else
			IF_ADDR_WUNLOCK(ifp);
	}

	rt_flushifroutes(ifp);

#ifdef VIMAGE
finish_vnet_shutdown:
#endif
	/*
	 * We cannot hold the lock over dom_ifdetach calls as they might
	 * sleep, for example trying to drain a callout, thus open up the
	 * theoretical race with re-attaching.
	 */
	IF_AFDATA_LOCK(ifp);
	i = ifp->if_afdata_initialized;
	ifp->if_afdata_initialized = 0;
	IF_AFDATA_UNLOCK(ifp);
	for (dp = domains; i > 0 && dp; dp = dp->dom_next) {
		if (dp->dom_ifdetach && ifp->if_afdata[dp->dom_family]) {
			(*dp->dom_ifdetach)(ifp,
			    ifp->if_afdata[dp->dom_family]);
			ifp->if_afdata[dp->dom_family] = NULL;
		}
	}

	return (0);
}

#ifdef VIMAGE
/*
 * if_vmove() performs a limited version of if_detach() in the current
 * vnet and if_attach()es the ifnet to the vnet specified as the 2nd arg.
 * An attempt is made to shrink if_index in the current vnet, to find an
 * unused if_index in the target vnet (calling if_grow() if necessary),
 * and finally to find an unused if_xname for the target vnet.
 */
static void
if_vmove(struct ifnet *ifp, struct vnet *new_vnet)
{
	struct if_clone *ifc;
	u_int bif_dlt, bif_hdrlen;
	int rc;

	/*
	 * if_detach_internal() will call the eventhandler to notify
	 * interface departure.  That will detach if_bpf.  We need to
	 * save the dlt and hdrlen so we can re-attach it later.
	 */
	bpf_get_bp_params(ifp->if_bpf, &bif_dlt, &bif_hdrlen);

	/*
	 * Detach from current vnet, but preserve LLADDR info, do not
	 * mark as dead etc. so that the ifnet can be reattached later.
	 * If we cannot find it, we lost the race to someone else.
	 */
	rc = if_detach_internal(ifp, 1, &ifc);
	if (rc != 0)
		return;

	/*
	 * Unlink the ifnet from ifindex_table[] in current vnet, and shrink
	 * the if_index for that vnet if possible.
	 *
	 * NOTE: IFNET_WLOCK/IFNET_WUNLOCK() are assumed to be unvirtualized,
	 * or we'd lock on one vnet and unlock on another.
	 */
	IFNET_WLOCK();
	ifindex_free_locked(ifp->if_index);
	IFNET_WUNLOCK();

	/*
	 * Perform interface-specific reassignment tasks, if provided by
	 * the driver.
	 */
	if (ifp->if_reassign != NULL)
		ifp->if_reassign(ifp, new_vnet, NULL);

	/*
	 * Switch to the context of the target vnet.
	 */
	CURVNET_SET_QUIET(new_vnet);

	IFNET_WLOCK();
	ifp->if_index = ifindex_alloc();
	ifnet_setbyindex_locked(ifp->if_index, ifp);
	IFNET_WUNLOCK();

	if_attach_internal(ifp, 1, ifc);

	if (ifp->if_bpf == NULL)
		bpfattach(ifp, bif_dlt, bif_hdrlen);

	CURVNET_RESTORE();
}
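
/*
 * The two helpers below implement the jail/vnet interface assignment
 * pair: if_vmove_loan() pushes an interface into a child prison's vnet,
 * while if_vmove_reclaim() pulls it back into the caller's vnet.  They
 * back ifconfig(8)'s "vnet" and "-vnet" commands (assumption: the ioctl
 * dispatch that reaches them lives outside this excerpt).
 */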
/*
 * Move an ifnet to or from another child prison/vnet, specified by the
 * jail id.
 */
static int
if_vmove_loan(struct thread *td, struct ifnet *ifp, char *ifname, int jid)
{
	struct prison *pr;
	struct ifnet *difp;
	int shutdown;

	/* Try to find the prison within our visibility. */
	sx_slock(&allprison_lock);
	pr = prison_find_child(td->td_ucred->cr_prison, jid);
	sx_sunlock(&allprison_lock);
	if (pr == NULL)
		return (ENXIO);
	prison_hold_locked(pr);
	mtx_unlock(&pr->pr_mtx);

	/* Do not try to move the iface from and to the same prison. */
	if (pr->pr_vnet == ifp->if_vnet) {
		prison_free(pr);
		return (EEXIST);
	}

	/* Make sure the named iface does not exist in the dst. prison/vnet. */
	/* XXX Lock interfaces to avoid races. */
	CURVNET_SET_QUIET(pr->pr_vnet);
	difp = ifunit(ifname);
	if (difp != NULL) {
		CURVNET_RESTORE();
		prison_free(pr);
		return (EEXIST);
	}

	/* Make sure the VNET is stable. */
	shutdown = (ifp->if_vnet->vnet_state > SI_SUB_VNET &&
	    ifp->if_vnet->vnet_state < SI_SUB_VNET_DONE) ? 1 : 0;
	if (shutdown) {
		CURVNET_RESTORE();
		prison_free(pr);
		return (EBUSY);
	}
	CURVNET_RESTORE();

	/* Move the interface into the child jail/vnet. */
	if_vmove(ifp, pr->pr_vnet);

	/* Report the new if_xname back to userland. */
	sprintf(ifname, "%s", ifp->if_xname);

	prison_free(pr);
	return (0);
}

static int
if_vmove_reclaim(struct thread *td, char *ifname, int jid)
{
	struct prison *pr;
	struct vnet *vnet_dst;
	struct ifnet *ifp;
	int shutdown;

	/* Try to find the prison within our visibility. */
	sx_slock(&allprison_lock);
	pr = prison_find_child(td->td_ucred->cr_prison, jid);
	sx_sunlock(&allprison_lock);
	if (pr == NULL)
		return (ENXIO);
	prison_hold_locked(pr);
	mtx_unlock(&pr->pr_mtx);

	/* Make sure the named iface exists in the source prison/vnet. */
	CURVNET_SET(pr->pr_vnet);
	ifp = ifunit(ifname);		/* XXX Lock to avoid races. */
	if (ifp == NULL) {
		CURVNET_RESTORE();
		prison_free(pr);
		return (ENXIO);
	}

	/* Do not try to move the iface from and to the same prison. */
	vnet_dst = TD_TO_VNET(td);
	if (vnet_dst == ifp->if_vnet) {
		CURVNET_RESTORE();
		prison_free(pr);
		return (EEXIST);
	}

	/* Make sure the VNET is stable. */
	shutdown = (ifp->if_vnet->vnet_state > SI_SUB_VNET &&
	    ifp->if_vnet->vnet_state < SI_SUB_VNET_DONE) ? 1 : 0;
	if (shutdown) {
		CURVNET_RESTORE();
		prison_free(pr);
		return (EBUSY);
	}

	/* Get the interface back from the child jail/vnet. */
	if_vmove(ifp, vnet_dst);
	CURVNET_RESTORE();

	/* Report the new if_xname back to userland. */
	sprintf(ifname, "%s", ifp->if_xname);

	prison_free(pr);
	return (0);
}
#endif /* VIMAGE */
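
/*
 * Interface groups.  Every interface is made a member of the IFG_ALL
 * group by if_attach_internal(), and cloned interfaces additionally join
 * a group named after their cloner.  Userland manages and queries group
 * membership (e.g. via ifconfig(8)'s "group"/"-group" parameters), which
 * reaches if_addgroup()/if_delgroup() below through the group ioctls.
 */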
/*
 * Add a group to an interface
 */
int
if_addgroup(struct ifnet *ifp, const char *groupname)
{
	struct ifg_list		*ifgl;
	struct ifg_group	*ifg = NULL;
	struct ifg_member	*ifgm;
	int			 new = 0;

	if (groupname[0] && groupname[strlen(groupname) - 1] >= '0' &&
	    groupname[strlen(groupname) - 1] <= '9')
		return (EINVAL);

	IFNET_WLOCK();
	TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next)
		if (!strcmp(ifgl->ifgl_group->ifg_group, groupname)) {
			IFNET_WUNLOCK();
			return (EEXIST);
		}

	if ((ifgl = (struct ifg_list *)malloc(sizeof(struct ifg_list), M_TEMP,
	    M_NOWAIT)) == NULL) {
		IFNET_WUNLOCK();
		return (ENOMEM);
	}

	if ((ifgm = (struct ifg_member *)malloc(sizeof(struct ifg_member),
	    M_TEMP, M_NOWAIT)) == NULL) {
		free(ifgl, M_TEMP);
		IFNET_WUNLOCK();
		return (ENOMEM);
	}

	TAILQ_FOREACH(ifg, &V_ifg_head, ifg_next)
		if (!strcmp(ifg->ifg_group, groupname))
			break;

	if (ifg == NULL) {
		if ((ifg = (struct ifg_group *)malloc(sizeof(struct ifg_group),
		    M_TEMP, M_NOWAIT)) == NULL) {
			free(ifgl, M_TEMP);
			free(ifgm, M_TEMP);
			IFNET_WUNLOCK();
			return (ENOMEM);
		}
		strlcpy(ifg->ifg_group, groupname, sizeof(ifg->ifg_group));
		ifg->ifg_refcnt = 0;
		TAILQ_INIT(&ifg->ifg_members);
		TAILQ_INSERT_TAIL(&V_ifg_head, ifg, ifg_next);
		new = 1;
	}

	ifg->ifg_refcnt++;
	ifgl->ifgl_group = ifg;
	ifgm->ifgm_ifp = ifp;

	IF_ADDR_WLOCK(ifp);
	TAILQ_INSERT_TAIL(&ifg->ifg_members, ifgm, ifgm_next);
	TAILQ_INSERT_TAIL(&ifp->if_groups, ifgl, ifgl_next);
	IF_ADDR_WUNLOCK(ifp);

	IFNET_WUNLOCK();

	if (new)
		EVENTHANDLER_INVOKE(group_attach_event, ifg);
	EVENTHANDLER_INVOKE(group_change_event, groupname);

	return (0);
}

/*
 * Remove a group from an interface
 */
int
if_delgroup(struct ifnet *ifp, const char *groupname)
{
	struct ifg_list		*ifgl;
	struct ifg_member	*ifgm;

	IFNET_WLOCK();
	TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next)
		if (!strcmp(ifgl->ifgl_group->ifg_group, groupname))
			break;
	if (ifgl == NULL) {
		IFNET_WUNLOCK();
		return (ENOENT);
	}

	IF_ADDR_WLOCK(ifp);
	TAILQ_REMOVE(&ifp->if_groups, ifgl, ifgl_next);
	IF_ADDR_WUNLOCK(ifp);

	TAILQ_FOREACH(ifgm, &ifgl->ifgl_group->ifg_members, ifgm_next)
		if (ifgm->ifgm_ifp == ifp)
			break;

	if (ifgm != NULL) {
		TAILQ_REMOVE(&ifgl->ifgl_group->ifg_members, ifgm, ifgm_next);
		free(ifgm, M_TEMP);
	}

	if (--ifgl->ifgl_group->ifg_refcnt == 0) {
		TAILQ_REMOVE(&V_ifg_head, ifgl->ifgl_group, ifg_next);
		IFNET_WUNLOCK();
		EVENTHANDLER_INVOKE(group_detach_event, ifgl->ifgl_group);
		free(ifgl->ifgl_group, M_TEMP);
	} else
		IFNET_WUNLOCK();

	free(ifgl, M_TEMP);

	EVENTHANDLER_INVOKE(group_change_event, groupname);

	return (0);
}

/*
 * Remove an interface from all groups
 */
static void
if_delgroups(struct ifnet *ifp)
{
	struct ifg_list		*ifgl;
	struct ifg_member	*ifgm;
	char groupname[IFNAMSIZ];

	IFNET_WLOCK();
	while (!TAILQ_EMPTY(&ifp->if_groups)) {
		ifgl = TAILQ_FIRST(&ifp->if_groups);

		strlcpy(groupname, ifgl->ifgl_group->ifg_group, IFNAMSIZ);

		IF_ADDR_WLOCK(ifp);
		TAILQ_REMOVE(&ifp->if_groups, ifgl, ifgl_next);
		IF_ADDR_WUNLOCK(ifp);

		TAILQ_FOREACH(ifgm, &ifgl->ifgl_group->ifg_members, ifgm_next)
			if (ifgm->ifgm_ifp == ifp)
				break;

		if (ifgm != NULL) {
			TAILQ_REMOVE(&ifgl->ifgl_group->ifg_members, ifgm,
			    ifgm_next);
			free(ifgm, M_TEMP);
		}

		if (--ifgl->ifgl_group->ifg_refcnt == 0) {
			TAILQ_REMOVE(&V_ifg_head, ifgl->ifgl_group, ifg_next);
			IFNET_WUNLOCK();
			EVENTHANDLER_INVOKE(group_detach_event,
			    ifgl->ifgl_group);
			free(ifgl->ifgl_group, M_TEMP);
		} else
			IFNET_WUNLOCK();

		free(ifgl, M_TEMP);

		EVENTHANDLER_INVOKE(group_change_event, groupname);

		IFNET_WLOCK();
	}
	IFNET_WUNLOCK();
}

/*
 * Stores all groups from an interface in memory pointed
 * to by data
 */
static int
if_getgroup(struct ifgroupreq *data, struct ifnet *ifp)
{
	int			 len, error;
	struct ifg_list		*ifgl;
	struct ifg_req		 ifgrq, *ifgp;
	struct ifgroupreq	*ifgr = data;

	if (ifgr->ifgr_len == 0) {
		IF_ADDR_RLOCK(ifp);
		TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next)
			ifgr->ifgr_len += sizeof(struct ifg_req);
		IF_ADDR_RUNLOCK(ifp);
		return (0);
	}

	len = ifgr->ifgr_len;
	ifgp = ifgr->ifgr_groups;
	/* XXX: wire */
	IF_ADDR_RLOCK(ifp);
	TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) {
		if (len < sizeof(ifgrq)) {
			IF_ADDR_RUNLOCK(ifp);
			return (EINVAL);
		}
		bzero(&ifgrq, sizeof ifgrq);
		strlcpy(ifgrq.ifgrq_group, ifgl->ifgl_group->ifg_group,
		    sizeof(ifgrq.ifgrq_group));
		if ((error = copyout(&ifgrq, ifgp, sizeof(struct ifg_req)))) {
			IF_ADDR_RUNLOCK(ifp);
			return (error);
		}
		len -= sizeof(ifgrq);
		ifgp++;
	}
	IF_ADDR_RUNLOCK(ifp);

	return (0);
}

/*
 * Stores all members of a group in memory pointed to by data
 */
static int
if_getgroupmembers(struct ifgroupreq *data)
{
	struct ifgroupreq	*ifgr = data;
	struct ifg_group	*ifg;
	struct ifg_member	*ifgm;
	struct ifg_req		 ifgrq, *ifgp;
	int			 len, error;

	IFNET_RLOCK();
	TAILQ_FOREACH(ifg, &V_ifg_head, ifg_next)
		if (!strcmp(ifg->ifg_group, ifgr->ifgr_name))
			break;
	if (ifg == NULL) {
		IFNET_RUNLOCK();
		return (ENOENT);
	}

	if (ifgr->ifgr_len == 0) {
		TAILQ_FOREACH(ifgm, &ifg->ifg_members, ifgm_next)
			ifgr->ifgr_len += sizeof(ifgrq);
		IFNET_RUNLOCK();
		return (0);
	}

	len = ifgr->ifgr_len;
	ifgp = ifgr->ifgr_groups;
	TAILQ_FOREACH(ifgm, &ifg->ifg_members, ifgm_next) {
		if (len < sizeof(ifgrq)) {
			IFNET_RUNLOCK();
			return (EINVAL);
		}
		bzero(&ifgrq, sizeof ifgrq);
		strlcpy(ifgrq.ifgrq_member, ifgm->ifgm_ifp->if_xname,
		    sizeof(ifgrq.ifgrq_member));
		if ((error = copyout(&ifgrq, ifgp, sizeof(struct ifg_req)))) {
			IFNET_RUNLOCK();
			return (error);
		}
		len -= sizeof(ifgrq);
		ifgp++;
	}
	IFNET_RUNLOCK();

	return (0);
}

/*
 * Return counter values from counter(9)s stored in ifnet.
 */
uint64_t
if_get_counter_default(struct ifnet *ifp, ift_counter cnt)
{

	KASSERT(cnt < IFCOUNTERS, ("%s: invalid cnt %d", __func__, cnt));

	return (counter_u64_fetch(ifp->if_counters[cnt]));
}
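
/*
 * Illustrative note (not part of the original comments): a driver whose
 * hardware does not maintain its own statistics simply bumps the shared
 * counters from its data path, e.g.
 *
 *	if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
 *
 * whereas a driver with hardware counters typically overrides
 * ifp->if_get_counter with its own routine, often deferring to
 * if_get_counter_default() for counters it does not track.
 */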
/*
 * Increase an ifnet counter.  Usually used for counters shared
 * between the stack and a driver, but the function supports them all.
 */
void
if_inc_counter(struct ifnet *ifp, ift_counter cnt, int64_t inc)
{

	KASSERT(cnt < IFCOUNTERS, ("%s: invalid cnt %d", __func__, cnt));

	counter_u64_add(ifp->if_counters[cnt], inc);
}

/*
 * Copy data from ifnet to userland API structure if_data.
 */
void
if_data_copy(struct ifnet *ifp, struct if_data *ifd)
{

	ifd->ifi_type = ifp->if_type;
	ifd->ifi_physical = 0;
	ifd->ifi_addrlen = ifp->if_addrlen;
	ifd->ifi_hdrlen = ifp->if_hdrlen;
	ifd->ifi_link_state = ifp->if_link_state;
	ifd->ifi_vhid = 0;
	ifd->ifi_datalen = sizeof(struct if_data);
	ifd->ifi_mtu = ifp->if_mtu;
	ifd->ifi_metric = ifp->if_metric;
	ifd->ifi_baudrate = ifp->if_baudrate;
	ifd->ifi_hwassist = ifp->if_hwassist;
	ifd->ifi_epoch = ifp->if_epoch;
	ifd->ifi_lastchange = ifp->if_lastchange;

	ifd->ifi_ipackets = ifp->if_get_counter(ifp, IFCOUNTER_IPACKETS);
	ifd->ifi_ierrors = ifp->if_get_counter(ifp, IFCOUNTER_IERRORS);
	ifd->ifi_opackets = ifp->if_get_counter(ifp, IFCOUNTER_OPACKETS);
	ifd->ifi_oerrors = ifp->if_get_counter(ifp, IFCOUNTER_OERRORS);
	ifd->ifi_collisions = ifp->if_get_counter(ifp, IFCOUNTER_COLLISIONS);
	ifd->ifi_ibytes = ifp->if_get_counter(ifp, IFCOUNTER_IBYTES);
	ifd->ifi_obytes = ifp->if_get_counter(ifp, IFCOUNTER_OBYTES);
	ifd->ifi_imcasts = ifp->if_get_counter(ifp, IFCOUNTER_IMCASTS);
	ifd->ifi_omcasts = ifp->if_get_counter(ifp, IFCOUNTER_OMCASTS);
	ifd->ifi_iqdrops = ifp->if_get_counter(ifp, IFCOUNTER_IQDROPS);
	ifd->ifi_oqdrops = ifp->if_get_counter(ifp, IFCOUNTER_OQDROPS);
	ifd->ifi_noproto = ifp->if_get_counter(ifp, IFCOUNTER_NOPROTO);
}

/*
 * Wrapper functions for struct ifnet address list locking macros.  These are
 * used by kernel modules to avoid encoding programming interface or binary
 * interface assumptions that may be violated when kernel-internal locking
 * approaches change.
 */
void
if_addr_rlock(struct ifnet *ifp)
{

	IF_ADDR_RLOCK(ifp);
}

void
if_addr_runlock(struct ifnet *ifp)
{

	IF_ADDR_RUNLOCK(ifp);
}

void
if_maddr_rlock(if_t ifp)
{

	IF_ADDR_RLOCK((struct ifnet *)ifp);
}

void
if_maddr_runlock(if_t ifp)
{

	IF_ADDR_RUNLOCK((struct ifnet *)ifp);
}

/*
 * Initialization, destruction and refcounting functions for ifaddrs.
 */
struct ifaddr *
ifa_alloc(size_t size, int flags)
{
	struct ifaddr *ifa;

	KASSERT(size >= sizeof(struct ifaddr),
	    ("%s: invalid size %zu", __func__, size));

	ifa = malloc(size, M_IFADDR, M_ZERO | flags);
	if (ifa == NULL)
		return (NULL);

	if ((ifa->ifa_opackets = counter_u64_alloc(flags)) == NULL)
		goto fail;
	if ((ifa->ifa_ipackets = counter_u64_alloc(flags)) == NULL)
		goto fail;
	if ((ifa->ifa_obytes = counter_u64_alloc(flags)) == NULL)
		goto fail;
	if ((ifa->ifa_ibytes = counter_u64_alloc(flags)) == NULL)
		goto fail;

	refcount_init(&ifa->ifa_refcnt, 1);

	return (ifa);

fail:
	/* free(NULL) is okay */
	counter_u64_free(ifa->ifa_opackets);
	counter_u64_free(ifa->ifa_ipackets);
	counter_u64_free(ifa->ifa_obytes);
	counter_u64_free(ifa->ifa_ibytes);
	free(ifa, M_IFADDR);

	return (NULL);
}

void
ifa_ref(struct ifaddr *ifa)
{

	refcount_acquire(&ifa->ifa_refcnt);
}

void
ifa_free(struct ifaddr *ifa)
{

	if (refcount_release(&ifa->ifa_refcnt)) {
		counter_u64_free(ifa->ifa_opackets);
		counter_u64_free(ifa->ifa_ipackets);
		counter_u64_free(ifa->ifa_obytes);
		counter_u64_free(ifa->ifa_ibytes);
		free(ifa, M_IFADDR);
	}
}

static int
ifa_maintain_loopback_route(int cmd, const char *otype, struct ifaddr *ifa,
    struct sockaddr *ia)
{
	int error;
	struct rt_addrinfo info;
	struct sockaddr_dl null_sdl;
	struct ifnet *ifp;

	ifp = ifa->ifa_ifp;

	bzero(&info, sizeof(info));
	if (cmd != RTM_DELETE)
		info.rti_ifp = V_loif;
	info.rti_flags = ifa->ifa_flags | RTF_HOST | RTF_STATIC | RTF_PINNED;
	info.rti_info[RTAX_DST] = ia;
	info.rti_info[RTAX_GATEWAY] = (struct sockaddr *)&null_sdl;
	link_init_sdl(ifp, (struct sockaddr *)&null_sdl, ifp->if_type);

	error = rtrequest1_fib(cmd, &info, NULL, ifp->if_fib);

	if (error != 0)
		log(LOG_DEBUG, "%s: %s failed for interface %s: %u\n",
		    __func__, otype, if_name(ifp), error);

	return (error);
}

int
ifa_add_loopback_route(struct ifaddr *ifa, struct sockaddr *ia)
{

	return (ifa_maintain_loopback_route(RTM_ADD, "insertion", ifa, ia));
}

int
ifa_del_loopback_route(struct ifaddr *ifa, struct sockaddr *ia)
{

	return (ifa_maintain_loopback_route(RTM_DELETE, "deletion", ifa, ia));
}

int
ifa_switch_loopback_route(struct ifaddr *ifa, struct sockaddr *ia)
{

	return (ifa_maintain_loopback_route(RTM_CHANGE, "switch", ifa, ia));
}

/*
 * XXX: Because sockaddr_dl has deeper structure than the sockaddr
 * structs used to represent other address families, it is necessary
 * to perform a different comparison.
 */

#define	sa_dl_equal(a1, a2)	\
	((((const struct sockaddr_dl *)(a1))->sdl_len ==		\
	 ((const struct sockaddr_dl *)(a2))->sdl_len) &&		\
	 (bcmp(CLLADDR((const struct sockaddr_dl *)(a1)),		\
	       CLLADDR((const struct sockaddr_dl *)(a2)),		\
	       ((const struct sockaddr_dl *)(a1))->sdl_alen) == 0))

/*
 * Locate an interface based on a complete address.
 */
/*ARGSUSED*/
static struct ifaddr *
ifa_ifwithaddr_internal(const struct sockaddr *addr, int getref)
{
	struct ifnet *ifp;
	struct ifaddr *ifa;

	IFNET_RLOCK_NOSLEEP();
	TAILQ_FOREACH(ifp, &V_ifnet, if_link) {
		IF_ADDR_RLOCK(ifp);
		TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
			if (ifa->ifa_addr->sa_family != addr->sa_family)
				continue;
			if (sa_equal(addr, ifa->ifa_addr)) {
				if (getref)
					ifa_ref(ifa);
				IF_ADDR_RUNLOCK(ifp);
				goto done;
			}
			/* IP6 doesn't have broadcast */
			if ((ifp->if_flags & IFF_BROADCAST) &&
			    ifa->ifa_broadaddr &&
			    ifa->ifa_broadaddr->sa_len != 0 &&
			    sa_equal(ifa->ifa_broadaddr, addr)) {
				if (getref)
					ifa_ref(ifa);
				IF_ADDR_RUNLOCK(ifp);
				goto done;
			}
		}
		IF_ADDR_RUNLOCK(ifp);
	}
	ifa = NULL;
done:
	IFNET_RUNLOCK_NOSLEEP();
	return (ifa);
}

struct ifaddr *
ifa_ifwithaddr(const struct sockaddr *addr)
{

	return (ifa_ifwithaddr_internal(addr, 1));
}

int
ifa_ifwithaddr_check(const struct sockaddr *addr)
{

	return (ifa_ifwithaddr_internal(addr, 0) != NULL);
}

/*
 * Locate an interface based on the broadcast address.
 */
/* ARGSUSED */
struct ifaddr *
ifa_ifwithbroadaddr(const struct sockaddr *addr, int fibnum)
{
	struct ifnet *ifp;
	struct ifaddr *ifa;

	IFNET_RLOCK_NOSLEEP();
	TAILQ_FOREACH(ifp, &V_ifnet, if_link) {
		if ((fibnum != RT_ALL_FIBS) && (ifp->if_fib != fibnum))
			continue;
		IF_ADDR_RLOCK(ifp);
		TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
			if (ifa->ifa_addr->sa_family != addr->sa_family)
				continue;
			if ((ifp->if_flags & IFF_BROADCAST) &&
			    ifa->ifa_broadaddr &&
			    ifa->ifa_broadaddr->sa_len != 0 &&
			    sa_equal(ifa->ifa_broadaddr, addr)) {
				ifa_ref(ifa);
				IF_ADDR_RUNLOCK(ifp);
				goto done;
			}
		}
		IF_ADDR_RUNLOCK(ifp);
	}
	ifa = NULL;
done:
	IFNET_RUNLOCK_NOSLEEP();
	return (ifa);
}

/*
 * Locate the point-to-point interface with a given destination address.
 */
/*ARGSUSED*/
struct ifaddr *
ifa_ifwithdstaddr(const struct sockaddr *addr, int fibnum)
{
	struct ifnet *ifp;
	struct ifaddr *ifa;

	IFNET_RLOCK_NOSLEEP();
	TAILQ_FOREACH(ifp, &V_ifnet, if_link) {
		if ((ifp->if_flags & IFF_POINTOPOINT) == 0)
			continue;
		if ((fibnum != RT_ALL_FIBS) && (ifp->if_fib != fibnum))
			continue;
		IF_ADDR_RLOCK(ifp);
		TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
			if (ifa->ifa_addr->sa_family != addr->sa_family)
				continue;
			if (ifa->ifa_dstaddr != NULL &&
			    sa_equal(addr, ifa->ifa_dstaddr)) {
				ifa_ref(ifa);
				IF_ADDR_RUNLOCK(ifp);
				goto done;
			}
		}
		IF_ADDR_RUNLOCK(ifp);
	}
	ifa = NULL;
done:
	IFNET_RUNLOCK_NOSLEEP();
	return (ifa);
}
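
/*
 * Note on the ifa_ifwith* lookups in this file: except for
 * ifa_ifwithaddr_check(), they return a referenced ifaddr, so callers
 * are expected to release it with ifa_free() when done.
 */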
/*
 * Find an interface on a specific network.  If many match, the most
 * specific one is chosen.
 */
struct ifaddr *
ifa_ifwithnet(const struct sockaddr *addr, int ignore_ptp, int fibnum)
{
	struct ifnet *ifp;
	struct ifaddr *ifa;
	struct ifaddr *ifa_maybe = NULL;
	u_int af = addr->sa_family;
	const char *addr_data = addr->sa_data, *cplim;

	/*
	 * AF_LINK addresses can be looked up directly by their index number,
	 * so do that if we can.
	 */
	if (af == AF_LINK) {
		const struct sockaddr_dl *sdl =
		    (const struct sockaddr_dl *)addr;
		if (sdl->sdl_index && sdl->sdl_index <= V_if_index)
			return (ifaddr_byindex(sdl->sdl_index));
	}

	/*
	 * Scan through each interface, looking for ones that have addresses
	 * in this address family and the requested fib.  Maintain a reference
	 * on ifa_maybe once we find one, as we release the IF_ADDR_RLOCK()
	 * that kept it stable when we move on to the next interface.
	 */
	IFNET_RLOCK_NOSLEEP();
	TAILQ_FOREACH(ifp, &V_ifnet, if_link) {
		if ((fibnum != RT_ALL_FIBS) && (ifp->if_fib != fibnum))
			continue;
		IF_ADDR_RLOCK(ifp);
		TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
			const char *cp, *cp2, *cp3;

			if (ifa->ifa_addr->sa_family != af)
next:				continue;
			if (af == AF_INET &&
			    ifp->if_flags & IFF_POINTOPOINT && !ignore_ptp) {
				/*
				 * This is a bit broken as it doesn't
				 * take into account that the remote end may
				 * be a single node in the network we are
				 * looking for.
				 * The trouble is that we don't know the
				 * netmask for the remote end.
				 */
				if (ifa->ifa_dstaddr != NULL &&
				    sa_equal(addr, ifa->ifa_dstaddr)) {
					ifa_ref(ifa);
					IF_ADDR_RUNLOCK(ifp);
					goto done;
				}
			} else {
				/*
				 * Scan all the bits in the ifa's address.
				 * If a bit disagrees with what we are
				 * looking for, mask it with the netmask
				 * to see if it really matters.
				 * (A byte at a time)
				 */
				if (ifa->ifa_netmask == 0)
					continue;
				cp = addr_data;
				cp2 = ifa->ifa_addr->sa_data;
				cp3 = ifa->ifa_netmask->sa_data;
				cplim = ifa->ifa_netmask->sa_len
					+ (char *)ifa->ifa_netmask;
				while (cp3 < cplim)
					if ((*cp++ ^ *cp2++) & *cp3++)
						goto next; /* next address! */
				/*
				 * If the netmask of what we just found
				 * is more specific than what we had before
				 * (if we had one), or if the virtual status
				 * of the new prefix is better than that of
				 * the old one, then remember the new one
				 * before continuing to search for an even
				 * better one.
				 */
				if (ifa_maybe == NULL ||
				    ifa_preferred(ifa_maybe, ifa) ||
				    rn_refines((caddr_t)ifa->ifa_netmask,
				    (caddr_t)ifa_maybe->ifa_netmask)) {
					if (ifa_maybe != NULL)
						ifa_free(ifa_maybe);
					ifa_maybe = ifa;
					ifa_ref(ifa_maybe);
				}
			}
		}
		IF_ADDR_RUNLOCK(ifp);
	}
	ifa = ifa_maybe;
	ifa_maybe = NULL;
done:
	IFNET_RUNLOCK_NOSLEEP();
	if (ifa_maybe != NULL)
		ifa_free(ifa_maybe);
	return (ifa);
}
2036 */ 2037 struct ifaddr * 2038 ifaof_ifpforaddr(const struct sockaddr *addr, struct ifnet *ifp) 2039 { 2040 struct ifaddr *ifa; 2041 const char *cp, *cp2, *cp3; 2042 char *cplim; 2043 struct ifaddr *ifa_maybe = NULL; 2044 u_int af = addr->sa_family; 2045 2046 if (af >= AF_MAX) 2047 return (NULL); 2048 IF_ADDR_RLOCK(ifp); 2049 TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { 2050 if (ifa->ifa_addr->sa_family != af) 2051 continue; 2052 if (ifa_maybe == NULL) 2053 ifa_maybe = ifa; 2054 if (ifa->ifa_netmask == 0) { 2055 if (sa_equal(addr, ifa->ifa_addr) || 2056 (ifa->ifa_dstaddr && 2057 sa_equal(addr, ifa->ifa_dstaddr))) 2058 goto done; 2059 continue; 2060 } 2061 if (ifp->if_flags & IFF_POINTOPOINT) { 2062 if (sa_equal(addr, ifa->ifa_dstaddr)) 2063 goto done; 2064 } else { 2065 cp = addr->sa_data; 2066 cp2 = ifa->ifa_addr->sa_data; 2067 cp3 = ifa->ifa_netmask->sa_data; 2068 cplim = ifa->ifa_netmask->sa_len + (char *)ifa->ifa_netmask; 2069 for (; cp3 < cplim; cp3++) 2070 if ((*cp++ ^ *cp2++) & *cp3) 2071 break; 2072 if (cp3 == cplim) 2073 goto done; 2074 } 2075 } 2076 ifa = ifa_maybe; 2077 done: 2078 if (ifa != NULL) 2079 ifa_ref(ifa); 2080 IF_ADDR_RUNLOCK(ifp); 2081 return (ifa); 2082 } 2083 2084 /* 2085 * See whether new ifa is better than current one: 2086 * 1) A non-virtual one is preferred over virtual. 2087 * 2) A virtual in master state preferred over any other state. 2088 * 2089 * Used in several address selecting functions. 2090 */ 2091 int 2092 ifa_preferred(struct ifaddr *cur, struct ifaddr *next) 2093 { 2094 2095 return (cur->ifa_carp && (!next->ifa_carp || 2096 ((*carp_master_p)(next) && !(*carp_master_p)(cur)))); 2097 } 2098 2099 #include <net/if_llatbl.h> 2100 2101 /* 2102 * Default action when installing a route with a Link Level gateway. 2103 * Lookup an appropriate real ifa to point to. 2104 * This should be moved to /sys/net/link.c eventually. 2105 */ 2106 static void 2107 link_rtrequest(int cmd, struct rtentry *rt, struct rt_addrinfo *info) 2108 { 2109 struct ifaddr *ifa, *oifa; 2110 struct sockaddr *dst; 2111 struct ifnet *ifp; 2112 2113 if (cmd != RTM_ADD || ((ifa = rt->rt_ifa) == NULL) || 2114 ((ifp = ifa->ifa_ifp) == NULL) || ((dst = rt_key(rt)) == NULL)) 2115 return; 2116 ifa = ifaof_ifpforaddr(dst, ifp); 2117 if (ifa) { 2118 oifa = rt->rt_ifa; 2119 rt->rt_ifa = ifa; 2120 ifa_free(oifa); 2121 if (ifa->ifa_rtrequest && ifa->ifa_rtrequest != link_rtrequest) 2122 ifa->ifa_rtrequest(cmd, rt, info); 2123 } 2124 } 2125 2126 struct sockaddr_dl * 2127 link_alloc_sdl(size_t size, int flags) 2128 { 2129 2130 return (malloc(size, M_TEMP, flags)); 2131 } 2132 2133 void 2134 link_free_sdl(struct sockaddr *sa) 2135 { 2136 free(sa, M_TEMP); 2137 } 2138 2139 /* 2140 * Fills in given sdl with interface basic info. 2141 * Returns pointer to filled sdl. 2142 */ 2143 struct sockaddr_dl * 2144 link_init_sdl(struct ifnet *ifp, struct sockaddr *paddr, u_char iftype) 2145 { 2146 struct sockaddr_dl *sdl; 2147 2148 sdl = (struct sockaddr_dl *)paddr; 2149 memset(sdl, 0, sizeof(struct sockaddr_dl)); 2150 sdl->sdl_len = sizeof(struct sockaddr_dl); 2151 sdl->sdl_family = AF_LINK; 2152 sdl->sdl_index = ifp->if_index; 2153 sdl->sdl_type = iftype; 2154 2155 return (sdl); 2156 } 2157 2158 /* 2159 * Mark an interface down and notify protocols of 2160 * the transition. 
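 * Every address of the given family (or of all families for PF_UNSPEC)
 * gets a PRC_IFDOWN notification, the send queue is flushed, and carp
 * and the routing socket are told about the change.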
2161 */ 2162 static void 2163 if_unroute(struct ifnet *ifp, int flag, int fam) 2164 { 2165 struct ifaddr *ifa; 2166 2167 KASSERT(flag == IFF_UP, ("if_unroute: flag != IFF_UP")); 2168 2169 ifp->if_flags &= ~flag; 2170 getmicrotime(&ifp->if_lastchange); 2171 TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) 2172 if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family)) 2173 pfctlinput(PRC_IFDOWN, ifa->ifa_addr); 2174 ifp->if_qflush(ifp); 2175 2176 if (ifp->if_carp) 2177 (*carp_linkstate_p)(ifp); 2178 rt_ifmsg(ifp); 2179 } 2180 2181 /* 2182 * Mark an interface up and notify protocols of 2183 * the transition. 2184 */ 2185 static void 2186 if_route(struct ifnet *ifp, int flag, int fam) 2187 { 2188 struct ifaddr *ifa; 2189 2190 KASSERT(flag == IFF_UP, ("if_route: flag != IFF_UP")); 2191 2192 ifp->if_flags |= flag; 2193 getmicrotime(&ifp->if_lastchange); 2194 TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) 2195 if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family)) 2196 pfctlinput(PRC_IFUP, ifa->ifa_addr); 2197 if (ifp->if_carp) 2198 (*carp_linkstate_p)(ifp); 2199 rt_ifmsg(ifp); 2200 #ifdef INET6 2201 in6_if_up(ifp); 2202 #endif 2203 } 2204 2205 void (*vlan_link_state_p)(struct ifnet *); /* XXX: private from if_vlan */ 2206 void (*vlan_trunk_cap_p)(struct ifnet *); /* XXX: private from if_vlan */ 2207 struct ifnet *(*vlan_trunkdev_p)(struct ifnet *); 2208 struct ifnet *(*vlan_devat_p)(struct ifnet *, uint16_t); 2209 int (*vlan_tag_p)(struct ifnet *, uint16_t *); 2210 int (*vlan_setcookie_p)(struct ifnet *, void *); 2211 void *(*vlan_cookie_p)(struct ifnet *); 2212 2213 /* 2214 * Handle a change in the interface link state. To avoid LORs 2215 * between driver lock and upper layer locks, as well as possible 2216 * recursions, we post event to taskqueue, and all job 2217 * is done in static do_link_state_change(). 2218 */ 2219 void 2220 if_link_state_change(struct ifnet *ifp, int link_state) 2221 { 2222 /* Return if state hasn't changed. */ 2223 if (ifp->if_link_state == link_state) 2224 return; 2225 2226 ifp->if_link_state = link_state; 2227 2228 taskqueue_enqueue(taskqueue_swi, &ifp->if_linktask); 2229 } 2230 2231 static void 2232 do_link_state_change(void *arg, int pending) 2233 { 2234 struct ifnet *ifp = (struct ifnet *)arg; 2235 int link_state = ifp->if_link_state; 2236 CURVNET_SET(ifp->if_vnet); 2237 2238 /* Notify that the link state has changed. */ 2239 rt_ifmsg(ifp); 2240 if (ifp->if_vlantrunk != NULL) 2241 (*vlan_link_state_p)(ifp); 2242 2243 if ((ifp->if_type == IFT_ETHER || ifp->if_type == IFT_L2VLAN) && 2244 ifp->if_l2com != NULL) 2245 (*ng_ether_link_state_p)(ifp, link_state); 2246 if (ifp->if_carp) 2247 (*carp_linkstate_p)(ifp); 2248 if (ifp->if_bridge) 2249 (*bridge_linkstate_p)(ifp); 2250 if (ifp->if_lagg) 2251 (*lagg_linkstate_p)(ifp, link_state); 2252 2253 if (IS_DEFAULT_VNET(curvnet)) 2254 devctl_notify("IFNET", ifp->if_xname, 2255 (link_state == LINK_STATE_UP) ? "LINK_UP" : "LINK_DOWN", 2256 NULL); 2257 if (pending > 1) 2258 if_printf(ifp, "%d link states coalesced\n", pending); 2259 if (log_link_state_change) 2260 log(LOG_NOTICE, "%s: link state changed to %s\n", ifp->if_xname, 2261 (link_state == LINK_STATE_UP) ? "UP" : "DOWN" ); 2262 EVENTHANDLER_INVOKE(ifnet_link_event, ifp, link_state); 2263 CURVNET_RESTORE(); 2264 } 2265 2266 /* 2267 * Mark an interface down and notify protocols of 2268 * the transition. 
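 * The ifnet_event handlers are invoked with IFNET_EVENT_DOWN before
 * if_unroute() actually marks the interface down.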
2269 */ 2270 void 2271 if_down(struct ifnet *ifp) 2272 { 2273 2274 EVENTHANDLER_INVOKE(ifnet_event, ifp, IFNET_EVENT_DOWN); 2275 if_unroute(ifp, IFF_UP, AF_UNSPEC); 2276 } 2277 2278 /* 2279 * Mark an interface up and notify protocols of 2280 * the transition. 2281 */ 2282 void 2283 if_up(struct ifnet *ifp) 2284 { 2285 2286 if_route(ifp, IFF_UP, AF_UNSPEC); 2287 EVENTHANDLER_INVOKE(ifnet_event, ifp, IFNET_EVENT_UP); 2288 } 2289 2290 /* 2291 * Flush an interface queue. 2292 */ 2293 void 2294 if_qflush(struct ifnet *ifp) 2295 { 2296 struct mbuf *m, *n; 2297 struct ifaltq *ifq; 2298 2299 ifq = &ifp->if_snd; 2300 IFQ_LOCK(ifq); 2301 #ifdef ALTQ 2302 if (ALTQ_IS_ENABLED(ifq)) 2303 ALTQ_PURGE(ifq); 2304 #endif 2305 n = ifq->ifq_head; 2306 while ((m = n) != NULL) { 2307 n = m->m_nextpkt; 2308 m_freem(m); 2309 } 2310 ifq->ifq_head = 0; 2311 ifq->ifq_tail = 0; 2312 ifq->ifq_len = 0; 2313 IFQ_UNLOCK(ifq); 2314 } 2315 2316 /* 2317 * Map interface name to interface structure pointer, with or without 2318 * returning a reference. 2319 */ 2320 struct ifnet * 2321 ifunit_ref(const char *name) 2322 { 2323 struct ifnet *ifp; 2324 2325 IFNET_RLOCK_NOSLEEP(); 2326 TAILQ_FOREACH(ifp, &V_ifnet, if_link) { 2327 if (strncmp(name, ifp->if_xname, IFNAMSIZ) == 0 && 2328 !(ifp->if_flags & IFF_DYING)) 2329 break; 2330 } 2331 if (ifp != NULL) 2332 if_ref(ifp); 2333 IFNET_RUNLOCK_NOSLEEP(); 2334 return (ifp); 2335 } 2336 2337 struct ifnet * 2338 ifunit(const char *name) 2339 { 2340 struct ifnet *ifp; 2341 2342 IFNET_RLOCK_NOSLEEP(); 2343 TAILQ_FOREACH(ifp, &V_ifnet, if_link) { 2344 if (strncmp(name, ifp->if_xname, IFNAMSIZ) == 0) 2345 break; 2346 } 2347 IFNET_RUNLOCK_NOSLEEP(); 2348 return (ifp); 2349 } 2350 2351 static void * 2352 ifr_buffer_get_buffer(struct thread *td, void *data) 2353 { 2354 union ifreq_union *ifrup; 2355 2356 ifrup = data; 2357 #ifdef COMPAT_FREEBSD32 2358 if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) 2359 return ((void *)(uintptr_t) 2360 ifrup->ifr32.ifr_ifru.ifru_buffer.buffer); 2361 #endif 2362 return (ifrup->ifr.ifr_ifru.ifru_buffer.buffer); 2363 } 2364 2365 static void 2366 ifr_buffer_set_buffer_null(struct thread *td, void *data) 2367 { 2368 union ifreq_union *ifrup; 2369 2370 ifrup = data; 2371 #ifdef COMPAT_FREEBSD32 2372 if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) 2373 ifrup->ifr32.ifr_ifru.ifru_buffer.buffer = 0; 2374 else 2375 #endif 2376 ifrup->ifr.ifr_ifru.ifru_buffer.buffer = NULL; 2377 } 2378 2379 static size_t 2380 ifr_buffer_get_length(struct thread *td, void *data) 2381 { 2382 union ifreq_union *ifrup; 2383 2384 ifrup = data; 2385 #ifdef COMPAT_FREEBSD32 2386 if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) 2387 return (ifrup->ifr32.ifr_ifru.ifru_buffer.length); 2388 #endif 2389 return (ifrup->ifr.ifr_ifru.ifru_buffer.length); 2390 } 2391 2392 static void 2393 ifr_buffer_set_length(struct thread *td, void *data, size_t len) 2394 { 2395 union ifreq_union *ifrup; 2396 2397 ifrup = data; 2398 #ifdef COMPAT_FREEBSD32 2399 if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) 2400 ifrup->ifr32.ifr_ifru.ifru_buffer.length = len; 2401 else 2402 #endif 2403 ifrup->ifr.ifr_ifru.ifru_buffer.length = len; 2404 } 2405 2406 /* 2407 * Hardware specific interface ioctls. 
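 * Called from ifioctl() with a referenced ifp.  Requests that are not
 * handled here return ENOIOCTL, so that ifioctl() can hand them to the
 * protocol and, if necessary, to the driver.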
2408 */ 2409 static int 2410 ifhwioctl(u_long cmd, struct ifnet *ifp, caddr_t data, struct thread *td) 2411 { 2412 struct ifreq *ifr; 2413 int error = 0, do_ifup = 0; 2414 int new_flags, temp_flags; 2415 size_t namelen, onamelen; 2416 size_t descrlen; 2417 char *descrbuf, *odescrbuf; 2418 char new_name[IFNAMSIZ]; 2419 struct ifaddr *ifa; 2420 struct sockaddr_dl *sdl; 2421 2422 ifr = (struct ifreq *)data; 2423 switch (cmd) { 2424 case SIOCGIFINDEX: 2425 ifr->ifr_index = ifp->if_index; 2426 break; 2427 2428 case SIOCGIFFLAGS: 2429 temp_flags = ifp->if_flags | ifp->if_drv_flags; 2430 ifr->ifr_flags = temp_flags & 0xffff; 2431 ifr->ifr_flagshigh = temp_flags >> 16; 2432 break; 2433 2434 case SIOCGIFCAP: 2435 ifr->ifr_reqcap = ifp->if_capabilities; 2436 ifr->ifr_curcap = ifp->if_capenable; 2437 break; 2438 2439 #ifdef MAC 2440 case SIOCGIFMAC: 2441 error = mac_ifnet_ioctl_get(td->td_ucred, ifr, ifp); 2442 break; 2443 #endif 2444 2445 case SIOCGIFMETRIC: 2446 ifr->ifr_metric = ifp->if_metric; 2447 break; 2448 2449 case SIOCGIFMTU: 2450 ifr->ifr_mtu = ifp->if_mtu; 2451 break; 2452 2453 case SIOCGIFPHYS: 2454 /* XXXGL: did this ever worked? */ 2455 ifr->ifr_phys = 0; 2456 break; 2457 2458 case SIOCGIFDESCR: 2459 error = 0; 2460 sx_slock(&ifdescr_sx); 2461 if (ifp->if_description == NULL) 2462 error = ENOMSG; 2463 else { 2464 /* space for terminating nul */ 2465 descrlen = strlen(ifp->if_description) + 1; 2466 if (ifr_buffer_get_length(td, ifr) < descrlen) 2467 ifr_buffer_set_buffer_null(td, ifr); 2468 else 2469 error = copyout(ifp->if_description, 2470 ifr_buffer_get_buffer(td, ifr), descrlen); 2471 ifr_buffer_set_length(td, ifr, descrlen); 2472 } 2473 sx_sunlock(&ifdescr_sx); 2474 break; 2475 2476 case SIOCSIFDESCR: 2477 error = priv_check(td, PRIV_NET_SETIFDESCR); 2478 if (error) 2479 return (error); 2480 2481 /* 2482 * Copy only (length-1) bytes to make sure that 2483 * if_description is always nul terminated. The 2484 * length parameter is supposed to count the 2485 * terminating nul in. 2486 */ 2487 if (ifr_buffer_get_length(td, ifr) > ifdescr_maxlen) 2488 return (ENAMETOOLONG); 2489 else if (ifr_buffer_get_length(td, ifr) == 0) 2490 descrbuf = NULL; 2491 else { 2492 descrbuf = malloc(ifr_buffer_get_length(td, ifr), 2493 M_IFDESCR, M_WAITOK | M_ZERO); 2494 error = copyin(ifr_buffer_get_buffer(td, ifr), descrbuf, 2495 ifr_buffer_get_length(td, ifr) - 1); 2496 if (error) { 2497 free(descrbuf, M_IFDESCR); 2498 break; 2499 } 2500 } 2501 2502 sx_xlock(&ifdescr_sx); 2503 odescrbuf = ifp->if_description; 2504 ifp->if_description = descrbuf; 2505 sx_xunlock(&ifdescr_sx); 2506 2507 getmicrotime(&ifp->if_lastchange); 2508 free(odescrbuf, M_IFDESCR); 2509 break; 2510 2511 case SIOCGIFFIB: 2512 ifr->ifr_fib = ifp->if_fib; 2513 break; 2514 2515 case SIOCSIFFIB: 2516 error = priv_check(td, PRIV_NET_SETIFFIB); 2517 if (error) 2518 return (error); 2519 if (ifr->ifr_fib >= rt_numfibs) 2520 return (EINVAL); 2521 2522 ifp->if_fib = ifr->ifr_fib; 2523 break; 2524 2525 case SIOCSIFFLAGS: 2526 error = priv_check(td, PRIV_NET_SETIFFLAGS); 2527 if (error) 2528 return (error); 2529 /* 2530 * Currently, no driver owned flags pass the IFF_CANTCHANGE 2531 * check, so we don't need special handling here yet. 
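 * Driver-owned state lives in if_drv_flags and is left untouched here.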
2532 */ 2533 new_flags = (ifr->ifr_flags & 0xffff) | 2534 (ifr->ifr_flagshigh << 16); 2535 if (ifp->if_flags & IFF_UP && 2536 (new_flags & IFF_UP) == 0) { 2537 if_down(ifp); 2538 } else if (new_flags & IFF_UP && 2539 (ifp->if_flags & IFF_UP) == 0) { 2540 do_ifup = 1; 2541 } 2542 /* See if permanently promiscuous mode bit is about to flip */ 2543 if ((ifp->if_flags ^ new_flags) & IFF_PPROMISC) { 2544 if (new_flags & IFF_PPROMISC) 2545 ifp->if_flags |= IFF_PROMISC; 2546 else if (ifp->if_pcount == 0) 2547 ifp->if_flags &= ~IFF_PROMISC; 2548 if (log_promisc_mode_change) 2549 log(LOG_INFO, "%s: permanently promiscuous mode %s\n", 2550 ifp->if_xname, 2551 ((new_flags & IFF_PPROMISC) ? 2552 "enabled" : "disabled")); 2553 } 2554 ifp->if_flags = (ifp->if_flags & IFF_CANTCHANGE) | 2555 (new_flags &~ IFF_CANTCHANGE); 2556 if (ifp->if_ioctl) { 2557 (void) (*ifp->if_ioctl)(ifp, cmd, data); 2558 } 2559 if (do_ifup) 2560 if_up(ifp); 2561 getmicrotime(&ifp->if_lastchange); 2562 break; 2563 2564 case SIOCSIFCAP: 2565 error = priv_check(td, PRIV_NET_SETIFCAP); 2566 if (error) 2567 return (error); 2568 if (ifp->if_ioctl == NULL) 2569 return (EOPNOTSUPP); 2570 if (ifr->ifr_reqcap & ~ifp->if_capabilities) 2571 return (EINVAL); 2572 error = (*ifp->if_ioctl)(ifp, cmd, data); 2573 if (error == 0) 2574 getmicrotime(&ifp->if_lastchange); 2575 break; 2576 2577 #ifdef MAC 2578 case SIOCSIFMAC: 2579 error = mac_ifnet_ioctl_set(td->td_ucred, ifr, ifp); 2580 break; 2581 #endif 2582 2583 case SIOCSIFNAME: 2584 error = priv_check(td, PRIV_NET_SETIFNAME); 2585 if (error) 2586 return (error); 2587 error = copyinstr(ifr->ifr_data, new_name, IFNAMSIZ, NULL); 2588 if (error != 0) 2589 return (error); 2590 if (new_name[0] == '\0') 2591 return (EINVAL); 2592 if (new_name[IFNAMSIZ-1] != '\0') { 2593 new_name[IFNAMSIZ-1] = '\0'; 2594 if (strlen(new_name) == IFNAMSIZ-1) 2595 return (EINVAL); 2596 } 2597 if (ifunit(new_name) != NULL) 2598 return (EEXIST); 2599 2600 /* 2601 * XXX: Locking. Nothing else seems to lock if_flags, 2602 * and there are numerous other races with the 2603 * ifunit() checks not being atomic with namespace 2604 * changes (renames, vmoves, if_attach, etc). 2605 */ 2606 ifp->if_flags |= IFF_RENAMING; 2607 2608 /* Announce the departure of the interface. */ 2609 rt_ifannouncemsg(ifp, IFAN_DEPARTURE); 2610 EVENTHANDLER_INVOKE(ifnet_departure_event, ifp); 2611 2612 log(LOG_INFO, "%s: changing name to '%s'\n", 2613 ifp->if_xname, new_name); 2614 2615 IF_ADDR_WLOCK(ifp); 2616 strlcpy(ifp->if_xname, new_name, sizeof(ifp->if_xname)); 2617 ifa = ifp->if_addr; 2618 sdl = (struct sockaddr_dl *)ifa->ifa_addr; 2619 namelen = strlen(new_name); 2620 onamelen = sdl->sdl_nlen; 2621 /* 2622 * Move the address if needed. This is safe because we 2623 * allocate space for a name of length IFNAMSIZ when we 2624 * create this in if_attach(). 2625 */ 2626 if (namelen != onamelen) { 2627 bcopy(sdl->sdl_data + onamelen, 2628 sdl->sdl_data + namelen, sdl->sdl_alen); 2629 } 2630 bcopy(new_name, sdl->sdl_data, namelen); 2631 sdl->sdl_nlen = namelen; 2632 sdl = (struct sockaddr_dl *)ifa->ifa_netmask; 2633 bzero(sdl->sdl_data, onamelen); 2634 while (namelen != 0) 2635 sdl->sdl_data[--namelen] = 0xff; 2636 IF_ADDR_WUNLOCK(ifp); 2637 2638 EVENTHANDLER_INVOKE(ifnet_arrival_event, ifp); 2639 /* Announce the return of the interface. 
*/ 2640 rt_ifannouncemsg(ifp, IFAN_ARRIVAL); 2641 2642 ifp->if_flags &= ~IFF_RENAMING; 2643 break; 2644 2645 #ifdef VIMAGE 2646 case SIOCSIFVNET: 2647 error = priv_check(td, PRIV_NET_SETIFVNET); 2648 if (error) 2649 return (error); 2650 error = if_vmove_loan(td, ifp, ifr->ifr_name, ifr->ifr_jid); 2651 break; 2652 #endif 2653 2654 case SIOCSIFMETRIC: 2655 error = priv_check(td, PRIV_NET_SETIFMETRIC); 2656 if (error) 2657 return (error); 2658 ifp->if_metric = ifr->ifr_metric; 2659 getmicrotime(&ifp->if_lastchange); 2660 break; 2661 2662 case SIOCSIFPHYS: 2663 error = priv_check(td, PRIV_NET_SETIFPHYS); 2664 if (error) 2665 return (error); 2666 if (ifp->if_ioctl == NULL) 2667 return (EOPNOTSUPP); 2668 error = (*ifp->if_ioctl)(ifp, cmd, data); 2669 if (error == 0) 2670 getmicrotime(&ifp->if_lastchange); 2671 break; 2672 2673 case SIOCSIFMTU: 2674 { 2675 u_long oldmtu = ifp->if_mtu; 2676 2677 error = priv_check(td, PRIV_NET_SETIFMTU); 2678 if (error) 2679 return (error); 2680 if (ifr->ifr_mtu < IF_MINMTU || ifr->ifr_mtu > IF_MAXMTU) 2681 return (EINVAL); 2682 if (ifp->if_ioctl == NULL) 2683 return (EOPNOTSUPP); 2684 error = (*ifp->if_ioctl)(ifp, cmd, data); 2685 if (error == 0) { 2686 getmicrotime(&ifp->if_lastchange); 2687 rt_ifmsg(ifp); 2688 } 2689 /* 2690 * If the link MTU changed, do network layer specific procedure. 2691 */ 2692 if (ifp->if_mtu != oldmtu) { 2693 #ifdef INET6 2694 nd6_setmtu(ifp); 2695 #endif 2696 rt_updatemtu(ifp); 2697 } 2698 break; 2699 } 2700 2701 case SIOCADDMULTI: 2702 case SIOCDELMULTI: 2703 if (cmd == SIOCADDMULTI) 2704 error = priv_check(td, PRIV_NET_ADDMULTI); 2705 else 2706 error = priv_check(td, PRIV_NET_DELMULTI); 2707 if (error) 2708 return (error); 2709 2710 /* Don't allow group membership on non-multicast interfaces. */ 2711 if ((ifp->if_flags & IFF_MULTICAST) == 0) 2712 return (EOPNOTSUPP); 2713 2714 /* Don't let users screw up protocols' entries. */ 2715 if (ifr->ifr_addr.sa_family != AF_LINK) 2716 return (EINVAL); 2717 2718 if (cmd == SIOCADDMULTI) { 2719 struct ifmultiaddr *ifma; 2720 2721 /* 2722 * Userland is only permitted to join groups once 2723 * via the if_addmulti() KPI, because it cannot hold 2724 * struct ifmultiaddr * between calls. It may also 2725 * lose a race while we check if the membership 2726 * already exists. 
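 * The check below is therefore only advisory: if_addmulti() repeats
 * the lookup under the write lock and simply adds a reference if the
 * group was joined in the meantime.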
2727 */ 2728 IF_ADDR_RLOCK(ifp); 2729 ifma = if_findmulti(ifp, &ifr->ifr_addr); 2730 IF_ADDR_RUNLOCK(ifp); 2731 if (ifma != NULL) 2732 error = EADDRINUSE; 2733 else 2734 error = if_addmulti(ifp, &ifr->ifr_addr, &ifma); 2735 } else { 2736 error = if_delmulti(ifp, &ifr->ifr_addr); 2737 } 2738 if (error == 0) 2739 getmicrotime(&ifp->if_lastchange); 2740 break; 2741 2742 case SIOCSIFPHYADDR: 2743 case SIOCDIFPHYADDR: 2744 #ifdef INET6 2745 case SIOCSIFPHYADDR_IN6: 2746 #endif 2747 case SIOCSIFMEDIA: 2748 case SIOCSIFGENERIC: 2749 error = priv_check(td, PRIV_NET_HWIOCTL); 2750 if (error) 2751 return (error); 2752 if (ifp->if_ioctl == NULL) 2753 return (EOPNOTSUPP); 2754 error = (*ifp->if_ioctl)(ifp, cmd, data); 2755 if (error == 0) 2756 getmicrotime(&ifp->if_lastchange); 2757 break; 2758 2759 case SIOCGIFSTATUS: 2760 case SIOCGIFPSRCADDR: 2761 case SIOCGIFPDSTADDR: 2762 case SIOCGIFMEDIA: 2763 case SIOCGIFXMEDIA: 2764 case SIOCGIFGENERIC: 2765 case SIOCGIFRSSKEY: 2766 case SIOCGIFRSSHASH: 2767 if (ifp->if_ioctl == NULL) 2768 return (EOPNOTSUPP); 2769 error = (*ifp->if_ioctl)(ifp, cmd, data); 2770 break; 2771 2772 case SIOCSIFLLADDR: 2773 error = priv_check(td, PRIV_NET_SETLLADDR); 2774 if (error) 2775 return (error); 2776 error = if_setlladdr(ifp, 2777 ifr->ifr_addr.sa_data, ifr->ifr_addr.sa_len); 2778 break; 2779 2780 case SIOCGHWADDR: 2781 error = if_gethwaddr(ifp, ifr); 2782 break; 2783 2784 case SIOCAIFGROUP: 2785 { 2786 struct ifgroupreq *ifgr = (struct ifgroupreq *)ifr; 2787 2788 error = priv_check(td, PRIV_NET_ADDIFGROUP); 2789 if (error) 2790 return (error); 2791 if ((error = if_addgroup(ifp, ifgr->ifgr_group))) 2792 return (error); 2793 break; 2794 } 2795 2796 case SIOCGIFGROUP: 2797 if ((error = if_getgroup((struct ifgroupreq *)ifr, ifp))) 2798 return (error); 2799 break; 2800 2801 case SIOCDIFGROUP: 2802 { 2803 struct ifgroupreq *ifgr = (struct ifgroupreq *)ifr; 2804 2805 error = priv_check(td, PRIV_NET_DELIFGROUP); 2806 if (error) 2807 return (error); 2808 if ((error = if_delgroup(ifp, ifgr->ifgr_group))) 2809 return (error); 2810 break; 2811 } 2812 2813 default: 2814 error = ENOIOCTL; 2815 break; 2816 } 2817 return (error); 2818 } 2819 2820 #ifdef COMPAT_FREEBSD32 2821 struct ifconf32 { 2822 int32_t ifc_len; 2823 union { 2824 uint32_t ifcu_buf; 2825 uint32_t ifcu_req; 2826 } ifc_ifcu; 2827 }; 2828 #define SIOCGIFCONF32 _IOWR('i', 36, struct ifconf32) 2829 #endif 2830 2831 /* 2832 * Interface ioctls. 2833 */ 2834 int 2835 ifioctl(struct socket *so, u_long cmd, caddr_t data, struct thread *td) 2836 { 2837 struct ifnet *ifp; 2838 struct ifreq *ifr; 2839 int error; 2840 int oif_flags; 2841 #ifdef VIMAGE 2842 int shutdown; 2843 #endif 2844 2845 CURVNET_SET(so->so_vnet); 2846 #ifdef VIMAGE 2847 /* Make sure the VNET is stable. */ 2848 shutdown = (so->so_vnet->vnet_state > SI_SUB_VNET && 2849 so->so_vnet->vnet_state < SI_SUB_VNET_DONE) ? 
1 : 0; 2850 if (shutdown) { 2851 CURVNET_RESTORE(); 2852 return (EBUSY); 2853 } 2854 #endif 2855 2856 2857 switch (cmd) { 2858 case SIOCGIFCONF: 2859 error = ifconf(cmd, data); 2860 CURVNET_RESTORE(); 2861 return (error); 2862 2863 #ifdef COMPAT_FREEBSD32 2864 case SIOCGIFCONF32: 2865 { 2866 struct ifconf32 *ifc32; 2867 struct ifconf ifc; 2868 2869 ifc32 = (struct ifconf32 *)data; 2870 ifc.ifc_len = ifc32->ifc_len; 2871 ifc.ifc_buf = PTRIN(ifc32->ifc_buf); 2872 2873 error = ifconf(SIOCGIFCONF, (void *)&ifc); 2874 CURVNET_RESTORE(); 2875 if (error == 0) 2876 ifc32->ifc_len = ifc.ifc_len; 2877 return (error); 2878 } 2879 #endif 2880 } 2881 ifr = (struct ifreq *)data; 2882 2883 switch (cmd) { 2884 #ifdef VIMAGE 2885 case SIOCSIFRVNET: 2886 error = priv_check(td, PRIV_NET_SETIFVNET); 2887 if (error == 0) 2888 error = if_vmove_reclaim(td, ifr->ifr_name, 2889 ifr->ifr_jid); 2890 CURVNET_RESTORE(); 2891 return (error); 2892 #endif 2893 case SIOCIFCREATE: 2894 case SIOCIFCREATE2: 2895 error = priv_check(td, PRIV_NET_IFCREATE); 2896 if (error == 0) 2897 error = if_clone_create(ifr->ifr_name, 2898 sizeof(ifr->ifr_name), 2899 cmd == SIOCIFCREATE2 ? ifr->ifr_data : NULL); 2900 CURVNET_RESTORE(); 2901 return (error); 2902 case SIOCIFDESTROY: 2903 error = priv_check(td, PRIV_NET_IFDESTROY); 2904 if (error == 0) 2905 error = if_clone_destroy(ifr->ifr_name); 2906 CURVNET_RESTORE(); 2907 return (error); 2908 2909 case SIOCIFGCLONERS: 2910 error = if_clone_list((struct if_clonereq *)data); 2911 CURVNET_RESTORE(); 2912 return (error); 2913 case SIOCGIFGMEMB: 2914 error = if_getgroupmembers((struct ifgroupreq *)data); 2915 CURVNET_RESTORE(); 2916 return (error); 2917 #if defined(INET) || defined(INET6) 2918 case SIOCSVH: 2919 case SIOCGVH: 2920 if (carp_ioctl_p == NULL) 2921 error = EPROTONOSUPPORT; 2922 else 2923 error = (*carp_ioctl_p)(ifr, cmd, td); 2924 CURVNET_RESTORE(); 2925 return (error); 2926 #endif 2927 } 2928 2929 ifp = ifunit_ref(ifr->ifr_name); 2930 if (ifp == NULL) { 2931 CURVNET_RESTORE(); 2932 return (ENXIO); 2933 } 2934 2935 error = ifhwioctl(cmd, ifp, data, td); 2936 if (error != ENOIOCTL) { 2937 if_rele(ifp); 2938 CURVNET_RESTORE(); 2939 return (error); 2940 } 2941 2942 oif_flags = ifp->if_flags; 2943 if (so->so_proto == NULL) { 2944 if_rele(ifp); 2945 CURVNET_RESTORE(); 2946 return (EOPNOTSUPP); 2947 } 2948 2949 /* 2950 * Pass the request on to the socket control method, and if the 2951 * latter returns EOPNOTSUPP, directly to the interface. 2952 * 2953 * Make an exception for the legacy SIOCSIF* requests. Drivers 2954 * trust SIOCSIFADDR et al to come from an already privileged 2955 * layer, and do not perform any credentials checks or input 2956 * validation. 2957 */ 2958 error = ((*so->so_proto->pr_usrreqs->pru_control)(so, cmd, data, 2959 ifp, td)); 2960 if (error == EOPNOTSUPP && ifp != NULL && ifp->if_ioctl != NULL && 2961 cmd != SIOCSIFADDR && cmd != SIOCSIFBRDADDR && 2962 cmd != SIOCSIFDSTADDR && cmd != SIOCSIFNETMASK) 2963 error = (*ifp->if_ioctl)(ifp, cmd, data); 2964 2965 if ((oif_flags ^ ifp->if_flags) & IFF_UP) { 2966 #ifdef INET6 2967 if (ifp->if_flags & IFF_UP) 2968 in6_if_up(ifp); 2969 #endif 2970 } 2971 if_rele(ifp); 2972 CURVNET_RESTORE(); 2973 return (error); 2974 } 2975 2976 /* 2977 * The code common to handling reference counted flags, 2978 * e.g., in ifpromisc() and if_allmulti(). 2979 * The "pflag" argument can specify a permanent mode flag to check, 2980 * such as IFF_PPROMISC for promiscuous mode; should be 0 if none. 
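 * Only the first reference actually sets the interface flag and pushes
 * it down to the driver with SIOCSIFFLAGS; the last reference clears it
 * again.  If the driver rejects the change, both the flag and the
 * refcount are rolled back.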
2981 *
2982 * Only to be used on stack-owned flags, not driver-owned flags.
2983 */
2984 static int
2985 if_setflag(struct ifnet *ifp, int flag, int pflag, int *refcount, int onswitch)
2986 {
2987 struct ifreq ifr;
2988 int error;
2989 int oldflags, oldcount;
2990
2991 /* Sanity checks to catch programming errors */
2992 KASSERT((flag & (IFF_DRV_OACTIVE|IFF_DRV_RUNNING)) == 0,
2993 ("%s: setting driver-owned flag %d", __func__, flag));
2994
2995 if (onswitch)
2996 KASSERT(*refcount >= 0,
2997 ("%s: increment negative refcount %d for flag %d",
2998 __func__, *refcount, flag));
2999 else
3000 KASSERT(*refcount > 0,
3001 ("%s: decrement non-positive refcount %d for flag %d",
3002 __func__, *refcount, flag));
3003
3004 /* In case this mode is permanent, just touch refcount */
3005 if (ifp->if_flags & pflag) {
3006 *refcount += onswitch ? 1 : -1;
3007 return (0);
3008 }
3009
3010 /* Save ifnet parameters in case if_ioctl() fails */
3011 oldcount = *refcount;
3012 oldflags = ifp->if_flags;
3013
3014 /*
3015 * See if we are not the only user and touching the refcount is enough.
3016 * Actually toggle the interface flag if we are the first or last.
3017 */
3018 if (onswitch) {
3019 if ((*refcount)++)
3020 return (0);
3021 ifp->if_flags |= flag;
3022 } else {
3023 if (--(*refcount))
3024 return (0);
3025 ifp->if_flags &= ~flag;
3026 }
3027
3028 /* Call down the driver since we've changed interface flags */
3029 if (ifp->if_ioctl == NULL) {
3030 error = EOPNOTSUPP;
3031 goto recover;
3032 }
3033 ifr.ifr_flags = ifp->if_flags & 0xffff;
3034 ifr.ifr_flagshigh = ifp->if_flags >> 16;
3035 error = (*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)&ifr);
3036 if (error)
3037 goto recover;
3038 /* Notify userland that interface flags have changed */
3039 rt_ifmsg(ifp);
3040 return (0);
3041
3042 recover:
3043 /* Recover after driver error */
3044 *refcount = oldcount;
3045 ifp->if_flags = oldflags;
3046 return (error);
3047 }
3048
3049 /*
3050 * Set/clear promiscuous mode on interface ifp based on the truth value
3051 * of pswitch. The calls are reference counted so that only the first
3052 * "on" request actually has an effect, as does the final "off" request.
3053 * Results are undefined if the "off" and "on" requests are not matched.
3054 */
3055 int
3056 ifpromisc(struct ifnet *ifp, int pswitch)
3057 {
3058 int error;
3059 int oldflags = ifp->if_flags;
3060
3061 error = if_setflag(ifp, IFF_PROMISC, IFF_PPROMISC,
3062 &ifp->if_pcount, pswitch);
3063 /* If promiscuous mode status has changed, log a message */
3064 if (error == 0 && ((ifp->if_flags ^ oldflags) & IFF_PROMISC) &&
3065 log_promisc_mode_change)
3066 log(LOG_INFO, "%s: promiscuous mode %s\n",
3067 ifp->if_xname,
3068 (ifp->if_flags & IFF_PROMISC) ? "enabled" : "disabled");
3069 return (error);
3070 }
3071
3072 /*
3073 * Return the interface configuration
3074 * of the system. The list may be used
3075 * in later ioctl's (above) to get
3076 * other information.
3077 */
3078 /*ARGSUSED*/
3079 static int
3080 ifconf(u_long cmd, caddr_t data)
3081 {
3082 struct ifconf *ifc = (struct ifconf *)data;
3083 struct ifnet *ifp;
3084 struct ifaddr *ifa;
3085 struct ifreq ifr;
3086 struct sbuf *sb;
3087 int error, full = 0, valid_len, max_len;
3088
3089 /* Limit initial buffer size to MAXPHYS to avoid DoS from userspace.
*/ 3090 max_len = MAXPHYS - 1; 3091 3092 /* Prevent hostile input from being able to crash the system */ 3093 if (ifc->ifc_len <= 0) 3094 return (EINVAL); 3095 3096 again: 3097 if (ifc->ifc_len <= max_len) { 3098 max_len = ifc->ifc_len; 3099 full = 1; 3100 } 3101 sb = sbuf_new(NULL, NULL, max_len + 1, SBUF_FIXEDLEN); 3102 max_len = 0; 3103 valid_len = 0; 3104 3105 IFNET_RLOCK(); 3106 TAILQ_FOREACH(ifp, &V_ifnet, if_link) { 3107 int addrs; 3108 3109 /* 3110 * Zero the ifr_name buffer to make sure we don't 3111 * disclose the contents of the stack. 3112 */ 3113 memset(ifr.ifr_name, 0, sizeof(ifr.ifr_name)); 3114 3115 if (strlcpy(ifr.ifr_name, ifp->if_xname, sizeof(ifr.ifr_name)) 3116 >= sizeof(ifr.ifr_name)) { 3117 sbuf_delete(sb); 3118 IFNET_RUNLOCK(); 3119 return (ENAMETOOLONG); 3120 } 3121 3122 addrs = 0; 3123 IF_ADDR_RLOCK(ifp); 3124 TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { 3125 struct sockaddr *sa = ifa->ifa_addr; 3126 3127 if (prison_if(curthread->td_ucred, sa) != 0) 3128 continue; 3129 addrs++; 3130 if (sa->sa_len <= sizeof(*sa)) { 3131 ifr.ifr_addr = *sa; 3132 sbuf_bcat(sb, &ifr, sizeof(ifr)); 3133 max_len += sizeof(ifr); 3134 } else { 3135 sbuf_bcat(sb, &ifr, 3136 offsetof(struct ifreq, ifr_addr)); 3137 max_len += offsetof(struct ifreq, ifr_addr); 3138 sbuf_bcat(sb, sa, sa->sa_len); 3139 max_len += sa->sa_len; 3140 } 3141 3142 if (sbuf_error(sb) == 0) 3143 valid_len = sbuf_len(sb); 3144 } 3145 IF_ADDR_RUNLOCK(ifp); 3146 if (addrs == 0) { 3147 bzero((caddr_t)&ifr.ifr_addr, sizeof(ifr.ifr_addr)); 3148 sbuf_bcat(sb, &ifr, sizeof(ifr)); 3149 max_len += sizeof(ifr); 3150 3151 if (sbuf_error(sb) == 0) 3152 valid_len = sbuf_len(sb); 3153 } 3154 } 3155 IFNET_RUNLOCK(); 3156 3157 /* 3158 * If we didn't allocate enough space (uncommon), try again. If 3159 * we have already allocated as much space as we are allowed, 3160 * return what we've got. 3161 */ 3162 if (valid_len != max_len && !full) { 3163 sbuf_delete(sb); 3164 goto again; 3165 } 3166 3167 ifc->ifc_len = valid_len; 3168 sbuf_finish(sb); 3169 error = copyout(sbuf_data(sb), ifc->ifc_req, ifc->ifc_len); 3170 sbuf_delete(sb); 3171 return (error); 3172 } 3173 3174 /* 3175 * Just like ifpromisc(), but for all-multicast-reception mode. 3176 */ 3177 int 3178 if_allmulti(struct ifnet *ifp, int onswitch) 3179 { 3180 3181 return (if_setflag(ifp, IFF_ALLMULTI, 0, &ifp->if_amcount, onswitch)); 3182 } 3183 3184 struct ifmultiaddr * 3185 if_findmulti(struct ifnet *ifp, const struct sockaddr *sa) 3186 { 3187 struct ifmultiaddr *ifma; 3188 3189 IF_ADDR_LOCK_ASSERT(ifp); 3190 3191 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 3192 if (sa->sa_family == AF_LINK) { 3193 if (sa_dl_equal(ifma->ifma_addr, sa)) 3194 break; 3195 } else { 3196 if (sa_equal(ifma->ifma_addr, sa)) 3197 break; 3198 } 3199 } 3200 3201 return ifma; 3202 } 3203 3204 /* 3205 * Allocate a new ifmultiaddr and initialize based on passed arguments. We 3206 * make copies of passed sockaddrs. The ifmultiaddr will not be added to 3207 * the ifnet multicast address list here, so the caller must do that and 3208 * other setup work (such as notifying the device driver). The reference 3209 * count is initialized to 1. 
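 * Returns NULL if memory allocation fails; the mflags argument
 * (typically M_NOWAIT) is passed straight to malloc().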
3210 */
3211 static struct ifmultiaddr *
3212 if_allocmulti(struct ifnet *ifp, struct sockaddr *sa, struct sockaddr *llsa,
3213 int mflags)
3214 {
3215 struct ifmultiaddr *ifma;
3216 struct sockaddr *dupsa;
3217
3218 ifma = malloc(sizeof *ifma, M_IFMADDR, mflags |
3219 M_ZERO);
3220 if (ifma == NULL)
3221 return (NULL);
3222
3223 dupsa = malloc(sa->sa_len, M_IFMADDR, mflags);
3224 if (dupsa == NULL) {
3225 free(ifma, M_IFMADDR);
3226 return (NULL);
3227 }
3228 bcopy(sa, dupsa, sa->sa_len);
3229 ifma->ifma_addr = dupsa;
3230
3231 ifma->ifma_ifp = ifp;
3232 ifma->ifma_refcount = 1;
3233 ifma->ifma_protospec = NULL;
3234
3235 if (llsa == NULL) {
3236 ifma->ifma_lladdr = NULL;
3237 return (ifma);
3238 }
3239
3240 dupsa = malloc(llsa->sa_len, M_IFMADDR, mflags);
3241 if (dupsa == NULL) {
3242 free(ifma->ifma_addr, M_IFMADDR);
3243 free(ifma, M_IFMADDR);
3244 return (NULL);
3245 }
3246 bcopy(llsa, dupsa, llsa->sa_len);
3247 ifma->ifma_lladdr = dupsa;
3248
3249 return (ifma);
3250 }
3251
3252 /*
3253 * if_freemulti: free ifmultiaddr structure and possibly attached related
3254 * addresses. The caller is responsible for implementing reference
3255 * counting, notifying the driver, handling routing messages, and releasing
3256 * any dependent link layer state.
3257 */
3258 static void
3259 if_freemulti(struct ifmultiaddr *ifma)
3260 {
3261
3262 KASSERT(ifma->ifma_refcount == 0, ("if_freemulti: refcount %d",
3263 ifma->ifma_refcount));
3264
3265 if (ifma->ifma_lladdr != NULL)
3266 free(ifma->ifma_lladdr, M_IFMADDR);
3267 free(ifma->ifma_addr, M_IFMADDR);
3268 free(ifma, M_IFMADDR);
3269 }
3270
3271 /*
3272 * Register an additional multicast address with a network interface.
3273 *
3274 * - If the address is already present, bump the reference count on the
3275 * address and return.
3276 * - If the address is not link-layer, look up a link layer address.
3277 * - Allocate address structures for one or both addresses, and attach to the
3278 * multicast address list on the interface. If automatically adding a link
3279 * layer address, the protocol address will own a reference to the link
3280 * layer address, to be freed when the protocol address is freed.
3281 * - Notify the network device driver of an addition to the multicast address
3282 * list.
3283 *
3284 * 'sa' points to caller-owned memory with the desired multicast address.
3285 *
3286 * 'retifma' will be used to return a pointer to the resulting multicast
3287 * address reference, if desired.
3288 */
3289 int
3290 if_addmulti(struct ifnet *ifp, struct sockaddr *sa,
3291 struct ifmultiaddr **retifma)
3292 {
3293 struct ifmultiaddr *ifma, *ll_ifma;
3294 struct sockaddr *llsa;
3295 struct sockaddr_dl sdl;
3296 int error;
3297
3298 /*
3299 * If the address is already present, return a new reference to it;
3300 * otherwise, allocate storage and set up a new address.
3301 */
3302 IF_ADDR_WLOCK(ifp);
3303 ifma = if_findmulti(ifp, sa);
3304 if (ifma != NULL) {
3305 ifma->ifma_refcount++;
3306 if (retifma != NULL)
3307 *retifma = ifma;
3308 IF_ADDR_WUNLOCK(ifp);
3309 return (0);
3310 }
3311
3312 /*
3313 * The address isn't already present; resolve the protocol address
3314 * into a link layer address, and then look that up, bump its
3315 * refcount or allocate an ifma for that also.
3316 * Most link layer resolving functions return address data which
3317 * fits inside the default sockaddr_dl structure. However, the callback
3318 * may allocate another sockaddr structure, in which case we need to
3319 * free it later.
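 * Such a separately allocated sockaddr is released with link_free_sdl()
 * once the copies held by the ifma have been made.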
3320 */ 3321 llsa = NULL; 3322 ll_ifma = NULL; 3323 if (ifp->if_resolvemulti != NULL) { 3324 /* Provide called function with buffer size information */ 3325 sdl.sdl_len = sizeof(sdl); 3326 llsa = (struct sockaddr *)&sdl; 3327 error = ifp->if_resolvemulti(ifp, &llsa, sa); 3328 if (error) 3329 goto unlock_out; 3330 } 3331 3332 /* 3333 * Allocate the new address. Don't hook it up yet, as we may also 3334 * need to allocate a link layer multicast address. 3335 */ 3336 ifma = if_allocmulti(ifp, sa, llsa, M_NOWAIT); 3337 if (ifma == NULL) { 3338 error = ENOMEM; 3339 goto free_llsa_out; 3340 } 3341 3342 /* 3343 * If a link layer address is found, we'll need to see if it's 3344 * already present in the address list, or allocate is as well. 3345 * When this block finishes, the link layer address will be on the 3346 * list. 3347 */ 3348 if (llsa != NULL) { 3349 ll_ifma = if_findmulti(ifp, llsa); 3350 if (ll_ifma == NULL) { 3351 ll_ifma = if_allocmulti(ifp, llsa, NULL, M_NOWAIT); 3352 if (ll_ifma == NULL) { 3353 --ifma->ifma_refcount; 3354 if_freemulti(ifma); 3355 error = ENOMEM; 3356 goto free_llsa_out; 3357 } 3358 TAILQ_INSERT_HEAD(&ifp->if_multiaddrs, ll_ifma, 3359 ifma_link); 3360 } else 3361 ll_ifma->ifma_refcount++; 3362 ifma->ifma_llifma = ll_ifma; 3363 } 3364 3365 /* 3366 * We now have a new multicast address, ifma, and possibly a new or 3367 * referenced link layer address. Add the primary address to the 3368 * ifnet address list. 3369 */ 3370 TAILQ_INSERT_HEAD(&ifp->if_multiaddrs, ifma, ifma_link); 3371 3372 if (retifma != NULL) 3373 *retifma = ifma; 3374 3375 /* 3376 * Must generate the message while holding the lock so that 'ifma' 3377 * pointer is still valid. 3378 */ 3379 rt_newmaddrmsg(RTM_NEWMADDR, ifma); 3380 IF_ADDR_WUNLOCK(ifp); 3381 3382 /* 3383 * We are certain we have added something, so call down to the 3384 * interface to let them know about it. 3385 */ 3386 if (ifp->if_ioctl != NULL) { 3387 (void) (*ifp->if_ioctl)(ifp, SIOCADDMULTI, 0); 3388 } 3389 3390 if ((llsa != NULL) && (llsa != (struct sockaddr *)&sdl)) 3391 link_free_sdl(llsa); 3392 3393 return (0); 3394 3395 free_llsa_out: 3396 if ((llsa != NULL) && (llsa != (struct sockaddr *)&sdl)) 3397 link_free_sdl(llsa); 3398 3399 unlock_out: 3400 IF_ADDR_WUNLOCK(ifp); 3401 return (error); 3402 } 3403 3404 /* 3405 * Delete a multicast group membership by network-layer group address. 3406 * 3407 * Returns ENOENT if the entry could not be found. If ifp no longer 3408 * exists, results are undefined. This entry point should only be used 3409 * from subsystems which do appropriate locking to hold ifp for the 3410 * duration of the call. 3411 * Network-layer protocol domains must use if_delmulti_ifma(). 
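 * When the last reference to the group goes away, the driver is told
 * via SIOCDELMULTI so that it can reprogram its hardware filter.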
3412 */ 3413 int 3414 if_delmulti(struct ifnet *ifp, struct sockaddr *sa) 3415 { 3416 struct ifmultiaddr *ifma; 3417 int lastref; 3418 #ifdef INVARIANTS 3419 struct ifnet *oifp; 3420 3421 IFNET_RLOCK_NOSLEEP(); 3422 TAILQ_FOREACH(oifp, &V_ifnet, if_link) 3423 if (ifp == oifp) 3424 break; 3425 if (ifp != oifp) 3426 ifp = NULL; 3427 IFNET_RUNLOCK_NOSLEEP(); 3428 3429 KASSERT(ifp != NULL, ("%s: ifnet went away", __func__)); 3430 #endif 3431 if (ifp == NULL) 3432 return (ENOENT); 3433 3434 IF_ADDR_WLOCK(ifp); 3435 lastref = 0; 3436 ifma = if_findmulti(ifp, sa); 3437 if (ifma != NULL) 3438 lastref = if_delmulti_locked(ifp, ifma, 0); 3439 IF_ADDR_WUNLOCK(ifp); 3440 3441 if (ifma == NULL) 3442 return (ENOENT); 3443 3444 if (lastref && ifp->if_ioctl != NULL) { 3445 (void)(*ifp->if_ioctl)(ifp, SIOCDELMULTI, 0); 3446 } 3447 3448 return (0); 3449 } 3450 3451 /* 3452 * Delete all multicast group membership for an interface. 3453 * Should be used to quickly flush all multicast filters. 3454 */ 3455 void 3456 if_delallmulti(struct ifnet *ifp) 3457 { 3458 struct ifmultiaddr *ifma; 3459 struct ifmultiaddr *next; 3460 3461 IF_ADDR_WLOCK(ifp); 3462 TAILQ_FOREACH_SAFE(ifma, &ifp->if_multiaddrs, ifma_link, next) 3463 if_delmulti_locked(ifp, ifma, 0); 3464 IF_ADDR_WUNLOCK(ifp); 3465 } 3466 3467 /* 3468 * Delete a multicast group membership by group membership pointer. 3469 * Network-layer protocol domains must use this routine. 3470 * 3471 * It is safe to call this routine if the ifp disappeared. 3472 */ 3473 void 3474 if_delmulti_ifma(struct ifmultiaddr *ifma) 3475 { 3476 struct ifnet *ifp; 3477 int lastref; 3478 3479 ifp = ifma->ifma_ifp; 3480 #ifdef DIAGNOSTIC 3481 if (ifp == NULL) { 3482 printf("%s: ifma_ifp seems to be detached\n", __func__); 3483 } else { 3484 struct ifnet *oifp; 3485 3486 IFNET_RLOCK_NOSLEEP(); 3487 TAILQ_FOREACH(oifp, &V_ifnet, if_link) 3488 if (ifp == oifp) 3489 break; 3490 if (ifp != oifp) { 3491 printf("%s: ifnet %p disappeared\n", __func__, ifp); 3492 ifp = NULL; 3493 } 3494 IFNET_RUNLOCK_NOSLEEP(); 3495 } 3496 #endif 3497 /* 3498 * If and only if the ifnet instance exists: Acquire the address lock. 3499 */ 3500 if (ifp != NULL) 3501 IF_ADDR_WLOCK(ifp); 3502 3503 lastref = if_delmulti_locked(ifp, ifma, 0); 3504 3505 if (ifp != NULL) { 3506 /* 3507 * If and only if the ifnet instance exists: 3508 * Release the address lock. 3509 * If the group was left: update the hardware hash filter. 3510 */ 3511 IF_ADDR_WUNLOCK(ifp); 3512 if (lastref && ifp->if_ioctl != NULL) { 3513 (void)(*ifp->if_ioctl)(ifp, SIOCDELMULTI, 0); 3514 } 3515 } 3516 } 3517 3518 /* 3519 * Perform deletion of network-layer and/or link-layer multicast address. 3520 * 3521 * Return 0 if the reference count was decremented. 3522 * Return 1 if the final reference was released, indicating that the 3523 * hardware hash filter should be reprogrammed. 3524 */ 3525 static int 3526 if_delmulti_locked(struct ifnet *ifp, struct ifmultiaddr *ifma, int detaching) 3527 { 3528 struct ifmultiaddr *ll_ifma; 3529 3530 if (ifp != NULL && ifma->ifma_ifp != NULL) { 3531 KASSERT(ifma->ifma_ifp == ifp, 3532 ("%s: inconsistent ifp %p", __func__, ifp)); 3533 IF_ADDR_WLOCK_ASSERT(ifp); 3534 } 3535 3536 ifp = ifma->ifma_ifp; 3537 3538 /* 3539 * If the ifnet is detaching, null out references to ifnet, 3540 * so that upper protocol layers will notice, and not attempt 3541 * to obtain locks for an ifnet which no longer exists. The 3542 * routing socket announcement must happen before the ifnet 3543 * instance is detached from the system. 
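 * The RTM_DELMADDR message generated below is that announcement.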
3544 */ 3545 if (detaching) { 3546 #ifdef DIAGNOSTIC 3547 printf("%s: detaching ifnet instance %p\n", __func__, ifp); 3548 #endif 3549 /* 3550 * ifp may already be nulled out if we are being reentered 3551 * to delete the ll_ifma. 3552 */ 3553 if (ifp != NULL) { 3554 rt_newmaddrmsg(RTM_DELMADDR, ifma); 3555 ifma->ifma_ifp = NULL; 3556 } 3557 } 3558 3559 if (--ifma->ifma_refcount > 0) 3560 return 0; 3561 3562 /* 3563 * If this ifma is a network-layer ifma, a link-layer ifma may 3564 * have been associated with it. Release it first if so. 3565 */ 3566 ll_ifma = ifma->ifma_llifma; 3567 if (ll_ifma != NULL) { 3568 KASSERT(ifma->ifma_lladdr != NULL, 3569 ("%s: llifma w/o lladdr", __func__)); 3570 if (detaching) 3571 ll_ifma->ifma_ifp = NULL; /* XXX */ 3572 if (--ll_ifma->ifma_refcount == 0) { 3573 if (ifp != NULL) { 3574 TAILQ_REMOVE(&ifp->if_multiaddrs, ll_ifma, 3575 ifma_link); 3576 } 3577 if_freemulti(ll_ifma); 3578 } 3579 } 3580 3581 if (ifp != NULL) 3582 TAILQ_REMOVE(&ifp->if_multiaddrs, ifma, ifma_link); 3583 3584 if_freemulti(ifma); 3585 3586 /* 3587 * The last reference to this instance of struct ifmultiaddr 3588 * was released; the hardware should be notified of this change. 3589 */ 3590 return 1; 3591 } 3592 3593 /* 3594 * Set the link layer address on an interface. 3595 * 3596 * At this time we only support certain types of interfaces, 3597 * and we don't allow the length of the address to change. 3598 * 3599 * Set noinline to be dtrace-friendly 3600 */ 3601 __noinline int 3602 if_setlladdr(struct ifnet *ifp, const u_char *lladdr, int len) 3603 { 3604 struct sockaddr_dl *sdl; 3605 struct ifaddr *ifa; 3606 struct ifreq ifr; 3607 3608 IF_ADDR_RLOCK(ifp); 3609 ifa = ifp->if_addr; 3610 if (ifa == NULL) { 3611 IF_ADDR_RUNLOCK(ifp); 3612 return (EINVAL); 3613 } 3614 ifa_ref(ifa); 3615 IF_ADDR_RUNLOCK(ifp); 3616 sdl = (struct sockaddr_dl *)ifa->ifa_addr; 3617 if (sdl == NULL) { 3618 ifa_free(ifa); 3619 return (EINVAL); 3620 } 3621 if (len != sdl->sdl_alen) { /* don't allow length to change */ 3622 ifa_free(ifa); 3623 return (EINVAL); 3624 } 3625 switch (ifp->if_type) { 3626 case IFT_ETHER: 3627 case IFT_FDDI: 3628 case IFT_XETHER: 3629 case IFT_ISO88025: 3630 case IFT_L2VLAN: 3631 case IFT_BRIDGE: 3632 case IFT_ARCNET: 3633 case IFT_IEEE8023ADLAG: 3634 bcopy(lladdr, LLADDR(sdl), len); 3635 ifa_free(ifa); 3636 break; 3637 default: 3638 ifa_free(ifa); 3639 return (ENODEV); 3640 } 3641 3642 /* 3643 * If the interface is already up, we need 3644 * to re-init it in order to reprogram its 3645 * address filter. 3646 */ 3647 if ((ifp->if_flags & IFF_UP) != 0) { 3648 if (ifp->if_ioctl) { 3649 ifp->if_flags &= ~IFF_UP; 3650 ifr.ifr_flags = ifp->if_flags & 0xffff; 3651 ifr.ifr_flagshigh = ifp->if_flags >> 16; 3652 (*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)&ifr); 3653 ifp->if_flags |= IFF_UP; 3654 ifr.ifr_flags = ifp->if_flags & 0xffff; 3655 ifr.ifr_flagshigh = ifp->if_flags >> 16; 3656 (*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)&ifr); 3657 } 3658 } 3659 EVENTHANDLER_INVOKE(iflladdr_event, ifp); 3660 return (0); 3661 } 3662 3663 /* 3664 * Compat function for handling basic encapsulation requests. 3665 * Not converted stacks (FDDI, IB, ..) supports traditional 3666 * output model: ARP (and other similar L2 protocols) are handled 3667 * inside output routine, arpresolve/nd6_resolve() returns MAC 3668 * address instead of full prepend. 
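 * (i.e. the resolver hands back only the link-layer address and the
 * output routine constructs the header itself).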
3669 * 3670 * This function creates calculated header==MAC for IPv4/IPv6 and 3671 * returns EAFNOSUPPORT (which is then handled in ARP code) for other 3672 * address families. 3673 */ 3674 static int 3675 if_requestencap_default(struct ifnet *ifp, struct if_encap_req *req) 3676 { 3677 3678 if (req->rtype != IFENCAP_LL) 3679 return (EOPNOTSUPP); 3680 3681 if (req->bufsize < req->lladdr_len) 3682 return (ENOMEM); 3683 3684 switch (req->family) { 3685 case AF_INET: 3686 case AF_INET6: 3687 break; 3688 default: 3689 return (EAFNOSUPPORT); 3690 } 3691 3692 /* Copy lladdr to storage as is */ 3693 memmove(req->buf, req->lladdr, req->lladdr_len); 3694 req->bufsize = req->lladdr_len; 3695 req->lladdr_off = 0; 3696 3697 return (0); 3698 } 3699 3700 /* 3701 * Get the link layer address that was read from the hardware at attach. 3702 * 3703 * This is only set by Ethernet NICs (IFT_ETHER), but laggX interfaces re-type 3704 * their component interfaces as IFT_IEEE8023ADLAG. 3705 */ 3706 int 3707 if_gethwaddr(struct ifnet *ifp, struct ifreq *ifr) 3708 { 3709 3710 if (ifp->if_hw_addr == NULL) 3711 return (ENODEV); 3712 3713 switch (ifp->if_type) { 3714 case IFT_ETHER: 3715 case IFT_IEEE8023ADLAG: 3716 bcopy(ifp->if_hw_addr, ifr->ifr_addr.sa_data, ifp->if_addrlen); 3717 return (0); 3718 default: 3719 return (ENODEV); 3720 } 3721 } 3722 3723 /* 3724 * The name argument must be a pointer to storage which will last as 3725 * long as the interface does. For physical devices, the result of 3726 * device_get_name(dev) is a good choice and for pseudo-devices a 3727 * static string works well. 3728 */ 3729 void 3730 if_initname(struct ifnet *ifp, const char *name, int unit) 3731 { 3732 ifp->if_dname = name; 3733 ifp->if_dunit = unit; 3734 if (unit != IF_DUNIT_NONE) 3735 snprintf(ifp->if_xname, IFNAMSIZ, "%s%d", name, unit); 3736 else 3737 strlcpy(ifp->if_xname, name, IFNAMSIZ); 3738 } 3739 3740 int 3741 if_printf(struct ifnet *ifp, const char * fmt, ...) 
3742 { 3743 va_list ap; 3744 int retval; 3745 3746 retval = printf("%s: ", ifp->if_xname); 3747 va_start(ap, fmt); 3748 retval += vprintf(fmt, ap); 3749 va_end(ap); 3750 return (retval); 3751 } 3752 3753 void 3754 if_start(struct ifnet *ifp) 3755 { 3756 3757 (*(ifp)->if_start)(ifp); 3758 } 3759 3760 /* 3761 * Backwards compatibility interface for drivers 3762 * that have not implemented it 3763 */ 3764 static int 3765 if_transmit(struct ifnet *ifp, struct mbuf *m) 3766 { 3767 int error; 3768 3769 IFQ_HANDOFF(ifp, m, error); 3770 return (error); 3771 } 3772 3773 static void 3774 if_input_default(struct ifnet *ifp __unused, struct mbuf *m) 3775 { 3776 3777 m_freem(m); 3778 } 3779 3780 int 3781 if_handoff(struct ifqueue *ifq, struct mbuf *m, struct ifnet *ifp, int adjust) 3782 { 3783 int active = 0; 3784 3785 IF_LOCK(ifq); 3786 if (_IF_QFULL(ifq)) { 3787 IF_UNLOCK(ifq); 3788 if_inc_counter(ifp, IFCOUNTER_OQDROPS, 1); 3789 m_freem(m); 3790 return (0); 3791 } 3792 if (ifp != NULL) { 3793 if_inc_counter(ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len + adjust); 3794 if (m->m_flags & (M_BCAST|M_MCAST)) 3795 if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1); 3796 active = ifp->if_drv_flags & IFF_DRV_OACTIVE; 3797 } 3798 _IF_ENQUEUE(ifq, m); 3799 IF_UNLOCK(ifq); 3800 if (ifp != NULL && !active) 3801 (*(ifp)->if_start)(ifp); 3802 return (1); 3803 } 3804 3805 void 3806 if_register_com_alloc(u_char type, 3807 if_com_alloc_t *a, if_com_free_t *f) 3808 { 3809 3810 KASSERT(if_com_alloc[type] == NULL, 3811 ("if_register_com_alloc: %d already registered", type)); 3812 KASSERT(if_com_free[type] == NULL, 3813 ("if_register_com_alloc: %d free already registered", type)); 3814 3815 if_com_alloc[type] = a; 3816 if_com_free[type] = f; 3817 } 3818 3819 void 3820 if_deregister_com_alloc(u_char type) 3821 { 3822 3823 KASSERT(if_com_alloc[type] != NULL, 3824 ("if_deregister_com_alloc: %d not registered", type)); 3825 KASSERT(if_com_free[type] != NULL, 3826 ("if_deregister_com_alloc: %d free not registered", type)); 3827 if_com_alloc[type] = NULL; 3828 if_com_free[type] = NULL; 3829 } 3830 3831 /* API for driver access to network stack owned ifnet.*/ 3832 uint64_t 3833 if_setbaudrate(struct ifnet *ifp, uint64_t baudrate) 3834 { 3835 uint64_t oldbrate; 3836 3837 oldbrate = ifp->if_baudrate; 3838 ifp->if_baudrate = baudrate; 3839 return (oldbrate); 3840 } 3841 3842 uint64_t 3843 if_getbaudrate(if_t ifp) 3844 { 3845 3846 return (((struct ifnet *)ifp)->if_baudrate); 3847 } 3848 3849 int 3850 if_setcapabilities(if_t ifp, int capabilities) 3851 { 3852 ((struct ifnet *)ifp)->if_capabilities = capabilities; 3853 return (0); 3854 } 3855 3856 int 3857 if_setcapabilitiesbit(if_t ifp, int setbit, int clearbit) 3858 { 3859 ((struct ifnet *)ifp)->if_capabilities |= setbit; 3860 ((struct ifnet *)ifp)->if_capabilities &= ~clearbit; 3861 3862 return (0); 3863 } 3864 3865 int 3866 if_getcapabilities(if_t ifp) 3867 { 3868 return ((struct ifnet *)ifp)->if_capabilities; 3869 } 3870 3871 int 3872 if_setcapenable(if_t ifp, int capabilities) 3873 { 3874 ((struct ifnet *)ifp)->if_capenable = capabilities; 3875 return (0); 3876 } 3877 3878 int 3879 if_setcapenablebit(if_t ifp, int setcap, int clearcap) 3880 { 3881 if(setcap) 3882 ((struct ifnet *)ifp)->if_capenable |= setcap; 3883 if(clearcap) 3884 ((struct ifnet *)ifp)->if_capenable &= ~clearcap; 3885 3886 return (0); 3887 } 3888 3889 const char * 3890 if_getdname(if_t ifp) 3891 { 3892 return ((struct ifnet *)ifp)->if_dname; 3893 } 3894 3895 int 3896 if_togglecapenable(if_t ifp, int togglecap) 3897 { 3898 
((struct ifnet *)ifp)->if_capenable ^= togglecap; 3899 return (0); 3900 } 3901 3902 int 3903 if_getcapenable(if_t ifp) 3904 { 3905 return ((struct ifnet *)ifp)->if_capenable; 3906 } 3907 3908 /* 3909 * This is largely undesirable because it ties ifnet to a device, but does 3910 * provide flexiblity for an embedded product vendor. Should be used with 3911 * the understanding that it violates the interface boundaries, and should be 3912 * a last resort only. 3913 */ 3914 int 3915 if_setdev(if_t ifp, void *dev) 3916 { 3917 return (0); 3918 } 3919 3920 int 3921 if_setdrvflagbits(if_t ifp, int set_flags, int clear_flags) 3922 { 3923 ((struct ifnet *)ifp)->if_drv_flags |= set_flags; 3924 ((struct ifnet *)ifp)->if_drv_flags &= ~clear_flags; 3925 3926 return (0); 3927 } 3928 3929 int 3930 if_getdrvflags(if_t ifp) 3931 { 3932 return ((struct ifnet *)ifp)->if_drv_flags; 3933 } 3934 3935 int 3936 if_setdrvflags(if_t ifp, int flags) 3937 { 3938 ((struct ifnet *)ifp)->if_drv_flags = flags; 3939 return (0); 3940 } 3941 3942 3943 int 3944 if_setflags(if_t ifp, int flags) 3945 { 3946 ((struct ifnet *)ifp)->if_flags = flags; 3947 return (0); 3948 } 3949 3950 int 3951 if_setflagbits(if_t ifp, int set, int clear) 3952 { 3953 ((struct ifnet *)ifp)->if_flags |= set; 3954 ((struct ifnet *)ifp)->if_flags &= ~clear; 3955 3956 return (0); 3957 } 3958 3959 int 3960 if_getflags(if_t ifp) 3961 { 3962 return ((struct ifnet *)ifp)->if_flags; 3963 } 3964 3965 int 3966 if_clearhwassist(if_t ifp) 3967 { 3968 ((struct ifnet *)ifp)->if_hwassist = 0; 3969 return (0); 3970 } 3971 3972 int 3973 if_sethwassistbits(if_t ifp, int toset, int toclear) 3974 { 3975 ((struct ifnet *)ifp)->if_hwassist |= toset; 3976 ((struct ifnet *)ifp)->if_hwassist &= ~toclear; 3977 3978 return (0); 3979 } 3980 3981 int 3982 if_sethwassist(if_t ifp, int hwassist_bit) 3983 { 3984 ((struct ifnet *)ifp)->if_hwassist = hwassist_bit; 3985 return (0); 3986 } 3987 3988 int 3989 if_gethwassist(if_t ifp) 3990 { 3991 return ((struct ifnet *)ifp)->if_hwassist; 3992 } 3993 3994 int 3995 if_setmtu(if_t ifp, int mtu) 3996 { 3997 ((struct ifnet *)ifp)->if_mtu = mtu; 3998 return (0); 3999 } 4000 4001 int 4002 if_getmtu(if_t ifp) 4003 { 4004 return ((struct ifnet *)ifp)->if_mtu; 4005 } 4006 4007 int 4008 if_getmtu_family(if_t ifp, int family) 4009 { 4010 struct domain *dp; 4011 4012 for (dp = domains; dp; dp = dp->dom_next) { 4013 if (dp->dom_family == family && dp->dom_ifmtu != NULL) 4014 return (dp->dom_ifmtu((struct ifnet *)ifp)); 4015 } 4016 4017 return (((struct ifnet *)ifp)->if_mtu); 4018 } 4019 4020 int 4021 if_setsoftc(if_t ifp, void *softc) 4022 { 4023 ((struct ifnet *)ifp)->if_softc = softc; 4024 return (0); 4025 } 4026 4027 void * 4028 if_getsoftc(if_t ifp) 4029 { 4030 return ((struct ifnet *)ifp)->if_softc; 4031 } 4032 4033 void 4034 if_setrcvif(struct mbuf *m, if_t ifp) 4035 { 4036 m->m_pkthdr.rcvif = (struct ifnet *)ifp; 4037 } 4038 4039 void 4040 if_setvtag(struct mbuf *m, uint16_t tag) 4041 { 4042 m->m_pkthdr.ether_vtag = tag; 4043 } 4044 4045 uint16_t 4046 if_getvtag(struct mbuf *m) 4047 { 4048 4049 return (m->m_pkthdr.ether_vtag); 4050 } 4051 4052 int 4053 if_sendq_empty(if_t ifp) 4054 { 4055 return IFQ_DRV_IS_EMPTY(&((struct ifnet *)ifp)->if_snd); 4056 } 4057 4058 struct ifaddr * 4059 if_getifaddr(if_t ifp) 4060 { 4061 return ((struct ifnet *)ifp)->if_addr; 4062 } 4063 4064 int 4065 if_getamcount(if_t ifp) 4066 { 4067 return ((struct ifnet *)ifp)->if_amcount; 4068 } 4069 4070 4071 int 4072 if_setsendqready(if_t ifp) 4073 { 4074 
IFQ_SET_READY(&((struct ifnet *)ifp)->if_snd); 4075 return (0); 4076 } 4077 4078 int 4079 if_setsendqlen(if_t ifp, int tx_desc_count) 4080 { 4081 IFQ_SET_MAXLEN(&((struct ifnet *)ifp)->if_snd, tx_desc_count); 4082 ((struct ifnet *)ifp)->if_snd.ifq_drv_maxlen = tx_desc_count; 4083 4084 return (0); 4085 } 4086 4087 int 4088 if_vlantrunkinuse(if_t ifp) 4089 { 4090 return ((struct ifnet *)ifp)->if_vlantrunk != NULL?1:0; 4091 } 4092 4093 int 4094 if_input(if_t ifp, struct mbuf* sendmp) 4095 { 4096 (*((struct ifnet *)ifp)->if_input)((struct ifnet *)ifp, sendmp); 4097 return (0); 4098 4099 } 4100 4101 /* XXX */ 4102 #ifndef ETH_ADDR_LEN 4103 #define ETH_ADDR_LEN 6 4104 #endif 4105 4106 int 4107 if_setupmultiaddr(if_t ifp, void *mta, int *cnt, int max) 4108 { 4109 struct ifmultiaddr *ifma; 4110 uint8_t *lmta = (uint8_t *)mta; 4111 int mcnt = 0; 4112 4113 TAILQ_FOREACH(ifma, &((struct ifnet *)ifp)->if_multiaddrs, ifma_link) { 4114 if (ifma->ifma_addr->sa_family != AF_LINK) 4115 continue; 4116 4117 if (mcnt == max) 4118 break; 4119 4120 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 4121 &lmta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN); 4122 mcnt++; 4123 } 4124 *cnt = mcnt; 4125 4126 return (0); 4127 } 4128 4129 int 4130 if_multiaddr_array(if_t ifp, void *mta, int *cnt, int max) 4131 { 4132 int error; 4133 4134 if_maddr_rlock(ifp); 4135 error = if_setupmultiaddr(ifp, mta, cnt, max); 4136 if_maddr_runlock(ifp); 4137 return (error); 4138 } 4139 4140 int 4141 if_multiaddr_count(if_t ifp, int max) 4142 { 4143 struct ifmultiaddr *ifma; 4144 int count; 4145 4146 count = 0; 4147 if_maddr_rlock(ifp); 4148 TAILQ_FOREACH(ifma, &((struct ifnet *)ifp)->if_multiaddrs, ifma_link) { 4149 if (ifma->ifma_addr->sa_family != AF_LINK) 4150 continue; 4151 count++; 4152 if (count == max) 4153 break; 4154 } 4155 if_maddr_runlock(ifp); 4156 return (count); 4157 } 4158 4159 int 4160 if_multi_apply(struct ifnet *ifp, int (*filter)(void *, struct ifmultiaddr *, int), void *arg) 4161 { 4162 struct ifmultiaddr *ifma; 4163 int cnt = 0; 4164 4165 if_maddr_rlock(ifp); 4166 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) 4167 cnt += filter(arg, ifma, cnt); 4168 if_maddr_runlock(ifp); 4169 return (cnt); 4170 } 4171 4172 struct mbuf * 4173 if_dequeue(if_t ifp) 4174 { 4175 struct mbuf *m; 4176 IFQ_DRV_DEQUEUE(&((struct ifnet *)ifp)->if_snd, m); 4177 4178 return (m); 4179 } 4180 4181 int 4182 if_sendq_prepend(if_t ifp, struct mbuf *m) 4183 { 4184 IFQ_DRV_PREPEND(&((struct ifnet *)ifp)->if_snd, m); 4185 return (0); 4186 } 4187 4188 int 4189 if_setifheaderlen(if_t ifp, int len) 4190 { 4191 ((struct ifnet *)ifp)->if_hdrlen = len; 4192 return (0); 4193 } 4194 4195 caddr_t 4196 if_getlladdr(if_t ifp) 4197 { 4198 return (IF_LLADDR((struct ifnet *)ifp)); 4199 } 4200 4201 void * 4202 if_gethandle(u_char type) 4203 { 4204 return (if_alloc(type)); 4205 } 4206 4207 void 4208 if_bpfmtap(if_t ifh, struct mbuf *m) 4209 { 4210 struct ifnet *ifp = (struct ifnet *)ifh; 4211 4212 BPF_MTAP(ifp, m); 4213 } 4214 4215 void 4216 if_etherbpfmtap(if_t ifh, struct mbuf *m) 4217 { 4218 struct ifnet *ifp = (struct ifnet *)ifh; 4219 4220 ETHER_BPF_MTAP(ifp, m); 4221 } 4222 4223 void 4224 if_vlancap(if_t ifh) 4225 { 4226 struct ifnet *ifp = (struct ifnet *)ifh; 4227 VLAN_CAPABILITIES(ifp); 4228 } 4229 4230 int 4231 if_sethwtsomax(if_t ifp, u_int if_hw_tsomax) 4232 { 4233 4234 ((struct ifnet *)ifp)->if_hw_tsomax = if_hw_tsomax; 4235 return (0); 4236 } 4237 4238 int 4239 if_sethwtsomaxsegcount(if_t ifp, u_int if_hw_tsomaxsegcount) 4240 { 4241 4242 ((struct ifnet 
*)ifp)->if_hw_tsomaxsegcount = if_hw_tsomaxsegcount; 4243 return (0); 4244 } 4245 4246 int 4247 if_sethwtsomaxsegsize(if_t ifp, u_int if_hw_tsomaxsegsize) 4248 { 4249 4250 ((struct ifnet *)ifp)->if_hw_tsomaxsegsize = if_hw_tsomaxsegsize; 4251 return (0); 4252 } 4253 4254 u_int 4255 if_gethwtsomax(if_t ifp) 4256 { 4257 4258 return (((struct ifnet *)ifp)->if_hw_tsomax); 4259 } 4260 4261 u_int 4262 if_gethwtsomaxsegcount(if_t ifp) 4263 { 4264 4265 return (((struct ifnet *)ifp)->if_hw_tsomaxsegcount); 4266 } 4267 4268 u_int 4269 if_gethwtsomaxsegsize(if_t ifp) 4270 { 4271 4272 return (((struct ifnet *)ifp)->if_hw_tsomaxsegsize); 4273 } 4274 4275 void 4276 if_setinitfn(if_t ifp, void (*init_fn)(void *)) 4277 { 4278 ((struct ifnet *)ifp)->if_init = init_fn; 4279 } 4280 4281 void 4282 if_setioctlfn(if_t ifp, int (*ioctl_fn)(if_t, u_long, caddr_t)) 4283 { 4284 ((struct ifnet *)ifp)->if_ioctl = (void *)ioctl_fn; 4285 } 4286 4287 void 4288 if_setstartfn(if_t ifp, void (*start_fn)(if_t)) 4289 { 4290 ((struct ifnet *)ifp)->if_start = (void *)start_fn; 4291 } 4292 4293 void 4294 if_settransmitfn(if_t ifp, if_transmit_fn_t start_fn) 4295 { 4296 ((struct ifnet *)ifp)->if_transmit = start_fn; 4297 } 4298 4299 void if_setqflushfn(if_t ifp, if_qflush_fn_t flush_fn) 4300 { 4301 ((struct ifnet *)ifp)->if_qflush = flush_fn; 4302 4303 } 4304 4305 void 4306 if_setgetcounterfn(if_t ifp, if_get_counter_t fn) 4307 { 4308 4309 ifp->if_get_counter = fn; 4310 } 4311 4312 /* Revisit these - These are inline functions originally. */ 4313 int 4314 drbr_inuse_drv(if_t ifh, struct buf_ring *br) 4315 { 4316 return drbr_inuse(ifh, br); 4317 } 4318 4319 struct mbuf* 4320 drbr_dequeue_drv(if_t ifh, struct buf_ring *br) 4321 { 4322 return drbr_dequeue(ifh, br); 4323 } 4324 4325 int 4326 drbr_needs_enqueue_drv(if_t ifh, struct buf_ring *br) 4327 { 4328 return drbr_needs_enqueue(ifh, br); 4329 } 4330 4331 int 4332 drbr_enqueue_drv(if_t ifh, struct buf_ring *br, struct mbuf *m) 4333 { 4334 return drbr_enqueue(ifh, br, m); 4335 4336 } 4337