1 /*- 2 * Copyright (c) 1980, 1986, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 4. Neither the name of the University nor the names of its contributors 14 * may be used to endorse or promote products derived from this software 15 * without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 * 29 * @(#)if.c 8.5 (Berkeley) 1/9/95 30 * $FreeBSD$ 31 */ 32 33 #include "opt_compat.h" 34 #include "opt_inet6.h" 35 #include "opt_inet.h" 36 #include "opt_route.h" 37 #include "opt_mac.h" 38 #include "opt_carp.h" 39 40 #include <sys/param.h> 41 #include <sys/types.h> 42 #include <sys/conf.h> 43 #include <sys/malloc.h> 44 #include <sys/sbuf.h> 45 #include <sys/bus.h> 46 #include <sys/mbuf.h> 47 #include <sys/systm.h> 48 #include <sys/priv.h> 49 #include <sys/proc.h> 50 #include <sys/socket.h> 51 #include <sys/socketvar.h> 52 #include <sys/protosw.h> 53 #include <sys/kernel.h> 54 #include <sys/lock.h> 55 #include <sys/refcount.h> 56 #include <sys/module.h> 57 #include <sys/rwlock.h> 58 #include <sys/sockio.h> 59 #include <sys/syslog.h> 60 #include <sys/sysctl.h> 61 #include <sys/taskqueue.h> 62 #include <sys/domain.h> 63 #include <sys/jail.h> 64 #include <sys/vimage.h> 65 #include <machine/stdarg.h> 66 #include <vm/uma.h> 67 68 #include <net/if.h> 69 #include <net/if_arp.h> 70 #include <net/if_clone.h> 71 #include <net/if_dl.h> 72 #include <net/if_types.h> 73 #include <net/if_var.h> 74 #include <net/radix.h> 75 #include <net/route.h> 76 #include <net/vnet.h> 77 78 #if defined(INET) || defined(INET6) 79 /*XXX*/ 80 #include <netinet/in.h> 81 #include <netinet/in_var.h> 82 #ifdef INET6 83 #include <netinet6/in6_var.h> 84 #include <netinet6/in6_ifattach.h> 85 #endif 86 #endif 87 #ifdef INET 88 #include <netinet/if_ether.h> 89 #include <netinet/vinet.h> 90 #endif 91 #ifdef DEV_CARP 92 #include <netinet/ip_carp.h> 93 #endif 94 95 #include <security/mac/mac_framework.h> 96 97 #ifndef VIMAGE 98 #ifndef VIMAGE_GLOBALS 99 struct vnet_net vnet_net_0; 100 #endif 101 #endif 102 103 static int slowtimo_started; 104 105 SYSCTL_NODE(_net, PF_LINK, link, CTLFLAG_RW, 0, "Link layers"); 106 SYSCTL_NODE(_net_link, 0, generic, CTLFLAG_RW, 0, "Generic link-management"); 107 108 /* Log link state change events */ 109 static int log_link_state_change = 1; 110 111 SYSCTL_INT(_net_link, OID_AUTO, log_link_state_change, CTLFLAG_RW, 112 &log_link_state_change, 0, 113 "log interface link state change events"); 114 115 void (*bstp_linkstate_p)(struct ifnet *ifp, int state); 116 void (*ng_ether_link_state_p)(struct ifnet *ifp, int state); 117 void (*lagg_linkstate_p)(struct ifnet *ifp, int state); 118 119 struct mbuf *(*tbr_dequeue_ptr)(struct ifaltq *, int) = NULL; 120 121 /* 122 * XXX: Style; these should be sorted alphabetically, and unprototyped 123 * static functions should be prototyped. Currently they are sorted by 124 * declaration order. 125 */ 126 static void if_attachdomain(void *); 127 static void if_attachdomain1(struct ifnet *); 128 static int ifconf(u_long, caddr_t); 129 static void if_freemulti(struct ifmultiaddr *); 130 static void if_init(void *); 131 static void if_check(void *); 132 static void if_route(struct ifnet *, int flag, int fam); 133 static int if_setflag(struct ifnet *, int, int, int *, int); 134 static void if_slowtimo(void *); 135 static int if_transmit(struct ifnet *ifp, struct mbuf *m); 136 static void if_unroute(struct ifnet *, int flag, int fam); 137 static void link_rtrequest(int, struct rtentry *, struct rt_addrinfo *); 138 static int if_rtdel(struct radix_node *, void *); 139 static int ifhwioctl(u_long, struct ifnet *, caddr_t, struct thread *); 140 static int if_delmulti_locked(struct ifnet *, struct ifmultiaddr *, int); 141 static void do_link_state_change(void *, int); 142 static int if_getgroup(struct ifgroupreq *, struct ifnet *); 143 static int if_getgroupmembers(struct ifgroupreq *); 144 static void if_delgroups(struct ifnet *); 145 static void if_attach_internal(struct ifnet *, int); 146 static void if_detach_internal(struct ifnet *, int); 147 148 #ifdef INET6 149 /* 150 * XXX: declare here to avoid to include many inet6 related files.. 151 * should be more generalized? 152 */ 153 extern void nd6_setmtu(struct ifnet *); 154 #endif 155 156 static int vnet_net_iattach(const void *); 157 158 #ifdef VIMAGE_GLOBALS 159 struct ifnethead ifnet; /* depend on static init XXX */ 160 struct ifgrouphead ifg_head; 161 int if_index; 162 static int if_indexlim; 163 /* Table of ifnet/cdev by index. Locked with ifnet_lock. */ 164 static struct ifindex_entry *ifindex_table; 165 static struct knlist ifklist; 166 #endif 167 168 int ifqmaxlen = IFQ_MAXLEN; 169 struct rwlock ifnet_lock; 170 static if_com_alloc_t *if_com_alloc[256]; 171 static if_com_free_t *if_com_free[256]; 172 173 static void filt_netdetach(struct knote *kn); 174 static int filt_netdev(struct knote *kn, long hint); 175 176 static struct filterops netdev_filtops = 177 { 1, NULL, filt_netdetach, filt_netdev }; 178 179 #ifndef VIMAGE_GLOBALS 180 static struct vnet_symmap vnet_net_symmap[] = { 181 VNET_SYMMAP(net, ifnet), 182 VNET_SYMMAP(net, rt_tables), 183 VNET_SYMMAP(net, rtstat), 184 VNET_SYMMAP(net, rttrash), 185 VNET_SYMMAP_END 186 }; 187 188 static const vnet_modinfo_t vnet_net_modinfo = { 189 .vmi_id = VNET_MOD_NET, 190 .vmi_name = "net", 191 .vmi_size = sizeof(struct vnet_net), 192 .vmi_symmap = vnet_net_symmap, 193 .vmi_iattach = vnet_net_iattach 194 }; 195 #endif /* !VIMAGE_GLOBALS */ 196 197 /* 198 * System initialization 199 */ 200 SYSINIT(interfaces, SI_SUB_INIT_IF, SI_ORDER_FIRST, if_init, NULL); 201 SYSINIT(interface_check, SI_SUB_PROTO_IF, SI_ORDER_FIRST, if_check, NULL); 202 203 MALLOC_DEFINE(M_IFNET, "ifnet", "interface internals"); 204 MALLOC_DEFINE(M_IFADDR, "ifaddr", "interface address"); 205 MALLOC_DEFINE(M_IFMADDR, "ether_multi", "link-level multicast address"); 206 207 struct ifnet * 208 ifnet_byindex_locked(u_short idx) 209 { 210 INIT_VNET_NET(curvnet); 211 212 if (idx > V_if_index) 213 return (NULL); 214 return (V_ifindex_table[idx].ife_ifnet); 215 } 216 217 struct ifnet * 218 ifnet_byindex(u_short idx) 219 { 220 struct ifnet *ifp; 221 222 IFNET_RLOCK(); 223 ifp = ifnet_byindex_locked(idx); 224 IFNET_RUNLOCK(); 225 return (ifp); 226 } 227 228 struct ifnet * 229 ifnet_byindex_ref(u_short idx) 230 { 231 struct ifnet *ifp; 232 233 IFNET_RLOCK(); 234 ifp = ifnet_byindex_locked(idx); 235 if (ifp == NULL || (ifp->if_flags & IFF_DYING)) { 236 IFNET_RUNLOCK(); 237 return (NULL); 238 } 239 if_ref(ifp); 240 IFNET_RUNLOCK(); 241 return (ifp); 242 } 243 244 static void 245 ifnet_setbyindex(u_short idx, struct ifnet *ifp) 246 { 247 INIT_VNET_NET(curvnet); 248 249 IFNET_WLOCK_ASSERT(); 250 251 V_ifindex_table[idx].ife_ifnet = ifp; 252 } 253 254 struct ifaddr * 255 ifaddr_byindex(u_short idx) 256 { 257 struct ifaddr *ifa; 258 259 IFNET_RLOCK(); 260 ifa = ifnet_byindex_locked(idx)->if_addr; 261 IFNET_RUNLOCK(); 262 return (ifa); 263 } 264 265 struct cdev * 266 ifdev_byindex(u_short idx) 267 { 268 INIT_VNET_NET(curvnet); 269 struct cdev *cdev; 270 271 IFNET_RLOCK(); 272 cdev = V_ifindex_table[idx].ife_dev; 273 IFNET_RUNLOCK(); 274 return (cdev); 275 } 276 277 static void 278 ifdev_setbyindex(u_short idx, struct cdev *cdev) 279 { 280 INIT_VNET_NET(curvnet); 281 282 IFNET_WLOCK(); 283 V_ifindex_table[idx].ife_dev = cdev; 284 IFNET_WUNLOCK(); 285 } 286 287 static d_open_t netopen; 288 static d_close_t netclose; 289 static d_ioctl_t netioctl; 290 static d_kqfilter_t netkqfilter; 291 292 static struct cdevsw net_cdevsw = { 293 .d_version = D_VERSION, 294 .d_flags = D_NEEDGIANT, 295 .d_open = netopen, 296 .d_close = netclose, 297 .d_ioctl = netioctl, 298 .d_name = "net", 299 .d_kqfilter = netkqfilter, 300 }; 301 302 static int 303 netopen(struct cdev *dev, int flag, int mode, struct thread *td) 304 { 305 return (0); 306 } 307 308 static int 309 netclose(struct cdev *dev, int flags, int fmt, struct thread *td) 310 { 311 return (0); 312 } 313 314 static int 315 netioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *td) 316 { 317 struct ifnet *ifp; 318 int error, idx; 319 320 /* only support interface specific ioctls */ 321 if (IOCGROUP(cmd) != 'i') 322 return (EOPNOTSUPP); 323 idx = dev2unit(dev); 324 if (idx == 0) { 325 /* 326 * special network device, not interface. 327 */ 328 if (cmd == SIOCGIFCONF) 329 return (ifconf(cmd, data)); /* XXX remove cmd */ 330 #ifdef __amd64__ 331 if (cmd == SIOCGIFCONF32) 332 return (ifconf(cmd, data)); /* XXX remove cmd */ 333 #endif 334 return (EOPNOTSUPP); 335 } 336 337 ifp = ifnet_byindex(idx); 338 if (ifp == NULL) 339 return (ENXIO); 340 341 error = ifhwioctl(cmd, ifp, data, td); 342 if (error == ENOIOCTL) 343 error = EOPNOTSUPP; 344 return (error); 345 } 346 347 static int 348 netkqfilter(struct cdev *dev, struct knote *kn) 349 { 350 INIT_VNET_NET(curvnet); 351 struct knlist *klist; 352 struct ifnet *ifp; 353 int idx; 354 355 switch (kn->kn_filter) { 356 case EVFILT_NETDEV: 357 kn->kn_fop = &netdev_filtops; 358 break; 359 default: 360 return (EINVAL); 361 } 362 363 idx = dev2unit(dev); 364 if (idx == 0) { 365 klist = &V_ifklist; 366 } else { 367 ifp = ifnet_byindex(idx); 368 if (ifp == NULL) 369 return (1); 370 klist = &ifp->if_klist; 371 } 372 373 kn->kn_hook = (caddr_t)klist; 374 375 knlist_add(klist, kn, 0); 376 377 return (0); 378 } 379 380 static void 381 filt_netdetach(struct knote *kn) 382 { 383 struct knlist *klist = (struct knlist *)kn->kn_hook; 384 385 knlist_remove(klist, kn, 0); 386 } 387 388 static int 389 filt_netdev(struct knote *kn, long hint) 390 { 391 struct knlist *klist = (struct knlist *)kn->kn_hook; 392 393 /* 394 * Currently NOTE_EXIT is abused to indicate device detach. 395 */ 396 if (hint == NOTE_EXIT) { 397 kn->kn_data = NOTE_LINKINV; 398 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 399 knlist_remove_inevent(klist, kn); 400 return (1); 401 } 402 if (hint != 0) 403 kn->kn_data = hint; /* current status */ 404 if (kn->kn_sfflags & hint) 405 kn->kn_fflags |= hint; 406 return (kn->kn_fflags != 0); 407 } 408 409 /* 410 * Network interface utility routines. 411 * 412 * Routines with ifa_ifwith* names take sockaddr *'s as 413 * parameters. 414 */ 415 416 /* ARGSUSED*/ 417 static void 418 if_init(void *dummy __unused) 419 { 420 421 #ifndef VIMAGE_GLOBALS 422 vnet_mod_register(&vnet_net_modinfo); 423 #else 424 vnet_net_iattach(NULL); 425 #endif 426 427 IFNET_LOCK_INIT(); 428 ifdev_setbyindex(0, make_dev(&net_cdevsw, 0, UID_ROOT, GID_WHEEL, 429 0600, "network")); 430 if_clone_init(); 431 } 432 433 static int 434 vnet_net_iattach(const void *unused __unused) 435 { 436 INIT_VNET_NET(curvnet); 437 438 V_if_index = 0; 439 V_ifindex_table = NULL; 440 V_if_indexlim = 8; 441 442 TAILQ_INIT(&V_ifnet); 443 TAILQ_INIT(&V_ifg_head); 444 knlist_init(&V_ifklist, NULL, NULL, NULL, NULL); 445 if_grow(); /* create initial table */ 446 447 return (0); 448 } 449 450 void 451 if_grow(void) 452 { 453 INIT_VNET_NET(curvnet); 454 u_int n; 455 struct ifindex_entry *e; 456 457 V_if_indexlim <<= 1; 458 n = V_if_indexlim * sizeof(*e); 459 e = malloc(n, M_IFNET, M_WAITOK | M_ZERO); 460 if (V_ifindex_table != NULL) { 461 memcpy((caddr_t)e, (caddr_t)V_ifindex_table, n/2); 462 free((caddr_t)V_ifindex_table, M_IFNET); 463 } 464 V_ifindex_table = e; 465 } 466 467 static void 468 if_check(void *dummy __unused) 469 { 470 471 /* 472 * If at least one interface added during boot uses 473 * if_watchdog then start the timer. 474 */ 475 if (slowtimo_started) 476 if_slowtimo(0); 477 } 478 479 /* 480 * Allocate a struct ifnet and an index for an interface. A layer 2 481 * common structure will also be allocated if an allocation routine is 482 * registered for the passed type. 483 */ 484 struct ifnet * 485 if_alloc(u_char type) 486 { 487 INIT_VNET_NET(curvnet); 488 struct ifnet *ifp; 489 490 ifp = malloc(sizeof(struct ifnet), M_IFNET, M_WAITOK|M_ZERO); 491 492 /* 493 * Try to find an empty slot below if_index. If we fail, take 494 * the next slot. 495 * 496 * XXX: should be locked! 497 */ 498 for (ifp->if_index = 1; ifp->if_index <= V_if_index; ifp->if_index++) { 499 if (ifnet_byindex(ifp->if_index) == NULL) 500 break; 501 } 502 /* Catch if_index overflow. */ 503 if (ifp->if_index < 1) { 504 free(ifp, M_IFNET); 505 return (NULL); 506 } 507 if (ifp->if_index > V_if_index) 508 V_if_index = ifp->if_index; 509 if (V_if_index >= V_if_indexlim) 510 if_grow(); 511 512 ifp->if_type = type; 513 ifp->if_alloctype = type; 514 515 if (if_com_alloc[type] != NULL) { 516 ifp->if_l2com = if_com_alloc[type](type, ifp); 517 if (ifp->if_l2com == NULL) { 518 free(ifp, M_IFNET); 519 return (NULL); 520 } 521 } 522 523 IF_ADDR_LOCK_INIT(ifp); 524 TASK_INIT(&ifp->if_linktask, 0, do_link_state_change, ifp); 525 ifp->if_afdata_initialized = 0; 526 IF_AFDATA_LOCK_INIT(ifp); 527 TAILQ_INIT(&ifp->if_addrhead); 528 TAILQ_INIT(&ifp->if_prefixhead); 529 TAILQ_INIT(&ifp->if_multiaddrs); 530 TAILQ_INIT(&ifp->if_groups); 531 knlist_init(&ifp->if_klist, NULL, NULL, NULL, NULL); 532 #ifdef MAC 533 mac_ifnet_init(ifp); 534 #endif 535 536 refcount_init(&ifp->if_refcount, 1); /* Index reference. */ 537 IFNET_WLOCK(); 538 ifnet_setbyindex(ifp->if_index, ifp); 539 IFNET_WUNLOCK(); 540 return (ifp); 541 } 542 543 /* 544 * Do the actual work of freeing a struct ifnet, associated index, and layer 545 * 2 common structure. This call is made when the last reference to an 546 * interface is released. 547 */ 548 static void 549 if_free_internal(struct ifnet *ifp) 550 { 551 INIT_VNET_NET(curvnet); /* ifp->if_vnet is already NULL here */ 552 553 KASSERT((ifp->if_flags & IFF_DYING), 554 ("if_free_internal: interface not dying")); 555 556 IFNET_WLOCK(); 557 KASSERT(ifp == ifnet_byindex_locked(ifp->if_index), 558 ("%s: freeing unallocated ifnet", ifp->if_xname)); 559 560 ifnet_setbyindex(ifp->if_index, NULL); 561 while (V_if_index > 0 && ifnet_byindex_locked(V_if_index) == NULL) 562 V_if_index--; 563 IFNET_WUNLOCK(); 564 565 if (if_com_free[ifp->if_alloctype] != NULL) 566 if_com_free[ifp->if_alloctype](ifp->if_l2com, 567 ifp->if_alloctype); 568 569 #ifdef MAC 570 mac_ifnet_destroy(ifp); 571 #endif /* MAC */ 572 KNOTE_UNLOCKED(&ifp->if_klist, NOTE_EXIT); 573 knlist_clear(&ifp->if_klist, 0); 574 knlist_destroy(&ifp->if_klist); 575 IF_AFDATA_DESTROY(ifp); 576 IF_ADDR_LOCK_DESTROY(ifp); 577 ifq_detach(&ifp->if_snd); 578 free(ifp, M_IFNET); 579 } 580 581 /* 582 * This version should only be called by intefaces that switch their type 583 * after calling if_alloc(). if_free_type() will go away again now that we 584 * have if_alloctype to cache the original allocation type. For now, assert 585 * that they match, since we require that in practice. 586 */ 587 void 588 if_free_type(struct ifnet *ifp, u_char type) 589 { 590 591 KASSERT(ifp->if_alloctype == type, 592 ("if_free_type: type (%d) != alloctype (%d)", type, 593 ifp->if_alloctype)); 594 595 ifp->if_flags |= IFF_DYING; /* XXX: Locking */ 596 if (!refcount_release(&ifp->if_refcount)) 597 return; 598 if_free_internal(ifp); 599 } 600 601 /* 602 * This is the normal version of if_free(), used by device drivers to free a 603 * detached network interface. The contents of if_free_type() will move into 604 * here when if_free_type() goes away. 605 */ 606 void 607 if_free(struct ifnet *ifp) 608 { 609 610 if_free_type(ifp, ifp->if_alloctype); 611 } 612 613 /* 614 * Interfaces to keep an ifnet type-stable despite the possibility of the 615 * driver calling if_free(). If there are additional references, we defer 616 * freeing the underlying data structure. 617 */ 618 void 619 if_ref(struct ifnet *ifp) 620 { 621 622 /* We don't assert the ifnet list lock here, but arguably should. */ 623 refcount_acquire(&ifp->if_refcount); 624 } 625 626 void 627 if_rele(struct ifnet *ifp) 628 { 629 630 if (!refcount_release(&ifp->if_refcount)) 631 return; 632 if_free_internal(ifp); 633 } 634 635 void 636 ifq_attach(struct ifaltq *ifq, struct ifnet *ifp) 637 { 638 639 mtx_init(&ifq->ifq_mtx, ifp->if_xname, "if send queue", MTX_DEF); 640 641 if (ifq->ifq_maxlen == 0) 642 ifq->ifq_maxlen = ifqmaxlen; 643 644 ifq->altq_type = 0; 645 ifq->altq_disc = NULL; 646 ifq->altq_flags &= ALTQF_CANTCHANGE; 647 ifq->altq_tbr = NULL; 648 ifq->altq_ifp = ifp; 649 } 650 651 void 652 ifq_detach(struct ifaltq *ifq) 653 { 654 mtx_destroy(&ifq->ifq_mtx); 655 } 656 657 /* 658 * Perform generic interface initalization tasks and attach the interface 659 * to the list of "active" interfaces. If vmove flag is set on entry 660 * to if_attach_internal(), perform only a limited subset of initialization 661 * tasks, given that we are moving from one vnet to another an ifnet which 662 * has already been fully initialized. 663 * 664 * XXX: 665 * - The decision to return void and thus require this function to 666 * succeed is questionable. 667 * - We should probably do more sanity checking. For instance we don't 668 * do anything to insure if_xname is unique or non-empty. 669 */ 670 void 671 if_attach(struct ifnet *ifp) 672 { 673 674 if_attach_internal(ifp, 0); 675 } 676 677 static void 678 if_attach_internal(struct ifnet *ifp, int vmove) 679 { 680 INIT_VNET_NET(curvnet); 681 unsigned socksize, ifasize; 682 int namelen, masklen; 683 struct sockaddr_dl *sdl; 684 struct ifaddr *ifa; 685 686 if (ifp->if_index == 0 || ifp != ifnet_byindex(ifp->if_index)) 687 panic ("%s: BUG: if_attach called without if_alloc'd input()\n", 688 ifp->if_xname); 689 690 #ifdef VIMAGE 691 ifp->if_vnet = curvnet; 692 #endif 693 694 if_addgroup(ifp, IFG_ALL); 695 696 getmicrotime(&ifp->if_lastchange); 697 ifp->if_data.ifi_epoch = time_uptime; 698 ifp->if_data.ifi_datalen = sizeof(struct if_data); 699 700 KASSERT((ifp->if_transmit == NULL && ifp->if_qflush == NULL) || 701 (ifp->if_transmit != NULL && ifp->if_qflush != NULL), 702 ("transmit and qflush must both either be set or both be NULL")); 703 if (ifp->if_transmit == NULL) { 704 ifp->if_transmit = if_transmit; 705 ifp->if_qflush = if_qflush; 706 } 707 708 if (!vmove) { 709 #ifdef MAC 710 mac_ifnet_create(ifp); 711 #endif 712 713 if (IS_DEFAULT_VNET(curvnet)) { 714 ifdev_setbyindex(ifp->if_index, make_dev(&net_cdevsw, 715 ifp->if_index, UID_ROOT, GID_WHEEL, 0600, "%s/%s", 716 net_cdevsw.d_name, ifp->if_xname)); 717 make_dev_alias(ifdev_byindex(ifp->if_index), "%s%d", 718 net_cdevsw.d_name, ifp->if_index); 719 } 720 721 ifq_attach(&ifp->if_snd, ifp); 722 723 /* 724 * Create a Link Level name for this device. 725 */ 726 namelen = strlen(ifp->if_xname); 727 /* 728 * Always save enough space for any possiable name so we 729 * can do a rename in place later. 730 */ 731 masklen = offsetof(struct sockaddr_dl, sdl_data[0]) + IFNAMSIZ; 732 socksize = masklen + ifp->if_addrlen; 733 if (socksize < sizeof(*sdl)) 734 socksize = sizeof(*sdl); 735 socksize = roundup2(socksize, sizeof(long)); 736 ifasize = sizeof(*ifa) + 2 * socksize; 737 ifa = malloc(ifasize, M_IFADDR, M_WAITOK | M_ZERO); 738 IFA_LOCK_INIT(ifa); 739 sdl = (struct sockaddr_dl *)(ifa + 1); 740 sdl->sdl_len = socksize; 741 sdl->sdl_family = AF_LINK; 742 bcopy(ifp->if_xname, sdl->sdl_data, namelen); 743 sdl->sdl_nlen = namelen; 744 sdl->sdl_index = ifp->if_index; 745 sdl->sdl_type = ifp->if_type; 746 ifp->if_addr = ifa; 747 ifa->ifa_ifp = ifp; 748 ifa->ifa_rtrequest = link_rtrequest; 749 ifa->ifa_addr = (struct sockaddr *)sdl; 750 sdl = (struct sockaddr_dl *)(socksize + (caddr_t)sdl); 751 ifa->ifa_netmask = (struct sockaddr *)sdl; 752 sdl->sdl_len = masklen; 753 while (namelen != 0) 754 sdl->sdl_data[--namelen] = 0xff; 755 ifa->ifa_refcnt = 1; 756 TAILQ_INSERT_HEAD(&ifp->if_addrhead, ifa, ifa_link); 757 /* Reliably crash if used uninitialized. */ 758 ifp->if_broadcastaddr = NULL; 759 } 760 761 IFNET_WLOCK(); 762 TAILQ_INSERT_TAIL(&V_ifnet, ifp, if_link); 763 #ifdef VIMAGE 764 curvnet->ifcnt++; 765 #endif 766 IFNET_WUNLOCK(); 767 768 if (domain_init_status >= 2) 769 if_attachdomain1(ifp); 770 771 EVENTHANDLER_INVOKE(ifnet_arrival_event, ifp); 772 if (IS_DEFAULT_VNET(curvnet)) 773 devctl_notify("IFNET", ifp->if_xname, "ATTACH", NULL); 774 775 /* Announce the interface. */ 776 rt_ifannouncemsg(ifp, IFAN_ARRIVAL); 777 778 if (!vmove && ifp->if_watchdog != NULL) { 779 if_printf(ifp, 780 "WARNING: using obsoleted if_watchdog interface\n"); 781 782 /* 783 * Note that we need if_slowtimo(). If this happens after 784 * boot, then call if_slowtimo() directly. 785 */ 786 if (atomic_cmpset_int(&slowtimo_started, 0, 1) && !cold) 787 if_slowtimo(0); 788 } 789 } 790 791 static void 792 if_attachdomain(void *dummy) 793 { 794 INIT_VNET_NET(curvnet); 795 struct ifnet *ifp; 796 int s; 797 798 s = splnet(); 799 TAILQ_FOREACH(ifp, &V_ifnet, if_link) 800 if_attachdomain1(ifp); 801 splx(s); 802 } 803 SYSINIT(domainifattach, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_SECOND, 804 if_attachdomain, NULL); 805 806 static void 807 if_attachdomain1(struct ifnet *ifp) 808 { 809 struct domain *dp; 810 int s; 811 812 s = splnet(); 813 814 /* 815 * Since dp->dom_ifattach calls malloc() with M_WAITOK, we 816 * cannot lock ifp->if_afdata initialization, entirely. 817 */ 818 if (IF_AFDATA_TRYLOCK(ifp) == 0) { 819 splx(s); 820 return; 821 } 822 if (ifp->if_afdata_initialized >= domain_init_status) { 823 IF_AFDATA_UNLOCK(ifp); 824 splx(s); 825 printf("if_attachdomain called more than once on %s\n", 826 ifp->if_xname); 827 return; 828 } 829 ifp->if_afdata_initialized = domain_init_status; 830 IF_AFDATA_UNLOCK(ifp); 831 832 /* address family dependent data region */ 833 bzero(ifp->if_afdata, sizeof(ifp->if_afdata)); 834 for (dp = domains; dp; dp = dp->dom_next) { 835 if (dp->dom_ifattach) 836 ifp->if_afdata[dp->dom_family] = 837 (*dp->dom_ifattach)(ifp); 838 } 839 840 splx(s); 841 } 842 843 /* 844 * Remove any unicast or broadcast network addresses from an interface. 845 */ 846 void 847 if_purgeaddrs(struct ifnet *ifp) 848 { 849 struct ifaddr *ifa, *next; 850 851 TAILQ_FOREACH_SAFE(ifa, &ifp->if_addrhead, ifa_link, next) { 852 if (ifa->ifa_addr->sa_family == AF_LINK) 853 continue; 854 #ifdef INET 855 /* XXX: Ugly!! ad hoc just for INET */ 856 if (ifa->ifa_addr->sa_family == AF_INET) { 857 struct ifaliasreq ifr; 858 859 bzero(&ifr, sizeof(ifr)); 860 ifr.ifra_addr = *ifa->ifa_addr; 861 if (ifa->ifa_dstaddr) 862 ifr.ifra_broadaddr = *ifa->ifa_dstaddr; 863 if (in_control(NULL, SIOCDIFADDR, (caddr_t)&ifr, ifp, 864 NULL) == 0) 865 continue; 866 } 867 #endif /* INET */ 868 #ifdef INET6 869 if (ifa->ifa_addr->sa_family == AF_INET6) { 870 in6_purgeaddr(ifa); 871 /* ifp_addrhead is already updated */ 872 continue; 873 } 874 #endif /* INET6 */ 875 TAILQ_REMOVE(&ifp->if_addrhead, ifa, ifa_link); 876 IFAFREE(ifa); 877 } 878 } 879 880 /* 881 * Remove any multicast network addresses from an interface. 882 */ 883 void 884 if_purgemaddrs(struct ifnet *ifp) 885 { 886 struct ifmultiaddr *ifma; 887 struct ifmultiaddr *next; 888 889 IF_ADDR_LOCK(ifp); 890 TAILQ_FOREACH_SAFE(ifma, &ifp->if_multiaddrs, ifma_link, next) 891 if_delmulti_locked(ifp, ifma, 1); 892 IF_ADDR_UNLOCK(ifp); 893 } 894 895 /* 896 * Detach an interface, removing it from the list of "active" interfaces. 897 * If vmove flag is set on entry to if_detach_internal(), perform only a 898 * limited subset of cleanup tasks, given that we are moving an ifnet from 899 * one vnet to another, where it must be fully operational. 900 * 901 * XXXRW: There are some significant questions about event ordering, and 902 * how to prevent things from starting to use the interface during detach. 903 */ 904 void 905 if_detach(struct ifnet *ifp) 906 { 907 908 if_detach_internal(ifp, 0); 909 } 910 911 static void 912 if_detach_internal(struct ifnet *ifp, int vmove) 913 { 914 INIT_VNET_NET(ifp->if_vnet); 915 struct ifaddr *ifa; 916 struct radix_node_head *rnh; 917 int i, j; 918 struct domain *dp; 919 struct ifnet *iter; 920 int found = 0; 921 922 IFNET_WLOCK(); 923 TAILQ_FOREACH(iter, &V_ifnet, if_link) 924 if (iter == ifp) { 925 TAILQ_REMOVE(&V_ifnet, ifp, if_link); 926 found = 1; 927 break; 928 } 929 #ifdef VIMAGE 930 if (found) 931 curvnet->ifcnt--; 932 #endif 933 IFNET_WUNLOCK(); 934 if (!found) { 935 if (vmove) 936 panic("interface not in it's own ifnet list"); 937 else 938 return; /* XXX this should panic as well? */ 939 } 940 941 /* 942 * Remove/wait for pending events. 943 */ 944 taskqueue_drain(taskqueue_swi, &ifp->if_linktask); 945 946 /* 947 * Remove routes and flush queues. 948 */ 949 if_down(ifp); 950 #ifdef ALTQ 951 if (ALTQ_IS_ENABLED(&ifp->if_snd)) 952 altq_disable(&ifp->if_snd); 953 if (ALTQ_IS_ATTACHED(&ifp->if_snd)) 954 altq_detach(&ifp->if_snd); 955 #endif 956 957 if_purgeaddrs(ifp); 958 959 #ifdef INET 960 in_ifdetach(ifp); 961 #endif 962 963 #ifdef INET6 964 /* 965 * Remove all IPv6 kernel structs related to ifp. This should be done 966 * before removing routing entries below, since IPv6 interface direct 967 * routes are expected to be removed by the IPv6-specific kernel API. 968 * Otherwise, the kernel will detect some inconsistency and bark it. 969 */ 970 in6_ifdetach(ifp); 971 #endif 972 if_purgemaddrs(ifp); 973 974 if (!vmove) { 975 /* 976 * Prevent further calls into the device driver via ifnet. 977 */ 978 if_dead(ifp); 979 980 /* 981 * Remove link ifaddr pointer and maybe decrement if_index. 982 * Clean up all addresses. 983 */ 984 ifp->if_addr = NULL; 985 if (IS_DEFAULT_VNET(curvnet)) 986 destroy_dev(ifdev_byindex(ifp->if_index)); 987 ifdev_setbyindex(ifp->if_index, NULL); 988 989 /* We can now free link ifaddr. */ 990 if (!TAILQ_EMPTY(&ifp->if_addrhead)) { 991 ifa = TAILQ_FIRST(&ifp->if_addrhead); 992 TAILQ_REMOVE(&ifp->if_addrhead, ifa, ifa_link); 993 IFAFREE(ifa); 994 } 995 } 996 997 /* 998 * Delete all remaining routes using this interface 999 * Unfortuneatly the only way to do this is to slog through 1000 * the entire routing table looking for routes which point 1001 * to this interface...oh well... 1002 */ 1003 for (i = 1; i <= AF_MAX; i++) { 1004 for (j = 0; j < rt_numfibs; j++) { 1005 rnh = rt_tables_get_rnh(j, i); 1006 if (rnh == NULL) 1007 continue; 1008 RADIX_NODE_HEAD_LOCK(rnh); 1009 (void) rnh->rnh_walktree(rnh, if_rtdel, ifp); 1010 RADIX_NODE_HEAD_UNLOCK(rnh); 1011 } 1012 } 1013 1014 /* Announce that the interface is gone. */ 1015 rt_ifannouncemsg(ifp, IFAN_DEPARTURE); 1016 EVENTHANDLER_INVOKE(ifnet_departure_event, ifp); 1017 if (IS_DEFAULT_VNET(curvnet)) 1018 devctl_notify("IFNET", ifp->if_xname, "DETACH", NULL); 1019 if_delgroups(ifp); 1020 1021 IF_AFDATA_LOCK(ifp); 1022 for (dp = domains; dp; dp = dp->dom_next) { 1023 if (dp->dom_ifdetach && ifp->if_afdata[dp->dom_family]) 1024 (*dp->dom_ifdetach)(ifp, 1025 ifp->if_afdata[dp->dom_family]); 1026 } 1027 ifp->if_afdata_initialized = 0; 1028 IF_AFDATA_UNLOCK(ifp); 1029 } 1030 1031 #ifdef VIMAGE 1032 /* 1033 * if_vmove() performs a limited version of if_detach() in current 1034 * vnet and if_attach()es the ifnet to the vnet specified as 2nd arg. 1035 * An attempt is made to shrink if_index in current vnet, find an 1036 * unused if_index in target vnet and calls if_grow() if necessary, 1037 * and finally find an unused if_xname for the target vnet. 1038 */ 1039 void 1040 if_vmove(struct ifnet *ifp, struct vnet *new_vnet) 1041 { 1042 1043 /* 1044 * Detach from current vnet, but preserve LLADDR info, do not 1045 * mark as dead etc. so that the ifnet can be reattached later. 1046 */ 1047 if_detach_internal(ifp, 1); 1048 1049 /* 1050 * Unlink the ifnet from ifindex_table[] in current vnet, 1051 * and shrink the if_index for that vnet if possible. 1052 * do / while construct below is needed to confine the scope 1053 * of INIT_VNET_NET(). 1054 */ 1055 { 1056 INIT_VNET_NET(curvnet); 1057 1058 IFNET_WLOCK(); 1059 ifnet_setbyindex(ifp->if_index, NULL); 1060 while (V_if_index > 0 && \ 1061 ifnet_byindex_locked(V_if_index) == NULL) 1062 V_if_index--; 1063 IFNET_WUNLOCK(); 1064 }; 1065 1066 /* 1067 * Switch to the context of the target vnet. 1068 */ 1069 CURVNET_SET_QUIET(new_vnet); 1070 INIT_VNET_NET(new_vnet); 1071 1072 /* 1073 * Try to find an empty slot below if_index. If we fail, take 1074 * the next slot. 1075 */ 1076 IFNET_WLOCK(); 1077 for (ifp->if_index = 1; ifp->if_index <= V_if_index; ifp->if_index++) { 1078 if (ifnet_byindex_locked(ifp->if_index) == NULL) 1079 break; 1080 } 1081 /* Catch if_index overflow. */ 1082 if (ifp->if_index < 1) 1083 panic("if_index overflow"); 1084 1085 if (ifp->if_index > V_if_index) 1086 V_if_index = ifp->if_index; 1087 if (V_if_index >= V_if_indexlim) 1088 if_grow(); 1089 ifnet_setbyindex(ifp->if_index, ifp); 1090 IFNET_WUNLOCK(); 1091 1092 if_attach_internal(ifp, 1); 1093 1094 CURVNET_RESTORE(); 1095 } 1096 #endif /* VIMAGE */ 1097 1098 /* 1099 * Add a group to an interface 1100 */ 1101 int 1102 if_addgroup(struct ifnet *ifp, const char *groupname) 1103 { 1104 INIT_VNET_NET(ifp->if_vnet); 1105 struct ifg_list *ifgl; 1106 struct ifg_group *ifg = NULL; 1107 struct ifg_member *ifgm; 1108 1109 if (groupname[0] && groupname[strlen(groupname) - 1] >= '0' && 1110 groupname[strlen(groupname) - 1] <= '9') 1111 return (EINVAL); 1112 1113 IFNET_WLOCK(); 1114 TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) 1115 if (!strcmp(ifgl->ifgl_group->ifg_group, groupname)) { 1116 IFNET_WUNLOCK(); 1117 return (EEXIST); 1118 } 1119 1120 if ((ifgl = (struct ifg_list *)malloc(sizeof(struct ifg_list), M_TEMP, 1121 M_NOWAIT)) == NULL) { 1122 IFNET_WUNLOCK(); 1123 return (ENOMEM); 1124 } 1125 1126 if ((ifgm = (struct ifg_member *)malloc(sizeof(struct ifg_member), 1127 M_TEMP, M_NOWAIT)) == NULL) { 1128 free(ifgl, M_TEMP); 1129 IFNET_WUNLOCK(); 1130 return (ENOMEM); 1131 } 1132 1133 TAILQ_FOREACH(ifg, &V_ifg_head, ifg_next) 1134 if (!strcmp(ifg->ifg_group, groupname)) 1135 break; 1136 1137 if (ifg == NULL) { 1138 if ((ifg = (struct ifg_group *)malloc(sizeof(struct ifg_group), 1139 M_TEMP, M_NOWAIT)) == NULL) { 1140 free(ifgl, M_TEMP); 1141 free(ifgm, M_TEMP); 1142 IFNET_WUNLOCK(); 1143 return (ENOMEM); 1144 } 1145 strlcpy(ifg->ifg_group, groupname, sizeof(ifg->ifg_group)); 1146 ifg->ifg_refcnt = 0; 1147 TAILQ_INIT(&ifg->ifg_members); 1148 EVENTHANDLER_INVOKE(group_attach_event, ifg); 1149 TAILQ_INSERT_TAIL(&V_ifg_head, ifg, ifg_next); 1150 } 1151 1152 ifg->ifg_refcnt++; 1153 ifgl->ifgl_group = ifg; 1154 ifgm->ifgm_ifp = ifp; 1155 1156 IF_ADDR_LOCK(ifp); 1157 TAILQ_INSERT_TAIL(&ifg->ifg_members, ifgm, ifgm_next); 1158 TAILQ_INSERT_TAIL(&ifp->if_groups, ifgl, ifgl_next); 1159 IF_ADDR_UNLOCK(ifp); 1160 1161 IFNET_WUNLOCK(); 1162 1163 EVENTHANDLER_INVOKE(group_change_event, groupname); 1164 1165 return (0); 1166 } 1167 1168 /* 1169 * Remove a group from an interface 1170 */ 1171 int 1172 if_delgroup(struct ifnet *ifp, const char *groupname) 1173 { 1174 INIT_VNET_NET(ifp->if_vnet); 1175 struct ifg_list *ifgl; 1176 struct ifg_member *ifgm; 1177 1178 IFNET_WLOCK(); 1179 TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) 1180 if (!strcmp(ifgl->ifgl_group->ifg_group, groupname)) 1181 break; 1182 if (ifgl == NULL) { 1183 IFNET_WUNLOCK(); 1184 return (ENOENT); 1185 } 1186 1187 IF_ADDR_LOCK(ifp); 1188 TAILQ_REMOVE(&ifp->if_groups, ifgl, ifgl_next); 1189 IF_ADDR_UNLOCK(ifp); 1190 1191 TAILQ_FOREACH(ifgm, &ifgl->ifgl_group->ifg_members, ifgm_next) 1192 if (ifgm->ifgm_ifp == ifp) 1193 break; 1194 1195 if (ifgm != NULL) { 1196 TAILQ_REMOVE(&ifgl->ifgl_group->ifg_members, ifgm, ifgm_next); 1197 free(ifgm, M_TEMP); 1198 } 1199 1200 if (--ifgl->ifgl_group->ifg_refcnt == 0) { 1201 TAILQ_REMOVE(&V_ifg_head, ifgl->ifgl_group, ifg_next); 1202 EVENTHANDLER_INVOKE(group_detach_event, ifgl->ifgl_group); 1203 free(ifgl->ifgl_group, M_TEMP); 1204 } 1205 IFNET_WUNLOCK(); 1206 1207 free(ifgl, M_TEMP); 1208 1209 EVENTHANDLER_INVOKE(group_change_event, groupname); 1210 1211 return (0); 1212 } 1213 1214 /* 1215 * Remove an interface from all groups 1216 */ 1217 static void 1218 if_delgroups(struct ifnet *ifp) 1219 { 1220 INIT_VNET_NET(ifp->if_vnet); 1221 struct ifg_list *ifgl; 1222 struct ifg_member *ifgm; 1223 char groupname[IFNAMSIZ]; 1224 1225 IFNET_WLOCK(); 1226 while (!TAILQ_EMPTY(&ifp->if_groups)) { 1227 ifgl = TAILQ_FIRST(&ifp->if_groups); 1228 1229 strlcpy(groupname, ifgl->ifgl_group->ifg_group, IFNAMSIZ); 1230 1231 IF_ADDR_LOCK(ifp); 1232 TAILQ_REMOVE(&ifp->if_groups, ifgl, ifgl_next); 1233 IF_ADDR_UNLOCK(ifp); 1234 1235 TAILQ_FOREACH(ifgm, &ifgl->ifgl_group->ifg_members, ifgm_next) 1236 if (ifgm->ifgm_ifp == ifp) 1237 break; 1238 1239 if (ifgm != NULL) { 1240 TAILQ_REMOVE(&ifgl->ifgl_group->ifg_members, ifgm, 1241 ifgm_next); 1242 free(ifgm, M_TEMP); 1243 } 1244 1245 if (--ifgl->ifgl_group->ifg_refcnt == 0) { 1246 TAILQ_REMOVE(&V_ifg_head, ifgl->ifgl_group, ifg_next); 1247 EVENTHANDLER_INVOKE(group_detach_event, 1248 ifgl->ifgl_group); 1249 free(ifgl->ifgl_group, M_TEMP); 1250 } 1251 IFNET_WUNLOCK(); 1252 1253 free(ifgl, M_TEMP); 1254 1255 EVENTHANDLER_INVOKE(group_change_event, groupname); 1256 1257 IFNET_WLOCK(); 1258 } 1259 IFNET_WUNLOCK(); 1260 } 1261 1262 /* 1263 * Stores all groups from an interface in memory pointed 1264 * to by data 1265 */ 1266 static int 1267 if_getgroup(struct ifgroupreq *data, struct ifnet *ifp) 1268 { 1269 int len, error; 1270 struct ifg_list *ifgl; 1271 struct ifg_req ifgrq, *ifgp; 1272 struct ifgroupreq *ifgr = data; 1273 1274 if (ifgr->ifgr_len == 0) { 1275 IF_ADDR_LOCK(ifp); 1276 TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) 1277 ifgr->ifgr_len += sizeof(struct ifg_req); 1278 IF_ADDR_UNLOCK(ifp); 1279 return (0); 1280 } 1281 1282 len = ifgr->ifgr_len; 1283 ifgp = ifgr->ifgr_groups; 1284 /* XXX: wire */ 1285 IF_ADDR_LOCK(ifp); 1286 TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) { 1287 if (len < sizeof(ifgrq)) { 1288 IF_ADDR_UNLOCK(ifp); 1289 return (EINVAL); 1290 } 1291 bzero(&ifgrq, sizeof ifgrq); 1292 strlcpy(ifgrq.ifgrq_group, ifgl->ifgl_group->ifg_group, 1293 sizeof(ifgrq.ifgrq_group)); 1294 if ((error = copyout(&ifgrq, ifgp, sizeof(struct ifg_req)))) { 1295 IF_ADDR_UNLOCK(ifp); 1296 return (error); 1297 } 1298 len -= sizeof(ifgrq); 1299 ifgp++; 1300 } 1301 IF_ADDR_UNLOCK(ifp); 1302 1303 return (0); 1304 } 1305 1306 /* 1307 * Stores all members of a group in memory pointed to by data 1308 */ 1309 static int 1310 if_getgroupmembers(struct ifgroupreq *data) 1311 { 1312 INIT_VNET_NET(curvnet); 1313 struct ifgroupreq *ifgr = data; 1314 struct ifg_group *ifg; 1315 struct ifg_member *ifgm; 1316 struct ifg_req ifgrq, *ifgp; 1317 int len, error; 1318 1319 IFNET_RLOCK(); 1320 TAILQ_FOREACH(ifg, &V_ifg_head, ifg_next) 1321 if (!strcmp(ifg->ifg_group, ifgr->ifgr_name)) 1322 break; 1323 if (ifg == NULL) { 1324 IFNET_RUNLOCK(); 1325 return (ENOENT); 1326 } 1327 1328 if (ifgr->ifgr_len == 0) { 1329 TAILQ_FOREACH(ifgm, &ifg->ifg_members, ifgm_next) 1330 ifgr->ifgr_len += sizeof(ifgrq); 1331 IFNET_RUNLOCK(); 1332 return (0); 1333 } 1334 1335 len = ifgr->ifgr_len; 1336 ifgp = ifgr->ifgr_groups; 1337 TAILQ_FOREACH(ifgm, &ifg->ifg_members, ifgm_next) { 1338 if (len < sizeof(ifgrq)) { 1339 IFNET_RUNLOCK(); 1340 return (EINVAL); 1341 } 1342 bzero(&ifgrq, sizeof ifgrq); 1343 strlcpy(ifgrq.ifgrq_member, ifgm->ifgm_ifp->if_xname, 1344 sizeof(ifgrq.ifgrq_member)); 1345 if ((error = copyout(&ifgrq, ifgp, sizeof(struct ifg_req)))) { 1346 IFNET_RUNLOCK(); 1347 return (error); 1348 } 1349 len -= sizeof(ifgrq); 1350 ifgp++; 1351 } 1352 IFNET_RUNLOCK(); 1353 1354 return (0); 1355 } 1356 1357 /* 1358 * Delete Routes for a Network Interface 1359 * 1360 * Called for each routing entry via the rnh->rnh_walktree() call above 1361 * to delete all route entries referencing a detaching network interface. 1362 * 1363 * Arguments: 1364 * rn pointer to node in the routing table 1365 * arg argument passed to rnh->rnh_walktree() - detaching interface 1366 * 1367 * Returns: 1368 * 0 successful 1369 * errno failed - reason indicated 1370 * 1371 */ 1372 static int 1373 if_rtdel(struct radix_node *rn, void *arg) 1374 { 1375 struct rtentry *rt = (struct rtentry *)rn; 1376 struct ifnet *ifp = arg; 1377 int err; 1378 1379 if (rt->rt_ifp == ifp) { 1380 1381 /* 1382 * Protect (sorta) against walktree recursion problems 1383 * with cloned routes 1384 */ 1385 if ((rt->rt_flags & RTF_UP) == 0) 1386 return (0); 1387 1388 err = rtrequest_fib(RTM_DELETE, rt_key(rt), rt->rt_gateway, 1389 rt_mask(rt), rt->rt_flags|RTF_RNH_LOCKED, 1390 (struct rtentry **) NULL, rt->rt_fibnum); 1391 if (err) { 1392 log(LOG_WARNING, "if_rtdel: error %d\n", err); 1393 } 1394 } 1395 1396 return (0); 1397 } 1398 1399 /* 1400 * XXX: Because sockaddr_dl has deeper structure than the sockaddr 1401 * structs used to represent other address families, it is necessary 1402 * to perform a different comparison. 1403 */ 1404 1405 #define sa_equal(a1, a2) \ 1406 (bcmp((a1), (a2), ((a1))->sa_len) == 0) 1407 1408 #define sa_dl_equal(a1, a2) \ 1409 ((((struct sockaddr_dl *)(a1))->sdl_len == \ 1410 ((struct sockaddr_dl *)(a2))->sdl_len) && \ 1411 (bcmp(LLADDR((struct sockaddr_dl *)(a1)), \ 1412 LLADDR((struct sockaddr_dl *)(a2)), \ 1413 ((struct sockaddr_dl *)(a1))->sdl_alen) == 0)) 1414 1415 /* 1416 * Locate an interface based on a complete address. 1417 */ 1418 /*ARGSUSED*/ 1419 struct ifaddr * 1420 ifa_ifwithaddr(struct sockaddr *addr) 1421 { 1422 INIT_VNET_NET(curvnet); 1423 struct ifnet *ifp; 1424 struct ifaddr *ifa; 1425 1426 IFNET_RLOCK(); 1427 TAILQ_FOREACH(ifp, &V_ifnet, if_link) { 1428 IF_ADDR_LOCK(ifp); 1429 TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { 1430 if (ifa->ifa_addr->sa_family != addr->sa_family) 1431 continue; 1432 if (sa_equal(addr, ifa->ifa_addr)) { 1433 IF_ADDR_UNLOCK(ifp); 1434 goto done; 1435 } 1436 /* IP6 doesn't have broadcast */ 1437 if ((ifp->if_flags & IFF_BROADCAST) && 1438 ifa->ifa_broadaddr && 1439 ifa->ifa_broadaddr->sa_len != 0 && 1440 sa_equal(ifa->ifa_broadaddr, addr)) { 1441 IF_ADDR_UNLOCK(ifp); 1442 goto done; 1443 } 1444 } 1445 IF_ADDR_UNLOCK(ifp); 1446 } 1447 ifa = NULL; 1448 done: 1449 IFNET_RUNLOCK(); 1450 return (ifa); 1451 } 1452 1453 /* 1454 * Locate an interface based on the broadcast address. 1455 */ 1456 /* ARGSUSED */ 1457 struct ifaddr * 1458 ifa_ifwithbroadaddr(struct sockaddr *addr) 1459 { 1460 INIT_VNET_NET(curvnet); 1461 struct ifnet *ifp; 1462 struct ifaddr *ifa; 1463 1464 IFNET_RLOCK(); 1465 TAILQ_FOREACH(ifp, &V_ifnet, if_link) { 1466 IF_ADDR_LOCK(ifp); 1467 TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { 1468 if (ifa->ifa_addr->sa_family != addr->sa_family) 1469 continue; 1470 if ((ifp->if_flags & IFF_BROADCAST) && 1471 ifa->ifa_broadaddr && 1472 ifa->ifa_broadaddr->sa_len != 0 && 1473 sa_equal(ifa->ifa_broadaddr, addr)) { 1474 IF_ADDR_UNLOCK(ifp); 1475 goto done; 1476 } 1477 } 1478 IF_ADDR_UNLOCK(ifp); 1479 } 1480 ifa = NULL; 1481 done: 1482 IFNET_RUNLOCK(); 1483 return (ifa); 1484 } 1485 1486 /* 1487 * Locate the point to point interface with a given destination address. 1488 */ 1489 /*ARGSUSED*/ 1490 struct ifaddr * 1491 ifa_ifwithdstaddr(struct sockaddr *addr) 1492 { 1493 INIT_VNET_NET(curvnet); 1494 struct ifnet *ifp; 1495 struct ifaddr *ifa; 1496 1497 IFNET_RLOCK(); 1498 TAILQ_FOREACH(ifp, &V_ifnet, if_link) { 1499 if ((ifp->if_flags & IFF_POINTOPOINT) == 0) 1500 continue; 1501 IF_ADDR_LOCK(ifp); 1502 TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { 1503 if (ifa->ifa_addr->sa_family != addr->sa_family) 1504 continue; 1505 if (ifa->ifa_dstaddr != NULL && 1506 sa_equal(addr, ifa->ifa_dstaddr)) { 1507 IF_ADDR_UNLOCK(ifp); 1508 goto done; 1509 } 1510 } 1511 IF_ADDR_UNLOCK(ifp); 1512 } 1513 ifa = NULL; 1514 done: 1515 IFNET_RUNLOCK(); 1516 return (ifa); 1517 } 1518 1519 /* 1520 * Find an interface on a specific network. If many, choice 1521 * is most specific found. 1522 */ 1523 struct ifaddr * 1524 ifa_ifwithnet(struct sockaddr *addr) 1525 { 1526 INIT_VNET_NET(curvnet); 1527 struct ifnet *ifp; 1528 struct ifaddr *ifa; 1529 struct ifaddr *ifa_maybe = (struct ifaddr *) 0; 1530 u_int af = addr->sa_family; 1531 char *addr_data = addr->sa_data, *cplim; 1532 1533 /* 1534 * AF_LINK addresses can be looked up directly by their index number, 1535 * so do that if we can. 1536 */ 1537 if (af == AF_LINK) { 1538 struct sockaddr_dl *sdl = (struct sockaddr_dl *)addr; 1539 if (sdl->sdl_index && sdl->sdl_index <= V_if_index) 1540 return (ifaddr_byindex(sdl->sdl_index)); 1541 } 1542 1543 /* 1544 * Scan though each interface, looking for ones that have 1545 * addresses in this address family. 1546 */ 1547 IFNET_RLOCK(); 1548 TAILQ_FOREACH(ifp, &V_ifnet, if_link) { 1549 IF_ADDR_LOCK(ifp); 1550 TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { 1551 char *cp, *cp2, *cp3; 1552 1553 if (ifa->ifa_addr->sa_family != af) 1554 next: continue; 1555 if (af == AF_INET && ifp->if_flags & IFF_POINTOPOINT) { 1556 /* 1557 * This is a bit broken as it doesn't 1558 * take into account that the remote end may 1559 * be a single node in the network we are 1560 * looking for. 1561 * The trouble is that we don't know the 1562 * netmask for the remote end. 1563 */ 1564 if (ifa->ifa_dstaddr != NULL && 1565 sa_equal(addr, ifa->ifa_dstaddr)) { 1566 IF_ADDR_UNLOCK(ifp); 1567 goto done; 1568 } 1569 } else { 1570 /* 1571 * if we have a special address handler, 1572 * then use it instead of the generic one. 1573 */ 1574 if (ifa->ifa_claim_addr) { 1575 if ((*ifa->ifa_claim_addr)(ifa, addr)) { 1576 IF_ADDR_UNLOCK(ifp); 1577 goto done; 1578 } 1579 continue; 1580 } 1581 1582 /* 1583 * Scan all the bits in the ifa's address. 1584 * If a bit dissagrees with what we are 1585 * looking for, mask it with the netmask 1586 * to see if it really matters. 1587 * (A byte at a time) 1588 */ 1589 if (ifa->ifa_netmask == 0) 1590 continue; 1591 cp = addr_data; 1592 cp2 = ifa->ifa_addr->sa_data; 1593 cp3 = ifa->ifa_netmask->sa_data; 1594 cplim = ifa->ifa_netmask->sa_len 1595 + (char *)ifa->ifa_netmask; 1596 while (cp3 < cplim) 1597 if ((*cp++ ^ *cp2++) & *cp3++) 1598 goto next; /* next address! */ 1599 /* 1600 * If the netmask of what we just found 1601 * is more specific than what we had before 1602 * (if we had one) then remember the new one 1603 * before continuing to search 1604 * for an even better one. 1605 */ 1606 if (ifa_maybe == 0 || 1607 rn_refines((caddr_t)ifa->ifa_netmask, 1608 (caddr_t)ifa_maybe->ifa_netmask)) 1609 ifa_maybe = ifa; 1610 } 1611 } 1612 IF_ADDR_UNLOCK(ifp); 1613 } 1614 ifa = ifa_maybe; 1615 done: 1616 IFNET_RUNLOCK(); 1617 return (ifa); 1618 } 1619 1620 /* 1621 * Find an interface address specific to an interface best matching 1622 * a given address. 1623 */ 1624 struct ifaddr * 1625 ifaof_ifpforaddr(struct sockaddr *addr, struct ifnet *ifp) 1626 { 1627 struct ifaddr *ifa; 1628 char *cp, *cp2, *cp3; 1629 char *cplim; 1630 struct ifaddr *ifa_maybe = 0; 1631 u_int af = addr->sa_family; 1632 1633 if (af >= AF_MAX) 1634 return (0); 1635 IF_ADDR_LOCK(ifp); 1636 TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { 1637 if (ifa->ifa_addr->sa_family != af) 1638 continue; 1639 if (ifa_maybe == 0) 1640 ifa_maybe = ifa; 1641 if (ifa->ifa_netmask == 0) { 1642 if (sa_equal(addr, ifa->ifa_addr) || 1643 (ifa->ifa_dstaddr && 1644 sa_equal(addr, ifa->ifa_dstaddr))) 1645 goto done; 1646 continue; 1647 } 1648 if (ifp->if_flags & IFF_POINTOPOINT) { 1649 if (sa_equal(addr, ifa->ifa_dstaddr)) 1650 goto done; 1651 } else { 1652 cp = addr->sa_data; 1653 cp2 = ifa->ifa_addr->sa_data; 1654 cp3 = ifa->ifa_netmask->sa_data; 1655 cplim = ifa->ifa_netmask->sa_len + (char *)ifa->ifa_netmask; 1656 for (; cp3 < cplim; cp3++) 1657 if ((*cp++ ^ *cp2++) & *cp3) 1658 break; 1659 if (cp3 == cplim) 1660 goto done; 1661 } 1662 } 1663 ifa = ifa_maybe; 1664 done: 1665 IF_ADDR_UNLOCK(ifp); 1666 return (ifa); 1667 } 1668 1669 #include <net/route.h> 1670 #include <net/if_llatbl.h> 1671 1672 /* 1673 * Default action when installing a route with a Link Level gateway. 1674 * Lookup an appropriate real ifa to point to. 1675 * This should be moved to /sys/net/link.c eventually. 1676 */ 1677 static void 1678 link_rtrequest(int cmd, struct rtentry *rt, struct rt_addrinfo *info) 1679 { 1680 struct ifaddr *ifa, *oifa; 1681 struct sockaddr *dst; 1682 struct ifnet *ifp; 1683 1684 RT_LOCK_ASSERT(rt); 1685 1686 if (cmd != RTM_ADD || ((ifa = rt->rt_ifa) == 0) || 1687 ((ifp = ifa->ifa_ifp) == 0) || ((dst = rt_key(rt)) == 0)) 1688 return; 1689 ifa = ifaof_ifpforaddr(dst, ifp); 1690 if (ifa) { 1691 IFAREF(ifa); /* XXX */ 1692 oifa = rt->rt_ifa; 1693 rt->rt_ifa = ifa; 1694 IFAFREE(oifa); 1695 if (ifa->ifa_rtrequest && ifa->ifa_rtrequest != link_rtrequest) 1696 ifa->ifa_rtrequest(cmd, rt, info); 1697 } 1698 } 1699 1700 /* 1701 * Mark an interface down and notify protocols of 1702 * the transition. 1703 * NOTE: must be called at splnet or eqivalent. 1704 */ 1705 static void 1706 if_unroute(struct ifnet *ifp, int flag, int fam) 1707 { 1708 struct ifaddr *ifa; 1709 1710 KASSERT(flag == IFF_UP, ("if_unroute: flag != IFF_UP")); 1711 1712 ifp->if_flags &= ~flag; 1713 getmicrotime(&ifp->if_lastchange); 1714 TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) 1715 if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family)) 1716 pfctlinput(PRC_IFDOWN, ifa->ifa_addr); 1717 ifp->if_qflush(ifp); 1718 1719 #ifdef DEV_CARP 1720 if (ifp->if_carp) 1721 carp_carpdev_state(ifp->if_carp); 1722 #endif 1723 rt_ifmsg(ifp); 1724 } 1725 1726 /* 1727 * Mark an interface up and notify protocols of 1728 * the transition. 1729 * NOTE: must be called at splnet or eqivalent. 1730 */ 1731 static void 1732 if_route(struct ifnet *ifp, int flag, int fam) 1733 { 1734 struct ifaddr *ifa; 1735 1736 KASSERT(flag == IFF_UP, ("if_route: flag != IFF_UP")); 1737 1738 ifp->if_flags |= flag; 1739 getmicrotime(&ifp->if_lastchange); 1740 TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) 1741 if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family)) 1742 pfctlinput(PRC_IFUP, ifa->ifa_addr); 1743 #ifdef DEV_CARP 1744 if (ifp->if_carp) 1745 carp_carpdev_state(ifp->if_carp); 1746 #endif 1747 rt_ifmsg(ifp); 1748 #ifdef INET6 1749 in6_if_up(ifp); 1750 #endif 1751 } 1752 1753 void (*vlan_link_state_p)(struct ifnet *, int); /* XXX: private from if_vlan */ 1754 void (*vlan_trunk_cap_p)(struct ifnet *); /* XXX: private from if_vlan */ 1755 1756 /* 1757 * Handle a change in the interface link state. To avoid LORs 1758 * between driver lock and upper layer locks, as well as possible 1759 * recursions, we post event to taskqueue, and all job 1760 * is done in static do_link_state_change(). 1761 */ 1762 void 1763 if_link_state_change(struct ifnet *ifp, int link_state) 1764 { 1765 /* Return if state hasn't changed. */ 1766 if (ifp->if_link_state == link_state) 1767 return; 1768 1769 ifp->if_link_state = link_state; 1770 1771 taskqueue_enqueue(taskqueue_swi, &ifp->if_linktask); 1772 } 1773 1774 static void 1775 do_link_state_change(void *arg, int pending) 1776 { 1777 struct ifnet *ifp = (struct ifnet *)arg; 1778 int link_state = ifp->if_link_state; 1779 int link; 1780 CURVNET_SET(ifp->if_vnet); 1781 1782 /* Notify that the link state has changed. */ 1783 rt_ifmsg(ifp); 1784 if (link_state == LINK_STATE_UP) 1785 link = NOTE_LINKUP; 1786 else if (link_state == LINK_STATE_DOWN) 1787 link = NOTE_LINKDOWN; 1788 else 1789 link = NOTE_LINKINV; 1790 KNOTE_UNLOCKED(&ifp->if_klist, link); 1791 if (ifp->if_vlantrunk != NULL) 1792 (*vlan_link_state_p)(ifp, link); 1793 1794 if ((ifp->if_type == IFT_ETHER || ifp->if_type == IFT_L2VLAN) && 1795 IFP2AC(ifp)->ac_netgraph != NULL) 1796 (*ng_ether_link_state_p)(ifp, link_state); 1797 #ifdef DEV_CARP 1798 if (ifp->if_carp) 1799 carp_carpdev_state(ifp->if_carp); 1800 #endif 1801 if (ifp->if_bridge) { 1802 KASSERT(bstp_linkstate_p != NULL,("if_bridge bstp not loaded!")); 1803 (*bstp_linkstate_p)(ifp, link_state); 1804 } 1805 if (ifp->if_lagg) { 1806 KASSERT(lagg_linkstate_p != NULL,("if_lagg not loaded!")); 1807 (*lagg_linkstate_p)(ifp, link_state); 1808 } 1809 1810 if (IS_DEFAULT_VNET(curvnet)) 1811 devctl_notify("IFNET", ifp->if_xname, 1812 (link_state == LINK_STATE_UP) ? "LINK_UP" : "LINK_DOWN", 1813 NULL); 1814 if (pending > 1) 1815 if_printf(ifp, "%d link states coalesced\n", pending); 1816 if (log_link_state_change) 1817 log(LOG_NOTICE, "%s: link state changed to %s\n", ifp->if_xname, 1818 (link_state == LINK_STATE_UP) ? "UP" : "DOWN" ); 1819 CURVNET_RESTORE(); 1820 } 1821 1822 /* 1823 * Mark an interface down and notify protocols of 1824 * the transition. 1825 * NOTE: must be called at splnet or eqivalent. 1826 */ 1827 void 1828 if_down(struct ifnet *ifp) 1829 { 1830 1831 if_unroute(ifp, IFF_UP, AF_UNSPEC); 1832 } 1833 1834 /* 1835 * Mark an interface up and notify protocols of 1836 * the transition. 1837 * NOTE: must be called at splnet or eqivalent. 1838 */ 1839 void 1840 if_up(struct ifnet *ifp) 1841 { 1842 1843 if_route(ifp, IFF_UP, AF_UNSPEC); 1844 } 1845 1846 /* 1847 * Flush an interface queue. 1848 */ 1849 void 1850 if_qflush(struct ifnet *ifp) 1851 { 1852 struct mbuf *m, *n; 1853 struct ifaltq *ifq; 1854 1855 ifq = &ifp->if_snd; 1856 IFQ_LOCK(ifq); 1857 #ifdef ALTQ 1858 if (ALTQ_IS_ENABLED(ifq)) 1859 ALTQ_PURGE(ifq); 1860 #endif 1861 n = ifq->ifq_head; 1862 while ((m = n) != 0) { 1863 n = m->m_act; 1864 m_freem(m); 1865 } 1866 ifq->ifq_head = 0; 1867 ifq->ifq_tail = 0; 1868 ifq->ifq_len = 0; 1869 IFQ_UNLOCK(ifq); 1870 } 1871 1872 /* 1873 * Handle interface watchdog timer routines. Called 1874 * from softclock, we decrement timers (if set) and 1875 * call the appropriate interface routine on expiration. 1876 * 1877 * XXXRW: Note that because timeouts run with Giant, if_watchdog() is called 1878 * holding Giant. 1879 */ 1880 static void 1881 if_slowtimo(void *arg) 1882 { 1883 VNET_ITERATOR_DECL(vnet_iter); 1884 struct ifnet *ifp; 1885 int s = splimp(); 1886 1887 IFNET_RLOCK(); 1888 VNET_LIST_RLOCK(); 1889 VNET_FOREACH(vnet_iter) { 1890 CURVNET_SET(vnet_iter); 1891 INIT_VNET_NET(vnet_iter); 1892 TAILQ_FOREACH(ifp, &V_ifnet, if_link) { 1893 if (ifp->if_timer == 0 || --ifp->if_timer) 1894 continue; 1895 if (ifp->if_watchdog) 1896 (*ifp->if_watchdog)(ifp); 1897 } 1898 CURVNET_RESTORE(); 1899 } 1900 VNET_LIST_RUNLOCK(); 1901 IFNET_RUNLOCK(); 1902 splx(s); 1903 timeout(if_slowtimo, (void *)0, hz / IFNET_SLOWHZ); 1904 } 1905 1906 /* 1907 * Map interface name to interface structure pointer, with or without 1908 * returning a reference. 1909 */ 1910 struct ifnet * 1911 ifunit_ref(const char *name) 1912 { 1913 INIT_VNET_NET(curvnet); 1914 struct ifnet *ifp; 1915 1916 IFNET_RLOCK(); 1917 TAILQ_FOREACH(ifp, &V_ifnet, if_link) { 1918 if (strncmp(name, ifp->if_xname, IFNAMSIZ) == 0 && 1919 !(ifp->if_flags & IFF_DYING)) 1920 break; 1921 } 1922 if (ifp != NULL) 1923 if_ref(ifp); 1924 IFNET_RUNLOCK(); 1925 return (ifp); 1926 } 1927 1928 struct ifnet * 1929 ifunit(const char *name) 1930 { 1931 INIT_VNET_NET(curvnet); 1932 struct ifnet *ifp; 1933 1934 IFNET_RLOCK(); 1935 TAILQ_FOREACH(ifp, &V_ifnet, if_link) { 1936 if (strncmp(name, ifp->if_xname, IFNAMSIZ) == 0) 1937 break; 1938 } 1939 IFNET_RUNLOCK(); 1940 return (ifp); 1941 } 1942 1943 /* 1944 * Hardware specific interface ioctls. 1945 */ 1946 static int 1947 ifhwioctl(u_long cmd, struct ifnet *ifp, caddr_t data, struct thread *td) 1948 { 1949 struct ifreq *ifr; 1950 struct ifstat *ifs; 1951 int error = 0; 1952 int new_flags, temp_flags; 1953 size_t namelen, onamelen; 1954 char new_name[IFNAMSIZ]; 1955 struct ifaddr *ifa; 1956 struct sockaddr_dl *sdl; 1957 1958 ifr = (struct ifreq *)data; 1959 switch (cmd) { 1960 case SIOCGIFINDEX: 1961 ifr->ifr_index = ifp->if_index; 1962 break; 1963 1964 case SIOCGIFFLAGS: 1965 temp_flags = ifp->if_flags | ifp->if_drv_flags; 1966 ifr->ifr_flags = temp_flags & 0xffff; 1967 ifr->ifr_flagshigh = temp_flags >> 16; 1968 break; 1969 1970 case SIOCGIFCAP: 1971 ifr->ifr_reqcap = ifp->if_capabilities; 1972 ifr->ifr_curcap = ifp->if_capenable; 1973 break; 1974 1975 #ifdef MAC 1976 case SIOCGIFMAC: 1977 error = mac_ifnet_ioctl_get(td->td_ucred, ifr, ifp); 1978 break; 1979 #endif 1980 1981 case SIOCGIFMETRIC: 1982 ifr->ifr_metric = ifp->if_metric; 1983 break; 1984 1985 case SIOCGIFMTU: 1986 ifr->ifr_mtu = ifp->if_mtu; 1987 break; 1988 1989 case SIOCGIFPHYS: 1990 ifr->ifr_phys = ifp->if_physical; 1991 break; 1992 1993 case SIOCSIFFLAGS: 1994 error = priv_check(td, PRIV_NET_SETIFFLAGS); 1995 if (error) 1996 return (error); 1997 /* 1998 * Currently, no driver owned flags pass the IFF_CANTCHANGE 1999 * check, so we don't need special handling here yet. 2000 */ 2001 new_flags = (ifr->ifr_flags & 0xffff) | 2002 (ifr->ifr_flagshigh << 16); 2003 if (ifp->if_flags & IFF_SMART) { 2004 /* Smart drivers twiddle their own routes */ 2005 } else if (ifp->if_flags & IFF_UP && 2006 (new_flags & IFF_UP) == 0) { 2007 int s = splimp(); 2008 if_down(ifp); 2009 splx(s); 2010 } else if (new_flags & IFF_UP && 2011 (ifp->if_flags & IFF_UP) == 0) { 2012 int s = splimp(); 2013 if_up(ifp); 2014 splx(s); 2015 } 2016 /* See if permanently promiscuous mode bit is about to flip */ 2017 if ((ifp->if_flags ^ new_flags) & IFF_PPROMISC) { 2018 if (new_flags & IFF_PPROMISC) 2019 ifp->if_flags |= IFF_PROMISC; 2020 else if (ifp->if_pcount == 0) 2021 ifp->if_flags &= ~IFF_PROMISC; 2022 log(LOG_INFO, "%s: permanently promiscuous mode %s\n", 2023 ifp->if_xname, 2024 (new_flags & IFF_PPROMISC) ? "enabled" : "disabled"); 2025 } 2026 ifp->if_flags = (ifp->if_flags & IFF_CANTCHANGE) | 2027 (new_flags &~ IFF_CANTCHANGE); 2028 if (ifp->if_ioctl) { 2029 (void) (*ifp->if_ioctl)(ifp, cmd, data); 2030 } 2031 getmicrotime(&ifp->if_lastchange); 2032 break; 2033 2034 case SIOCSIFCAP: 2035 error = priv_check(td, PRIV_NET_SETIFCAP); 2036 if (error) 2037 return (error); 2038 if (ifp->if_ioctl == NULL) 2039 return (EOPNOTSUPP); 2040 if (ifr->ifr_reqcap & ~ifp->if_capabilities) 2041 return (EINVAL); 2042 error = (*ifp->if_ioctl)(ifp, cmd, data); 2043 if (error == 0) 2044 getmicrotime(&ifp->if_lastchange); 2045 break; 2046 2047 #ifdef MAC 2048 case SIOCSIFMAC: 2049 error = mac_ifnet_ioctl_set(td->td_ucred, ifr, ifp); 2050 break; 2051 #endif 2052 2053 case SIOCSIFNAME: 2054 error = priv_check(td, PRIV_NET_SETIFNAME); 2055 if (error) 2056 return (error); 2057 error = copyinstr(ifr->ifr_data, new_name, IFNAMSIZ, NULL); 2058 if (error != 0) 2059 return (error); 2060 if (new_name[0] == '\0') 2061 return (EINVAL); 2062 if (ifunit(new_name) != NULL) 2063 return (EEXIST); 2064 2065 /* Announce the departure of the interface. */ 2066 rt_ifannouncemsg(ifp, IFAN_DEPARTURE); 2067 EVENTHANDLER_INVOKE(ifnet_departure_event, ifp); 2068 2069 log(LOG_INFO, "%s: changing name to '%s'\n", 2070 ifp->if_xname, new_name); 2071 2072 strlcpy(ifp->if_xname, new_name, sizeof(ifp->if_xname)); 2073 ifa = ifp->if_addr; 2074 IFA_LOCK(ifa); 2075 sdl = (struct sockaddr_dl *)ifa->ifa_addr; 2076 namelen = strlen(new_name); 2077 onamelen = sdl->sdl_nlen; 2078 /* 2079 * Move the address if needed. This is safe because we 2080 * allocate space for a name of length IFNAMSIZ when we 2081 * create this in if_attach(). 2082 */ 2083 if (namelen != onamelen) { 2084 bcopy(sdl->sdl_data + onamelen, 2085 sdl->sdl_data + namelen, sdl->sdl_alen); 2086 } 2087 bcopy(new_name, sdl->sdl_data, namelen); 2088 sdl->sdl_nlen = namelen; 2089 sdl = (struct sockaddr_dl *)ifa->ifa_netmask; 2090 bzero(sdl->sdl_data, onamelen); 2091 while (namelen != 0) 2092 sdl->sdl_data[--namelen] = 0xff; 2093 IFA_UNLOCK(ifa); 2094 2095 EVENTHANDLER_INVOKE(ifnet_arrival_event, ifp); 2096 /* Announce the return of the interface. */ 2097 rt_ifannouncemsg(ifp, IFAN_ARRIVAL); 2098 break; 2099 2100 case SIOCSIFMETRIC: 2101 error = priv_check(td, PRIV_NET_SETIFMETRIC); 2102 if (error) 2103 return (error); 2104 ifp->if_metric = ifr->ifr_metric; 2105 getmicrotime(&ifp->if_lastchange); 2106 break; 2107 2108 case SIOCSIFPHYS: 2109 error = priv_check(td, PRIV_NET_SETIFPHYS); 2110 if (error) 2111 return (error); 2112 if (ifp->if_ioctl == NULL) 2113 return (EOPNOTSUPP); 2114 error = (*ifp->if_ioctl)(ifp, cmd, data); 2115 if (error == 0) 2116 getmicrotime(&ifp->if_lastchange); 2117 break; 2118 2119 case SIOCSIFMTU: 2120 { 2121 u_long oldmtu = ifp->if_mtu; 2122 2123 error = priv_check(td, PRIV_NET_SETIFMTU); 2124 if (error) 2125 return (error); 2126 if (ifr->ifr_mtu < IF_MINMTU || ifr->ifr_mtu > IF_MAXMTU) 2127 return (EINVAL); 2128 if (ifp->if_ioctl == NULL) 2129 return (EOPNOTSUPP); 2130 error = (*ifp->if_ioctl)(ifp, cmd, data); 2131 if (error == 0) { 2132 getmicrotime(&ifp->if_lastchange); 2133 rt_ifmsg(ifp); 2134 } 2135 /* 2136 * If the link MTU changed, do network layer specific procedure. 2137 */ 2138 if (ifp->if_mtu != oldmtu) { 2139 #ifdef INET6 2140 nd6_setmtu(ifp); 2141 #endif 2142 } 2143 break; 2144 } 2145 2146 case SIOCADDMULTI: 2147 case SIOCDELMULTI: 2148 if (cmd == SIOCADDMULTI) 2149 error = priv_check(td, PRIV_NET_ADDMULTI); 2150 else 2151 error = priv_check(td, PRIV_NET_DELMULTI); 2152 if (error) 2153 return (error); 2154 2155 /* Don't allow group membership on non-multicast interfaces. */ 2156 if ((ifp->if_flags & IFF_MULTICAST) == 0) 2157 return (EOPNOTSUPP); 2158 2159 /* Don't let users screw up protocols' entries. */ 2160 if (ifr->ifr_addr.sa_family != AF_LINK) 2161 return (EINVAL); 2162 2163 if (cmd == SIOCADDMULTI) { 2164 struct ifmultiaddr *ifma; 2165 2166 /* 2167 * Userland is only permitted to join groups once 2168 * via the if_addmulti() KPI, because it cannot hold 2169 * struct ifmultiaddr * between calls. It may also 2170 * lose a race while we check if the membership 2171 * already exists. 2172 */ 2173 IF_ADDR_LOCK(ifp); 2174 ifma = if_findmulti(ifp, &ifr->ifr_addr); 2175 IF_ADDR_UNLOCK(ifp); 2176 if (ifma != NULL) 2177 error = EADDRINUSE; 2178 else 2179 error = if_addmulti(ifp, &ifr->ifr_addr, &ifma); 2180 } else { 2181 error = if_delmulti(ifp, &ifr->ifr_addr); 2182 } 2183 if (error == 0) 2184 getmicrotime(&ifp->if_lastchange); 2185 break; 2186 2187 case SIOCSIFPHYADDR: 2188 case SIOCDIFPHYADDR: 2189 #ifdef INET6 2190 case SIOCSIFPHYADDR_IN6: 2191 #endif 2192 case SIOCSLIFPHYADDR: 2193 case SIOCSIFMEDIA: 2194 case SIOCSIFGENERIC: 2195 error = priv_check(td, PRIV_NET_HWIOCTL); 2196 if (error) 2197 return (error); 2198 if (ifp->if_ioctl == NULL) 2199 return (EOPNOTSUPP); 2200 error = (*ifp->if_ioctl)(ifp, cmd, data); 2201 if (error == 0) 2202 getmicrotime(&ifp->if_lastchange); 2203 break; 2204 2205 case SIOCGIFSTATUS: 2206 ifs = (struct ifstat *)data; 2207 ifs->ascii[0] = '\0'; 2208 2209 case SIOCGIFPSRCADDR: 2210 case SIOCGIFPDSTADDR: 2211 case SIOCGLIFPHYADDR: 2212 case SIOCGIFMEDIA: 2213 case SIOCGIFGENERIC: 2214 if (ifp->if_ioctl == NULL) 2215 return (EOPNOTSUPP); 2216 error = (*ifp->if_ioctl)(ifp, cmd, data); 2217 break; 2218 2219 case SIOCSIFLLADDR: 2220 error = priv_check(td, PRIV_NET_SETLLADDR); 2221 if (error) 2222 return (error); 2223 error = if_setlladdr(ifp, 2224 ifr->ifr_addr.sa_data, ifr->ifr_addr.sa_len); 2225 break; 2226 2227 case SIOCAIFGROUP: 2228 { 2229 struct ifgroupreq *ifgr = (struct ifgroupreq *)ifr; 2230 2231 error = priv_check(td, PRIV_NET_ADDIFGROUP); 2232 if (error) 2233 return (error); 2234 if ((error = if_addgroup(ifp, ifgr->ifgr_group))) 2235 return (error); 2236 break; 2237 } 2238 2239 case SIOCGIFGROUP: 2240 if ((error = if_getgroup((struct ifgroupreq *)ifr, ifp))) 2241 return (error); 2242 break; 2243 2244 case SIOCDIFGROUP: 2245 { 2246 struct ifgroupreq *ifgr = (struct ifgroupreq *)ifr; 2247 2248 error = priv_check(td, PRIV_NET_DELIFGROUP); 2249 if (error) 2250 return (error); 2251 if ((error = if_delgroup(ifp, ifgr->ifgr_group))) 2252 return (error); 2253 break; 2254 } 2255 2256 default: 2257 error = ENOIOCTL; 2258 break; 2259 } 2260 return (error); 2261 } 2262 2263 /* 2264 * Interface ioctls. 2265 */ 2266 int 2267 ifioctl(struct socket *so, u_long cmd, caddr_t data, struct thread *td) 2268 { 2269 struct ifnet *ifp; 2270 struct ifreq *ifr; 2271 int error; 2272 int oif_flags; 2273 2274 switch (cmd) { 2275 case SIOCGIFCONF: 2276 case OSIOCGIFCONF: 2277 #ifdef __amd64__ 2278 case SIOCGIFCONF32: 2279 #endif 2280 return (ifconf(cmd, data)); 2281 } 2282 ifr = (struct ifreq *)data; 2283 2284 switch (cmd) { 2285 #ifdef VIMAGE 2286 /* 2287 * XXX vnet creation will be implemented through the new jail 2288 * framework - this is just a temporary hack for testing the 2289 * vnet create / destroy mechanisms. 2290 */ 2291 case SIOCSIFVIMAGE: 2292 error = vi_if_move((struct vi_req *) data, NULL, 2293 TD_TO_VIMAGE(td)); 2294 return (error); 2295 case SIOCSPVIMAGE: 2296 case SIOCGPVIMAGE: 2297 error = vi_td_ioctl(cmd, (struct vi_req *) data, td); 2298 return (error); 2299 #endif 2300 case SIOCIFCREATE: 2301 case SIOCIFCREATE2: 2302 error = priv_check(td, PRIV_NET_IFCREATE); 2303 if (error) 2304 return (error); 2305 return (if_clone_create(ifr->ifr_name, sizeof(ifr->ifr_name), 2306 cmd == SIOCIFCREATE2 ? ifr->ifr_data : NULL)); 2307 case SIOCIFDESTROY: 2308 error = priv_check(td, PRIV_NET_IFDESTROY); 2309 if (error) 2310 return (error); 2311 return if_clone_destroy(ifr->ifr_name); 2312 2313 case SIOCIFGCLONERS: 2314 return (if_clone_list((struct if_clonereq *)data)); 2315 case SIOCGIFGMEMB: 2316 return (if_getgroupmembers((struct ifgroupreq *)data)); 2317 } 2318 2319 ifp = ifunit_ref(ifr->ifr_name); 2320 if (ifp == NULL) 2321 return (ENXIO); 2322 2323 error = ifhwioctl(cmd, ifp, data, td); 2324 if (error != ENOIOCTL) { 2325 if_rele(ifp); 2326 return (error); 2327 } 2328 2329 oif_flags = ifp->if_flags; 2330 if (so->so_proto == NULL) { 2331 if_rele(ifp); 2332 return (EOPNOTSUPP); 2333 } 2334 #ifndef COMPAT_43 2335 error = ((*so->so_proto->pr_usrreqs->pru_control)(so, cmd, 2336 data, 2337 ifp, td)); 2338 if (error == EOPNOTSUPP && ifp != NULL && ifp->if_ioctl != NULL) 2339 error = (*ifp->if_ioctl)(ifp, cmd, data); 2340 #else 2341 { 2342 int ocmd = cmd; 2343 2344 switch (cmd) { 2345 2346 case SIOCSIFDSTADDR: 2347 case SIOCSIFADDR: 2348 case SIOCSIFBRDADDR: 2349 case SIOCSIFNETMASK: 2350 #if BYTE_ORDER != BIG_ENDIAN 2351 if (ifr->ifr_addr.sa_family == 0 && 2352 ifr->ifr_addr.sa_len < 16) { 2353 ifr->ifr_addr.sa_family = ifr->ifr_addr.sa_len; 2354 ifr->ifr_addr.sa_len = 16; 2355 } 2356 #else 2357 if (ifr->ifr_addr.sa_len == 0) 2358 ifr->ifr_addr.sa_len = 16; 2359 #endif 2360 break; 2361 2362 case OSIOCGIFADDR: 2363 cmd = SIOCGIFADDR; 2364 break; 2365 2366 case OSIOCGIFDSTADDR: 2367 cmd = SIOCGIFDSTADDR; 2368 break; 2369 2370 case OSIOCGIFBRDADDR: 2371 cmd = SIOCGIFBRDADDR; 2372 break; 2373 2374 case OSIOCGIFNETMASK: 2375 cmd = SIOCGIFNETMASK; 2376 } 2377 error = ((*so->so_proto->pr_usrreqs->pru_control)(so, 2378 cmd, 2379 data, 2380 ifp, td)); 2381 if (error == EOPNOTSUPP && ifp != NULL && 2382 ifp->if_ioctl != NULL) 2383 error = (*ifp->if_ioctl)(ifp, cmd, data); 2384 switch (ocmd) { 2385 2386 case OSIOCGIFADDR: 2387 case OSIOCGIFDSTADDR: 2388 case OSIOCGIFBRDADDR: 2389 case OSIOCGIFNETMASK: 2390 *(u_short *)&ifr->ifr_addr = ifr->ifr_addr.sa_family; 2391 2392 } 2393 } 2394 #endif /* COMPAT_43 */ 2395 2396 if ((oif_flags ^ ifp->if_flags) & IFF_UP) { 2397 #ifdef INET6 2398 DELAY(100);/* XXX: temporary workaround for fxp issue*/ 2399 if (ifp->if_flags & IFF_UP) { 2400 int s = splimp(); 2401 in6_if_up(ifp); 2402 splx(s); 2403 } 2404 #endif 2405 } 2406 if_rele(ifp); 2407 return (error); 2408 } 2409 2410 /* 2411 * The code common to handling reference counted flags, 2412 * e.g., in ifpromisc() and if_allmulti(). 2413 * The "pflag" argument can specify a permanent mode flag to check, 2414 * such as IFF_PPROMISC for promiscuous mode; should be 0 if none. 2415 * 2416 * Only to be used on stack-owned flags, not driver-owned flags. 2417 */ 2418 static int 2419 if_setflag(struct ifnet *ifp, int flag, int pflag, int *refcount, int onswitch) 2420 { 2421 struct ifreq ifr; 2422 int error; 2423 int oldflags, oldcount; 2424 2425 /* Sanity checks to catch programming errors */ 2426 KASSERT((flag & (IFF_DRV_OACTIVE|IFF_DRV_RUNNING)) == 0, 2427 ("%s: setting driver-owned flag %d", __func__, flag)); 2428 2429 if (onswitch) 2430 KASSERT(*refcount >= 0, 2431 ("%s: increment negative refcount %d for flag %d", 2432 __func__, *refcount, flag)); 2433 else 2434 KASSERT(*refcount > 0, 2435 ("%s: decrement non-positive refcount %d for flag %d", 2436 __func__, *refcount, flag)); 2437 2438 /* In case this mode is permanent, just touch refcount */ 2439 if (ifp->if_flags & pflag) { 2440 *refcount += onswitch ? 1 : -1; 2441 return (0); 2442 } 2443 2444 /* Save ifnet parameters for if_ioctl() may fail */ 2445 oldcount = *refcount; 2446 oldflags = ifp->if_flags; 2447 2448 /* 2449 * See if we aren't the only and touching refcount is enough. 2450 * Actually toggle interface flag if we are the first or last. 2451 */ 2452 if (onswitch) { 2453 if ((*refcount)++) 2454 return (0); 2455 ifp->if_flags |= flag; 2456 } else { 2457 if (--(*refcount)) 2458 return (0); 2459 ifp->if_flags &= ~flag; 2460 } 2461 2462 /* Call down the driver since we've changed interface flags */ 2463 if (ifp->if_ioctl == NULL) { 2464 error = EOPNOTSUPP; 2465 goto recover; 2466 } 2467 ifr.ifr_flags = ifp->if_flags & 0xffff; 2468 ifr.ifr_flagshigh = ifp->if_flags >> 16; 2469 error = (*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)&ifr); 2470 if (error) 2471 goto recover; 2472 /* Notify userland that interface flags have changed */ 2473 rt_ifmsg(ifp); 2474 return (0); 2475 2476 recover: 2477 /* Recover after driver error */ 2478 *refcount = oldcount; 2479 ifp->if_flags = oldflags; 2480 return (error); 2481 } 2482 2483 /* 2484 * Set/clear promiscuous mode on interface ifp based on the truth value 2485 * of pswitch. The calls are reference counted so that only the first 2486 * "on" request actually has an effect, as does the final "off" request. 2487 * Results are undefined if the "off" and "on" requests are not matched. 2488 */ 2489 int 2490 ifpromisc(struct ifnet *ifp, int pswitch) 2491 { 2492 int error; 2493 int oldflags = ifp->if_flags; 2494 2495 error = if_setflag(ifp, IFF_PROMISC, IFF_PPROMISC, 2496 &ifp->if_pcount, pswitch); 2497 /* If promiscuous mode status has changed, log a message */ 2498 if (error == 0 && ((ifp->if_flags ^ oldflags) & IFF_PROMISC)) 2499 log(LOG_INFO, "%s: promiscuous mode %s\n", 2500 ifp->if_xname, 2501 (ifp->if_flags & IFF_PROMISC) ? "enabled" : "disabled"); 2502 return (error); 2503 } 2504 2505 /* 2506 * Return interface configuration 2507 * of system. List may be used 2508 * in later ioctl's (above) to get 2509 * other information. 2510 */ 2511 /*ARGSUSED*/ 2512 static int 2513 ifconf(u_long cmd, caddr_t data) 2514 { 2515 INIT_VNET_NET(curvnet); 2516 struct ifconf *ifc = (struct ifconf *)data; 2517 #ifdef __amd64__ 2518 struct ifconf32 *ifc32 = (struct ifconf32 *)data; 2519 struct ifconf ifc_swab; 2520 #endif 2521 struct ifnet *ifp; 2522 struct ifaddr *ifa; 2523 struct ifreq ifr; 2524 struct sbuf *sb; 2525 int error, full = 0, valid_len, max_len; 2526 2527 #ifdef __amd64__ 2528 if (cmd == SIOCGIFCONF32) { 2529 ifc_swab.ifc_len = ifc32->ifc_len; 2530 ifc_swab.ifc_buf = (caddr_t)(uintptr_t)ifc32->ifc_buf; 2531 ifc = &ifc_swab; 2532 } 2533 #endif 2534 /* Limit initial buffer size to MAXPHYS to avoid DoS from userspace. */ 2535 max_len = MAXPHYS - 1; 2536 2537 /* Prevent hostile input from being able to crash the system */ 2538 if (ifc->ifc_len <= 0) 2539 return (EINVAL); 2540 2541 again: 2542 if (ifc->ifc_len <= max_len) { 2543 max_len = ifc->ifc_len; 2544 full = 1; 2545 } 2546 sb = sbuf_new(NULL, NULL, max_len + 1, SBUF_FIXEDLEN); 2547 max_len = 0; 2548 valid_len = 0; 2549 2550 IFNET_RLOCK(); /* could sleep XXX */ 2551 TAILQ_FOREACH(ifp, &V_ifnet, if_link) { 2552 int addrs; 2553 2554 /* 2555 * Zero the ifr_name buffer to make sure we don't 2556 * disclose the contents of the stack. 2557 */ 2558 memset(ifr.ifr_name, 0, sizeof(ifr.ifr_name)); 2559 2560 if (strlcpy(ifr.ifr_name, ifp->if_xname, sizeof(ifr.ifr_name)) 2561 >= sizeof(ifr.ifr_name)) { 2562 sbuf_delete(sb); 2563 IFNET_RUNLOCK(); 2564 return (ENAMETOOLONG); 2565 } 2566 2567 addrs = 0; 2568 IF_ADDR_LOCK(ifp); 2569 TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { 2570 struct sockaddr *sa = ifa->ifa_addr; 2571 2572 if (prison_if(curthread->td_ucred, sa) != 0) 2573 continue; 2574 addrs++; 2575 #ifdef COMPAT_43 2576 if (cmd == OSIOCGIFCONF) { 2577 struct osockaddr *osa = 2578 (struct osockaddr *)&ifr.ifr_addr; 2579 ifr.ifr_addr = *sa; 2580 osa->sa_family = sa->sa_family; 2581 sbuf_bcat(sb, &ifr, sizeof(ifr)); 2582 max_len += sizeof(ifr); 2583 } else 2584 #endif 2585 if (sa->sa_len <= sizeof(*sa)) { 2586 ifr.ifr_addr = *sa; 2587 sbuf_bcat(sb, &ifr, sizeof(ifr)); 2588 max_len += sizeof(ifr); 2589 } else { 2590 sbuf_bcat(sb, &ifr, 2591 offsetof(struct ifreq, ifr_addr)); 2592 max_len += offsetof(struct ifreq, ifr_addr); 2593 sbuf_bcat(sb, sa, sa->sa_len); 2594 max_len += sa->sa_len; 2595 } 2596 2597 if (!sbuf_overflowed(sb)) 2598 valid_len = sbuf_len(sb); 2599 } 2600 IF_ADDR_UNLOCK(ifp); 2601 if (addrs == 0) { 2602 bzero((caddr_t)&ifr.ifr_addr, sizeof(ifr.ifr_addr)); 2603 sbuf_bcat(sb, &ifr, sizeof(ifr)); 2604 max_len += sizeof(ifr); 2605 2606 if (!sbuf_overflowed(sb)) 2607 valid_len = sbuf_len(sb); 2608 } 2609 } 2610 IFNET_RUNLOCK(); 2611 2612 /* 2613 * If we didn't allocate enough space (uncommon), try again. If 2614 * we have already allocated as much space as we are allowed, 2615 * return what we've got. 2616 */ 2617 if (valid_len != max_len && !full) { 2618 sbuf_delete(sb); 2619 goto again; 2620 } 2621 2622 ifc->ifc_len = valid_len; 2623 #ifdef __amd64__ 2624 if (cmd == SIOCGIFCONF32) 2625 ifc32->ifc_len = valid_len; 2626 #endif 2627 sbuf_finish(sb); 2628 error = copyout(sbuf_data(sb), ifc->ifc_req, ifc->ifc_len); 2629 sbuf_delete(sb); 2630 return (error); 2631 } 2632 2633 /* 2634 * Just like ifpromisc(), but for all-multicast-reception mode. 2635 */ 2636 int 2637 if_allmulti(struct ifnet *ifp, int onswitch) 2638 { 2639 2640 return (if_setflag(ifp, IFF_ALLMULTI, 0, &ifp->if_amcount, onswitch)); 2641 } 2642 2643 struct ifmultiaddr * 2644 if_findmulti(struct ifnet *ifp, struct sockaddr *sa) 2645 { 2646 struct ifmultiaddr *ifma; 2647 2648 IF_ADDR_LOCK_ASSERT(ifp); 2649 2650 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2651 if (sa->sa_family == AF_LINK) { 2652 if (sa_dl_equal(ifma->ifma_addr, sa)) 2653 break; 2654 } else { 2655 if (sa_equal(ifma->ifma_addr, sa)) 2656 break; 2657 } 2658 } 2659 2660 return ifma; 2661 } 2662 2663 /* 2664 * Allocate a new ifmultiaddr and initialize based on passed arguments. We 2665 * make copies of passed sockaddrs. The ifmultiaddr will not be added to 2666 * the ifnet multicast address list here, so the caller must do that and 2667 * other setup work (such as notifying the device driver). The reference 2668 * count is initialized to 1. 2669 */ 2670 static struct ifmultiaddr * 2671 if_allocmulti(struct ifnet *ifp, struct sockaddr *sa, struct sockaddr *llsa, 2672 int mflags) 2673 { 2674 struct ifmultiaddr *ifma; 2675 struct sockaddr *dupsa; 2676 2677 ifma = malloc(sizeof *ifma, M_IFMADDR, mflags | 2678 M_ZERO); 2679 if (ifma == NULL) 2680 return (NULL); 2681 2682 dupsa = malloc(sa->sa_len, M_IFMADDR, mflags); 2683 if (dupsa == NULL) { 2684 free(ifma, M_IFMADDR); 2685 return (NULL); 2686 } 2687 bcopy(sa, dupsa, sa->sa_len); 2688 ifma->ifma_addr = dupsa; 2689 2690 ifma->ifma_ifp = ifp; 2691 ifma->ifma_refcount = 1; 2692 ifma->ifma_protospec = NULL; 2693 2694 if (llsa == NULL) { 2695 ifma->ifma_lladdr = NULL; 2696 return (ifma); 2697 } 2698 2699 dupsa = malloc(llsa->sa_len, M_IFMADDR, mflags); 2700 if (dupsa == NULL) { 2701 free(ifma->ifma_addr, M_IFMADDR); 2702 free(ifma, M_IFMADDR); 2703 return (NULL); 2704 } 2705 bcopy(llsa, dupsa, llsa->sa_len); 2706 ifma->ifma_lladdr = dupsa; 2707 2708 return (ifma); 2709 } 2710 2711 /* 2712 * if_freemulti: free ifmultiaddr structure and possibly attached related 2713 * addresses. The caller is responsible for implementing reference 2714 * counting, notifying the driver, handling routing messages, and releasing 2715 * any dependent link layer state. 2716 */ 2717 static void 2718 if_freemulti(struct ifmultiaddr *ifma) 2719 { 2720 2721 KASSERT(ifma->ifma_refcount == 0, ("if_freemulti: refcount %d", 2722 ifma->ifma_refcount)); 2723 KASSERT(ifma->ifma_protospec == NULL, 2724 ("if_freemulti: protospec not NULL")); 2725 2726 if (ifma->ifma_lladdr != NULL) 2727 free(ifma->ifma_lladdr, M_IFMADDR); 2728 free(ifma->ifma_addr, M_IFMADDR); 2729 free(ifma, M_IFMADDR); 2730 } 2731 2732 /* 2733 * Register an additional multicast address with a network interface. 2734 * 2735 * - If the address is already present, bump the reference count on the 2736 * address and return. 2737 * - If the address is not link-layer, look up a link layer address. 2738 * - Allocate address structures for one or both addresses, and attach to the 2739 * multicast address list on the interface. If automatically adding a link 2740 * layer address, the protocol address will own a reference to the link 2741 * layer address, to be freed when it is freed. 2742 * - Notify the network device driver of an addition to the multicast address 2743 * list. 2744 * 2745 * 'sa' points to caller-owned memory with the desired multicast address. 2746 * 2747 * 'retifma' will be used to return a pointer to the resulting multicast 2748 * address reference, if desired. 2749 */ 2750 int 2751 if_addmulti(struct ifnet *ifp, struct sockaddr *sa, 2752 struct ifmultiaddr **retifma) 2753 { 2754 struct ifmultiaddr *ifma, *ll_ifma; 2755 struct sockaddr *llsa; 2756 int error; 2757 2758 /* 2759 * If the address is already present, return a new reference to it; 2760 * otherwise, allocate storage and set up a new address. 2761 */ 2762 IF_ADDR_LOCK(ifp); 2763 ifma = if_findmulti(ifp, sa); 2764 if (ifma != NULL) { 2765 ifma->ifma_refcount++; 2766 if (retifma != NULL) 2767 *retifma = ifma; 2768 IF_ADDR_UNLOCK(ifp); 2769 return (0); 2770 } 2771 2772 /* 2773 * The address isn't already present; resolve the protocol address 2774 * into a link layer address, and then look that up, bump its 2775 * refcount or allocate an ifma for that also. If 'llsa' was 2776 * returned, we will need to free it later. 2777 */ 2778 llsa = NULL; 2779 ll_ifma = NULL; 2780 if (ifp->if_resolvemulti != NULL) { 2781 error = ifp->if_resolvemulti(ifp, &llsa, sa); 2782 if (error) 2783 goto unlock_out; 2784 } 2785 2786 /* 2787 * Allocate the new address. Don't hook it up yet, as we may also 2788 * need to allocate a link layer multicast address. 2789 */ 2790 ifma = if_allocmulti(ifp, sa, llsa, M_NOWAIT); 2791 if (ifma == NULL) { 2792 error = ENOMEM; 2793 goto free_llsa_out; 2794 } 2795 2796 /* 2797 * If a link layer address is found, we'll need to see if it's 2798 * already present in the address list, or allocate is as well. 2799 * When this block finishes, the link layer address will be on the 2800 * list. 2801 */ 2802 if (llsa != NULL) { 2803 ll_ifma = if_findmulti(ifp, llsa); 2804 if (ll_ifma == NULL) { 2805 ll_ifma = if_allocmulti(ifp, llsa, NULL, M_NOWAIT); 2806 if (ll_ifma == NULL) { 2807 --ifma->ifma_refcount; 2808 if_freemulti(ifma); 2809 error = ENOMEM; 2810 goto free_llsa_out; 2811 } 2812 TAILQ_INSERT_HEAD(&ifp->if_multiaddrs, ll_ifma, 2813 ifma_link); 2814 } else 2815 ll_ifma->ifma_refcount++; 2816 ifma->ifma_llifma = ll_ifma; 2817 } 2818 2819 /* 2820 * We now have a new multicast address, ifma, and possibly a new or 2821 * referenced link layer address. Add the primary address to the 2822 * ifnet address list. 2823 */ 2824 TAILQ_INSERT_HEAD(&ifp->if_multiaddrs, ifma, ifma_link); 2825 2826 if (retifma != NULL) 2827 *retifma = ifma; 2828 2829 /* 2830 * Must generate the message while holding the lock so that 'ifma' 2831 * pointer is still valid. 2832 */ 2833 rt_newmaddrmsg(RTM_NEWMADDR, ifma); 2834 IF_ADDR_UNLOCK(ifp); 2835 2836 /* 2837 * We are certain we have added something, so call down to the 2838 * interface to let them know about it. 2839 */ 2840 if (ifp->if_ioctl != NULL) { 2841 (void) (*ifp->if_ioctl)(ifp, SIOCADDMULTI, 0); 2842 } 2843 2844 if (llsa != NULL) 2845 free(llsa, M_IFMADDR); 2846 2847 return (0); 2848 2849 free_llsa_out: 2850 if (llsa != NULL) 2851 free(llsa, M_IFMADDR); 2852 2853 unlock_out: 2854 IF_ADDR_UNLOCK(ifp); 2855 return (error); 2856 } 2857 2858 /* 2859 * Delete a multicast group membership by network-layer group address. 2860 * 2861 * Returns ENOENT if the entry could not be found. If ifp no longer 2862 * exists, results are undefined. This entry point should only be used 2863 * from subsystems which do appropriate locking to hold ifp for the 2864 * duration of the call. 2865 * Network-layer protocol domains must use if_delmulti_ifma(). 2866 */ 2867 int 2868 if_delmulti(struct ifnet *ifp, struct sockaddr *sa) 2869 { 2870 struct ifmultiaddr *ifma; 2871 int lastref; 2872 #ifdef INVARIANTS 2873 struct ifnet *oifp; 2874 INIT_VNET_NET(ifp->if_vnet); 2875 2876 IFNET_RLOCK(); 2877 TAILQ_FOREACH(oifp, &V_ifnet, if_link) 2878 if (ifp == oifp) 2879 break; 2880 if (ifp != oifp) 2881 ifp = NULL; 2882 IFNET_RUNLOCK(); 2883 2884 KASSERT(ifp != NULL, ("%s: ifnet went away", __func__)); 2885 #endif 2886 if (ifp == NULL) 2887 return (ENOENT); 2888 2889 IF_ADDR_LOCK(ifp); 2890 lastref = 0; 2891 ifma = if_findmulti(ifp, sa); 2892 if (ifma != NULL) 2893 lastref = if_delmulti_locked(ifp, ifma, 0); 2894 IF_ADDR_UNLOCK(ifp); 2895 2896 if (ifma == NULL) 2897 return (ENOENT); 2898 2899 if (lastref && ifp->if_ioctl != NULL) { 2900 (void)(*ifp->if_ioctl)(ifp, SIOCDELMULTI, 0); 2901 } 2902 2903 return (0); 2904 } 2905 2906 /* 2907 * Delete a multicast group membership by group membership pointer. 2908 * Network-layer protocol domains must use this routine. 2909 * 2910 * It is safe to call this routine if the ifp disappeared. 2911 */ 2912 void 2913 if_delmulti_ifma(struct ifmultiaddr *ifma) 2914 { 2915 #ifdef DIAGNOSTIC 2916 INIT_VNET_NET(curvnet); 2917 #endif 2918 struct ifnet *ifp; 2919 int lastref; 2920 2921 ifp = ifma->ifma_ifp; 2922 #ifdef DIAGNOSTIC 2923 if (ifp == NULL) { 2924 printf("%s: ifma_ifp seems to be detached\n", __func__); 2925 } else { 2926 struct ifnet *oifp; 2927 2928 IFNET_RLOCK(); 2929 TAILQ_FOREACH(oifp, &V_ifnet, if_link) 2930 if (ifp == oifp) 2931 break; 2932 if (ifp != oifp) { 2933 printf("%s: ifnet %p disappeared\n", __func__, ifp); 2934 ifp = NULL; 2935 } 2936 IFNET_RUNLOCK(); 2937 } 2938 #endif 2939 /* 2940 * If and only if the ifnet instance exists: Acquire the address lock. 2941 */ 2942 if (ifp != NULL) 2943 IF_ADDR_LOCK(ifp); 2944 2945 lastref = if_delmulti_locked(ifp, ifma, 0); 2946 2947 if (ifp != NULL) { 2948 /* 2949 * If and only if the ifnet instance exists: 2950 * Release the address lock. 2951 * If the group was left: update the hardware hash filter. 2952 */ 2953 IF_ADDR_UNLOCK(ifp); 2954 if (lastref && ifp->if_ioctl != NULL) { 2955 (void)(*ifp->if_ioctl)(ifp, SIOCDELMULTI, 0); 2956 } 2957 } 2958 } 2959 2960 /* 2961 * Perform deletion of network-layer and/or link-layer multicast address. 2962 * 2963 * Return 0 if the reference count was decremented. 2964 * Return 1 if the final reference was released, indicating that the 2965 * hardware hash filter should be reprogrammed. 2966 */ 2967 static int 2968 if_delmulti_locked(struct ifnet *ifp, struct ifmultiaddr *ifma, int detaching) 2969 { 2970 struct ifmultiaddr *ll_ifma; 2971 2972 if (ifp != NULL && ifma->ifma_ifp != NULL) { 2973 KASSERT(ifma->ifma_ifp == ifp, 2974 ("%s: inconsistent ifp %p", __func__, ifp)); 2975 IF_ADDR_LOCK_ASSERT(ifp); 2976 } 2977 2978 ifp = ifma->ifma_ifp; 2979 2980 /* 2981 * If the ifnet is detaching, null out references to ifnet, 2982 * so that upper protocol layers will notice, and not attempt 2983 * to obtain locks for an ifnet which no longer exists. The 2984 * routing socket announcement must happen before the ifnet 2985 * instance is detached from the system. 2986 */ 2987 if (detaching) { 2988 #ifdef DIAGNOSTIC 2989 printf("%s: detaching ifnet instance %p\n", __func__, ifp); 2990 #endif 2991 /* 2992 * ifp may already be nulled out if we are being reentered 2993 * to delete the ll_ifma. 2994 */ 2995 if (ifp != NULL) { 2996 rt_newmaddrmsg(RTM_DELMADDR, ifma); 2997 ifma->ifma_ifp = NULL; 2998 } 2999 } 3000 3001 if (--ifma->ifma_refcount > 0) 3002 return 0; 3003 3004 /* 3005 * If this ifma is a network-layer ifma, a link-layer ifma may 3006 * have been associated with it. Release it first if so. 3007 */ 3008 ll_ifma = ifma->ifma_llifma; 3009 if (ll_ifma != NULL) { 3010 KASSERT(ifma->ifma_lladdr != NULL, 3011 ("%s: llifma w/o lladdr", __func__)); 3012 if (detaching) 3013 ll_ifma->ifma_ifp = NULL; /* XXX */ 3014 if (--ll_ifma->ifma_refcount == 0) { 3015 if (ifp != NULL) { 3016 TAILQ_REMOVE(&ifp->if_multiaddrs, ll_ifma, 3017 ifma_link); 3018 } 3019 if_freemulti(ll_ifma); 3020 } 3021 } 3022 3023 if (ifp != NULL) 3024 TAILQ_REMOVE(&ifp->if_multiaddrs, ifma, ifma_link); 3025 3026 if_freemulti(ifma); 3027 3028 /* 3029 * The last reference to this instance of struct ifmultiaddr 3030 * was released; the hardware should be notified of this change. 3031 */ 3032 return 1; 3033 } 3034 3035 /* 3036 * Set the link layer address on an interface. 3037 * 3038 * At this time we only support certain types of interfaces, 3039 * and we don't allow the length of the address to change. 3040 */ 3041 int 3042 if_setlladdr(struct ifnet *ifp, const u_char *lladdr, int len) 3043 { 3044 struct sockaddr_dl *sdl; 3045 struct ifaddr *ifa; 3046 struct ifreq ifr; 3047 3048 ifa = ifp->if_addr; 3049 if (ifa == NULL) 3050 return (EINVAL); 3051 sdl = (struct sockaddr_dl *)ifa->ifa_addr; 3052 if (sdl == NULL) 3053 return (EINVAL); 3054 if (len != sdl->sdl_alen) /* don't allow length to change */ 3055 return (EINVAL); 3056 switch (ifp->if_type) { 3057 case IFT_ETHER: 3058 case IFT_FDDI: 3059 case IFT_XETHER: 3060 case IFT_ISO88025: 3061 case IFT_L2VLAN: 3062 case IFT_BRIDGE: 3063 case IFT_ARCNET: 3064 case IFT_IEEE8023ADLAG: 3065 case IFT_IEEE80211: 3066 bcopy(lladdr, LLADDR(sdl), len); 3067 break; 3068 default: 3069 return (ENODEV); 3070 } 3071 /* 3072 * If the interface is already up, we need 3073 * to re-init it in order to reprogram its 3074 * address filter. 3075 */ 3076 if ((ifp->if_flags & IFF_UP) != 0) { 3077 if (ifp->if_ioctl) { 3078 ifp->if_flags &= ~IFF_UP; 3079 ifr.ifr_flags = ifp->if_flags & 0xffff; 3080 ifr.ifr_flagshigh = ifp->if_flags >> 16; 3081 (*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)&ifr); 3082 ifp->if_flags |= IFF_UP; 3083 ifr.ifr_flags = ifp->if_flags & 0xffff; 3084 ifr.ifr_flagshigh = ifp->if_flags >> 16; 3085 (*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)&ifr); 3086 } 3087 #ifdef INET 3088 /* 3089 * Also send gratuitous ARPs to notify other nodes about 3090 * the address change. 3091 */ 3092 TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { 3093 if (ifa->ifa_addr->sa_family == AF_INET) 3094 arp_ifinit(ifp, ifa); 3095 } 3096 #endif 3097 } 3098 return (0); 3099 } 3100 3101 /* 3102 * The name argument must be a pointer to storage which will last as 3103 * long as the interface does. For physical devices, the result of 3104 * device_get_name(dev) is a good choice and for pseudo-devices a 3105 * static string works well. 3106 */ 3107 void 3108 if_initname(struct ifnet *ifp, const char *name, int unit) 3109 { 3110 ifp->if_dname = name; 3111 ifp->if_dunit = unit; 3112 if (unit != IF_DUNIT_NONE) 3113 snprintf(ifp->if_xname, IFNAMSIZ, "%s%d", name, unit); 3114 else 3115 strlcpy(ifp->if_xname, name, IFNAMSIZ); 3116 } 3117 3118 int 3119 if_printf(struct ifnet *ifp, const char * fmt, ...) 3120 { 3121 va_list ap; 3122 int retval; 3123 3124 retval = printf("%s: ", ifp->if_xname); 3125 va_start(ap, fmt); 3126 retval += vprintf(fmt, ap); 3127 va_end(ap); 3128 return (retval); 3129 } 3130 3131 void 3132 if_start(struct ifnet *ifp) 3133 { 3134 3135 (*(ifp)->if_start)(ifp); 3136 } 3137 3138 /* 3139 * Backwards compatibility interface for drivers 3140 * that have not implemented it 3141 */ 3142 static int 3143 if_transmit(struct ifnet *ifp, struct mbuf *m) 3144 { 3145 int error; 3146 3147 IFQ_HANDOFF(ifp, m, error); 3148 return (error); 3149 } 3150 3151 int 3152 if_handoff(struct ifqueue *ifq, struct mbuf *m, struct ifnet *ifp, int adjust) 3153 { 3154 int active = 0; 3155 3156 IF_LOCK(ifq); 3157 if (_IF_QFULL(ifq)) { 3158 _IF_DROP(ifq); 3159 IF_UNLOCK(ifq); 3160 m_freem(m); 3161 return (0); 3162 } 3163 if (ifp != NULL) { 3164 ifp->if_obytes += m->m_pkthdr.len + adjust; 3165 if (m->m_flags & (M_BCAST|M_MCAST)) 3166 ifp->if_omcasts++; 3167 active = ifp->if_drv_flags & IFF_DRV_OACTIVE; 3168 } 3169 _IF_ENQUEUE(ifq, m); 3170 IF_UNLOCK(ifq); 3171 if (ifp != NULL && !active) 3172 (*(ifp)->if_start)(ifp); 3173 return (1); 3174 } 3175 3176 void 3177 if_register_com_alloc(u_char type, 3178 if_com_alloc_t *a, if_com_free_t *f) 3179 { 3180 3181 KASSERT(if_com_alloc[type] == NULL, 3182 ("if_register_com_alloc: %d already registered", type)); 3183 KASSERT(if_com_free[type] == NULL, 3184 ("if_register_com_alloc: %d free already registered", type)); 3185 3186 if_com_alloc[type] = a; 3187 if_com_free[type] = f; 3188 } 3189 3190 void 3191 if_deregister_com_alloc(u_char type) 3192 { 3193 3194 KASSERT(if_com_alloc[type] != NULL, 3195 ("if_deregister_com_alloc: %d not registered", type)); 3196 KASSERT(if_com_free[type] != NULL, 3197 ("if_deregister_com_alloc: %d free not registered", type)); 3198 if_com_alloc[type] = NULL; 3199 if_com_free[type] = NULL; 3200 } 3201