/*-
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.
 *	All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)raw_ip.c	8.7 (Berkeley) 5/15/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"

#include <sys/param.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <vm/uma.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/route.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip_mroute.h>

#ifdef IPSEC
#include <netipsec/ipsec.h>
#endif /*IPSEC*/

#include <security/mac/mac_framework.h>

VNET_DEFINE(int, ip_defttl) = IPDEFTTL;
SYSCTL_VNET_INT(_net_inet_ip, IPCTL_DEFTTL, ttl, CTLFLAG_RW,
    &VNET_NAME(ip_defttl), 0,
    "Maximum TTL on IP packets");

VNET_DEFINE(struct inpcbhead, ripcb);
VNET_DEFINE(struct inpcbinfo, ripcbinfo);

#define	V_ripcb			VNET(ripcb)
#define	V_ripcbinfo		VNET(ripcbinfo)

/*
 * Control and data hooks for ipfw, dummynet, divert and so on.
 * The data hooks are not used here but it is convenient
 * to keep them all in one place.
 */
VNET_DEFINE(ip_fw_chk_ptr_t, ip_fw_chk_ptr) = NULL;
VNET_DEFINE(ip_fw_ctl_ptr_t, ip_fw_ctl_ptr) = NULL;

int	(*ip_dn_ctl_ptr)(struct sockopt *);
int	(*ip_dn_io_ptr)(struct mbuf **, int, struct ip_fw_args *);
void	(*ip_divert_ptr)(struct mbuf *, int);
int	(*ng_ipfw_input_p)(struct mbuf **, int,
	    struct ip_fw_args *, int);

#ifdef INET
/*
 * Hooks for multicast routing. They all default to NULL, so leave them not
 * initialized and rely on BSS being set to 0.
 */

/*
 * The socket used to communicate with the multicast routing daemon.
 */
VNET_DEFINE(struct socket *, ip_mrouter);

/*
 * The various mrouter and rsvp functions.
 */
int (*ip_mrouter_set)(struct socket *, struct sockopt *);
int (*ip_mrouter_get)(struct socket *, struct sockopt *);
int (*ip_mrouter_done)(void);
int (*ip_mforward)(struct ip *, struct ifnet *, struct mbuf *,
    struct ip_moptions *);
int (*mrt_ioctl)(u_long, caddr_t, int);
int (*legal_vif_num)(int);
u_long (*ip_mcast_src)(int);

void (*rsvp_input_p)(struct mbuf *m, int off);
int (*ip_rsvp_vif)(struct socket *, struct sockopt *);
void (*ip_rsvp_force_done)(struct socket *);
#endif /* INET */

u_long	rip_sendspace = 9216;
SYSCTL_ULONG(_net_inet_raw, OID_AUTO, maxdgram, CTLFLAG_RW,
    &rip_sendspace, 0, "Maximum outgoing raw IP datagram size");

u_long	rip_recvspace = 9216;
SYSCTL_ULONG(_net_inet_raw, OID_AUTO, recvspace, CTLFLAG_RW,
    &rip_recvspace, 0, "Maximum space for incoming raw IP datagrams");

/*
 * Hash functions
 */

#define INP_PCBHASH_RAW_SIZE	256
#define INP_PCBHASH_RAW(proto, laddr, faddr, mask) \
	(((proto) + (laddr) + (faddr)) % (mask) + 1)

#ifdef INET
static void
rip_inshash(struct inpcb *inp)
{
	struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
	struct inpcbhead *pcbhash;
	int hash;

	INP_INFO_WLOCK_ASSERT(pcbinfo);
	INP_WLOCK_ASSERT(inp);

	if (inp->inp_ip_p != 0 &&
	    inp->inp_laddr.s_addr != INADDR_ANY &&
	    inp->inp_faddr.s_addr != INADDR_ANY) {
		hash = INP_PCBHASH_RAW(inp->inp_ip_p, inp->inp_laddr.s_addr,
		    inp->inp_faddr.s_addr, pcbinfo->ipi_hashmask);
	} else
		hash = 0;
	pcbhash = &pcbinfo->ipi_hashbase[hash];
	LIST_INSERT_HEAD(pcbhash, inp, inp_hash);
}

static void
rip_delhash(struct inpcb *inp)
{

	INP_INFO_WLOCK_ASSERT(inp->inp_pcbinfo);
	INP_WLOCK_ASSERT(inp);

	LIST_REMOVE(inp, inp_hash);
}
#endif /* INET */

/*
 * Raw interface to IP protocol.
 */

/*
 * Initialize raw connection block q.
 */
static void
rip_zone_change(void *tag)
{

	uma_zone_set_max(V_ripcbinfo.ipi_zone, maxsockets);
}

static int
rip_inpcb_init(void *mem, int size, int flags)
{
	struct inpcb *inp = mem;

	INP_LOCK_INIT(inp, "inp", "rawinp");
	return (0);
}

void
rip_init(void)
{

	in_pcbinfo_init(&V_ripcbinfo, "rip", &V_ripcb, INP_PCBHASH_RAW_SIZE,
	    1, "ripcb", rip_inpcb_init, NULL, UMA_ZONE_NOFREE,
	    IPI_HASHFIELDS_NONE);
	EVENTHANDLER_REGISTER(maxsockets_change, rip_zone_change, NULL,
	    EVENTHANDLER_PRI_ANY);
}

#ifdef VIMAGE
void
rip_destroy(void)
{

	in_pcbinfo_destroy(&V_ripcbinfo);
}
#endif

#ifdef INET
static int
rip_append(struct inpcb *last, struct ip *ip, struct mbuf *n,
    struct sockaddr_in *ripsrc)
{
	int policyfail = 0;

	INP_LOCK_ASSERT(last);

#ifdef IPSEC
	/* check AH/ESP integrity. */
	if (ipsec4_in_reject(n, last)) {
		policyfail = 1;
	}
#endif /* IPSEC */
#ifdef MAC
	if (!policyfail && mac_inpcb_check_deliver(last, n) != 0)
		policyfail = 1;
#endif
	/* Check the minimum TTL for socket. */
	if (last->inp_ip_minttl && last->inp_ip_minttl > ip->ip_ttl)
		policyfail = 1;
	if (!policyfail) {
		struct mbuf *opts = NULL;
		struct socket *so;

		so = last->inp_socket;
		if ((last->inp_flags & INP_CONTROLOPTS) ||
		    (so->so_options & (SO_TIMESTAMP | SO_BINTIME)))
			ip_savecontrol(last, &opts, ip, n);
		SOCKBUF_LOCK(&so->so_rcv);
		if (sbappendaddr_locked(&so->so_rcv,
		    (struct sockaddr *)ripsrc, n, opts) == 0) {
			/* should notify about lost packet */
			m_freem(n);
			if (opts)
				m_freem(opts);
			SOCKBUF_UNLOCK(&so->so_rcv);
		} else
			sorwakeup_locked(so);
	} else
		m_freem(n);
	return (policyfail);
}

/*
 * Set up generic address and protocol structures for the raw_input routine,
 * then pass them along with the mbuf chain.
 */
void
rip_input(struct mbuf *m, int off)
{
	struct ifnet *ifp;
	struct ip *ip = mtod(m, struct ip *);
	int proto = ip->ip_p;
	struct inpcb *inp, *last;
	struct sockaddr_in ripsrc;
	int hash;

	bzero(&ripsrc, sizeof(ripsrc));
	ripsrc.sin_len = sizeof(ripsrc);
	ripsrc.sin_family = AF_INET;
	ripsrc.sin_addr = ip->ip_src;
	last = NULL;

	ifp = m->m_pkthdr.rcvif;
	/*
	 * Applications on raw sockets expect host byte order.
	 */
	ip->ip_len = ntohs(ip->ip_len);
	ip->ip_off = ntohs(ip->ip_off);

	hash = INP_PCBHASH_RAW(proto, ip->ip_src.s_addr,
	    ip->ip_dst.s_addr, V_ripcbinfo.ipi_hashmask);
	INP_INFO_RLOCK(&V_ripcbinfo);
	LIST_FOREACH(inp, &V_ripcbinfo.ipi_hashbase[hash], inp_hash) {
		if (inp->inp_ip_p != proto)
			continue;
#ifdef INET6
		/* XXX inp locking */
		if ((inp->inp_vflag & INP_IPV4) == 0)
			continue;
#endif
		if (inp->inp_laddr.s_addr != ip->ip_dst.s_addr)
			continue;
		if (inp->inp_faddr.s_addr != ip->ip_src.s_addr)
			continue;
		if (jailed_without_vnet(inp->inp_cred)) {
			/*
			 * XXX: If faddr was bound to multicast group,
			 * jailed raw socket will drop datagram.
			 */
			if (prison_check_ip4(inp->inp_cred, &ip->ip_dst) != 0)
				continue;
		}
		if (last != NULL) {
			struct mbuf *n;

			n = m_copy(m, 0, (int)M_COPYALL);
			if (n != NULL)
				(void) rip_append(last, ip, n, &ripsrc);
			/* XXX count dropped packet */
			INP_RUNLOCK(last);
		}
		INP_RLOCK(inp);
		last = inp;
	}
	LIST_FOREACH(inp, &V_ripcbinfo.ipi_hashbase[0], inp_hash) {
		if (inp->inp_ip_p && inp->inp_ip_p != proto)
			continue;
#ifdef INET6
		/* XXX inp locking */
		if ((inp->inp_vflag & INP_IPV4) == 0)
			continue;
#endif
		if (!in_nullhost(inp->inp_laddr) &&
		    !in_hosteq(inp->inp_laddr, ip->ip_dst))
			continue;
		if (!in_nullhost(inp->inp_faddr) &&
		    !in_hosteq(inp->inp_faddr, ip->ip_src))
			continue;
		if (jailed_without_vnet(inp->inp_cred)) {
			/*
			 * Allow raw socket in jail to receive multicast;
			 * assume process had PRIV_NETINET_RAW at attach,
			 * and fall through into normal filter path if so.
			 */
			if (!IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) &&
			    prison_check_ip4(inp->inp_cred, &ip->ip_dst) != 0)
				continue;
		}
		/*
		 * If this raw socket has multicast state, and we
		 * have received a multicast, check if this socket
		 * should receive it, as multicast filtering is now
		 * the responsibility of the transport layer.
		 */
		if (inp->inp_moptions != NULL &&
		    IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
			/*
			 * If the incoming datagram is for IGMP, allow it
			 * through unconditionally to the raw socket.
			 *
			 * In the case of IGMPv2, we may not have explicitly
			 * joined the group, and may have set IFF_ALLMULTI
			 * on the interface.  imo_multi_filter() may discard
			 * control traffic we actually need to see.
			 *
			 * Userland multicast routing daemons should continue
			 * to filter the control traffic appropriately.
			 */
			int blocked;

			blocked = MCAST_PASS;
			if (proto != IPPROTO_IGMP) {
				struct sockaddr_in group;

				bzero(&group, sizeof(struct sockaddr_in));
				group.sin_len = sizeof(struct sockaddr_in);
				group.sin_family = AF_INET;
				group.sin_addr = ip->ip_dst;

				blocked = imo_multi_filter(inp->inp_moptions,
				    ifp,
				    (struct sockaddr *)&group,
				    (struct sockaddr *)&ripsrc);
			}

			if (blocked != MCAST_PASS) {
				IPSTAT_INC(ips_notmember);
				continue;
			}
		}
		if (last != NULL) {
			struct mbuf *n;

			n = m_copy(m, 0, (int)M_COPYALL);
			if (n != NULL)
				(void) rip_append(last, ip, n, &ripsrc);
			/* XXX count dropped packet */
			INP_RUNLOCK(last);
		}
		INP_RLOCK(inp);
		last = inp;
	}
	INP_INFO_RUNLOCK(&V_ripcbinfo);
	if (last != NULL) {
		if (rip_append(last, ip, m, &ripsrc) != 0)
			IPSTAT_INC(ips_delivered);
		INP_RUNLOCK(last);
	} else {
		m_freem(m);
		IPSTAT_INC(ips_noproto);
		IPSTAT_DEC(ips_delivered);
	}
}

/*
 * Generate an IP header and pass the packet to ip_output.  Tack on options
 * the user may have set up with a control call.
 */
int
rip_output(struct mbuf *m, struct socket *so, u_long dst)
{
	struct ip *ip;
	int error;
	struct inpcb *inp = sotoinpcb(so);
	int flags = ((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0) |
	    IP_ALLOWBROADCAST;

	/*
	 * If the user handed us a complete IP packet, use it.  Otherwise,
	 * allocate an mbuf for a header and fill it in.
	 */
	if ((inp->inp_flags & INP_HDRINCL) == 0) {
		if (m->m_pkthdr.len + sizeof(struct ip) > IP_MAXPACKET) {
			m_freem(m);
			return (EMSGSIZE);
		}
		M_PREPEND(m, sizeof(struct ip), M_NOWAIT);
		if (m == NULL)
			return (ENOBUFS);

		INP_RLOCK(inp);
		ip = mtod(m, struct ip *);
		ip->ip_tos = inp->inp_ip_tos;
		if (inp->inp_flags & INP_DONTFRAG)
			ip->ip_off = htons(IP_DF);
		else
			ip->ip_off = htons(0);
		ip->ip_p = inp->inp_ip_p;
		ip->ip_len = htons(m->m_pkthdr.len);
		ip->ip_src = inp->inp_laddr;
		if (jailed(inp->inp_cred)) {
			/*
			 * prison_local_ip4() would be good enough but would
			 * let a source of INADDR_ANY pass, which we do not
			 * want to see from jails.  We do not go through the
			 * pain of in_pcbladdr() for raw sockets.
			 */
			if (ip->ip_src.s_addr == INADDR_ANY)
				error = prison_get_ip4(inp->inp_cred,
				    &ip->ip_src);
			else
				error = prison_local_ip4(inp->inp_cred,
				    &ip->ip_src);
			if (error != 0) {
				INP_RUNLOCK(inp);
				m_freem(m);
				return (error);
			}
		}
		ip->ip_dst.s_addr = dst;
		ip->ip_ttl = inp->inp_ip_ttl;
	} else {
		if (m->m_pkthdr.len > IP_MAXPACKET) {
			m_freem(m);
			return (EMSGSIZE);
		}
		INP_RLOCK(inp);
		ip = mtod(m, struct ip *);
		error = prison_check_ip4(inp->inp_cred, &ip->ip_src);
		if (error != 0) {
			INP_RUNLOCK(inp);
			m_freem(m);
			return (error);
		}

		/*
		 * Don't allow both user specified and setsockopt options,
		 * and don't allow packet length sizes that will crash.
		 */
		if (((ip->ip_hl != (sizeof (*ip) >> 2)) && inp->inp_options)
		    || (ip->ip_len > m->m_pkthdr.len)
		    || (ip->ip_len < (ip->ip_hl << 2))) {
			INP_RUNLOCK(inp);
			m_freem(m);
			return (EINVAL);
		}
		if (ip->ip_id == 0)
			ip->ip_id = ip_newid();

		/*
		 * Applications on raw sockets pass us packets
		 * in host byte order.
		 */
		ip->ip_len = htons(ip->ip_len);
		ip->ip_off = htons(ip->ip_off);

		/*
		 * XXX prevent ip_output from overwriting header fields.
		 */
		flags |= IP_RAWOUTPUT;
		IPSTAT_INC(ips_rawout);
	}

	if (inp->inp_flags & INP_ONESBCAST)
		flags |= IP_SENDONES;

#ifdef MAC
	mac_inpcb_create_mbuf(inp, m);
#endif

	error = ip_output(m, inp->inp_options, NULL, flags,
	    inp->inp_moptions, inp);
	INP_RUNLOCK(inp);
	return (error);
}

/*
 * Raw IP socket option processing.
 *
 * IMPORTANT NOTE regarding access control: Traditionally, raw sockets could
 * only be created by a privileged process, and as such, socket option
 * operations to manage system properties on any raw socket were allowed to
 * take place without explicit additional access control checks.  However,
 * raw sockets can now also be created in jail(), and therefore explicit
 * checks are now required.  Likewise, raw sockets can be used by a process
 * after it gives up privilege, so some caution is required.  For options
 * passed down to the IP layer via ip_ctloutput(), checks are assumed to be
 * performed in ip_ctloutput() and therefore no check occurs here.
 * Unilaterally checking priv_check() here breaks normal IP socket option
 * operations on raw sockets.
 *
 * When adding new socket options here, make sure to add access control
 * checks here as necessary.
 *
 * XXX-BZ inp locking?
 */
int
rip_ctloutput(struct socket *so, struct sockopt *sopt)
{
	struct inpcb *inp = sotoinpcb(so);
	int error, optval;

	if (sopt->sopt_level != IPPROTO_IP) {
		if ((sopt->sopt_level == SOL_SOCKET) &&
		    (sopt->sopt_name == SO_SETFIB)) {
			inp->inp_inc.inc_fibnum = so->so_fibnum;
			return (0);
		}
		return (EINVAL);
	}

	error = 0;
	switch (sopt->sopt_dir) {
	case SOPT_GET:
		switch (sopt->sopt_name) {
		case IP_HDRINCL:
			optval = inp->inp_flags & INP_HDRINCL;
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;

		case IP_FW3:	/* generic ipfw v.3 functions */
		case IP_FW_ADD:	/* ADD actually returns the body... */
		case IP_FW_GET:
		case IP_FW_TABLE_GETSIZE:
		case IP_FW_TABLE_LIST:
		case IP_FW_NAT_GET_CONFIG:
		case IP_FW_NAT_GET_LOG:
			if (V_ip_fw_ctl_ptr != NULL)
				error = V_ip_fw_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

		case IP_DUMMYNET3:	/* generic dummynet v.3 functions */
		case IP_DUMMYNET_GET:
			if (ip_dn_ctl_ptr != NULL)
				error = ip_dn_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

		case MRT_INIT:
		case MRT_DONE:
		case MRT_ADD_VIF:
		case MRT_DEL_VIF:
		case MRT_ADD_MFC:
		case MRT_DEL_MFC:
		case MRT_VERSION:
		case MRT_ASSERT:
		case MRT_API_SUPPORT:
		case MRT_API_CONFIG:
		case MRT_ADD_BW_UPCALL:
		case MRT_DEL_BW_UPCALL:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_mrouter_get ? ip_mrouter_get(so, sopt) :
			    EOPNOTSUPP;
			break;

		default:
			error = ip_ctloutput(so, sopt);
			break;
		}
		break;

	case SOPT_SET:
		switch (sopt->sopt_name) {
		case IP_HDRINCL:
			error = sooptcopyin(sopt, &optval, sizeof optval,
			    sizeof optval);
			if (error)
				break;
			if (optval)
				inp->inp_flags |= INP_HDRINCL;
			else
				inp->inp_flags &= ~INP_HDRINCL;
			break;

		case IP_FW3:	/* generic ipfw v.3 functions */
		case IP_FW_ADD:
		case IP_FW_DEL:
		case IP_FW_FLUSH:
		case IP_FW_ZERO:
		case IP_FW_RESETLOG:
		case IP_FW_TABLE_ADD:
		case IP_FW_TABLE_DEL:
		case IP_FW_TABLE_FLUSH:
		case IP_FW_NAT_CFG:
		case IP_FW_NAT_DEL:
			if (V_ip_fw_ctl_ptr != NULL)
				error = V_ip_fw_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

		case IP_DUMMYNET3:	/* generic dummynet v.3 functions */
		case IP_DUMMYNET_CONFIGURE:
		case IP_DUMMYNET_DEL:
		case IP_DUMMYNET_FLUSH:
			if (ip_dn_ctl_ptr != NULL)
				error = ip_dn_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

		case IP_RSVP_ON:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_rsvp_init(so);
			break;

		case IP_RSVP_OFF:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_rsvp_done();
			break;

		case IP_RSVP_VIF_ON:
		case IP_RSVP_VIF_OFF:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_rsvp_vif ?
			    ip_rsvp_vif(so, sopt) : EINVAL;
			break;

		case MRT_INIT:
		case MRT_DONE:
		case MRT_ADD_VIF:
		case MRT_DEL_VIF:
		case MRT_ADD_MFC:
		case MRT_DEL_MFC:
		case MRT_VERSION:
		case MRT_ASSERT:
		case MRT_API_SUPPORT:
		case MRT_API_CONFIG:
		case MRT_ADD_BW_UPCALL:
		case MRT_DEL_BW_UPCALL:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_mrouter_set ? ip_mrouter_set(so, sopt) :
			    EOPNOTSUPP;
			break;

		default:
			error = ip_ctloutput(so, sopt);
			break;
		}
		break;
	}

	return (error);
}

/*
 * This function exists solely to receive the PRC_IFDOWN messages which are
 * sent by if_down().  It looks for an ifaddr whose ifa_addr is sa, and calls
 * in_ifadown() to remove all routes corresponding to that address.  It also
 * receives the PRC_IFUP messages from if_up() and reinstalls the interface
 * routes.
 */
void
rip_ctlinput(int cmd, struct sockaddr *sa, void *vip)
{
	struct in_ifaddr *ia;
	struct ifnet *ifp;
	int err;
	int flags;

	switch (cmd) {
	case PRC_IFDOWN:
		IN_IFADDR_RLOCK();
		TAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
			if (ia->ia_ifa.ifa_addr == sa
			    && (ia->ia_flags & IFA_ROUTE)) {
				ifa_ref(&ia->ia_ifa);
				IN_IFADDR_RUNLOCK();
				/*
				 * in_ifscrub kills the interface route.
				 */
				in_ifscrub(ia->ia_ifp, ia, 0);
				/*
				 * in_ifadown gets rid of all the rest of the
				 * routes.  This is not quite the right thing
				 * to do, but at least if we are running a
				 * routing process they will come back.
				 */
				in_ifadown(&ia->ia_ifa, 0);
				ifa_free(&ia->ia_ifa);
				break;
			}
		}
		if (ia == NULL)		/* If ia matched, already unlocked. */
			IN_IFADDR_RUNLOCK();
		break;

	case PRC_IFUP:
		IN_IFADDR_RLOCK();
		TAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
			if (ia->ia_ifa.ifa_addr == sa)
				break;
		}
		if (ia == NULL || (ia->ia_flags & IFA_ROUTE)) {
			IN_IFADDR_RUNLOCK();
			return;
		}
		ifa_ref(&ia->ia_ifa);
		IN_IFADDR_RUNLOCK();
		flags = RTF_UP;
		ifp = ia->ia_ifa.ifa_ifp;

		if ((ifp->if_flags & IFF_LOOPBACK)
		    || (ifp->if_flags & IFF_POINTOPOINT))
			flags |= RTF_HOST;

		err = ifa_del_loopback_route((struct ifaddr *)ia, sa);
		if (err == 0)
			ia->ia_flags &= ~IFA_RTSELF;

		err = rtinit(&ia->ia_ifa, RTM_ADD, flags);
		if (err == 0)
			ia->ia_flags |= IFA_ROUTE;

		err = ifa_add_loopback_route((struct ifaddr *)ia, sa);
		if (err == 0)
			ia->ia_flags |= IFA_RTSELF;

		ifa_free(&ia->ia_ifa);
		break;
	}
}

static int
rip_attach(struct socket *so, int proto, struct thread *td)
{
	struct inpcb *inp;
	int error;

	inp = sotoinpcb(so);
	KASSERT(inp == NULL, ("rip_attach: inp != NULL"));

	error = priv_check(td, PRIV_NETINET_RAW);
	if (error)
		return (error);
	if (proto >= IPPROTO_MAX || proto < 0)
		return (EPROTONOSUPPORT);
	error = soreserve(so, rip_sendspace, rip_recvspace);
	if (error)
		return (error);
	INP_INFO_WLOCK(&V_ripcbinfo);
	error = in_pcballoc(so, &V_ripcbinfo);
	if (error) {
		INP_INFO_WUNLOCK(&V_ripcbinfo);
		return (error);
	}
	inp = (struct inpcb *)so->so_pcb;
	inp->inp_vflag |= INP_IPV4;
	inp->inp_ip_p = proto;
	inp->inp_ip_ttl = V_ip_defttl;
	rip_inshash(inp);
	INP_INFO_WUNLOCK(&V_ripcbinfo);
	INP_WUNLOCK(inp);
	return (0);
}

static void
rip_detach(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_detach: inp == NULL"));
	KASSERT(inp->inp_faddr.s_addr == INADDR_ANY,
	    ("rip_detach: not closed"));

	INP_INFO_WLOCK(&V_ripcbinfo);
	INP_WLOCK(inp);
	rip_delhash(inp);
	if (so == V_ip_mrouter && ip_mrouter_done)
		ip_mrouter_done();
	if (ip_rsvp_force_done)
		ip_rsvp_force_done(so);
	if (so == V_ip_rsvpd)
		ip_rsvp_done();
	in_pcbdetach(inp);
	in_pcbfree(inp);
	INP_INFO_WUNLOCK(&V_ripcbinfo);
}

static void
rip_dodisconnect(struct socket *so, struct inpcb *inp)
{
	struct inpcbinfo *pcbinfo;

	pcbinfo = inp->inp_pcbinfo;
	INP_INFO_WLOCK(pcbinfo);
	INP_WLOCK(inp);
	rip_delhash(inp);
	inp->inp_faddr.s_addr = INADDR_ANY;
	rip_inshash(inp);
	SOCK_LOCK(so);
	so->so_state &= ~SS_ISCONNECTED;
	SOCK_UNLOCK(so);
	INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(pcbinfo);
}

static void
rip_abort(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_abort: inp == NULL"));

	rip_dodisconnect(so, inp);
}

static void
rip_close(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_close: inp == NULL"));

	rip_dodisconnect(so, inp);
}

static int
rip_disconnect(struct socket *so)
{
	struct inpcb *inp;

	if ((so->so_state & SS_ISCONNECTED) == 0)
		return (ENOTCONN);

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_disconnect: inp == NULL"));

	rip_dodisconnect(so, inp);
	return (0);
}

static int
rip_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct sockaddr_in *addr = (struct sockaddr_in *)nam;
	struct inpcb *inp;
	int error;

	if (nam->sa_len != sizeof(*addr))
		return (EINVAL);

	error = prison_check_ip4(td->td_ucred, &addr->sin_addr);
	if (error != 0)
		return (error);

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_bind: inp == NULL"));

	if (TAILQ_EMPTY(&V_ifnet) ||
	    (addr->sin_family != AF_INET && addr->sin_family != AF_IMPLINK) ||
	    (addr->sin_addr.s_addr &&
	     (inp->inp_flags & INP_BINDANY) == 0 &&
	     ifa_ifwithaddr_check((struct sockaddr *)addr) == 0))
		return (EADDRNOTAVAIL);

	INP_INFO_WLOCK(&V_ripcbinfo);
	INP_WLOCK(inp);
	rip_delhash(inp);
	inp->inp_laddr = addr->sin_addr;
	rip_inshash(inp);
	INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(&V_ripcbinfo);
	return (0);
}

static int
rip_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct sockaddr_in *addr = (struct sockaddr_in *)nam;
	struct inpcb *inp;

	if (nam->sa_len != sizeof(*addr))
		return (EINVAL);
	if (TAILQ_EMPTY(&V_ifnet))
		return (EADDRNOTAVAIL);
	if (addr->sin_family != AF_INET && addr->sin_family != AF_IMPLINK)
		return (EAFNOSUPPORT);

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_connect: inp == NULL"));

	INP_INFO_WLOCK(&V_ripcbinfo);
	INP_WLOCK(inp);
	rip_delhash(inp);
	inp->inp_faddr = addr->sin_addr;
	rip_inshash(inp);
	soisconnected(so);
	INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(&V_ripcbinfo);
	return (0);
}

static int
rip_shutdown(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_shutdown: inp == NULL"));

	INP_WLOCK(inp);
	socantsendmore(so);
	INP_WUNLOCK(inp);
	return (0);
}

static int
rip_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
    struct mbuf *control, struct thread *td)
{
	struct inpcb *inp;
	u_long dst;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_send: inp == NULL"));

	/*
	 * Note: 'dst' reads below are unlocked.
	 */
	if (so->so_state & SS_ISCONNECTED) {
		if (nam) {
			m_freem(m);
			return (EISCONN);
		}
		dst = inp->inp_faddr.s_addr;	/* Unlocked read. */
	} else {
		if (nam == NULL) {
			m_freem(m);
			return (ENOTCONN);
		}
		dst = ((struct sockaddr_in *)nam)->sin_addr.s_addr;
	}
	return (rip_output(m, so, dst));
}
#endif /* INET */

static int
rip_pcblist(SYSCTL_HANDLER_ARGS)
{
	int error, i, n;
	struct inpcb *inp, **inp_list;
	inp_gen_t gencnt;
	struct xinpgen xig;

	/*
	 * The process of preparing the TCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	if (req->oldptr == 0) {
		n = V_ripcbinfo.ipi_count;
		n += imax(n / 8, 10);
		req->oldidx = 2 * (sizeof xig) + n * sizeof(struct xinpcb);
		return (0);
	}

	if (req->newptr != 0)
		return (EPERM);

	/*
	 * OK, now we're committed to doing something.
	 */
	INP_INFO_RLOCK(&V_ripcbinfo);
	gencnt = V_ripcbinfo.ipi_gencnt;
	n = V_ripcbinfo.ipi_count;
	INP_INFO_RUNLOCK(&V_ripcbinfo);

	xig.xig_len = sizeof xig;
	xig.xig_count = n;
	xig.xig_gen = gencnt;
	xig.xig_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xig, sizeof xig);
	if (error)
		return (error);

	inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);
	if (inp_list == 0)
		return (ENOMEM);

	INP_INFO_RLOCK(&V_ripcbinfo);
	for (inp = LIST_FIRST(V_ripcbinfo.ipi_listhead), i = 0; inp && i < n;
	     inp = LIST_NEXT(inp, inp_list)) {
		INP_WLOCK(inp);
		if (inp->inp_gencnt <= gencnt &&
		    cr_canseeinpcb(req->td->td_ucred, inp) == 0) {
			in_pcbref(inp);
			inp_list[i++] = inp;
		}
		INP_WUNLOCK(inp);
	}
	INP_INFO_RUNLOCK(&V_ripcbinfo);
	n = i;

	error = 0;
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		INP_RLOCK(inp);
		if (inp->inp_gencnt <= gencnt) {
			struct xinpcb xi;

			bzero(&xi, sizeof(xi));
			xi.xi_len = sizeof xi;
			/* XXX should avoid extra copy */
			bcopy(inp, &xi.xi_inp, sizeof *inp);
			if (inp->inp_socket)
				sotoxsocket(inp->inp_socket, &xi.xi_socket);
			INP_RUNLOCK(inp);
			error = SYSCTL_OUT(req, &xi, sizeof xi);
		} else
			INP_RUNLOCK(inp);
	}
	INP_INFO_WLOCK(&V_ripcbinfo);
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		INP_RLOCK(inp);
		if (!in_pcbrele_rlocked(inp))
			INP_RUNLOCK(inp);
	}
	INP_INFO_WUNLOCK(&V_ripcbinfo);

	if (!error) {
		/*
		 * Give the user an updated idea of our state.  If the
		 * generation differs from what we told her before, she knows
		 * that something happened while we were processing this
		 * request, and it might be necessary to retry.
		 */
		INP_INFO_RLOCK(&V_ripcbinfo);
		xig.xig_gen = V_ripcbinfo.ipi_gencnt;
		xig.xig_sogen = so_gencnt;
		xig.xig_count = V_ripcbinfo.ipi_count;
		INP_INFO_RUNLOCK(&V_ripcbinfo);
		error = SYSCTL_OUT(req, &xig, sizeof xig);
	}
	free(inp_list, M_TEMP);
	return (error);
}

SYSCTL_PROC(_net_inet_raw, OID_AUTO/*XXX*/, pcblist,
    CTLTYPE_OPAQUE | CTLFLAG_RD, NULL, 0,
    rip_pcblist, "S,xinpcb", "List of active raw IP sockets");

#ifdef INET
struct pr_usrreqs rip_usrreqs = {
	.pru_abort =		rip_abort,
	.pru_attach =		rip_attach,
	.pru_bind =		rip_bind,
	.pru_connect =		rip_connect,
	.pru_control =		in_control,
	.pru_detach =		rip_detach,
	.pru_disconnect =	rip_disconnect,
	.pru_peeraddr =		in_getpeeraddr,
	.pru_send =		rip_send,
	.pru_shutdown =		rip_shutdown,
	.pru_sockaddr =		in_getsockaddr,
	.pru_sosetlabel =	in_pcbsosetlabel,
	.pru_close =		rip_close,
};
#endif /* INET */