xref: /freebsd/sys/netinet/raw_ip.c (revision 13ea0450a9c8742119d36f3bf8f47accdce46e54)
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)raw_ip.c	8.7 (Berkeley) 5/15/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"

#include <sys/param.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/rmlock.h>
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <vm/uma.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/route.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip_mroute.h>
#include <netinet/ip_icmp.h>

#include <netipsec/ipsec_support.h>

#include <machine/stdarg.h>
#include <security/mac/mac_framework.h>

VNET_DEFINE(int, ip_defttl) = IPDEFTTL;
SYSCTL_INT(_net_inet_ip, IPCTL_DEFTTL, ttl, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(ip_defttl), 0,
    "Default TTL on outgoing IP packets");

VNET_DEFINE(struct inpcbhead, ripcb);
VNET_DEFINE(struct inpcbinfo, ripcbinfo);

#define	V_ripcb			VNET(ripcb)
#define	V_ripcbinfo		VNET(ripcbinfo)

/*
 * Control and data hooks for ipfw, dummynet, divert and so on.
 * The data hooks are not used here but it is convenient
 * to keep them all in one place.
 */
VNET_DEFINE(ip_fw_chk_ptr_t, ip_fw_chk_ptr) = NULL;
VNET_DEFINE(ip_fw_ctl_ptr_t, ip_fw_ctl_ptr) = NULL;

int	(*ip_dn_ctl_ptr)(struct sockopt *);
int	(*ip_dn_io_ptr)(struct mbuf **, int, struct ip_fw_args *);
void	(*ip_divert_ptr)(struct mbuf *, int);
int	(*ng_ipfw_input_p)(struct mbuf **, int,
			struct ip_fw_args *, int);

#ifdef INET
/*
 * Hooks for multicast routing.  They all default to NULL, so leave them
 * uninitialized and rely on BSS being zeroed.
 */

/*
 * The socket used to communicate with the multicast routing daemon.
 */
VNET_DEFINE(struct socket *, ip_mrouter);

/*
 * The various mrouter and rsvp functions.
 */
int (*ip_mrouter_set)(struct socket *, struct sockopt *);
int (*ip_mrouter_get)(struct socket *, struct sockopt *);
int (*ip_mrouter_done)(void);
int (*ip_mforward)(struct ip *, struct ifnet *, struct mbuf *,
		   struct ip_moptions *);
int (*mrt_ioctl)(u_long, caddr_t, int);
int (*legal_vif_num)(int);
u_long (*ip_mcast_src)(int);

int (*rsvp_input_p)(struct mbuf **, int *, int);
int (*ip_rsvp_vif)(struct socket *, struct sockopt *);
void (*ip_rsvp_force_done)(struct socket *);
#endif /* INET */

extern	struct protosw inetsw[];

u_long	rip_sendspace = 9216;
SYSCTL_ULONG(_net_inet_raw, OID_AUTO, maxdgram, CTLFLAG_RW,
    &rip_sendspace, 0, "Maximum outgoing raw IP datagram size");

u_long	rip_recvspace = 9216;
SYSCTL_ULONG(_net_inet_raw, OID_AUTO, recvspace, CTLFLAG_RW,
    &rip_recvspace, 0, "Maximum space for incoming raw IP datagrams");

/*
 * Hash functions
 */

#define INP_PCBHASH_RAW_SIZE	256
#define INP_PCBHASH_RAW(proto, laddr, faddr, mask) \
        (((proto) + (laddr) + (faddr)) % (mask) + 1)
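/*
 * Fully specified control blocks (non-zero protocol, bound local and
 * foreign addresses) hash into buckets 1..mask; bucket 0 is reserved
 * for control blocks with a wildcard in any of those fields.  See
 * rip_inshash() and the two lookup passes in rip_input().
 */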

#ifdef INET
static void
rip_inshash(struct inpcb *inp)
{
	struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
	struct inpcbhead *pcbhash;
	int hash;

	INP_INFO_WLOCK_ASSERT(pcbinfo);
	INP_WLOCK_ASSERT(inp);

	if (inp->inp_ip_p != 0 &&
	    inp->inp_laddr.s_addr != INADDR_ANY &&
	    inp->inp_faddr.s_addr != INADDR_ANY) {
		hash = INP_PCBHASH_RAW(inp->inp_ip_p, inp->inp_laddr.s_addr,
		    inp->inp_faddr.s_addr, pcbinfo->ipi_hashmask);
	} else
		hash = 0;
	pcbhash = &pcbinfo->ipi_hashbase[hash];
	CK_LIST_INSERT_HEAD(pcbhash, inp, inp_hash);
}

static void
rip_delhash(struct inpcb *inp)
{

	INP_INFO_WLOCK_ASSERT(inp->inp_pcbinfo);
	INP_WLOCK_ASSERT(inp);

	CK_LIST_REMOVE(inp, inp_hash);
}
#endif /* INET */

/*
 * Raw interface to IP protocol.
 */

/*
 * Initialize raw connection block q.
 */
static void
rip_zone_change(void *tag)
{

	uma_zone_set_max(V_ripcbinfo.ipi_zone, maxsockets);
}

static int
rip_inpcb_init(void *mem, int size, int flags)
{
	struct inpcb *inp = mem;

	INP_LOCK_INIT(inp, "inp", "rawinp");
	return (0);
}

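/*
 * Set up the raw IP control block database for this vnet and arrange for
 * the backing zone limit to track changes to maxsockets.
 */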
void
rip_init(void)
{

	in_pcbinfo_init(&V_ripcbinfo, "rip", &V_ripcb, INP_PCBHASH_RAW_SIZE,
	    1, "ripcb", rip_inpcb_init, IPI_HASHFIELDS_NONE);
	EVENTHANDLER_REGISTER(maxsockets_change, rip_zone_change, NULL,
	    EVENTHANDLER_PRI_ANY);
}

#ifdef VIMAGE
static void
rip_destroy(void *unused __unused)
{

	in_pcbinfo_destroy(&V_ripcbinfo);
}
VNET_SYSUNINIT(raw_ip, SI_SUB_PROTO_DOMAIN, SI_ORDER_FOURTH, rip_destroy, NULL);
#endif

#ifdef INET
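/*
 * Deliver a copy of the datagram in 'n' to the socket of the matching
 * control block 'last', subject to IPsec policy, MAC checks and the
 * socket's minimum-TTL filter.  Returns non-zero if the packet was
 * rejected by one of those policies.
 */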
static int
rip_append(struct inpcb *last, struct ip *ip, struct mbuf *n,
    struct sockaddr_in *ripsrc)
{
	int policyfail = 0;

	INP_LOCK_ASSERT(last);

#if defined(IPSEC) || defined(IPSEC_SUPPORT)
	/* check AH/ESP integrity. */
	if (IPSEC_ENABLED(ipv4)) {
		if (IPSEC_CHECK_POLICY(ipv4, n, last) != 0)
			policyfail = 1;
	}
#endif /* IPSEC */
#ifdef MAC
	if (!policyfail && mac_inpcb_check_deliver(last, n) != 0)
		policyfail = 1;
#endif
	/* Check the minimum TTL for socket. */
	if (last->inp_ip_minttl && last->inp_ip_minttl > ip->ip_ttl)
		policyfail = 1;
	if (!policyfail) {
		struct mbuf *opts = NULL;
		struct socket *so;

		so = last->inp_socket;
		if ((last->inp_flags & INP_CONTROLOPTS) ||
		    (so->so_options & (SO_TIMESTAMP | SO_BINTIME)))
			ip_savecontrol(last, &opts, ip, n);
		SOCKBUF_LOCK(&so->so_rcv);
		if (sbappendaddr_locked(&so->so_rcv,
		    (struct sockaddr *)ripsrc, n, opts) == 0) {
			/* should notify about lost packet */
			m_freem(n);
			if (opts)
				m_freem(opts);
			SOCKBUF_UNLOCK(&so->so_rcv);
		} else
			sorwakeup_locked(so);
	} else
		m_freem(n);
	return (policyfail);
}

/*
 * Set up generic address and protocol structures for the raw_input routine,
 * then pass them along with the mbuf chain.
 */
int
rip_input(struct mbuf **mp, int *offp, int proto)
{
	struct ifnet *ifp;
	struct mbuf *m = *mp;
	struct ip *ip = mtod(m, struct ip *);
	struct inpcb *inp, *last;
	struct sockaddr_in ripsrc;
	struct epoch_tracker et;
	int hash;

	*mp = NULL;

	bzero(&ripsrc, sizeof(ripsrc));
	ripsrc.sin_len = sizeof(ripsrc);
	ripsrc.sin_family = AF_INET;
	ripsrc.sin_addr = ip->ip_src;
	last = NULL;

	ifp = m->m_pkthdr.rcvif;

	hash = INP_PCBHASH_RAW(proto, ip->ip_src.s_addr,
	    ip->ip_dst.s_addr, V_ripcbinfo.ipi_hashmask);
	INP_INFO_RLOCK_ET(&V_ripcbinfo, et);
	CK_LIST_FOREACH(inp, &V_ripcbinfo.ipi_hashbase[hash], inp_hash) {
		if (inp->inp_ip_p != proto)
			continue;
#ifdef INET6
		/* XXX inp locking */
		if ((inp->inp_vflag & INP_IPV4) == 0)
			continue;
#endif
		if (inp->inp_laddr.s_addr != ip->ip_dst.s_addr)
			continue;
		if (inp->inp_faddr.s_addr != ip->ip_src.s_addr)
			continue;
		if (last != NULL) {
			struct mbuf *n;

			n = m_copym(m, 0, M_COPYALL, M_NOWAIT);
			if (n != NULL)
				(void) rip_append(last, ip, n, &ripsrc);
			/* XXX count dropped packet */
			INP_RUNLOCK(last);
			last = NULL;
		}
		INP_RLOCK(inp);
		if (__predict_false(inp->inp_flags2 & INP_FREED))
			goto skip_1;
		if (jailed_without_vnet(inp->inp_cred)) {
			/*
			 * XXX: If faddr was bound to multicast group,
			 * jailed raw socket will drop datagram.
			 */
			if (prison_check_ip4(inp->inp_cred, &ip->ip_dst) != 0)
				goto skip_1;
		}
		last = inp;
		continue;
	skip_1:
		INP_RUNLOCK(inp);
	}
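	/*
	 * Second pass: bucket 0 holds control blocks with a wildcard
	 * protocol, local address or foreign address, so match those
	 * against the packet as well.
	 */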
	CK_LIST_FOREACH(inp, &V_ripcbinfo.ipi_hashbase[0], inp_hash) {
		if (inp->inp_ip_p && inp->inp_ip_p != proto)
			continue;
#ifdef INET6
		/* XXX inp locking */
		if ((inp->inp_vflag & INP_IPV4) == 0)
			continue;
#endif
		if (!in_nullhost(inp->inp_laddr) &&
		    !in_hosteq(inp->inp_laddr, ip->ip_dst))
			continue;
		if (!in_nullhost(inp->inp_faddr) &&
		    !in_hosteq(inp->inp_faddr, ip->ip_src))
			continue;
		if (last != NULL) {
			struct mbuf *n;

			n = m_copym(m, 0, M_COPYALL, M_NOWAIT);
			if (n != NULL)
				(void) rip_append(last, ip, n, &ripsrc);
			/* XXX count dropped packet */
			INP_RUNLOCK(last);
			last = NULL;
		}
		INP_RLOCK(inp);
		if (__predict_false(inp->inp_flags2 & INP_FREED))
			goto skip_2;
		if (jailed_without_vnet(inp->inp_cred)) {
			/*
			 * Allow raw socket in jail to receive multicast;
			 * assume process had PRIV_NETINET_RAW at attach,
			 * and fall through into normal filter path if so.
			 */
			if (!IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) &&
			    prison_check_ip4(inp->inp_cred, &ip->ip_dst) != 0)
				goto skip_2;
		}
		/*
		 * If this raw socket has multicast state, and we
		 * have received a multicast, check if this socket
		 * should receive it, as multicast filtering is now
		 * the responsibility of the transport layer.
		 */
		if (inp->inp_moptions != NULL &&
		    IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
			/*
			 * If the incoming datagram is for IGMP, allow it
			 * through unconditionally to the raw socket.
			 *
			 * In the case of IGMPv2, we may not have explicitly
			 * joined the group, and may have set IFF_ALLMULTI
			 * on the interface. imo_multi_filter() may discard
			 * control traffic we actually need to see.
			 *
			 * Userland multicast routing daemons should continue
			 * to filter the control traffic appropriately.
			 */
			int blocked;

			blocked = MCAST_PASS;
			if (proto != IPPROTO_IGMP) {
				struct sockaddr_in group;

				bzero(&group, sizeof(struct sockaddr_in));
				group.sin_len = sizeof(struct sockaddr_in);
				group.sin_family = AF_INET;
				group.sin_addr = ip->ip_dst;

				blocked = imo_multi_filter(inp->inp_moptions,
				    ifp,
				    (struct sockaddr *)&group,
				    (struct sockaddr *)&ripsrc);
			}

			if (blocked != MCAST_PASS) {
				IPSTAT_INC(ips_notmember);
				goto skip_2;
			}
		}
		last = inp;
		continue;
	skip_2:
		INP_RUNLOCK(inp);
	}
	INP_INFO_RUNLOCK_ET(&V_ripcbinfo, et);
	if (last != NULL) {
		if (rip_append(last, ip, m, &ripsrc) != 0)
			IPSTAT_INC(ips_delivered);
		INP_RUNLOCK(last);
	} else {
		if (inetsw[ip_protox[ip->ip_p]].pr_input == rip_input) {
			IPSTAT_INC(ips_noproto);
			IPSTAT_DEC(ips_delivered);
			icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_PROTOCOL, 0, 0);
		} else {
			m_freem(m);
		}
	}
	return (IPPROTO_DONE);
}

/*
 * Generate an IP header and pass the packet to ip_output.  Tack on options
 * the user may have set up with a control call.
 */
int
rip_output(struct mbuf *m, struct socket *so, ...)
{
	struct ip *ip;
	int error;
	struct inpcb *inp = sotoinpcb(so);
	va_list ap;
	u_long dst;
	int flags = ((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0) |
	    IP_ALLOWBROADCAST;
	int cnt;
	u_char opttype, optlen, *cp;

	va_start(ap, so);
	dst = va_arg(ap, u_long);
	va_end(ap);

	/*
	 * If the user handed us a complete IP packet, use it.  Otherwise,
	 * allocate an mbuf for a header and fill it in.
	 */
	if ((inp->inp_flags & INP_HDRINCL) == 0) {
		if (m->m_pkthdr.len + sizeof(struct ip) > IP_MAXPACKET) {
			m_freem(m);
			return (EMSGSIZE);
		}
		M_PREPEND(m, sizeof(struct ip), M_NOWAIT);
		if (m == NULL)
			return (ENOBUFS);

		INP_RLOCK(inp);
		ip = mtod(m, struct ip *);
		ip->ip_tos = inp->inp_ip_tos;
		if (inp->inp_flags & INP_DONTFRAG)
			ip->ip_off = htons(IP_DF);
		else
			ip->ip_off = htons(0);
		ip->ip_p = inp->inp_ip_p;
		ip->ip_len = htons(m->m_pkthdr.len);
		ip->ip_src = inp->inp_laddr;
		ip->ip_dst.s_addr = dst;
		if (jailed(inp->inp_cred)) {
			/*
			 * prison_local_ip4() would be good enough but would
			 * let a source of INADDR_ANY pass, which we do not
			 * want to see from jails.
			 */
			if (ip->ip_src.s_addr == INADDR_ANY) {
				error = in_pcbladdr(inp, &ip->ip_dst, &ip->ip_src,
				    inp->inp_cred);
			} else {
				error = prison_local_ip4(inp->inp_cred,
				    &ip->ip_src);
			}
			if (error != 0) {
				INP_RUNLOCK(inp);
				m_freem(m);
				return (error);
			}
		}
		ip->ip_ttl = inp->inp_ip_ttl;
	} else {
		if (m->m_pkthdr.len > IP_MAXPACKET) {
			m_freem(m);
			return (EMSGSIZE);
		}
		INP_RLOCK(inp);
		ip = mtod(m, struct ip *);
		error = prison_check_ip4(inp->inp_cred, &ip->ip_src);
		if (error != 0) {
			INP_RUNLOCK(inp);
			m_freem(m);
			return (error);
		}

		/*
		 * Don't allow both user-specified and setsockopt options,
		 * and don't allow packet lengths that will crash.
		 */
		if (((ip->ip_hl != (sizeof (*ip) >> 2)) && inp->inp_options)
		    || (ntohs(ip->ip_len) != m->m_pkthdr.len)
		    || (ntohs(ip->ip_len) < (ip->ip_hl << 2))) {
			INP_RUNLOCK(inp);
			m_freem(m);
			return (EINVAL);
		}
		/*
		 * Don't allow IP options which do not have the required
		 * structure as specified in section 3.1 of RFC 791 on
		 * pages 15-23.
		 */
		cp = (u_char *)(ip + 1);
		cnt = (ip->ip_hl << 2) - sizeof (struct ip);
		for (; cnt > 0; cnt -= optlen, cp += optlen) {
			opttype = cp[IPOPT_OPTVAL];
			if (opttype == IPOPT_EOL)
				break;
			if (opttype == IPOPT_NOP) {
				optlen = 1;
				continue;
			}
			if (cnt < IPOPT_OLEN + sizeof(u_char)) {
				INP_RUNLOCK(inp);
				m_freem(m);
				return (EINVAL);
			}
			optlen = cp[IPOPT_OLEN];
			if (optlen < IPOPT_OLEN + sizeof(u_char) ||
			    optlen > cnt) {
				INP_RUNLOCK(inp);
				m_freem(m);
				return (EINVAL);
			}
		}
		/*
		 * This doesn't allow the application to specify an ID of zero,
		 * but that limitation has been with us from the beginning.
		 */
		if (ip->ip_id == 0)
			ip_fillid(ip);

		/*
		 * XXX prevent ip_output from overwriting header fields.
		 */
		flags |= IP_RAWOUTPUT;
		IPSTAT_INC(ips_rawout);
	}

	if (inp->inp_flags & INP_ONESBCAST)
		flags |= IP_SENDONES;

#ifdef MAC
	mac_inpcb_create_mbuf(inp, m);
#endif

	error = ip_output(m, inp->inp_options, NULL, flags,
	    inp->inp_moptions, inp);
	INP_RUNLOCK(inp);
	return (error);
}

/*
 * Raw IP socket option processing.
 *
 * IMPORTANT NOTE regarding access control: Traditionally, raw sockets could
 * only be created by a privileged process, and as such, socket option
 * operations to manage system properties on any raw socket were allowed to
 * take place without explicit additional access control checks.  However,
 * raw sockets can now also be created in jail(), and therefore explicit
 * checks are now required.  Likewise, raw sockets can be used by a process
 * after it gives up privilege, so some caution is required.  For options
 * passed down to the IP layer via ip_ctloutput(), checks are assumed to be
 * performed in ip_ctloutput() and therefore no check occurs here.
 * Unilaterally checking priv_check() here breaks normal IP socket option
 * operations on raw sockets.
 *
 * When adding new socket options here, make sure to add access control
 * checks here as necessary.
 *
 * XXX-BZ inp locking?
 */
int
rip_ctloutput(struct socket *so, struct sockopt *sopt)
{
	struct	inpcb *inp = sotoinpcb(so);
	int	error, optval;

	if (sopt->sopt_level != IPPROTO_IP) {
		if ((sopt->sopt_level == SOL_SOCKET) &&
		    (sopt->sopt_name == SO_SETFIB)) {
			inp->inp_inc.inc_fibnum = so->so_fibnum;
			return (0);
		}
		return (EINVAL);
	}

	error = 0;
	switch (sopt->sopt_dir) {
	case SOPT_GET:
		switch (sopt->sopt_name) {
		case IP_HDRINCL:
			optval = inp->inp_flags & INP_HDRINCL;
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;

		case IP_FW3:	/* generic ipfw v.3 functions */
		case IP_FW_ADD:	/* ADD actually returns the body... */
		case IP_FW_GET:
		case IP_FW_TABLE_GETSIZE:
		case IP_FW_TABLE_LIST:
		case IP_FW_NAT_GET_CONFIG:
		case IP_FW_NAT_GET_LOG:
			if (V_ip_fw_ctl_ptr != NULL)
				error = V_ip_fw_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

		case IP_DUMMYNET3:	/* generic dummynet v.3 functions */
		case IP_DUMMYNET_GET:
			if (ip_dn_ctl_ptr != NULL)
				error = ip_dn_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

		case MRT_INIT:
		case MRT_DONE:
		case MRT_ADD_VIF:
		case MRT_DEL_VIF:
		case MRT_ADD_MFC:
		case MRT_DEL_MFC:
		case MRT_VERSION:
		case MRT_ASSERT:
		case MRT_API_SUPPORT:
		case MRT_API_CONFIG:
		case MRT_ADD_BW_UPCALL:
		case MRT_DEL_BW_UPCALL:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_mrouter_get ? ip_mrouter_get(so, sopt) :
				EOPNOTSUPP;
			break;

		default:
			error = ip_ctloutput(so, sopt);
			break;
		}
		break;

	case SOPT_SET:
		switch (sopt->sopt_name) {
		case IP_HDRINCL:
			error = sooptcopyin(sopt, &optval, sizeof optval,
					    sizeof optval);
			if (error)
				break;
			if (optval)
				inp->inp_flags |= INP_HDRINCL;
			else
				inp->inp_flags &= ~INP_HDRINCL;
			break;

		case IP_FW3:	/* generic ipfw v.3 functions */
		case IP_FW_ADD:
		case IP_FW_DEL:
		case IP_FW_FLUSH:
		case IP_FW_ZERO:
		case IP_FW_RESETLOG:
		case IP_FW_TABLE_ADD:
		case IP_FW_TABLE_DEL:
		case IP_FW_TABLE_FLUSH:
		case IP_FW_NAT_CFG:
		case IP_FW_NAT_DEL:
			if (V_ip_fw_ctl_ptr != NULL)
				error = V_ip_fw_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

		case IP_DUMMYNET3:	/* generic dummynet v.3 functions */
		case IP_DUMMYNET_CONFIGURE:
		case IP_DUMMYNET_DEL:
		case IP_DUMMYNET_FLUSH:
			if (ip_dn_ctl_ptr != NULL)
				error = ip_dn_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

		case IP_RSVP_ON:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_rsvp_init(so);
			break;

		case IP_RSVP_OFF:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_rsvp_done();
			break;

		case IP_RSVP_VIF_ON:
		case IP_RSVP_VIF_OFF:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_rsvp_vif ?
				ip_rsvp_vif(so, sopt) : EINVAL;
			break;

		case MRT_INIT:
		case MRT_DONE:
		case MRT_ADD_VIF:
		case MRT_DEL_VIF:
		case MRT_ADD_MFC:
		case MRT_DEL_MFC:
		case MRT_VERSION:
		case MRT_ASSERT:
		case MRT_API_SUPPORT:
		case MRT_API_CONFIG:
		case MRT_ADD_BW_UPCALL:
		case MRT_DEL_BW_UPCALL:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_mrouter_set ? ip_mrouter_set(so, sopt) :
					EOPNOTSUPP;
			break;

		default:
			error = ip_ctloutput(so, sopt);
			break;
		}
		break;
	}

	return (error);
}

/*
 * This function exists solely to receive the PRC_IFDOWN messages which are
 * sent by if_down().  It looks for an ifaddr whose ifa_addr is sa, and calls
 * in_ifadown() to remove all routes corresponding to that address.  It also
 * receives the PRC_IFUP messages from if_up() and reinstalls the interface
 * routes.
 */
void
rip_ctlinput(int cmd, struct sockaddr *sa, void *vip)
{
	struct rm_priotracker in_ifa_tracker;
	struct in_ifaddr *ia;
	struct ifnet *ifp;
	int err;
	int flags;

	switch (cmd) {
	case PRC_IFDOWN:
		IN_IFADDR_RLOCK(&in_ifa_tracker);
		CK_STAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
			if (ia->ia_ifa.ifa_addr == sa
			    && (ia->ia_flags & IFA_ROUTE)) {
				ifa_ref(&ia->ia_ifa);
				IN_IFADDR_RUNLOCK(&in_ifa_tracker);
				/*
				 * in_scrubprefix() kills the interface route.
				 */
				in_scrubprefix(ia, 0);
				/*
				 * in_ifadown gets rid of all the rest of the
				 * routes.  This is not quite the right thing
				 * to do, but at least if we are running a
				 * routing process they will come back.
				 */
				in_ifadown(&ia->ia_ifa, 0);
				ifa_free(&ia->ia_ifa);
				break;
			}
		}
		if (ia == NULL)		/* If ia matched, already unlocked. */
			IN_IFADDR_RUNLOCK(&in_ifa_tracker);
		break;

	case PRC_IFUP:
		IN_IFADDR_RLOCK(&in_ifa_tracker);
		CK_STAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
			if (ia->ia_ifa.ifa_addr == sa)
				break;
		}
		if (ia == NULL || (ia->ia_flags & IFA_ROUTE)) {
			IN_IFADDR_RUNLOCK(&in_ifa_tracker);
			return;
		}
		ifa_ref(&ia->ia_ifa);
		IN_IFADDR_RUNLOCK(&in_ifa_tracker);
		flags = RTF_UP;
		ifp = ia->ia_ifa.ifa_ifp;

		if ((ifp->if_flags & IFF_LOOPBACK)
		    || (ifp->if_flags & IFF_POINTOPOINT))
			flags |= RTF_HOST;

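		/*
		 * Remove and re-add the loopback route for the address
		 * around reinstalling its interface route.
		 */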
		err = ifa_del_loopback_route((struct ifaddr *)ia, sa);

		err = rtinit(&ia->ia_ifa, RTM_ADD, flags);
		if (err == 0)
			ia->ia_flags |= IFA_ROUTE;

		err = ifa_add_loopback_route((struct ifaddr *)ia, sa);

		ifa_free(&ia->ia_ifa);
		break;
	}
}

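/*
 * Allocate and initialize a control block for a new raw socket.  Creation
 * requires the PRIV_NETINET_RAW privilege.
 */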
static int
rip_attach(struct socket *so, int proto, struct thread *td)
{
	struct inpcb *inp;
	int error;

	inp = sotoinpcb(so);
	KASSERT(inp == NULL, ("rip_attach: inp != NULL"));

	error = priv_check(td, PRIV_NETINET_RAW);
	if (error)
		return (error);
	if (proto >= IPPROTO_MAX || proto < 0)
		return (EPROTONOSUPPORT);
	error = soreserve(so, rip_sendspace, rip_recvspace);
	if (error)
		return (error);
	INP_INFO_WLOCK(&V_ripcbinfo);
	error = in_pcballoc(so, &V_ripcbinfo);
	if (error) {
		INP_INFO_WUNLOCK(&V_ripcbinfo);
		return (error);
	}
	inp = (struct inpcb *)so->so_pcb;
	inp->inp_vflag |= INP_IPV4;
	inp->inp_ip_p = proto;
	inp->inp_ip_ttl = V_ip_defttl;
	rip_inshash(inp);
	INP_INFO_WUNLOCK(&V_ripcbinfo);
	INP_WUNLOCK(inp);
	return (0);
}

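/*
 * Tear down a raw socket's control block.  If the socket was being used
 * for multicast routing or RSVP, shut those down first.
 */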
static void
rip_detach(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_detach: inp == NULL"));
	KASSERT(inp->inp_faddr.s_addr == INADDR_ANY,
	    ("rip_detach: not closed"));

	INP_INFO_WLOCK(&V_ripcbinfo);
	INP_WLOCK(inp);
	rip_delhash(inp);
	if (so == V_ip_mrouter && ip_mrouter_done)
		ip_mrouter_done();
	if (ip_rsvp_force_done)
		ip_rsvp_force_done(so);
	if (so == V_ip_rsvpd)
		ip_rsvp_done();
	in_pcbdetach(inp);
	in_pcbfree(inp);
	INP_INFO_WUNLOCK(&V_ripcbinfo);
}

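/*
 * Common helper for abort, close and disconnect: clear the foreign
 * address, move the control block back to the wildcard hash bucket and
 * mark the socket as no longer connected.
 */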
static void
rip_dodisconnect(struct socket *so, struct inpcb *inp)
{
	struct inpcbinfo *pcbinfo;

	pcbinfo = inp->inp_pcbinfo;
	INP_INFO_WLOCK(pcbinfo);
	INP_WLOCK(inp);
	rip_delhash(inp);
	inp->inp_faddr.s_addr = INADDR_ANY;
	rip_inshash(inp);
	SOCK_LOCK(so);
	so->so_state &= ~SS_ISCONNECTED;
	SOCK_UNLOCK(so);
	INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(pcbinfo);
}

static void
rip_abort(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_abort: inp == NULL"));

	rip_dodisconnect(so, inp);
}

static void
rip_close(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_close: inp == NULL"));

	rip_dodisconnect(so, inp);
}

static int
rip_disconnect(struct socket *so)
{
	struct inpcb *inp;

	if ((so->so_state & SS_ISCONNECTED) == 0)
		return (ENOTCONN);

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_disconnect: inp == NULL"));

	rip_dodisconnect(so, inp);
	return (0);
}

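/*
 * Bind the raw socket to a local address.  Unless INP_BINDANY is set, a
 * non-zero address must be configured on one of the system's interfaces.
 */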
static int
rip_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct sockaddr_in *addr = (struct sockaddr_in *)nam;
	struct inpcb *inp;
	int error;

	if (nam->sa_len != sizeof(*addr))
		return (EINVAL);

	error = prison_check_ip4(td->td_ucred, &addr->sin_addr);
	if (error != 0)
		return (error);

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_bind: inp == NULL"));

	if (CK_STAILQ_EMPTY(&V_ifnet) ||
	    (addr->sin_family != AF_INET && addr->sin_family != AF_IMPLINK) ||
	    (addr->sin_addr.s_addr &&
	     (inp->inp_flags & INP_BINDANY) == 0 &&
	     ifa_ifwithaddr_check((struct sockaddr *)addr) == 0))
		return (EADDRNOTAVAIL);

	INP_INFO_WLOCK(&V_ripcbinfo);
	INP_WLOCK(inp);
	rip_delhash(inp);
	inp->inp_laddr = addr->sin_addr;
	rip_inshash(inp);
	INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(&V_ripcbinfo);
	return (0);
}

static int
rip_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct sockaddr_in *addr = (struct sockaddr_in *)nam;
	struct inpcb *inp;

	if (nam->sa_len != sizeof(*addr))
		return (EINVAL);
	if (CK_STAILQ_EMPTY(&V_ifnet))
		return (EADDRNOTAVAIL);
	if (addr->sin_family != AF_INET && addr->sin_family != AF_IMPLINK)
		return (EAFNOSUPPORT);

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_connect: inp == NULL"));

	INP_INFO_WLOCK(&V_ripcbinfo);
	INP_WLOCK(inp);
	rip_delhash(inp);
	inp->inp_faddr = addr->sin_addr;
	rip_inshash(inp);
	soisconnected(so);
	INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(&V_ripcbinfo);
	return (0);
}

static int
rip_shutdown(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_shutdown: inp == NULL"));

	INP_WLOCK(inp);
	socantsendmore(so);
	INP_WUNLOCK(inp);
	return (0);
}

static int
rip_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
    struct mbuf *control, struct thread *td)
{
	struct inpcb *inp;
	u_long dst;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_send: inp == NULL"));

	/*
	 * Note: 'dst' reads below are unlocked.
	 */
	if (so->so_state & SS_ISCONNECTED) {
		if (nam) {
			m_freem(m);
			return (EISCONN);
		}
		dst = inp->inp_faddr.s_addr;	/* Unlocked read. */
	} else {
		if (nam == NULL) {
			m_freem(m);
			return (ENOTCONN);
		}
		dst = ((struct sockaddr_in *)nam)->sin_addr.s_addr;
	}
	return (rip_output(m, so, dst));
}
#endif /* INET */

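/*
 * Export the list of raw IP control blocks to userland via sysctl, as a
 * stream of struct xinpcb entries bracketed by two struct xinpgen
 * snapshots.
 */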
static int
rip_pcblist(SYSCTL_HANDLER_ARGS)
{
	int error, i, n;
	struct inpcb *inp, **inp_list;
	inp_gen_t gencnt;
	struct xinpgen xig;
	struct epoch_tracker et;

	/*
	 * The process of preparing the PCB list is too time-consuming and
	 * resource-intensive to repeat on every request.
	 */
	if (req->oldptr == 0) {
		n = V_ripcbinfo.ipi_count;
		n += imax(n / 8, 10);
		req->oldidx = 2 * (sizeof xig) + n * sizeof(struct xinpcb);
		return (0);
	}

	if (req->newptr != 0)
		return (EPERM);

	/*
	 * OK, now we're committed to doing something.
	 */
	INP_INFO_WLOCK(&V_ripcbinfo);
	gencnt = V_ripcbinfo.ipi_gencnt;
	n = V_ripcbinfo.ipi_count;
	INP_INFO_WUNLOCK(&V_ripcbinfo);

	bzero(&xig, sizeof(xig));
	xig.xig_len = sizeof xig;
	xig.xig_count = n;
	xig.xig_gen = gencnt;
	xig.xig_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xig, sizeof xig);
	if (error)
		return (error);

	inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);

	INP_INFO_RLOCK_ET(&V_ripcbinfo, et);
	for (inp = CK_LIST_FIRST(V_ripcbinfo.ipi_listhead), i = 0; inp && i < n;
	     inp = CK_LIST_NEXT(inp, inp_list)) {
		INP_WLOCK(inp);
		if (inp->inp_gencnt <= gencnt &&
		    cr_canseeinpcb(req->td->td_ucred, inp) == 0) {
			in_pcbref(inp);
			inp_list[i++] = inp;
		}
		INP_WUNLOCK(inp);
	}
	INP_INFO_RUNLOCK_ET(&V_ripcbinfo, et);
	n = i;

	error = 0;
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		INP_RLOCK(inp);
		if (inp->inp_gencnt <= gencnt) {
			struct xinpcb xi;

			in_pcbtoxinpcb(inp, &xi);
			INP_RUNLOCK(inp);
			error = SYSCTL_OUT(req, &xi, sizeof xi);
		} else
			INP_RUNLOCK(inp);
	}
	INP_INFO_WLOCK(&V_ripcbinfo);
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		INP_RLOCK(inp);
		if (!in_pcbrele_rlocked(inp))
			INP_RUNLOCK(inp);
	}
	INP_INFO_WUNLOCK(&V_ripcbinfo);

	if (!error) {
		struct epoch_tracker et;
		/*
		 * Give the user an updated idea of our state.  If the
		 * generation differs from what we told her before, she knows
		 * that something happened while we were processing this
		 * request, and it might be necessary to retry.
		 */
		INP_INFO_RLOCK_ET(&V_ripcbinfo, et);
		xig.xig_gen = V_ripcbinfo.ipi_gencnt;
		xig.xig_sogen = so_gencnt;
		xig.xig_count = V_ripcbinfo.ipi_count;
		INP_INFO_RUNLOCK_ET(&V_ripcbinfo, et);
		error = SYSCTL_OUT(req, &xig, sizeof xig);
	}
	free(inp_list, M_TEMP);
	return (error);
}

SYSCTL_PROC(_net_inet_raw, OID_AUTO/*XXX*/, pcblist,
    CTLTYPE_OPAQUE | CTLFLAG_RD, NULL, 0,
    rip_pcblist, "S,xinpcb", "List of active raw IP sockets");

#ifdef INET
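/*
 * User-request handlers for raw IPv4 sockets, exported for use by the
 * inet protocol switch.
 */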
struct pr_usrreqs rip_usrreqs = {
	.pru_abort =		rip_abort,
	.pru_attach =		rip_attach,
	.pru_bind =		rip_bind,
	.pru_connect =		rip_connect,
	.pru_control =		in_control,
	.pru_detach =		rip_detach,
	.pru_disconnect =	rip_disconnect,
	.pru_peeraddr =		in_getpeeraddr,
	.pru_send =		rip_send,
	.pru_shutdown =		rip_shutdown,
	.pru_sockaddr =		in_getsockaddr,
	.pru_sosetlabel =	in_pcbsosetlabel,
	.pru_close =		rip_close,
};
#endif /* INET */