xref: /freebsd/sys/netinet/raw_ip.c (revision 2ef9ff7dd34a78a7890ba4d6de64da34d9c10942)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 1982, 1986, 1988, 1993
5  *	The Regents of the University of California.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the name of the University nor the names of its contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  *	@(#)raw_ip.c	8.7 (Berkeley) 5/15/95
33  */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include "opt_inet.h"
39 #include "opt_inet6.h"
40 #include "opt_ipsec.h"
41 
42 #include <sys/param.h>
43 #include <sys/jail.h>
44 #include <sys/kernel.h>
45 #include <sys/eventhandler.h>
46 #include <sys/lock.h>
47 #include <sys/malloc.h>
48 #include <sys/mbuf.h>
49 #include <sys/priv.h>
50 #include <sys/proc.h>
51 #include <sys/protosw.h>
52 #include <sys/rmlock.h>
53 #include <sys/rwlock.h>
54 #include <sys/signalvar.h>
55 #include <sys/socket.h>
56 #include <sys/socketvar.h>
57 #include <sys/sx.h>
58 #include <sys/sysctl.h>
59 #include <sys/systm.h>
60 
61 #include <vm/uma.h>
62 
63 #include <net/if.h>
64 #include <net/if_var.h>
65 #include <net/route.h>
66 #include <net/vnet.h>
67 
68 #include <netinet/in.h>
69 #include <netinet/in_systm.h>
70 #include <netinet/in_pcb.h>
71 #include <netinet/in_var.h>
72 #include <netinet/if_ether.h>
73 #include <netinet/ip.h>
74 #include <netinet/ip_var.h>
75 #include <netinet/ip_mroute.h>
76 #include <netinet/ip_icmp.h>
77 
78 #include <netipsec/ipsec_support.h>
79 
80 #include <machine/stdarg.h>
81 #include <security/mac/mac_framework.h>
82 
83 VNET_DEFINE(int, ip_defttl) = IPDEFTTL;
84 SYSCTL_INT(_net_inet_ip, IPCTL_DEFTTL, ttl, CTLFLAG_VNET | CTLFLAG_RW,
85     &VNET_NAME(ip_defttl), 0,
86     "Default TTL on outgoing IP packets");
87 
88 VNET_DEFINE(struct inpcbhead, ripcb);
89 VNET_DEFINE(struct inpcbinfo, ripcbinfo);
90 
91 #define	V_ripcb			VNET(ripcb)
92 #define	V_ripcbinfo		VNET(ripcbinfo)
93 
94 /*
95  * Control and data hooks for ipfw, dummynet, divert and so on.
96  * The data hooks are not used here but it is convenient
97  * to keep them all in one place.
98  */
99 VNET_DEFINE(ip_fw_chk_ptr_t, ip_fw_chk_ptr) = NULL;
100 VNET_DEFINE(ip_fw_ctl_ptr_t, ip_fw_ctl_ptr) = NULL;
101 
102 int	(*ip_dn_ctl_ptr)(struct sockopt *);
103 int	(*ip_dn_io_ptr)(struct mbuf **, struct ip_fw_args *);
104 void	(*ip_divert_ptr)(struct mbuf *, bool);
105 int	(*ng_ipfw_input_p)(struct mbuf **, struct ip_fw_args *, bool);
106 
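/*
 * A hedged sketch of how these hooks are meant to be used: they stay NULL
 * until the corresponding optional module (ipfw, dummynet, ipdivert,
 * ng_ipfw) is loaded and publishes its handler.  The names below
 * (my_dn_ctl) are purely illustrative, not the real registration code:
 *
 *	static int
 *	my_dn_ctl(struct sockopt *sopt)
 *	{
 *		return (ENOPROTOOPT);	// decode sopt->sopt_name here
 *	}
 *
 *	ip_dn_ctl_ptr = my_dn_ctl;	// publish the hook on MOD_LOAD
 *	...
 *	ip_dn_ctl_ptr = NULL;		// clear it again on MOD_UNLOAD
 *
 * rip_ctloutput() below only tests the control pointers for NULL before
 * dispatching the matching socket options to them.
 */
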
107 #ifdef INET
108 /*
109  * Hooks for multicast routing.  They all default to NULL, so leave them
110  * uninitialized and rely on the BSS being zeroed.
111  */
112 
113 /*
114  * The socket used to communicate with the multicast routing daemon.
115  */
116 VNET_DEFINE(struct socket *, ip_mrouter);
117 
118 /*
119  * The various mrouter and rsvp functions.
120  */
121 int (*ip_mrouter_set)(struct socket *, struct sockopt *);
122 int (*ip_mrouter_get)(struct socket *, struct sockopt *);
123 int (*ip_mrouter_done)(void);
124 int (*ip_mforward)(struct ip *, struct ifnet *, struct mbuf *,
125 		   struct ip_moptions *);
126 int (*mrt_ioctl)(u_long, caddr_t, int);
127 int (*legal_vif_num)(int);
128 u_long (*ip_mcast_src)(int);
129 
130 int (*rsvp_input_p)(struct mbuf **, int *, int);
131 int (*ip_rsvp_vif)(struct socket *, struct sockopt *);
132 void (*ip_rsvp_force_done)(struct socket *);
133 #endif /* INET */
134 
135 extern	struct protosw inetsw[];
136 
137 u_long	rip_sendspace = 9216;
138 SYSCTL_ULONG(_net_inet_raw, OID_AUTO, maxdgram, CTLFLAG_RW,
139     &rip_sendspace, 0, "Maximum outgoing raw IP datagram size");
140 
141 u_long	rip_recvspace = 9216;
142 SYSCTL_ULONG(_net_inet_raw, OID_AUTO, recvspace, CTLFLAG_RW,
143     &rip_recvspace, 0, "Maximum space for incoming raw IP datagrams");
144 
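/*
 * These two limits are passed to soreserve() in rip_attach() below, so they
 * bound the send and receive socket buffer sizes of every raw IP socket.
 * Both can be tuned at run time through the sysctl MIB entries declared
 * here, net.inet.raw.maxdgram and net.inet.raw.recvspace.
 */
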
145 /*
146  * Hash functions
147  */
148 
149 #define INP_PCBHASH_RAW_SIZE	256
150 #define INP_PCBHASH_RAW(proto, laddr, faddr, mask) \
151         (((proto) + (laddr) + (faddr)) % (mask) + 1)
152 
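/*
 * A worked illustration of the raw PCB hash: with INP_PCBHASH_RAW_SIZE of
 * 256 the hash mask is 255, so
 *
 *	INP_PCBHASH_RAW(proto, laddr, faddr, 255)
 *	    = ((proto + laddr + faddr) % 255) + 1
 *
 * always lands in a bucket in the range 1..255 and can never produce
 * bucket 0.  rip_inshash() below therefore reserves bucket 0 for sockets
 * that are not fully specified (wildcard protocol, local or foreign
 * address), and rip_input() scans the computed bucket first and bucket 0
 * second.
 */
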
153 #ifdef INET
154 static void
155 rip_inshash(struct inpcb *inp)
156 {
157 	struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
158 	struct inpcbhead *pcbhash;
159 	int hash;
160 
161 	INP_INFO_WLOCK_ASSERT(pcbinfo);
162 	INP_WLOCK_ASSERT(inp);
163 
164 	if (inp->inp_ip_p != 0 &&
165 	    inp->inp_laddr.s_addr != INADDR_ANY &&
166 	    inp->inp_faddr.s_addr != INADDR_ANY) {
167 		hash = INP_PCBHASH_RAW(inp->inp_ip_p, inp->inp_laddr.s_addr,
168 		    inp->inp_faddr.s_addr, pcbinfo->ipi_hashmask);
169 	} else
170 		hash = 0;
171 	pcbhash = &pcbinfo->ipi_hashbase[hash];
172 	CK_LIST_INSERT_HEAD(pcbhash, inp, inp_hash);
173 }
174 
175 static void
176 rip_delhash(struct inpcb *inp)
177 {
178 
179 	INP_INFO_WLOCK_ASSERT(inp->inp_pcbinfo);
180 	INP_WLOCK_ASSERT(inp);
181 
182 	CK_LIST_REMOVE(inp, inp_hash);
183 }
184 #endif /* INET */
185 
186 /*
187  * Raw interface to IP protocol.
188  */
189 
190 /*
191  * Initialize the raw connection block queue.
192  */
193 static void
194 rip_zone_change(void *tag)
195 {
196 
197 	uma_zone_set_max(V_ripcbinfo.ipi_zone, maxsockets);
198 }
199 
200 static int
201 rip_inpcb_init(void *mem, int size, int flags)
202 {
203 	struct inpcb *inp = mem;
204 
205 	INP_LOCK_INIT(inp, "inp", "rawinp");
206 	return (0);
207 }
208 
209 void
210 rip_init(void)
211 {
212 
213 	in_pcbinfo_init(&V_ripcbinfo, "rip", &V_ripcb, INP_PCBHASH_RAW_SIZE,
214 	    1, "ripcb", rip_inpcb_init, IPI_HASHFIELDS_NONE);
215 	EVENTHANDLER_REGISTER(maxsockets_change, rip_zone_change, NULL,
216 	    EVENTHANDLER_PRI_ANY);
217 }
218 
219 #ifdef VIMAGE
220 static void
221 rip_destroy(void *unused __unused)
222 {
223 
224 	in_pcbinfo_destroy(&V_ripcbinfo);
225 }
226 VNET_SYSUNINIT(raw_ip, SI_SUB_PROTO_DOMAIN, SI_ORDER_FOURTH, rip_destroy, NULL);
227 #endif
228 
229 #ifdef INET
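/*
 * Deliver a copy of a datagram (with its IP header still attached) to the
 * raw socket owned by 'last', applying the IPsec, MAC and minimum-TTL
 * policies first.  Returns non-zero when the datagram was rejected by one
 * of those policies; the mbuf is consumed in every case (a full socket
 * buffer also drops the copy, but still returns zero).
 */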
230 static int
231 rip_append(struct inpcb *last, struct ip *ip, struct mbuf *n,
232     struct sockaddr_in *ripsrc)
233 {
234 	int policyfail = 0;
235 
236 	INP_LOCK_ASSERT(last);
237 
238 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
239 	/* check AH/ESP integrity. */
240 	if (IPSEC_ENABLED(ipv4)) {
241 		if (IPSEC_CHECK_POLICY(ipv4, n, last) != 0)
242 			policyfail = 1;
243 	}
244 #endif /* IPSEC */
245 #ifdef MAC
246 	if (!policyfail && mac_inpcb_check_deliver(last, n) != 0)
247 		policyfail = 1;
248 #endif
249 	/* Check the minimum TTL for the socket. */
250 	if (last->inp_ip_minttl && last->inp_ip_minttl > ip->ip_ttl)
251 		policyfail = 1;
252 	if (!policyfail) {
253 		struct mbuf *opts = NULL;
254 		struct socket *so;
255 
256 		so = last->inp_socket;
257 		if ((last->inp_flags & INP_CONTROLOPTS) ||
258 		    (so->so_options & (SO_TIMESTAMP | SO_BINTIME)))
259 			ip_savecontrol(last, &opts, ip, n);
260 		SOCKBUF_LOCK(&so->so_rcv);
261 		if (sbappendaddr_locked(&so->so_rcv,
262 		    (struct sockaddr *)ripsrc, n, opts) == 0) {
263 			/* should notify about lost packet */
264 			m_freem(n);
265 			if (opts)
266 				m_freem(opts);
267 			SOCKBUF_UNLOCK(&so->so_rcv);
268 		} else
269 			sorwakeup_locked(so);
270 	} else
271 		m_freem(n);
272 	return (policyfail);
273 }
274 
275 /*
276  * Set up generic address and protocol structures for the raw_input routine,
277  * then pass them along with the mbuf chain.
278  */
279 int
280 rip_input(struct mbuf **mp, int *offp, int proto)
281 {
282 	struct ifnet *ifp;
283 	struct mbuf *m = *mp;
284 	struct ip *ip = mtod(m, struct ip *);
285 	struct inpcb *inp, *last;
286 	struct sockaddr_in ripsrc;
287 	struct epoch_tracker et;
288 	int hash;
289 
290 	*mp = NULL;
291 
292 	bzero(&ripsrc, sizeof(ripsrc));
293 	ripsrc.sin_len = sizeof(ripsrc);
294 	ripsrc.sin_family = AF_INET;
295 	ripsrc.sin_addr = ip->ip_src;
296 	last = NULL;
297 
298 	ifp = m->m_pkthdr.rcvif;
299 
300 	hash = INP_PCBHASH_RAW(proto, ip->ip_src.s_addr,
301 	    ip->ip_dst.s_addr, V_ripcbinfo.ipi_hashmask);
302 	INP_INFO_RLOCK_ET(&V_ripcbinfo, et);
303 	CK_LIST_FOREACH(inp, &V_ripcbinfo.ipi_hashbase[hash], inp_hash) {
304 		if (inp->inp_ip_p != proto)
305 			continue;
306 #ifdef INET6
307 		/* XXX inp locking */
308 		if ((inp->inp_vflag & INP_IPV4) == 0)
309 			continue;
310 #endif
311 		if (inp->inp_laddr.s_addr != ip->ip_dst.s_addr)
312 			continue;
313 		if (inp->inp_faddr.s_addr != ip->ip_src.s_addr)
314 			continue;
315 		if (last != NULL) {
316 			struct mbuf *n;
317 
318 			n = m_copym(m, 0, M_COPYALL, M_NOWAIT);
319 			if (n != NULL)
320 			    (void) rip_append(last, ip, n, &ripsrc);
321 			/* XXX count dropped packet */
322 			INP_RUNLOCK(last);
323 			last = NULL;
324 		}
325 		INP_RLOCK(inp);
326 		if (__predict_false(inp->inp_flags2 & INP_FREED))
327 			goto skip_1;
328 		if (jailed_without_vnet(inp->inp_cred)) {
329 			/*
330 			 * XXX: If faddr was bound to multicast group,
331 			 * jailed raw socket will drop datagram.
332 			 */
333 			if (prison_check_ip4(inp->inp_cred, &ip->ip_dst) != 0)
334 				goto skip_1;
335 		}
336 		last = inp;
337 		continue;
338 	skip_1:
339 		INP_RUNLOCK(inp);
340 	}
341 	CK_LIST_FOREACH(inp, &V_ripcbinfo.ipi_hashbase[0], inp_hash) {
342 		if (inp->inp_ip_p && inp->inp_ip_p != proto)
343 			continue;
344 #ifdef INET6
345 		/* XXX inp locking */
346 		if ((inp->inp_vflag & INP_IPV4) == 0)
347 			continue;
348 #endif
349 		if (!in_nullhost(inp->inp_laddr) &&
350 		    !in_hosteq(inp->inp_laddr, ip->ip_dst))
351 			continue;
352 		if (!in_nullhost(inp->inp_faddr) &&
353 		    !in_hosteq(inp->inp_faddr, ip->ip_src))
354 			continue;
355 		if (last != NULL) {
356 			struct mbuf *n;
357 
358 			n = m_copym(m, 0, M_COPYALL, M_NOWAIT);
359 			if (n != NULL)
360 				(void) rip_append(last, ip, n, &ripsrc);
361 			/* XXX count dropped packet */
362 			INP_RUNLOCK(last);
363 			last = NULL;
364 		}
365 		INP_RLOCK(inp);
366 		if (__predict_false(inp->inp_flags2 & INP_FREED))
367 			goto skip_2;
368 		if (jailed_without_vnet(inp->inp_cred)) {
369 			/*
370 			 * Allow raw socket in jail to receive multicast;
371 			 * assume process had PRIV_NETINET_RAW at attach,
372 			 * and fall through into normal filter path if so.
373 			 */
374 			if (!IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) &&
375 			    prison_check_ip4(inp->inp_cred, &ip->ip_dst) != 0)
376 				goto skip_2;
377 		}
378 		/*
379 		 * If this raw socket has multicast state, and we
380 		 * have received a multicast, check if this socket
381 		 * should receive it, as multicast filtering is now
382 		 * the responsibility of the transport layer.
383 		 */
384 		if (inp->inp_moptions != NULL &&
385 		    IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
386 			/*
387 			 * If the incoming datagram is for IGMP, allow it
388 			 * through unconditionally to the raw socket.
389 			 *
390 			 * In the case of IGMPv2, we may not have explicitly
391 			 * joined the group, and may have set IFF_ALLMULTI
392 			 * on the interface. imo_multi_filter() may discard
393 			 * control traffic we actually need to see.
394 			 *
395 			 * Userland multicast routing daemons should continue
396 			 * to filter the control traffic appropriately.
397 			 */
398 			int blocked;
399 
400 			blocked = MCAST_PASS;
401 			if (proto != IPPROTO_IGMP) {
402 				struct sockaddr_in group;
403 
404 				bzero(&group, sizeof(struct sockaddr_in));
405 				group.sin_len = sizeof(struct sockaddr_in);
406 				group.sin_family = AF_INET;
407 				group.sin_addr = ip->ip_dst;
408 
409 				blocked = imo_multi_filter(inp->inp_moptions,
410 				    ifp,
411 				    (struct sockaddr *)&group,
412 				    (struct sockaddr *)&ripsrc);
413 			}
414 
415 			if (blocked != MCAST_PASS) {
416 				IPSTAT_INC(ips_notmember);
417 				goto skip_2;
418 			}
419 		}
420 		last = inp;
421 		continue;
422 	skip_2:
423 		INP_RUNLOCK(inp);
424 	}
425 	INP_INFO_RUNLOCK_ET(&V_ripcbinfo, et);
426 	if (last != NULL) {
427 		if (rip_append(last, ip, m, &ripsrc) != 0)
428 			IPSTAT_INC(ips_delivered);
429 		INP_RUNLOCK(last);
430 	} else {
431 		if (inetsw[ip_protox[ip->ip_p]].pr_input == rip_input) {
432 			IPSTAT_INC(ips_noproto);
433 			IPSTAT_DEC(ips_delivered);
434 			icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_PROTOCOL, 0, 0);
435 		} else {
436 			m_freem(m);
437 		}
438 	}
439 	return (IPPROTO_DONE);
440 }
441 
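/*
 * For context, a minimal userland sketch of the receive path that
 * rip_input() serves: a privileged process opens a raw socket for one IP
 * protocol and every matching datagram is handed up with its IP header
 * intact.  This is an illustrative example, not part of the kernel sources.
 *
 *	#include <sys/types.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/ip.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		union {
 *			struct ip ip;
 *			char data[2048];
 *		} buf;
 *		struct sockaddr_in from;
 *		socklen_t fromlen = sizeof(from);
 *		ssize_t n;
 *		int s;
 *
 *		// Requires PRIV_NETINET_RAW (see rip_attach()), typically root.
 *		s = socket(AF_INET, SOCK_RAW, IPPROTO_ICMP);
 *		if (s == -1)
 *			return (1);
 *		n = recvfrom(s, &buf, sizeof(buf), 0,
 *		    (struct sockaddr *)&from, &fromlen);
 *		if (n > 0)
 *			printf("%zd bytes of protocol %d\n", n, buf.ip.ip_p);
 *		close(s);
 *		return (0);
 *	}
 */
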
442 /*
443  * Generate IP header and pass packet to ip_output.  Tack on options user may
444  * have setup with control call.
445  */
446 int
447 rip_output(struct mbuf *m, struct socket *so, ...)
448 {
449 	struct ip *ip;
450 	int error;
451 	struct inpcb *inp = sotoinpcb(so);
452 	va_list ap;
453 	u_long dst;
454 	int flags = ((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0) |
455 	    IP_ALLOWBROADCAST;
456 	int cnt, hlen;
457 	u_char opttype, optlen, *cp;
458 
459 	va_start(ap, so);
460 	dst = va_arg(ap, u_long);
461 	va_end(ap);
462 
463 	/*
464 	 * If the user handed us a complete IP packet, use it.  Otherwise,
465 	 * allocate an mbuf for a header and fill it in.
466 	 */
467 	if ((inp->inp_flags & INP_HDRINCL) == 0) {
468 		if (m->m_pkthdr.len + sizeof(struct ip) > IP_MAXPACKET) {
469 			m_freem(m);
470 			return(EMSGSIZE);
471 		}
472 		M_PREPEND(m, sizeof(struct ip), M_NOWAIT);
473 		if (m == NULL)
474 			return(ENOBUFS);
475 
476 		INP_RLOCK(inp);
477 		ip = mtod(m, struct ip *);
478 		ip->ip_tos = inp->inp_ip_tos;
479 		if (inp->inp_flags & INP_DONTFRAG)
480 			ip->ip_off = htons(IP_DF);
481 		else
482 			ip->ip_off = htons(0);
483 		ip->ip_p = inp->inp_ip_p;
484 		ip->ip_len = htons(m->m_pkthdr.len);
485 		ip->ip_src = inp->inp_laddr;
486 		ip->ip_dst.s_addr = dst;
487 		if (jailed(inp->inp_cred)) {
488 			/*
489 			 * prison_local_ip4() would be good enough but would
490 			 * let a source of INADDR_ANY pass, which we do not
491 			 * want to see from jails.
492 			 */
493 			if (ip->ip_src.s_addr == INADDR_ANY) {
494 				error = in_pcbladdr(inp, &ip->ip_dst, &ip->ip_src,
495 				    inp->inp_cred);
496 			} else {
497 				error = prison_local_ip4(inp->inp_cred,
498 				    &ip->ip_src);
499 			}
500 			if (error != 0) {
501 				INP_RUNLOCK(inp);
502 				m_freem(m);
503 				return (error);
504 			}
505 		}
506 		ip->ip_ttl = inp->inp_ip_ttl;
507 	} else {
508 		if (m->m_pkthdr.len > IP_MAXPACKET) {
509 			m_freem(m);
510 			return(EMSGSIZE);
511 		}
512 		ip = mtod(m, struct ip *);
513 		hlen = ip->ip_hl << 2;
514 		if (m->m_len < hlen) {
515 			m = m_pullup(m, hlen);
516 			if (m == NULL)
517 				return (EINVAL);
518 			ip = mtod(m, struct ip *);
519 		}
520 
521 		INP_RLOCK(inp);
522 		/*
523 		 * Don't allow both user-specified and setsockopt options,
524 		 * and don't allow packet lengths that will crash.
525 		 */
526 		if ((hlen < sizeof (*ip))
527 		    || ((hlen > sizeof (*ip)) && inp->inp_options)
528 		    || (ntohs(ip->ip_len) != m->m_pkthdr.len)) {
529 			INP_RUNLOCK(inp);
530 			m_freem(m);
531 			return (EINVAL);
532 		}
533 		error = prison_check_ip4(inp->inp_cred, &ip->ip_src);
534 		if (error != 0) {
535 			INP_RUNLOCK(inp);
536 			m_freem(m);
537 			return (error);
538 		}
539 		/*
540 		 * Don't allow IP options which do not have the required
541 		 * structure as specified in section 3.1 of RFC 791 on
542 		 * pages 15-23.
543 		 */
544 		cp = (u_char *)(ip + 1);
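		/*
		 * For reference, every option other than EOL (0) and NOP (1)
		 * is a type/length/data triple; e.g. a minimal record route
		 * option with room for one address would carry
		 *
		 *	cp[IPOPT_OPTVAL] = 7	(IPOPT_RR)
		 *	cp[IPOPT_OLEN]   = 7	(type + length + pointer +
		 *				 one 4-byte address slot)
		 *
		 * so the loop below insists that at least two option bytes
		 * remain, that the length byte is at least two, and that it
		 * does not run past the remaining option space.
		 */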
545 		cnt = hlen - sizeof (struct ip);
546 		for (; cnt > 0; cnt -= optlen, cp += optlen) {
547 			opttype = cp[IPOPT_OPTVAL];
548 			if (opttype == IPOPT_EOL)
549 				break;
550 			if (opttype == IPOPT_NOP) {
551 				optlen = 1;
552 				continue;
553 			}
554 			if (cnt < IPOPT_OLEN + sizeof(u_char)) {
555 				INP_RUNLOCK(inp);
556 				m_freem(m);
557 				return (EINVAL);
558 			}
559 			optlen = cp[IPOPT_OLEN];
560 			if (optlen < IPOPT_OLEN + sizeof(u_char) ||
561 			    optlen > cnt) {
562 				INP_RUNLOCK(inp);
563 				m_freem(m);
564 				return (EINVAL);
565 			}
566 		}
567 		/*
568 		 * This doesn't allow the application to specify an ID of zero,
569 		 * but the limitation dates back to the beginning of history.
570 		 */
571 		if (ip->ip_id == 0)
572 			ip_fillid(ip);
573 
574 		/*
575 		 * XXX prevent ip_output from overwriting header fields.
576 		 */
577 		flags |= IP_RAWOUTPUT;
578 		IPSTAT_INC(ips_rawout);
579 	}
580 
581 	if (inp->inp_flags & INP_ONESBCAST)
582 		flags |= IP_SENDONES;
583 
584 #ifdef MAC
585 	mac_inpcb_create_mbuf(inp, m);
586 #endif
587 
588 	error = ip_output(m, inp->inp_options, NULL, flags,
589 	    inp->inp_moptions, inp);
590 	INP_RUNLOCK(inp);
591 	return (error);
592 }
593 
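/*
 * A hedged userland sketch of the IP_HDRINCL path handled above: the
 * process supplies the complete IP header and rip_output() only validates
 * it (header length, total length, option layout) and fills in ip_id when
 * it is left at zero.  Note that ip_len is compared against the mbuf
 * length with ntohs(), i.e. it is expected in network byte order here.
 * The names below are illustrative only, not part of the kernel sources.
 *
 *	#include <sys/types.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/ip.h>
 *	#include <arpa/inet.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int
 *	send_with_header(const char *payload, size_t plen,
 *	    struct in_addr src, struct in_addr dst)
 *	{
 *		union {
 *			struct ip ip;
 *			char data[1500];
 *		} pkt;
 *		struct sockaddr_in to;
 *		int on = 1, s;
 *
 *		if (plen > sizeof(pkt.data) - sizeof(struct ip))
 *			return (-1);
 *		s = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);
 *		if (s == -1)
 *			return (-1);
 *		if (setsockopt(s, IPPROTO_IP, IP_HDRINCL, &on,
 *		    sizeof(on)) == -1) {
 *			close(s);
 *			return (-1);
 *		}
 *		memset(&pkt, 0, sizeof(pkt));
 *		pkt.ip.ip_v = IPVERSION;
 *		pkt.ip.ip_hl = sizeof(struct ip) >> 2;
 *		pkt.ip.ip_len = htons(sizeof(struct ip) + plen);
 *		pkt.ip.ip_id = 0;	// zero: the kernel calls ip_fillid()
 *		pkt.ip.ip_ttl = 64;
 *		pkt.ip.ip_p = 253;	// RFC 3692 experimental protocol
 *		pkt.ip.ip_src = src;
 *		pkt.ip.ip_dst = dst;
 *		// ip_off and ip_sum stay zero; the header checksum is
 *		// filled in further down the output path.
 *		memcpy(pkt.data + sizeof(struct ip), payload, plen);
 *		memset(&to, 0, sizeof(to));
 *		to.sin_len = sizeof(to);
 *		to.sin_family = AF_INET;
 *		to.sin_addr = dst;
 *		if (sendto(s, &pkt, sizeof(struct ip) + plen, 0,
 *		    (struct sockaddr *)&to, sizeof(to)) == -1) {
 *			close(s);
 *			return (-1);
 *		}
 *		close(s);
 *		return (0);
 *	}
 */
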
594 /*
595  * Raw IP socket option processing.
596  *
597  * IMPORTANT NOTE regarding access control: Traditionally, raw sockets could
598  * only be created by a privileged process, and as such, socket option
599  * operations to manage system properties on any raw socket were allowed to
600  * take place without explicit additional access control checks.  However,
601  * raw sockets can now also be created in jail(), and therefore explicit
602  * checks are now required.  Likewise, raw sockets can be used by a process
603  * after it gives up privilege, so some caution is required.  For options
604  * passed down to the IP layer via ip_ctloutput(), checks are assumed to be
605  * performed in ip_ctloutput() and therefore no check occurs here.
606  * Unilaterally checking priv_check() here breaks normal IP socket option
607  * operations on raw sockets.
608  *
609  * When adding new socket options here, make sure to add access control
610  * checks as necessary.
611  *
612  * XXX-BZ inp locking?
613  */
614 int
615 rip_ctloutput(struct socket *so, struct sockopt *sopt)
616 {
617 	struct	inpcb *inp = sotoinpcb(so);
618 	int	error, optval;
619 
620 	if (sopt->sopt_level != IPPROTO_IP) {
621 		if ((sopt->sopt_level == SOL_SOCKET) &&
622 		    (sopt->sopt_name == SO_SETFIB)) {
623 			inp->inp_inc.inc_fibnum = so->so_fibnum;
624 			return (0);
625 		}
626 		return (EINVAL);
627 	}
628 
629 	error = 0;
630 	switch (sopt->sopt_dir) {
631 	case SOPT_GET:
632 		switch (sopt->sopt_name) {
633 		case IP_HDRINCL:
634 			optval = inp->inp_flags & INP_HDRINCL;
635 			error = sooptcopyout(sopt, &optval, sizeof optval);
636 			break;
637 
638 		case IP_FW3:	/* generic ipfw v.3 functions */
639 		case IP_FW_ADD:	/* ADD actually returns the body... */
640 		case IP_FW_GET:
641 		case IP_FW_TABLE_GETSIZE:
642 		case IP_FW_TABLE_LIST:
643 		case IP_FW_NAT_GET_CONFIG:
644 		case IP_FW_NAT_GET_LOG:
645 			if (V_ip_fw_ctl_ptr != NULL)
646 				error = V_ip_fw_ctl_ptr(sopt);
647 			else
648 				error = ENOPROTOOPT;
649 			break;
650 
651 		case IP_DUMMYNET3:	/* generic dummynet v.3 functions */
652 		case IP_DUMMYNET_GET:
653 			if (ip_dn_ctl_ptr != NULL)
654 				error = ip_dn_ctl_ptr(sopt);
655 			else
656 				error = ENOPROTOOPT;
657 			break;
658 
659 		case MRT_INIT:
660 		case MRT_DONE:
661 		case MRT_ADD_VIF:
662 		case MRT_DEL_VIF:
663 		case MRT_ADD_MFC:
664 		case MRT_DEL_MFC:
665 		case MRT_VERSION:
666 		case MRT_ASSERT:
667 		case MRT_API_SUPPORT:
668 		case MRT_API_CONFIG:
669 		case MRT_ADD_BW_UPCALL:
670 		case MRT_DEL_BW_UPCALL:
671 			error = priv_check(curthread, PRIV_NETINET_MROUTE);
672 			if (error != 0)
673 				return (error);
674 			error = ip_mrouter_get ? ip_mrouter_get(so, sopt) :
675 				EOPNOTSUPP;
676 			break;
677 
678 		default:
679 			error = ip_ctloutput(so, sopt);
680 			break;
681 		}
682 		break;
683 
684 	case SOPT_SET:
685 		switch (sopt->sopt_name) {
686 		case IP_HDRINCL:
687 			error = sooptcopyin(sopt, &optval, sizeof optval,
688 					    sizeof optval);
689 			if (error)
690 				break;
691 			if (optval)
692 				inp->inp_flags |= INP_HDRINCL;
693 			else
694 				inp->inp_flags &= ~INP_HDRINCL;
695 			break;
696 
697 		case IP_FW3:	/* generic ipfw v.3 functions */
698 		case IP_FW_ADD:
699 		case IP_FW_DEL:
700 		case IP_FW_FLUSH:
701 		case IP_FW_ZERO:
702 		case IP_FW_RESETLOG:
703 		case IP_FW_TABLE_ADD:
704 		case IP_FW_TABLE_DEL:
705 		case IP_FW_TABLE_FLUSH:
706 		case IP_FW_NAT_CFG:
707 		case IP_FW_NAT_DEL:
708 			if (V_ip_fw_ctl_ptr != NULL)
709 				error = V_ip_fw_ctl_ptr(sopt);
710 			else
711 				error = ENOPROTOOPT;
712 			break;
713 
714 		case IP_DUMMYNET3:	/* generic dummynet v.3 functions */
715 		case IP_DUMMYNET_CONFIGURE:
716 		case IP_DUMMYNET_DEL:
717 		case IP_DUMMYNET_FLUSH:
718 			if (ip_dn_ctl_ptr != NULL)
719 				error = ip_dn_ctl_ptr(sopt);
720 			else
721 				error = ENOPROTOOPT;
722 			break;
723 
724 		case IP_RSVP_ON:
725 			error = priv_check(curthread, PRIV_NETINET_MROUTE);
726 			if (error != 0)
727 				return (error);
728 			error = ip_rsvp_init(so);
729 			break;
730 
731 		case IP_RSVP_OFF:
732 			error = priv_check(curthread, PRIV_NETINET_MROUTE);
733 			if (error != 0)
734 				return (error);
735 			error = ip_rsvp_done();
736 			break;
737 
738 		case IP_RSVP_VIF_ON:
739 		case IP_RSVP_VIF_OFF:
740 			error = priv_check(curthread, PRIV_NETINET_MROUTE);
741 			if (error != 0)
742 				return (error);
743 			error = ip_rsvp_vif ?
744 				ip_rsvp_vif(so, sopt) : EINVAL;
745 			break;
746 
747 		case MRT_INIT:
748 		case MRT_DONE:
749 		case MRT_ADD_VIF:
750 		case MRT_DEL_VIF:
751 		case MRT_ADD_MFC:
752 		case MRT_DEL_MFC:
753 		case MRT_VERSION:
754 		case MRT_ASSERT:
755 		case MRT_API_SUPPORT:
756 		case MRT_API_CONFIG:
757 		case MRT_ADD_BW_UPCALL:
758 		case MRT_DEL_BW_UPCALL:
759 			error = priv_check(curthread, PRIV_NETINET_MROUTE);
760 			if (error != 0)
761 				return (error);
762 			error = ip_mrouter_set ? ip_mrouter_set(so, sopt) :
763 					EOPNOTSUPP;
764 			break;
765 
766 		default:
767 			error = ip_ctloutput(so, sopt);
768 			break;
769 		}
770 		break;
771 	}
772 
773 	return (error);
774 }
775 
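/*
 * One detail of the switch above worth illustrating: the only request not
 * at level IPPROTO_IP that rip_ctloutput() accepts is SOL_SOCKET/SO_SETFIB,
 * where it copies the FIB number already recorded by the socket layer into
 * the inpcb; everything else outside IPPROTO_IP returns EINVAL, and
 * unrecognized IPPROTO_IP options fall through to ip_ctloutput().  A hedged
 * userland fragment ('s' is an existing raw socket, error handling omitted):
 *
 *	int fib = 1;	// must name a FIB that exists, see net.fibs
 *
 *	setsockopt(s, SOL_SOCKET, SO_SETFIB, &fib, sizeof(fib));
 */
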
776 /*
777  * This function exists solely to receive the PRC_IFDOWN and PRC_IFUP
778  * messages sent by if_down() and if_up().  In both cases it looks for an
779  * ifaddr whose ifa_addr is sa; for PRC_IFDOWN it calls in_ifadown() to
780  * remove all routes corresponding to that address, and for PRC_IFUP it
781  * reinstalls the interface routes.
782  */
783 void
784 rip_ctlinput(int cmd, struct sockaddr *sa, void *vip)
785 {
786 	struct rm_priotracker in_ifa_tracker;
787 	struct in_ifaddr *ia;
788 	struct ifnet *ifp;
789 	int err;
790 	int flags;
791 
792 	switch (cmd) {
793 	case PRC_IFDOWN:
794 		IN_IFADDR_RLOCK(&in_ifa_tracker);
795 		CK_STAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
796 			if (ia->ia_ifa.ifa_addr == sa
797 			    && (ia->ia_flags & IFA_ROUTE)) {
798 				ifa_ref(&ia->ia_ifa);
799 				IN_IFADDR_RUNLOCK(&in_ifa_tracker);
800 				/*
801 				 * in_scrubprefix() kills the interface route.
802 				 */
803 				in_scrubprefix(ia, 0);
804 				/*
805 				 * in_ifadown gets rid of all the rest of the
806 				 * routes.  This is not quite the right thing
807 				 * to do, but at least if we are running a
808 				 * routing process they will come back.
809 				 */
810 				in_ifadown(&ia->ia_ifa, 0);
811 				ifa_free(&ia->ia_ifa);
812 				break;
813 			}
814 		}
815 		if (ia == NULL)		/* If ia matched, already unlocked. */
816 			IN_IFADDR_RUNLOCK(&in_ifa_tracker);
817 		break;
818 
819 	case PRC_IFUP:
820 		IN_IFADDR_RLOCK(&in_ifa_tracker);
821 		CK_STAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
822 			if (ia->ia_ifa.ifa_addr == sa)
823 				break;
824 		}
825 		if (ia == NULL || (ia->ia_flags & IFA_ROUTE)) {
826 			IN_IFADDR_RUNLOCK(&in_ifa_tracker);
827 			return;
828 		}
829 		ifa_ref(&ia->ia_ifa);
830 		IN_IFADDR_RUNLOCK(&in_ifa_tracker);
831 		flags = RTF_UP;
832 		ifp = ia->ia_ifa.ifa_ifp;
833 
834 		if ((ifp->if_flags & IFF_LOOPBACK)
835 		    || (ifp->if_flags & IFF_POINTOPOINT))
836 			flags |= RTF_HOST;
837 
838 		err = ifa_del_loopback_route((struct ifaddr *)ia, sa);
839 
840 		err = rtinit(&ia->ia_ifa, RTM_ADD, flags);
841 		if (err == 0)
842 			ia->ia_flags |= IFA_ROUTE;
843 
844 		err = ifa_add_loopback_route((struct ifaddr *)ia, sa);
845 
846 		ifa_free(&ia->ia_ifa);
847 		break;
848 	}
849 }
850 
851 static int
852 rip_attach(struct socket *so, int proto, struct thread *td)
853 {
854 	struct inpcb *inp;
855 	int error;
856 
857 	inp = sotoinpcb(so);
858 	KASSERT(inp == NULL, ("rip_attach: inp != NULL"));
859 
860 	error = priv_check(td, PRIV_NETINET_RAW);
861 	if (error)
862 		return (error);
863 	if (proto >= IPPROTO_MAX || proto < 0)
864 		return EPROTONOSUPPORT;
865 	error = soreserve(so, rip_sendspace, rip_recvspace);
866 	if (error)
867 		return (error);
868 	INP_INFO_WLOCK(&V_ripcbinfo);
869 	error = in_pcballoc(so, &V_ripcbinfo);
870 	if (error) {
871 		INP_INFO_WUNLOCK(&V_ripcbinfo);
872 		return (error);
873 	}
874 	inp = (struct inpcb *)so->so_pcb;
875 	inp->inp_vflag |= INP_IPV4;
876 	inp->inp_ip_p = proto;
877 	inp->inp_ip_ttl = V_ip_defttl;
878 	rip_inshash(inp);
879 	INP_INFO_WUNLOCK(&V_ripcbinfo);
880 	INP_WUNLOCK(inp);
881 	return (0);
882 }
883 
884 static void
885 rip_detach(struct socket *so)
886 {
887 	struct inpcb *inp;
888 
889 	inp = sotoinpcb(so);
890 	KASSERT(inp != NULL, ("rip_detach: inp == NULL"));
891 	KASSERT(inp->inp_faddr.s_addr == INADDR_ANY,
892 	    ("rip_detach: not closed"));
893 
894 	INP_INFO_WLOCK(&V_ripcbinfo);
895 	INP_WLOCK(inp);
896 	rip_delhash(inp);
897 	if (so == V_ip_mrouter && ip_mrouter_done)
898 		ip_mrouter_done();
899 	if (ip_rsvp_force_done)
900 		ip_rsvp_force_done(so);
901 	if (so == V_ip_rsvpd)
902 		ip_rsvp_done();
903 	in_pcbdetach(inp);
904 	in_pcbfree(inp);
905 	INP_INFO_WUNLOCK(&V_ripcbinfo);
906 }
907 
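/*
 * Common helper for abort, close and disconnect: forget the foreign
 * address, move the inpcb to the matching hash bucket again and mark the
 * socket as no longer connected.
 */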
908 static void
909 rip_dodisconnect(struct socket *so, struct inpcb *inp)
910 {
911 	struct inpcbinfo *pcbinfo;
912 
913 	pcbinfo = inp->inp_pcbinfo;
914 	INP_INFO_WLOCK(pcbinfo);
915 	INP_WLOCK(inp);
916 	rip_delhash(inp);
917 	inp->inp_faddr.s_addr = INADDR_ANY;
918 	rip_inshash(inp);
919 	SOCK_LOCK(so);
920 	so->so_state &= ~SS_ISCONNECTED;
921 	SOCK_UNLOCK(so);
922 	INP_WUNLOCK(inp);
923 	INP_INFO_WUNLOCK(pcbinfo);
924 }
925 
926 static void
927 rip_abort(struct socket *so)
928 {
929 	struct inpcb *inp;
930 
931 	inp = sotoinpcb(so);
932 	KASSERT(inp != NULL, ("rip_abort: inp == NULL"));
933 
934 	rip_dodisconnect(so, inp);
935 }
936 
937 static void
938 rip_close(struct socket *so)
939 {
940 	struct inpcb *inp;
941 
942 	inp = sotoinpcb(so);
943 	KASSERT(inp != NULL, ("rip_close: inp == NULL"));
944 
945 	rip_dodisconnect(so, inp);
946 }
947 
948 static int
949 rip_disconnect(struct socket *so)
950 {
951 	struct inpcb *inp;
952 
953 	if ((so->so_state & SS_ISCONNECTED) == 0)
954 		return (ENOTCONN);
955 
956 	inp = sotoinpcb(so);
957 	KASSERT(inp != NULL, ("rip_disconnect: inp == NULL"));
958 
959 	rip_dodisconnect(so, inp);
960 	return (0);
961 }
962 
963 static int
964 rip_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
965 {
966 	struct sockaddr_in *addr = (struct sockaddr_in *)nam;
967 	struct inpcb *inp;
968 	int error;
969 
970 	if (nam->sa_len != sizeof(*addr))
971 		return (EINVAL);
972 
973 	error = prison_check_ip4(td->td_ucred, &addr->sin_addr);
974 	if (error != 0)
975 		return (error);
976 
977 	inp = sotoinpcb(so);
978 	KASSERT(inp != NULL, ("rip_bind: inp == NULL"));
979 
980 	if (CK_STAILQ_EMPTY(&V_ifnet) ||
981 	    (addr->sin_family != AF_INET && addr->sin_family != AF_IMPLINK) ||
982 	    (addr->sin_addr.s_addr &&
983 	     (inp->inp_flags & INP_BINDANY) == 0 &&
984 	     ifa_ifwithaddr_check((struct sockaddr *)addr) == 0))
985 		return (EADDRNOTAVAIL);
986 
987 	INP_INFO_WLOCK(&V_ripcbinfo);
988 	INP_WLOCK(inp);
989 	rip_delhash(inp);
990 	inp->inp_laddr = addr->sin_addr;
991 	rip_inshash(inp);
992 	INP_WUNLOCK(inp);
993 	INP_INFO_WUNLOCK(&V_ripcbinfo);
994 	return (0);
995 }
996 
997 static int
998 rip_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
999 {
1000 	struct sockaddr_in *addr = (struct sockaddr_in *)nam;
1001 	struct inpcb *inp;
1002 
1003 	if (nam->sa_len != sizeof(*addr))
1004 		return (EINVAL);
1005 	if (CK_STAILQ_EMPTY(&V_ifnet))
1006 		return (EADDRNOTAVAIL);
1007 	if (addr->sin_family != AF_INET && addr->sin_family != AF_IMPLINK)
1008 		return (EAFNOSUPPORT);
1009 
1010 	inp = sotoinpcb(so);
1011 	KASSERT(inp != NULL, ("rip_connect: inp == NULL"));
1012 
1013 	INP_INFO_WLOCK(&V_ripcbinfo);
1014 	INP_WLOCK(inp);
1015 	rip_delhash(inp);
1016 	inp->inp_faddr = addr->sin_addr;
1017 	rip_inshash(inp);
1018 	soisconnected(so);
1019 	INP_WUNLOCK(inp);
1020 	INP_INFO_WUNLOCK(&V_ripcbinfo);
1021 	return (0);
1022 }
1023 
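/*
 * rip_bind() and rip_connect() above only record the addresses and move the
 * inpcb between hash buckets (see rip_inshash()); once the protocol and
 * both addresses are set, rip_input() finds the socket in the
 * fully-specified bucket rather than on the wildcard list.  An illustrative
 * userland fragment ('s' is a raw socket, error handling omitted):
 *
 *	struct sockaddr_in sin = {
 *		.sin_len = sizeof(sin),
 *		.sin_family = AF_INET,
 *	};
 *
 *	inet_pton(AF_INET, "192.0.2.1", &sin.sin_addr);
 *	bind(s, (struct sockaddr *)&sin, sizeof(sin));	  // local address
 *	inet_pton(AF_INET, "198.51.100.1", &sin.sin_addr);
 *	connect(s, (struct sockaddr *)&sin, sizeof(sin)); // peer address
 *
 * After this, only datagrams of the socket's protocol sent from
 * 198.51.100.1 to 192.0.2.1 are delivered on 's'.
 */
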
1024 static int
1025 rip_shutdown(struct socket *so)
1026 {
1027 	struct inpcb *inp;
1028 
1029 	inp = sotoinpcb(so);
1030 	KASSERT(inp != NULL, ("rip_shutdown: inp == NULL"));
1031 
1032 	INP_WLOCK(inp);
1033 	socantsendmore(so);
1034 	INP_WUNLOCK(inp);
1035 	return (0);
1036 }
1037 
1038 static int
1039 rip_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
1040     struct mbuf *control, struct thread *td)
1041 {
1042 	struct inpcb *inp;
1043 	u_long dst;
1044 
1045 	inp = sotoinpcb(so);
1046 	KASSERT(inp != NULL, ("rip_send: inp == NULL"));
1047 
1048 	/*
1049 	 * Note: 'dst' reads below are unlocked.
1050 	 */
1051 	if (so->so_state & SS_ISCONNECTED) {
1052 		if (nam) {
1053 			m_freem(m);
1054 			return (EISCONN);
1055 		}
1056 		dst = inp->inp_faddr.s_addr;	/* Unlocked read. */
1057 	} else {
1058 		if (nam == NULL) {
1059 			m_freem(m);
1060 			return (ENOTCONN);
1061 		}
1062 		dst = ((struct sockaddr_in *)nam)->sin_addr.s_addr;
1063 	}
1064 	return (rip_output(m, so, dst));
1065 }
1066 #endif /* INET */
1067 
1068 static int
1069 rip_pcblist(SYSCTL_HANDLER_ARGS)
1070 {
1071 	int error, i, n;
1072 	struct inpcb *inp, **inp_list;
1073 	inp_gen_t gencnt;
1074 	struct xinpgen xig;
1075 	struct epoch_tracker et;
1076 
1077 	/*
1078 	 * The process of preparing the PCB list is too time-consuming and
1079 	 * resource-intensive to do twice on every request.
1080 	 */
1081 	if (req->oldptr == 0) {
1082 		n = V_ripcbinfo.ipi_count;
1083 		n += imax(n / 8, 10);
1084 		req->oldidx = 2 * (sizeof xig) + n * sizeof(struct xinpcb);
1085 		return (0);
1086 	}
1087 
1088 	if (req->newptr != 0)
1089 		return (EPERM);
1090 
1091 	/*
1092 	 * OK, now we're committed to doing something.
1093 	 */
1094 	INP_INFO_WLOCK(&V_ripcbinfo);
1095 	gencnt = V_ripcbinfo.ipi_gencnt;
1096 	n = V_ripcbinfo.ipi_count;
1097 	INP_INFO_WUNLOCK(&V_ripcbinfo);
1098 
1099 	bzero(&xig, sizeof(xig));
1100 	xig.xig_len = sizeof xig;
1101 	xig.xig_count = n;
1102 	xig.xig_gen = gencnt;
1103 	xig.xig_sogen = so_gencnt;
1104 	error = SYSCTL_OUT(req, &xig, sizeof xig);
1105 	if (error)
1106 		return (error);
1107 
1108 	inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);
1109 
1110 	INP_INFO_RLOCK_ET(&V_ripcbinfo, et);
1111 	for (inp = CK_LIST_FIRST(V_ripcbinfo.ipi_listhead), i = 0; inp && i < n;
1112 	     inp = CK_LIST_NEXT(inp, inp_list)) {
1113 		INP_WLOCK(inp);
1114 		if (inp->inp_gencnt <= gencnt &&
1115 		    cr_canseeinpcb(req->td->td_ucred, inp) == 0) {
1116 			in_pcbref(inp);
1117 			inp_list[i++] = inp;
1118 		}
1119 		INP_WUNLOCK(inp);
1120 	}
1121 	INP_INFO_RUNLOCK_ET(&V_ripcbinfo, et);
1122 	n = i;
1123 
1124 	error = 0;
1125 	for (i = 0; i < n; i++) {
1126 		inp = inp_list[i];
1127 		INP_RLOCK(inp);
1128 		if (inp->inp_gencnt <= gencnt) {
1129 			struct xinpcb xi;
1130 
1131 			in_pcbtoxinpcb(inp, &xi);
1132 			INP_RUNLOCK(inp);
1133 			error = SYSCTL_OUT(req, &xi, sizeof xi);
1134 		} else
1135 			INP_RUNLOCK(inp);
1136 	}
1137 	INP_INFO_WLOCK(&V_ripcbinfo);
1138 	for (i = 0; i < n; i++) {
1139 		inp = inp_list[i];
1140 		INP_RLOCK(inp);
1141 		if (!in_pcbrele_rlocked(inp))
1142 			INP_RUNLOCK(inp);
1143 	}
1144 	INP_INFO_WUNLOCK(&V_ripcbinfo);
1145 
1146 	if (!error) {
1147 		struct epoch_tracker et;
1148 		/*
1149 		 * Give the user an updated idea of our state.  If the
1150 		 * generation differs from what we told her before, she knows
1151 		 * that something happened while we were processing this
1152 		 * request, and it might be necessary to retry.
1153 		 */
1154 		INP_INFO_RLOCK_ET(&V_ripcbinfo, et);
1155 		xig.xig_gen = V_ripcbinfo.ipi_gencnt;
1156 		xig.xig_sogen = so_gencnt;
1157 		xig.xig_count = V_ripcbinfo.ipi_count;
1158 		INP_INFO_RUNLOCK_ET(&V_ripcbinfo, et);
1159 		error = SYSCTL_OUT(req, &xig, sizeof xig);
1160 	}
1161 	free(inp_list, M_TEMP);
1162 	return (error);
1163 }
1164 
1165 SYSCTL_PROC(_net_inet_raw, OID_AUTO/*XXX*/, pcblist,
1166     CTLTYPE_OPAQUE | CTLFLAG_RD, NULL, 0,
1167     rip_pcblist, "S,xinpcb", "List of active raw IP sockets");
1168 
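/*
 * This is the same kind of pcb listing that netstat(1)-style tools consume.
 * A hedged sketch of reading it directly with sysctlbyname(3); real
 * consumers walk the returned xinpgen/xinpcb records and re-check the
 * generation counts as described above:
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <stdlib.h>
 *
 *	size_t len = 0;
 *	void *buf = NULL;
 *
 *	if (sysctlbyname("net.inet.raw.pcblist", NULL, &len, NULL, 0) == 0 &&
 *	    (buf = malloc(len)) != NULL &&
 *	    sysctlbyname("net.inet.raw.pcblist", buf, &len, NULL, 0) == 0) {
 *		// buf now holds a struct xinpgen, the xinpcb entries and a
 *		// trailing xinpgen with the updated counts.
 *	}
 *	free(buf);
 */
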
1169 #ifdef INET
1170 struct pr_usrreqs rip_usrreqs = {
1171 	.pru_abort =		rip_abort,
1172 	.pru_attach =		rip_attach,
1173 	.pru_bind =		rip_bind,
1174 	.pru_connect =		rip_connect,
1175 	.pru_control =		in_control,
1176 	.pru_detach =		rip_detach,
1177 	.pru_disconnect =	rip_disconnect,
1178 	.pru_peeraddr =		in_getpeeraddr,
1179 	.pru_send =		rip_send,
1180 	.pru_shutdown =		rip_shutdown,
1181 	.pru_sockaddr =		in_getsockaddr,
1182 	.pru_sosetlabel =	in_pcbsosetlabel,
1183 	.pru_close =		rip_close,
1184 };
1185 #endif /* INET */
1186