xref: /freebsd/sys/netinet/raw_ip.c (revision b633e08c705fe43180567eae26923d6f6f98c8d9)
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)raw_ip.c	8.7 (Berkeley) 5/15/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_route.h"

#include <sys/param.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/rmlock.h>
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <vm/uma.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/route.h>
#include <net/route/route_ctl.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_fib.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip_mroute.h>
#include <netinet/ip_icmp.h>

#include <netipsec/ipsec_support.h>

#include <machine/stdarg.h>
#include <security/mac/mac_framework.h>

VNET_DEFINE(int, ip_defttl) = IPDEFTTL;
SYSCTL_INT(_net_inet_ip, IPCTL_DEFTTL, ttl, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(ip_defttl), 0,
    "Default TTL on outgoing IP packets");

VNET_DEFINE(struct inpcbhead, ripcb);
VNET_DEFINE(struct inpcbinfo, ripcbinfo);

#define	V_ripcb			VNET(ripcb)
#define	V_ripcbinfo		VNET(ripcbinfo)

/*
 * Control and data hooks for ipfw, dummynet, divert and so on.
 * The data hooks are not used here but it is convenient
 * to keep them all in one place.
 */
VNET_DEFINE(ip_fw_chk_ptr_t, ip_fw_chk_ptr) = NULL;
VNET_DEFINE(ip_fw_ctl_ptr_t, ip_fw_ctl_ptr) = NULL;

int	(*ip_dn_ctl_ptr)(struct sockopt *);
int	(*ip_dn_io_ptr)(struct mbuf **, struct ip_fw_args *);
void	(*ip_divert_ptr)(struct mbuf *, bool);
int	(*ng_ipfw_input_p)(struct mbuf **, struct ip_fw_args *, bool);

#ifdef INET
/*
 * Hooks for multicast routing.  They all default to NULL, so leave them
 * uninitialized and rely on the BSS being zeroed.
 */

/*
 * The socket used to communicate with the multicast routing daemon.
 */
VNET_DEFINE(struct socket *, ip_mrouter);

/*
 * The various mrouter and rsvp functions.
 */
int (*ip_mrouter_set)(struct socket *, struct sockopt *);
int (*ip_mrouter_get)(struct socket *, struct sockopt *);
int (*ip_mrouter_done)(void);
int (*ip_mforward)(struct ip *, struct ifnet *, struct mbuf *,
		   struct ip_moptions *);
int (*mrt_ioctl)(u_long, caddr_t, int);
int (*legal_vif_num)(int);
u_long (*ip_mcast_src)(int);

int (*rsvp_input_p)(struct mbuf **, int *, int);
int (*ip_rsvp_vif)(struct socket *, struct sockopt *);
void (*ip_rsvp_force_done)(struct socket *);
#endif /* INET */

extern	struct protosw inetsw[];

u_long	rip_sendspace = 9216;
SYSCTL_ULONG(_net_inet_raw, OID_AUTO, maxdgram, CTLFLAG_RW,
    &rip_sendspace, 0, "Maximum outgoing raw IP datagram size");

u_long	rip_recvspace = 9216;
SYSCTL_ULONG(_net_inet_raw, OID_AUTO, recvspace, CTLFLAG_RW,
    &rip_recvspace, 0, "Maximum space for incoming raw IP datagrams");

/*
 * Hash functions
 */

#define INP_PCBHASH_RAW_SIZE	256
#define INP_PCBHASH_RAW(proto, laddr, faddr, mask) \
        (((proto) + (laddr) + (faddr)) % (mask) + 1)

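/*
 * Raw PCBs are hashed on the (protocol, local address, foreign address)
 * tuple.  Bucket 0 of ipi_hashbase is reserved for PCBs that are not
 * fully specified (wildcard protocol or addresses); all others land in
 * buckets 1..ipi_hashmask as computed by INP_PCBHASH_RAW() above.
 */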
#ifdef INET
static void
rip_inshash(struct inpcb *inp)
{
	struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
	struct inpcbhead *pcbhash;
	int hash;

	INP_INFO_WLOCK_ASSERT(pcbinfo);
	INP_WLOCK_ASSERT(inp);

	if (inp->inp_ip_p != 0 &&
	    inp->inp_laddr.s_addr != INADDR_ANY &&
	    inp->inp_faddr.s_addr != INADDR_ANY) {
		hash = INP_PCBHASH_RAW(inp->inp_ip_p, inp->inp_laddr.s_addr,
		    inp->inp_faddr.s_addr, pcbinfo->ipi_hashmask);
	} else
		hash = 0;
	pcbhash = &pcbinfo->ipi_hashbase[hash];
	CK_LIST_INSERT_HEAD(pcbhash, inp, inp_hash);
}

static void
rip_delhash(struct inpcb *inp)
{

	INP_INFO_WLOCK_ASSERT(inp->inp_pcbinfo);
	INP_WLOCK_ASSERT(inp);

	CK_LIST_REMOVE(inp, inp_hash);
}
#endif /* INET */

/*
 * Raw interface to IP protocol.
 */

/*
 * Initialize the raw connection block queue.
 */
static void
rip_zone_change(void *tag)
{

	uma_zone_set_max(V_ripcbinfo.ipi_zone, maxsockets);
}

static int
rip_inpcb_init(void *mem, int size, int flags)
{
	struct inpcb *inp = mem;

	INP_LOCK_INIT(inp, "inp", "rawinp");
	return (0);
}

void
rip_init(void)
{

	in_pcbinfo_init(&V_ripcbinfo, "rip", &V_ripcb, INP_PCBHASH_RAW_SIZE,
	    1, "ripcb", rip_inpcb_init, IPI_HASHFIELDS_NONE);
	EVENTHANDLER_REGISTER(maxsockets_change, rip_zone_change, NULL,
	    EVENTHANDLER_PRI_ANY);
}

#ifdef VIMAGE
static void
rip_destroy(void *unused __unused)
{

	in_pcbinfo_destroy(&V_ripcbinfo);
}
VNET_SYSUNINIT(raw_ip, SI_SUB_PROTO_DOMAIN, SI_ORDER_FOURTH, rip_destroy, NULL);
#endif

#ifdef INET
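/*
 * Deliver a datagram to the raw PCB 'last'.  The mbuf is dropped if it
 * fails the IPsec policy check, the MAC check or the socket's minimum
 * TTL filter; otherwise it is appended to the socket's receive buffer
 * together with any control data the socket asked for.  Returns non-zero
 * if the packet was rejected by policy.
 */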
static int
rip_append(struct inpcb *last, struct ip *ip, struct mbuf *n,
    struct sockaddr_in *ripsrc)
{
	int policyfail = 0;

	INP_LOCK_ASSERT(last);

#if defined(IPSEC) || defined(IPSEC_SUPPORT)
	/* check AH/ESP integrity. */
	if (IPSEC_ENABLED(ipv4)) {
		if (IPSEC_CHECK_POLICY(ipv4, n, last) != 0)
			policyfail = 1;
	}
#endif /* IPSEC */
#ifdef MAC
	if (!policyfail && mac_inpcb_check_deliver(last, n) != 0)
		policyfail = 1;
#endif
	/* Check the minimum TTL for socket. */
	if (last->inp_ip_minttl && last->inp_ip_minttl > ip->ip_ttl)
		policyfail = 1;
	if (!policyfail) {
		struct mbuf *opts = NULL;
		struct socket *so;

		so = last->inp_socket;
		if ((last->inp_flags & INP_CONTROLOPTS) ||
		    (so->so_options & (SO_TIMESTAMP | SO_BINTIME)))
			ip_savecontrol(last, &opts, ip, n);
		SOCKBUF_LOCK(&so->so_rcv);
		if (sbappendaddr_locked(&so->so_rcv,
		    (struct sockaddr *)ripsrc, n, opts) == 0) {
			/* should notify about lost packet */
			m_freem(n);
			if (opts)
				m_freem(opts);
			SOCKBUF_UNLOCK(&so->so_rcv);
		} else
			sorwakeup_locked(so);
	} else
		m_freem(n);
	return (policyfail);
}

/*
 * Set up generic address and protocol structures for the raw_input
 * routine, then pass them along with the mbuf chain.
 */
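/*
 * Delivery is a two-pass walk over the raw PCB hash: the first pass
 * matches fully specified PCBs in the exact-match bucket for
 * (proto, src, dst), the second matches wildcard PCBs in bucket 0.
 * Every match except the last one receives a copy of the mbuf chain;
 * the final match consumes the original.
 */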
int
rip_input(struct mbuf **mp, int *offp, int proto)
{
	struct ifnet *ifp;
	struct mbuf *m = *mp;
	struct ip *ip = mtod(m, struct ip *);
	struct inpcb *inp, *last;
	struct sockaddr_in ripsrc;
	int hash;

	NET_EPOCH_ASSERT();

	*mp = NULL;

	bzero(&ripsrc, sizeof(ripsrc));
	ripsrc.sin_len = sizeof(ripsrc);
	ripsrc.sin_family = AF_INET;
	ripsrc.sin_addr = ip->ip_src;
	last = NULL;

	ifp = m->m_pkthdr.rcvif;

	hash = INP_PCBHASH_RAW(proto, ip->ip_src.s_addr,
	    ip->ip_dst.s_addr, V_ripcbinfo.ipi_hashmask);
	CK_LIST_FOREACH(inp, &V_ripcbinfo.ipi_hashbase[hash], inp_hash) {
		if (inp->inp_ip_p != proto)
			continue;
#ifdef INET6
		/* XXX inp locking */
		if ((inp->inp_vflag & INP_IPV4) == 0)
			continue;
#endif
		if (inp->inp_laddr.s_addr != ip->ip_dst.s_addr)
			continue;
		if (inp->inp_faddr.s_addr != ip->ip_src.s_addr)
			continue;
		if (last != NULL) {
			struct mbuf *n;

			n = m_copym(m, 0, M_COPYALL, M_NOWAIT);
			if (n != NULL)
				(void) rip_append(last, ip, n, &ripsrc);
			/* XXX count dropped packet */
			INP_RUNLOCK(last);
			last = NULL;
		}
		INP_RLOCK(inp);
		if (__predict_false(inp->inp_flags2 & INP_FREED))
			goto skip_1;
		if (jailed_without_vnet(inp->inp_cred)) {
			/*
			 * XXX: If faddr was bound to multicast group,
			 * jailed raw socket will drop datagram.
			 */
			if (prison_check_ip4(inp->inp_cred, &ip->ip_dst) != 0)
				goto skip_1;
		}
		last = inp;
		continue;
	skip_1:
		INP_RUNLOCK(inp);
	}
	CK_LIST_FOREACH(inp, &V_ripcbinfo.ipi_hashbase[0], inp_hash) {
		if (inp->inp_ip_p && inp->inp_ip_p != proto)
			continue;
#ifdef INET6
		/* XXX inp locking */
		if ((inp->inp_vflag & INP_IPV4) == 0)
			continue;
#endif
		if (!in_nullhost(inp->inp_laddr) &&
		    !in_hosteq(inp->inp_laddr, ip->ip_dst))
			continue;
		if (!in_nullhost(inp->inp_faddr) &&
		    !in_hosteq(inp->inp_faddr, ip->ip_src))
			continue;
		if (last != NULL) {
			struct mbuf *n;

			n = m_copym(m, 0, M_COPYALL, M_NOWAIT);
			if (n != NULL)
				(void) rip_append(last, ip, n, &ripsrc);
			/* XXX count dropped packet */
			INP_RUNLOCK(last);
			last = NULL;
		}
		INP_RLOCK(inp);
		if (__predict_false(inp->inp_flags2 & INP_FREED))
			goto skip_2;
		if (jailed_without_vnet(inp->inp_cred)) {
			/*
			 * Allow raw socket in jail to receive multicast;
			 * assume process had PRIV_NETINET_RAW at attach,
			 * and fall through into normal filter path if so.
			 */
			if (!IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) &&
			    prison_check_ip4(inp->inp_cred, &ip->ip_dst) != 0)
				goto skip_2;
		}
		/*
		 * If this raw socket has multicast state, and we
		 * have received a multicast, check if this socket
		 * should receive it, as multicast filtering is now
		 * the responsibility of the transport layer.
		 */
		if (inp->inp_moptions != NULL &&
		    IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
			/*
			 * If the incoming datagram is for IGMP, allow it
			 * through unconditionally to the raw socket.
			 *
			 * In the case of IGMPv2, we may not have explicitly
			 * joined the group, and may have set IFF_ALLMULTI
			 * on the interface. imo_multi_filter() may discard
			 * control traffic we actually need to see.
			 *
			 * Userland multicast routing daemons should continue
			 * to filter the control traffic appropriately.
			 */
			int blocked;

			blocked = MCAST_PASS;
			if (proto != IPPROTO_IGMP) {
				struct sockaddr_in group;

				bzero(&group, sizeof(struct sockaddr_in));
				group.sin_len = sizeof(struct sockaddr_in);
				group.sin_family = AF_INET;
				group.sin_addr = ip->ip_dst;

				blocked = imo_multi_filter(inp->inp_moptions,
				    ifp,
				    (struct sockaddr *)&group,
				    (struct sockaddr *)&ripsrc);
			}

			if (blocked != MCAST_PASS) {
				IPSTAT_INC(ips_notmember);
				goto skip_2;
			}
		}
		last = inp;
		continue;
	skip_2:
		INP_RUNLOCK(inp);
	}
	if (last != NULL) {
		if (rip_append(last, ip, m, &ripsrc) != 0)
			IPSTAT_INC(ips_delivered);
		INP_RUNLOCK(last);
	} else {
		if (inetsw[ip_protox[ip->ip_p]].pr_input == rip_input) {
			IPSTAT_INC(ips_noproto);
			IPSTAT_DEC(ips_delivered);
			icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_PROTOCOL, 0, 0);
		} else {
			m_freem(m);
		}
	}
	return (IPPROTO_DONE);
}

/*
 * Generate an IP header and pass the packet to ip_output().  Tack on any
 * options the user may have set up with a control call.
 */
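/*
 * Two cases are handled: without IP_HDRINCL the kernel prepends an IP
 * header and fills it in from the PCB; with IP_HDRINCL the caller
 * supplies a complete header, which is validated (length, option
 * structure) before being handed to ip_output() with IP_RAWOUTPUT set.
 */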
int
rip_output(struct mbuf *m, struct socket *so, ...)
{
	struct epoch_tracker et;
	struct ip *ip;
	int error;
	struct inpcb *inp = sotoinpcb(so);
	va_list ap;
	u_long dst;
	int flags = ((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0) |
	    IP_ALLOWBROADCAST;
	int cnt, hlen;
	u_char opttype, optlen, *cp;

	va_start(ap, so);
	dst = va_arg(ap, u_long);
	va_end(ap);

	/*
	 * If the user handed us a complete IP packet, use it.  Otherwise,
	 * allocate an mbuf for a header and fill it in.
	 */
	if ((inp->inp_flags & INP_HDRINCL) == 0) {
		if (m->m_pkthdr.len + sizeof(struct ip) > IP_MAXPACKET) {
			m_freem(m);
			return (EMSGSIZE);
		}
		M_PREPEND(m, sizeof(struct ip), M_NOWAIT);
		if (m == NULL)
			return (ENOBUFS);

		INP_RLOCK(inp);
		ip = mtod(m, struct ip *);
		ip->ip_tos = inp->inp_ip_tos;
		if (inp->inp_flags & INP_DONTFRAG)
			ip->ip_off = htons(IP_DF);
		else
			ip->ip_off = htons(0);
		ip->ip_p = inp->inp_ip_p;
		ip->ip_len = htons(m->m_pkthdr.len);
		ip->ip_src = inp->inp_laddr;
		ip->ip_dst.s_addr = dst;
#ifdef ROUTE_MPATH
		if (CALC_FLOWID_OUTBOUND) {
			uint32_t hash_type, hash_val;

			hash_val = fib4_calc_software_hash(ip->ip_src,
			    ip->ip_dst, 0, 0, ip->ip_p, &hash_type);
			m->m_pkthdr.flowid = hash_val;
			M_HASHTYPE_SET(m, hash_type);
			flags |= IP_NODEFAULTFLOWID;
		}
#endif
		if (jailed(inp->inp_cred)) {
			/*
			 * prison_local_ip4() would be good enough but would
			 * let a source of INADDR_ANY pass, which we do not
			 * want to see from jails.
			 */
			if (ip->ip_src.s_addr == INADDR_ANY) {
				NET_EPOCH_ENTER(et);
				error = in_pcbladdr(inp, &ip->ip_dst,
				    &ip->ip_src, inp->inp_cred);
				NET_EPOCH_EXIT(et);
			} else {
				error = prison_local_ip4(inp->inp_cred,
				    &ip->ip_src);
			}
			if (error != 0) {
				INP_RUNLOCK(inp);
				m_freem(m);
				return (error);
			}
		}
		ip->ip_ttl = inp->inp_ip_ttl;
	} else {
		if (m->m_pkthdr.len > IP_MAXPACKET) {
			m_freem(m);
			return (EMSGSIZE);
		}
		if (m->m_pkthdr.len < sizeof(*ip)) {
			m_freem(m);
			return (EINVAL);
		}
		m = m_pullup(m, sizeof(*ip));
		if (m == NULL)
			return (ENOMEM);
		ip = mtod(m, struct ip *);
		hlen = ip->ip_hl << 2;
		if (m->m_len < hlen) {
			m = m_pullup(m, hlen);
			if (m == NULL)
				return (EINVAL);
			ip = mtod(m, struct ip *);
		}
#ifdef ROUTE_MPATH
		if (CALC_FLOWID_OUTBOUND) {
			uint32_t hash_type, hash_val;

			hash_val = fib4_calc_software_hash(ip->ip_dst,
			    ip->ip_src, 0, 0, ip->ip_p, &hash_type);
			m->m_pkthdr.flowid = hash_val;
			M_HASHTYPE_SET(m, hash_type);
			flags |= IP_NODEFAULTFLOWID;
		}
#endif
		INP_RLOCK(inp);
		/*
		 * Don't allow both user-specified and setsockopt options,
		 * and don't allow packet lengths that will crash.
		 */
		if ((hlen < sizeof (*ip))
		    || ((hlen > sizeof (*ip)) && inp->inp_options)
		    || (ntohs(ip->ip_len) != m->m_pkthdr.len)) {
			INP_RUNLOCK(inp);
			m_freem(m);
			return (EINVAL);
		}
		error = prison_check_ip4(inp->inp_cred, &ip->ip_src);
		if (error != 0) {
			INP_RUNLOCK(inp);
			m_freem(m);
			return (error);
		}
		/*
		 * Don't allow IP options which do not have the required
		 * structure as specified in section 3.1 of RFC 791 on
		 * pages 15-23.
		 */
		cp = (u_char *)(ip + 1);
		cnt = hlen - sizeof (struct ip);
		for (; cnt > 0; cnt -= optlen, cp += optlen) {
			opttype = cp[IPOPT_OPTVAL];
			if (opttype == IPOPT_EOL)
				break;
			if (opttype == IPOPT_NOP) {
				optlen = 1;
				continue;
			}
			if (cnt < IPOPT_OLEN + sizeof(u_char)) {
				INP_RUNLOCK(inp);
				m_freem(m);
				return (EINVAL);
			}
			optlen = cp[IPOPT_OLEN];
			if (optlen < IPOPT_OLEN + sizeof(u_char) ||
			    optlen > cnt) {
				INP_RUNLOCK(inp);
				m_freem(m);
				return (EINVAL);
			}
		}
		/*
		 * This doesn't allow the application to specify an ID of zero,
		 * but this limitation dates back to the original implementation.
		 */
		if (ip->ip_id == 0)
			ip_fillid(ip);

		/*
		 * XXX prevent ip_output from overwriting header fields.
		 */
		flags |= IP_RAWOUTPUT;
		IPSTAT_INC(ips_rawout);
	}

	if (inp->inp_flags & INP_ONESBCAST)
		flags |= IP_SENDONES;

#ifdef MAC
	mac_inpcb_create_mbuf(inp, m);
#endif

	NET_EPOCH_ENTER(et);
	error = ip_output(m, inp->inp_options, NULL, flags,
	    inp->inp_moptions, inp);
	NET_EPOCH_EXIT(et);
	INP_RUNLOCK(inp);
	return (error);
}

/*
 * Raw IP socket option processing.
 *
 * IMPORTANT NOTE regarding access control: Traditionally, raw sockets could
 * only be created by a privileged process, and as such, socket option
 * operations to manage system properties on any raw socket were allowed to
 * take place without explicit additional access control checks.  However,
 * raw sockets can now also be created in jail(), and therefore explicit
 * checks are now required.  Likewise, raw sockets can be used by a process
 * after it gives up privilege, so some caution is required.  For options
 * passed down to the IP layer via ip_ctloutput(), checks are assumed to be
 * performed in ip_ctloutput() and therefore no check occurs here.
 * Unilaterally checking priv_check() here breaks normal IP socket option
 * operations on raw sockets.
 *
 * When adding new socket options here, make sure to add access control
 * checks here as necessary.
 *
 * XXX-BZ inp locking?
 */
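/*
 * At level IPPROTO_IP the options handled here are IP_HDRINCL, the ipfw
 * and dummynet control options, and the multicast routing and RSVP
 * options (the latter two gated by PRIV_NETINET_MROUTE); everything
 * else is passed through to ip_ctloutput().  The only option accepted
 * at any other level is SOL_SOCKET/SO_SETFIB.
 */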
int
rip_ctloutput(struct socket *so, struct sockopt *sopt)
{
	struct	inpcb *inp = sotoinpcb(so);
	int	error, optval;

	if (sopt->sopt_level != IPPROTO_IP) {
		if ((sopt->sopt_level == SOL_SOCKET) &&
		    (sopt->sopt_name == SO_SETFIB)) {
			inp->inp_inc.inc_fibnum = so->so_fibnum;
			return (0);
		}
		return (EINVAL);
	}

	error = 0;
	switch (sopt->sopt_dir) {
	case SOPT_GET:
		switch (sopt->sopt_name) {
		case IP_HDRINCL:
			optval = inp->inp_flags & INP_HDRINCL;
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;

		case IP_FW3:	/* generic ipfw v.3 functions */
		case IP_FW_ADD:	/* ADD actually returns the body... */
		case IP_FW_GET:
		case IP_FW_TABLE_GETSIZE:
		case IP_FW_TABLE_LIST:
		case IP_FW_NAT_GET_CONFIG:
		case IP_FW_NAT_GET_LOG:
			if (V_ip_fw_ctl_ptr != NULL)
				error = V_ip_fw_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

		case IP_DUMMYNET3:	/* generic dummynet v.3 functions */
		case IP_DUMMYNET_GET:
			if (ip_dn_ctl_ptr != NULL)
				error = ip_dn_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

		case MRT_INIT:
		case MRT_DONE:
		case MRT_ADD_VIF:
		case MRT_DEL_VIF:
		case MRT_ADD_MFC:
		case MRT_DEL_MFC:
		case MRT_VERSION:
		case MRT_ASSERT:
		case MRT_API_SUPPORT:
		case MRT_API_CONFIG:
		case MRT_ADD_BW_UPCALL:
		case MRT_DEL_BW_UPCALL:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_mrouter_get ? ip_mrouter_get(so, sopt) :
				EOPNOTSUPP;
			break;

		default:
			error = ip_ctloutput(so, sopt);
			break;
		}
		break;

	case SOPT_SET:
		switch (sopt->sopt_name) {
		case IP_HDRINCL:
			error = sooptcopyin(sopt, &optval, sizeof optval,
					    sizeof optval);
			if (error)
				break;
			if (optval)
				inp->inp_flags |= INP_HDRINCL;
			else
				inp->inp_flags &= ~INP_HDRINCL;
			break;

		case IP_FW3:	/* generic ipfw v.3 functions */
		case IP_FW_ADD:
		case IP_FW_DEL:
		case IP_FW_FLUSH:
		case IP_FW_ZERO:
		case IP_FW_RESETLOG:
		case IP_FW_TABLE_ADD:
		case IP_FW_TABLE_DEL:
		case IP_FW_TABLE_FLUSH:
		case IP_FW_NAT_CFG:
		case IP_FW_NAT_DEL:
			if (V_ip_fw_ctl_ptr != NULL)
				error = V_ip_fw_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

		case IP_DUMMYNET3:	/* generic dummynet v.3 functions */
		case IP_DUMMYNET_CONFIGURE:
		case IP_DUMMYNET_DEL:
		case IP_DUMMYNET_FLUSH:
			if (ip_dn_ctl_ptr != NULL)
				error = ip_dn_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

		case IP_RSVP_ON:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_rsvp_init(so);
			break;

		case IP_RSVP_OFF:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_rsvp_done();
			break;

		case IP_RSVP_VIF_ON:
		case IP_RSVP_VIF_OFF:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_rsvp_vif ?
				ip_rsvp_vif(so, sopt) : EINVAL;
			break;

		case MRT_INIT:
		case MRT_DONE:
		case MRT_ADD_VIF:
		case MRT_DEL_VIF:
		case MRT_ADD_MFC:
		case MRT_DEL_MFC:
		case MRT_VERSION:
		case MRT_ASSERT:
		case MRT_API_SUPPORT:
		case MRT_API_CONFIG:
		case MRT_ADD_BW_UPCALL:
		case MRT_DEL_BW_UPCALL:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_mrouter_set ? ip_mrouter_set(so, sopt) :
					EOPNOTSUPP;
			break;

		default:
			error = ip_ctloutput(so, sopt);
			break;
		}
		break;
	}

	return (error);
}

/*
 * This function exists solely to receive the PRC_IFDOWN messages which are
 * sent by if_down().  It looks for an ifaddr whose ifa_addr is sa, and calls
 * in_ifadown() to remove all routes corresponding to that address.  It also
 * receives the PRC_IFUP messages from if_up() and reinstalls the interface
 * routes.
 */
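/*
 * For PRC_IFUP the interface route is re-added through
 * in_handle_ifaddr_route() and the address's loopback route is
 * refreshed; IFA_ROUTE is set again on success.
 */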
void
rip_ctlinput(int cmd, struct sockaddr *sa, void *vip)
{
	struct rm_priotracker in_ifa_tracker;
	struct in_ifaddr *ia;
	struct ifnet *ifp;
	int err;
	int flags;

	switch (cmd) {
	case PRC_IFDOWN:
		IN_IFADDR_RLOCK(&in_ifa_tracker);
		CK_STAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
			if (ia->ia_ifa.ifa_addr == sa
			    && (ia->ia_flags & IFA_ROUTE)) {
				ifa_ref(&ia->ia_ifa);
				IN_IFADDR_RUNLOCK(&in_ifa_tracker);
				/*
				 * in_scrubprefix() kills the interface route.
				 */
				in_scrubprefix(ia, 0);
				/*
				 * in_ifadown gets rid of all the rest of the
				 * routes.  This is not quite the right thing
				 * to do, but at least if we are running a
				 * routing process they will come back.
				 */
				in_ifadown(&ia->ia_ifa, 0);
				ifa_free(&ia->ia_ifa);
				break;
			}
		}
		if (ia == NULL)		/* If ia matched, already unlocked. */
			IN_IFADDR_RUNLOCK(&in_ifa_tracker);
		break;

	case PRC_IFUP:
		IN_IFADDR_RLOCK(&in_ifa_tracker);
		CK_STAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
			if (ia->ia_ifa.ifa_addr == sa)
				break;
		}
		if (ia == NULL || (ia->ia_flags & IFA_ROUTE)) {
			IN_IFADDR_RUNLOCK(&in_ifa_tracker);
			return;
		}
		ifa_ref(&ia->ia_ifa);
		IN_IFADDR_RUNLOCK(&in_ifa_tracker);
		flags = RTF_UP;
		ifp = ia->ia_ifa.ifa_ifp;

		if ((ifp->if_flags & IFF_LOOPBACK)
		    || (ifp->if_flags & IFF_POINTOPOINT))
			flags |= RTF_HOST;

		err = ifa_del_loopback_route((struct ifaddr *)ia, sa);

		rt_addrmsg(RTM_ADD, &ia->ia_ifa, ia->ia_ifp->if_fib);
		err = in_handle_ifaddr_route(RTM_ADD, ia);
		if (err == 0)
			ia->ia_flags |= IFA_ROUTE;

		err = ifa_add_loopback_route((struct ifaddr *)ia, sa);

		ifa_free(&ia->ia_ifa);
		break;
	}
}

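/*
 * Allocate a PCB for a new raw socket.  Creating a raw socket requires
 * the PRIV_NETINET_RAW privilege; the new PCB inherits the requested
 * protocol and the default TTL, and is hashed into the wildcard bucket.
 */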
static int
rip_attach(struct socket *so, int proto, struct thread *td)
{
	struct inpcb *inp;
	int error;

	inp = sotoinpcb(so);
	KASSERT(inp == NULL, ("rip_attach: inp != NULL"));

	error = priv_check(td, PRIV_NETINET_RAW);
	if (error)
		return (error);
	if (proto >= IPPROTO_MAX || proto < 0)
		return (EPROTONOSUPPORT);
	error = soreserve(so, rip_sendspace, rip_recvspace);
	if (error)
		return (error);
	INP_INFO_WLOCK(&V_ripcbinfo);
	error = in_pcballoc(so, &V_ripcbinfo);
	if (error) {
		INP_INFO_WUNLOCK(&V_ripcbinfo);
		return (error);
	}
	inp = (struct inpcb *)so->so_pcb;
	inp->inp_vflag |= INP_IPV4;
	inp->inp_ip_p = proto;
	inp->inp_ip_ttl = V_ip_defttl;
	rip_inshash(inp);
	INP_INFO_WUNLOCK(&V_ripcbinfo);
	INP_WUNLOCK(inp);
	return (0);
}

static void
rip_detach(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_detach: inp == NULL"));
	KASSERT(inp->inp_faddr.s_addr == INADDR_ANY,
	    ("rip_detach: not closed"));

	INP_INFO_WLOCK(&V_ripcbinfo);
	INP_WLOCK(inp);
	rip_delhash(inp);
	if (so == V_ip_mrouter && ip_mrouter_done)
		ip_mrouter_done();
	if (ip_rsvp_force_done)
		ip_rsvp_force_done(so);
	if (so == V_ip_rsvpd)
		ip_rsvp_done();
	in_pcbdetach(inp);
	in_pcbfree(inp);
	INP_INFO_WUNLOCK(&V_ripcbinfo);
}

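/*
 * Common disconnect path shared by abort, close and disconnect: rehash
 * the PCB with a wildcard foreign address and clear the socket's
 * connected state.
 */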
static void
rip_dodisconnect(struct socket *so, struct inpcb *inp)
{
	struct inpcbinfo *pcbinfo;

	pcbinfo = inp->inp_pcbinfo;
	INP_INFO_WLOCK(pcbinfo);
	INP_WLOCK(inp);
	rip_delhash(inp);
	inp->inp_faddr.s_addr = INADDR_ANY;
	rip_inshash(inp);
	SOCK_LOCK(so);
	so->so_state &= ~SS_ISCONNECTED;
	SOCK_UNLOCK(so);
	INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(pcbinfo);
}

static void
rip_abort(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_abort: inp == NULL"));

	rip_dodisconnect(so, inp);
}

static void
rip_close(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_close: inp == NULL"));

	rip_dodisconnect(so, inp);
}

static int
rip_disconnect(struct socket *so)
{
	struct inpcb *inp;

	if ((so->so_state & SS_ISCONNECTED) == 0)
		return (ENOTCONN);

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_disconnect: inp == NULL"));

	rip_dodisconnect(so, inp);
	return (0);
}

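/*
 * Bind to a local address.  Unless INP_BINDANY is set, a non-wildcard
 * address must be configured on some interface; the PCB is then
 * rehashed so that a fully specified (protocol, local, foreign) tuple
 * moves to its exact-match bucket.
 */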
static int
rip_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct sockaddr_in *addr = (struct sockaddr_in *)nam;
	struct inpcb *inp;
	int error;

	if (nam->sa_family != AF_INET)
		return (EAFNOSUPPORT);
	if (nam->sa_len != sizeof(*addr))
		return (EINVAL);

	error = prison_check_ip4(td->td_ucred, &addr->sin_addr);
	if (error != 0)
		return (error);

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_bind: inp == NULL"));

	if (CK_STAILQ_EMPTY(&V_ifnet) ||
	    (addr->sin_family != AF_INET && addr->sin_family != AF_IMPLINK) ||
	    (addr->sin_addr.s_addr &&
	     (inp->inp_flags & INP_BINDANY) == 0 &&
	     ifa_ifwithaddr_check((struct sockaddr *)addr) == 0))
		return (EADDRNOTAVAIL);

	INP_INFO_WLOCK(&V_ripcbinfo);
	INP_WLOCK(inp);
	rip_delhash(inp);
	inp->inp_laddr = addr->sin_addr;
	rip_inshash(inp);
	INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(&V_ripcbinfo);
	return (0);
}

static int
rip_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct sockaddr_in *addr = (struct sockaddr_in *)nam;
	struct inpcb *inp;

	if (nam->sa_len != sizeof(*addr))
		return (EINVAL);
	if (CK_STAILQ_EMPTY(&V_ifnet))
		return (EADDRNOTAVAIL);
	if (addr->sin_family != AF_INET && addr->sin_family != AF_IMPLINK)
		return (EAFNOSUPPORT);

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_connect: inp == NULL"));

	INP_INFO_WLOCK(&V_ripcbinfo);
	INP_WLOCK(inp);
	rip_delhash(inp);
	inp->inp_faddr = addr->sin_addr;
	rip_inshash(inp);
	soisconnected(so);
	INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(&V_ripcbinfo);
	return (0);
}

static int
rip_shutdown(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_shutdown: inp == NULL"));

	INP_WLOCK(inp);
	socantsendmore(so);
	INP_WUNLOCK(inp);
	return (0);
}

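/*
 * Send a datagram.  On a connected socket the cached foreign address is
 * used and an explicit destination is rejected with EISCONN; otherwise
 * a sockaddr_in destination must be supplied.  Control mbufs are not
 * supported and are simply freed.
 */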
static int
rip_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
    struct mbuf *control, struct thread *td)
{
	struct inpcb *inp;
	u_long dst;
	int error;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_send: inp == NULL"));

	if (control != NULL) {
		m_freem(control);
		control = NULL;
	}

	/*
	 * Note: 'dst' reads below are unlocked.
	 */
	if (so->so_state & SS_ISCONNECTED) {
		if (nam) {
			error = EISCONN;
			goto release;
		}
		dst = inp->inp_faddr.s_addr;	/* Unlocked read. */
	} else {
		error = 0;
		if (nam == NULL)
			error = ENOTCONN;
		else if (nam->sa_family != AF_INET)
			error = EAFNOSUPPORT;
		else if (nam->sa_len != sizeof(struct sockaddr_in))
			error = EINVAL;
		if (error != 0)
			goto release;
		dst = ((struct sockaddr_in *)nam)->sin_addr.s_addr;
	}
	return (rip_output(m, so, dst));

release:
	m_freem(m);
	return (error);
}
#endif /* INET */

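/*
 * Export the list of raw PCBs via sysctl: a size estimate is returned
 * when no old buffer is supplied; otherwise a struct xinpgen header,
 * one struct xinpcb per PCB visible to the caller, and a trailing
 * xinpgen are emitted so userland can detect changes made while the
 * list was being generated.
 */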
static int
rip_pcblist(SYSCTL_HANDLER_ARGS)
{
	struct xinpgen xig;
	struct epoch_tracker et;
	struct inpcb *inp;
	int error;

	if (req->newptr != 0)
		return (EPERM);

	if (req->oldptr == 0) {
		int n;

		n = V_ripcbinfo.ipi_count;
		n += imax(n / 8, 10);
		req->oldidx = 2 * (sizeof xig) + n * sizeof(struct xinpcb);
		return (0);
	}

	if ((error = sysctl_wire_old_buffer(req, 0)) != 0)
		return (error);

	bzero(&xig, sizeof(xig));
	xig.xig_len = sizeof xig;
	xig.xig_count = V_ripcbinfo.ipi_count;
	xig.xig_gen = V_ripcbinfo.ipi_gencnt;
	xig.xig_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xig, sizeof xig);
	if (error)
		return (error);

	NET_EPOCH_ENTER(et);
	for (inp = CK_LIST_FIRST(V_ripcbinfo.ipi_listhead);
	    inp != NULL;
	    inp = CK_LIST_NEXT(inp, inp_list)) {
		INP_RLOCK(inp);
		if (inp->inp_gencnt <= xig.xig_gen &&
		    cr_canseeinpcb(req->td->td_ucred, inp) == 0) {
			struct xinpcb xi;

			in_pcbtoxinpcb(inp, &xi);
			INP_RUNLOCK(inp);
			error = SYSCTL_OUT(req, &xi, sizeof xi);
			if (error)
				break;
		} else
			INP_RUNLOCK(inp);
	}
	NET_EPOCH_EXIT(et);

	if (!error) {
		/*
		 * Give the user an updated idea of our state.  If the
		 * generation differs from what we told her before, she knows
		 * that something happened while we were processing this
		 * request, and it might be necessary to retry.
		 */
		xig.xig_gen = V_ripcbinfo.ipi_gencnt;
		xig.xig_sogen = so_gencnt;
		xig.xig_count = V_ripcbinfo.ipi_count;
		error = SYSCTL_OUT(req, &xig, sizeof xig);
	}

	return (error);
}

SYSCTL_PROC(_net_inet_raw, OID_AUTO/*XXX*/, pcblist,
    CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    rip_pcblist, "S,xinpcb",
    "List of active raw IP sockets");

#ifdef INET
struct pr_usrreqs rip_usrreqs = {
	.pru_abort =		rip_abort,
	.pru_attach =		rip_attach,
	.pru_bind =		rip_bind,
	.pru_connect =		rip_connect,
	.pru_control =		in_control,
	.pru_detach =		rip_detach,
	.pru_disconnect =	rip_disconnect,
	.pru_peeraddr =		in_getpeeraddr,
	.pru_send =		rip_send,
	.pru_shutdown =		rip_shutdown,
	.pru_sockaddr =		in_getsockaddr,
	.pru_sosetlabel =	in_pcbsosetlabel,
	.pru_close =		rip_close,
};
#endif /* INET */
