xref: /freebsd/sys/netinet/raw_ip.c (revision 3332f1b444d4a73238e9f59cca27bfc95fe936bd)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 1982, 1986, 1988, 1993
5  *	The Regents of the University of California.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the name of the University nor the names of its contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  *	@(#)raw_ip.c	8.7 (Berkeley) 5/15/95
33  */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include "opt_inet.h"
39 #include "opt_inet6.h"
40 #include "opt_ipsec.h"
41 #include "opt_route.h"
42 
43 #include <sys/param.h>
44 #include <sys/jail.h>
45 #include <sys/kernel.h>
46 #include <sys/eventhandler.h>
47 #include <sys/lock.h>
48 #include <sys/malloc.h>
49 #include <sys/mbuf.h>
50 #include <sys/priv.h>
51 #include <sys/proc.h>
52 #include <sys/protosw.h>
53 #include <sys/rwlock.h>
54 #include <sys/signalvar.h>
55 #include <sys/socket.h>
56 #include <sys/socketvar.h>
57 #include <sys/sx.h>
58 #include <sys/sysctl.h>
59 #include <sys/systm.h>
60 
61 #include <vm/uma.h>
62 
63 #include <net/if.h>
64 #include <net/if_var.h>
65 #include <net/route.h>
66 #include <net/route/route_ctl.h>
67 #include <net/vnet.h>
68 
69 #include <netinet/in.h>
70 #include <netinet/in_systm.h>
71 #include <netinet/in_fib.h>
72 #include <netinet/in_pcb.h>
73 #include <netinet/in_var.h>
74 #include <netinet/if_ether.h>
75 #include <netinet/ip.h>
76 #include <netinet/ip_var.h>
77 #include <netinet/ip_mroute.h>
78 #include <netinet/ip_icmp.h>
79 
80 #include <netipsec/ipsec_support.h>
81 
82 #include <machine/stdarg.h>
83 #include <security/mac/mac_framework.h>
84 
85 VNET_DEFINE(int, ip_defttl) = IPDEFTTL;
86 SYSCTL_INT(_net_inet_ip, IPCTL_DEFTTL, ttl, CTLFLAG_VNET | CTLFLAG_RW,
87     &VNET_NAME(ip_defttl), 0,
88     "Maximum TTL on IP packets");
89 
90 VNET_DEFINE(struct inpcbhead, ripcb);
91 VNET_DEFINE(struct inpcbinfo, ripcbinfo);
92 
93 #define	V_ripcb			VNET(ripcb)
94 #define	V_ripcbinfo		VNET(ripcbinfo)
95 
96 /*
97  * Control and data hooks for ipfw, dummynet, divert and so on.
98  * The data hooks are not used here but it is convenient
99  * to keep them all in one place.
100  */
101 VNET_DEFINE(ip_fw_chk_ptr_t, ip_fw_chk_ptr) = NULL;
102 VNET_DEFINE(ip_fw_ctl_ptr_t, ip_fw_ctl_ptr) = NULL;
103 
104 int	(*ip_dn_ctl_ptr)(struct sockopt *);
105 int	(*ip_dn_io_ptr)(struct mbuf **, struct ip_fw_args *);
106 void	(*ip_divert_ptr)(struct mbuf *, bool);
107 int	(*ng_ipfw_input_p)(struct mbuf **, struct ip_fw_args *, bool);
108 
109 #ifdef INET
110 /*
111  * Hooks for multicast routing.  They all default to NULL, so leave them
112  * uninitialized and rely on the BSS being zeroed.
113  */
114 
115 /*
116  * The socket used to communicate with the multicast routing daemon.
117  */
118 VNET_DEFINE(struct socket *, ip_mrouter);
119 
120 /*
121  * The various mrouter and rsvp functions.
122  */
123 int (*ip_mrouter_set)(struct socket *, struct sockopt *);
124 int (*ip_mrouter_get)(struct socket *, struct sockopt *);
125 int (*ip_mrouter_done)(void);
126 int (*ip_mforward)(struct ip *, struct ifnet *, struct mbuf *,
127 		   struct ip_moptions *);
128 int (*mrt_ioctl)(u_long, caddr_t, int);
129 int (*legal_vif_num)(int);
130 u_long (*ip_mcast_src)(int);
131 
132 int (*rsvp_input_p)(struct mbuf **, int *, int);
133 int (*ip_rsvp_vif)(struct socket *, struct sockopt *);
134 void (*ip_rsvp_force_done)(struct socket *);
135 #endif /* INET */
136 
137 extern	struct protosw inetsw[];
138 
139 u_long	rip_sendspace = 9216;
140 SYSCTL_ULONG(_net_inet_raw, OID_AUTO, maxdgram, CTLFLAG_RW,
141     &rip_sendspace, 0, "Maximum outgoing raw IP datagram size");
142 
143 u_long	rip_recvspace = 9216;
144 SYSCTL_ULONG(_net_inet_raw, OID_AUTO, recvspace, CTLFLAG_RW,
145     &rip_recvspace, 0, "Maximum space for incoming raw IP datagrams");
146 
147 /*
148  * Hash functions
149  */
150 
151 #define INP_PCBHASH_RAW_SIZE	256
152 #define INP_PCBHASH_RAW(proto, laddr, faddr, mask) \
153         (((proto) + (laddr) + (faddr)) % (mask) + 1)
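/*
 * Fully bound raw PCBs (non-zero protocol, local and foreign address all
 * set) hash into buckets 1..mask; bucket 0 is reserved for wildcard PCBs.
 * This is why rip_input() below walks the computed bucket first and then
 * bucket 0.
 */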
154 
155 #ifdef INET
156 static void
157 rip_inshash(struct inpcb *inp)
158 {
159 	struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
160 	struct inpcbhead *pcbhash;
161 	int hash;
162 
163 	INP_INFO_WLOCK_ASSERT(pcbinfo);
164 	INP_WLOCK_ASSERT(inp);
165 
166 	if (inp->inp_ip_p != 0 &&
167 	    inp->inp_laddr.s_addr != INADDR_ANY &&
168 	    inp->inp_faddr.s_addr != INADDR_ANY) {
169 		hash = INP_PCBHASH_RAW(inp->inp_ip_p, inp->inp_laddr.s_addr,
170 		    inp->inp_faddr.s_addr, pcbinfo->ipi_hashmask);
171 	} else
172 		hash = 0;
173 	pcbhash = &pcbinfo->ipi_hashbase[hash];
174 	CK_LIST_INSERT_HEAD(pcbhash, inp, inp_hash);
175 }
176 
177 static void
178 rip_delhash(struct inpcb *inp)
179 {
180 
181 	INP_INFO_WLOCK_ASSERT(inp->inp_pcbinfo);
182 	INP_WLOCK_ASSERT(inp);
183 
184 	CK_LIST_REMOVE(inp, inp_hash);
185 }
186 #endif /* INET */
187 
188 /*
189  * Raw interface to IP protocol.
190  */
191 
192 /*
193  * Initialize the raw connection block queue.
194  */
195 static void
196 rip_zone_change(void *tag)
197 {
198 
199 	uma_zone_set_max(V_ripcbinfo.ipi_zone, maxsockets);
200 }
201 
202 static int
203 rip_inpcb_init(void *mem, int size, int flags)
204 {
205 	struct inpcb *inp = mem;
206 
207 	INP_LOCK_INIT(inp, "inp", "rawinp");
208 	return (0);
209 }
210 
211 void
212 rip_init(void)
213 {
214 
215 	in_pcbinfo_init(&V_ripcbinfo, "rip", &V_ripcb, INP_PCBHASH_RAW_SIZE,
216 	    1, "ripcb", rip_inpcb_init, IPI_HASHFIELDS_NONE);
217 	EVENTHANDLER_REGISTER(maxsockets_change, rip_zone_change, NULL,
218 	    EVENTHANDLER_PRI_ANY);
219 }
220 
221 #ifdef VIMAGE
222 static void
223 rip_destroy(void *unused __unused)
224 {
225 
226 	in_pcbinfo_destroy(&V_ripcbinfo);
227 }
228 VNET_SYSUNINIT(raw_ip, SI_SUB_PROTO_DOMAIN, SI_ORDER_FOURTH, rip_destroy, NULL);
229 #endif
230 
231 #ifdef INET
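/*
 * Deliver an inbound datagram to the given raw PCB's socket receive buffer,
 * after applying the per-socket IPsec, MAC and minimum-TTL policy checks.
 * Consumes the mbuf; returns non-zero if a policy check rejected the packet.
 */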
232 static int
233 rip_append(struct inpcb *last, struct ip *ip, struct mbuf *n,
234     struct sockaddr_in *ripsrc)
235 {
236 	int policyfail = 0;
237 
238 	INP_LOCK_ASSERT(last);
239 
240 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
241 	/* check AH/ESP integrity. */
242 	if (IPSEC_ENABLED(ipv4)) {
243 		if (IPSEC_CHECK_POLICY(ipv4, n, last) != 0)
244 			policyfail = 1;
245 	}
246 #endif /* IPSEC */
247 #ifdef MAC
248 	if (!policyfail && mac_inpcb_check_deliver(last, n) != 0)
249 		policyfail = 1;
250 #endif
251 	/* Check the minimum TTL for the socket. */
252 	if (last->inp_ip_minttl && last->inp_ip_minttl > ip->ip_ttl)
253 		policyfail = 1;
254 	if (!policyfail) {
255 		struct mbuf *opts = NULL;
256 		struct socket *so;
257 
258 		so = last->inp_socket;
259 		if ((last->inp_flags & INP_CONTROLOPTS) ||
260 		    (so->so_options & (SO_TIMESTAMP | SO_BINTIME)))
261 			ip_savecontrol(last, &opts, ip, n);
262 		SOCKBUF_LOCK(&so->so_rcv);
263 		if (sbappendaddr_locked(&so->so_rcv,
264 		    (struct sockaddr *)ripsrc, n, opts) == 0) {
265 			soroverflow_locked(so);
266 			m_freem(n);
267 			if (opts)
268 				m_freem(opts);
269 		} else
270 			sorwakeup_locked(so);
271 	} else
272 		m_freem(n);
273 	return (policyfail);
274 }
275 
276 /*
277  * Set up generic address and protocol structures for the raw input routine,
278  * then pass them along with the mbuf chain.
279  */
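/*
 * Delivery is done in two passes: first over the hash bucket for PCBs bound
 * to this exact (protocol, local, foreign) tuple, then over bucket 0, which
 * holds the wildcard PCBs.  Every matching socket except the last receives a
 * copy of the mbuf chain; the last matching socket gets the original.
 */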
280 int
281 rip_input(struct mbuf **mp, int *offp, int proto)
282 {
283 	struct ifnet *ifp;
284 	struct mbuf *m = *mp;
285 	struct ip *ip = mtod(m, struct ip *);
286 	struct inpcb *inp, *last;
287 	struct sockaddr_in ripsrc;
288 	int hash;
289 
290 	NET_EPOCH_ASSERT();
291 
292 	*mp = NULL;
293 
294 	bzero(&ripsrc, sizeof(ripsrc));
295 	ripsrc.sin_len = sizeof(ripsrc);
296 	ripsrc.sin_family = AF_INET;
297 	ripsrc.sin_addr = ip->ip_src;
298 	last = NULL;
299 
300 	ifp = m->m_pkthdr.rcvif;
301 
302 	hash = INP_PCBHASH_RAW(proto, ip->ip_src.s_addr,
303 	    ip->ip_dst.s_addr, V_ripcbinfo.ipi_hashmask);
304 	CK_LIST_FOREACH(inp, &V_ripcbinfo.ipi_hashbase[hash], inp_hash) {
305 		if (inp->inp_ip_p != proto)
306 			continue;
307 #ifdef INET6
308 		/* XXX inp locking */
309 		if ((inp->inp_vflag & INP_IPV4) == 0)
310 			continue;
311 #endif
312 		if (inp->inp_laddr.s_addr != ip->ip_dst.s_addr)
313 			continue;
314 		if (inp->inp_faddr.s_addr != ip->ip_src.s_addr)
315 			continue;
316 		if (last != NULL) {
317 			struct mbuf *n;
318 
319 			n = m_copym(m, 0, M_COPYALL, M_NOWAIT);
320 			if (n != NULL)
321 			    (void) rip_append(last, ip, n, &ripsrc);
322 			/* XXX count dropped packet */
323 			INP_RUNLOCK(last);
324 			last = NULL;
325 		}
326 		INP_RLOCK(inp);
327 		if (__predict_false(inp->inp_flags2 & INP_FREED))
328 			goto skip_1;
329 		if (jailed_without_vnet(inp->inp_cred)) {
330 			/*
331 			 * XXX: If faddr was bound to a multicast group,
332 			 * a jailed raw socket will drop the datagram.
333 			 */
334 			if (prison_check_ip4(inp->inp_cred, &ip->ip_dst) != 0)
335 				goto skip_1;
336 		}
337 		last = inp;
338 		continue;
339 	skip_1:
340 		INP_RUNLOCK(inp);
341 	}
342 	CK_LIST_FOREACH(inp, &V_ripcbinfo.ipi_hashbase[0], inp_hash) {
343 		if (inp->inp_ip_p && inp->inp_ip_p != proto)
344 			continue;
345 #ifdef INET6
346 		/* XXX inp locking */
347 		if ((inp->inp_vflag & INP_IPV4) == 0)
348 			continue;
349 #endif
350 		if (!in_nullhost(inp->inp_laddr) &&
351 		    !in_hosteq(inp->inp_laddr, ip->ip_dst))
352 			continue;
353 		if (!in_nullhost(inp->inp_faddr) &&
354 		    !in_hosteq(inp->inp_faddr, ip->ip_src))
355 			continue;
356 		if (last != NULL) {
357 			struct mbuf *n;
358 
359 			n = m_copym(m, 0, M_COPYALL, M_NOWAIT);
360 			if (n != NULL)
361 				(void) rip_append(last, ip, n, &ripsrc);
362 			/* XXX count dropped packet */
363 			INP_RUNLOCK(last);
364 			last = NULL;
365 		}
366 		INP_RLOCK(inp);
367 		if (__predict_false(inp->inp_flags2 & INP_FREED))
368 			goto skip_2;
369 		if (jailed_without_vnet(inp->inp_cred)) {
370 			/*
371 			 * Allow a raw socket in a jail to receive multicast;
372 			 * assume the process had PRIV_NETINET_RAW at attach,
373 			 * and fall through into the normal filter path if so.
374 			 */
375 			if (!IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) &&
376 			    prison_check_ip4(inp->inp_cred, &ip->ip_dst) != 0)
377 				goto skip_2;
378 		}
379 		/*
380 		 * If this raw socket has multicast state, and we
381 		 * have received a multicast, check if this socket
382 		 * should receive it, as multicast filtering is now
383 		 * the responsibility of the transport layer.
384 		 */
385 		if (inp->inp_moptions != NULL &&
386 		    IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
387 			/*
388 			 * If the incoming datagram is for IGMP, allow it
389 			 * through unconditionally to the raw socket.
390 			 *
391 			 * In the case of IGMPv2, we may not have explicitly
392 			 * joined the group, and may have set IFF_ALLMULTI
393 			 * on the interface. imo_multi_filter() may discard
394 			 * control traffic we actually need to see.
395 			 *
396 			 * Userland multicast routing daemons should continue
397 			 * to filter the control traffic appropriately.
398 			 */
399 			int blocked;
400 
401 			blocked = MCAST_PASS;
402 			if (proto != IPPROTO_IGMP) {
403 				struct sockaddr_in group;
404 
405 				bzero(&group, sizeof(struct sockaddr_in));
406 				group.sin_len = sizeof(struct sockaddr_in);
407 				group.sin_family = AF_INET;
408 				group.sin_addr = ip->ip_dst;
409 
410 				blocked = imo_multi_filter(inp->inp_moptions,
411 				    ifp,
412 				    (struct sockaddr *)&group,
413 				    (struct sockaddr *)&ripsrc);
414 			}
415 
416 			if (blocked != MCAST_PASS) {
417 				IPSTAT_INC(ips_notmember);
418 				goto skip_2;
419 			}
420 		}
421 		last = inp;
422 		continue;
423 	skip_2:
424 		INP_RUNLOCK(inp);
425 	}
426 	if (last != NULL) {
427 		if (rip_append(last, ip, m, &ripsrc) != 0)
428 			IPSTAT_INC(ips_delivered);
429 		INP_RUNLOCK(last);
430 	} else {
431 		if (inetsw[ip_protox[ip->ip_p]].pr_input == rip_input) {
432 			IPSTAT_INC(ips_noproto);
433 			IPSTAT_DEC(ips_delivered);
434 			icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_PROTOCOL, 0, 0);
435 		} else {
436 			m_freem(m);
437 		}
438 	}
439 	return (IPPROTO_DONE);
440 }
441 
442 /*
443  * Generate an IP header and pass the packet to ip_output().  Tack on any
444  * options the user may have set up with a control call.
445  */
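/*
 * Illustrative userland sketch (not part of this file; assumes the standard
 * socket(2)/sendto(2) API and <sys/socket.h>, <netinet/in.h>, <arpa/inet.h>;
 * "payload" and "payload_len" are placeholders): with INP_HDRINCL unset, the
 * path below builds the IP header from PCB state and the process supplies
 * only the protocol payload:
 *
 *	int s = socket(AF_INET, SOCK_RAW, IPPROTO_ICMP);
 *	struct sockaddr_in dst = { .sin_len = sizeof(dst),
 *	    .sin_family = AF_INET };
 *	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);
 *	(void)sendto(s, payload, payload_len, 0,
 *	    (struct sockaddr *)&dst, sizeof(dst));
 */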
446 int
447 rip_output(struct mbuf *m, struct socket *so, ...)
448 {
449 	struct epoch_tracker et;
450 	struct ip *ip;
451 	int error;
452 	struct inpcb *inp = sotoinpcb(so);
453 	va_list ap;
454 	u_long dst;
455 	int flags = ((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0) |
456 	    IP_ALLOWBROADCAST;
457 	int cnt, hlen;
458 	u_char opttype, optlen, *cp;
459 
460 	va_start(ap, so);
461 	dst = va_arg(ap, u_long);
462 	va_end(ap);
463 
464 	/*
465 	 * If the user handed us a complete IP packet, use it.  Otherwise,
466 	 * allocate an mbuf for a header and fill it in.
467 	 */
468 	if ((inp->inp_flags & INP_HDRINCL) == 0) {
469 		if (m->m_pkthdr.len + sizeof(struct ip) > IP_MAXPACKET) {
470 			m_freem(m);
471 			return (EMSGSIZE);
472 		}
473 		M_PREPEND(m, sizeof(struct ip), M_NOWAIT);
474 		if (m == NULL)
475 			return (ENOBUFS);
476 
477 		INP_RLOCK(inp);
478 		ip = mtod(m, struct ip *);
479 		ip->ip_tos = inp->inp_ip_tos;
480 		if (inp->inp_flags & INP_DONTFRAG)
481 			ip->ip_off = htons(IP_DF);
482 		else
483 			ip->ip_off = htons(0);
484 		ip->ip_p = inp->inp_ip_p;
485 		ip->ip_len = htons(m->m_pkthdr.len);
486 		ip->ip_src = inp->inp_laddr;
487 		ip->ip_dst.s_addr = dst;
488 #ifdef ROUTE_MPATH
489 		if (CALC_FLOWID_OUTBOUND) {
490 			uint32_t hash_type, hash_val;
491 
492 			hash_val = fib4_calc_software_hash(ip->ip_src,
493 			    ip->ip_dst, 0, 0, ip->ip_p, &hash_type);
494 			m->m_pkthdr.flowid = hash_val;
495 			M_HASHTYPE_SET(m, hash_type);
496 			flags |= IP_NODEFAULTFLOWID;
497 		}
498 #endif
499 		if (jailed(inp->inp_cred)) {
500 			/*
501 			 * prison_local_ip4() would be good enough but would
502 			 * let a source of INADDR_ANY pass, which we do not
503 			 * want to see from jails.
504 			 */
505 			if (ip->ip_src.s_addr == INADDR_ANY) {
506 				NET_EPOCH_ENTER(et);
507 				error = in_pcbladdr(inp, &ip->ip_dst,
508 				    &ip->ip_src, inp->inp_cred);
509 				NET_EPOCH_EXIT(et);
510 			} else {
511 				error = prison_local_ip4(inp->inp_cred,
512 				    &ip->ip_src);
513 			}
514 			if (error != 0) {
515 				INP_RUNLOCK(inp);
516 				m_freem(m);
517 				return (error);
518 			}
519 		}
520 		ip->ip_ttl = inp->inp_ip_ttl;
521 	} else {
522 		if (m->m_pkthdr.len > IP_MAXPACKET) {
523 			m_freem(m);
524 			return (EMSGSIZE);
525 		}
526 		if (m->m_pkthdr.len < sizeof(*ip)) {
527 			m_freem(m);
528 			return (EINVAL);
529 		}
530 		m = m_pullup(m, sizeof(*ip));
531 		if (m == NULL)
532 			return (ENOMEM);
533 		ip = mtod(m, struct ip *);
534 		hlen = ip->ip_hl << 2;
535 		if (m->m_len < hlen) {
536 			m = m_pullup(m, hlen);
537 			if (m == NULL)
538 				return (EINVAL);
539 			ip = mtod(m, struct ip *);
540 		}
541 #ifdef ROUTE_MPATH
542 		if (CALC_FLOWID_OUTBOUND) {
543 			uint32_t hash_type, hash_val;
544 
545 			hash_val = fib4_calc_software_hash(ip->ip_dst,
546 			    ip->ip_src, 0, 0, ip->ip_p, &hash_type);
547 			m->m_pkthdr.flowid = hash_val;
548 			M_HASHTYPE_SET(m, hash_type);
549 			flags |= IP_NODEFAULTFLOWID;
550 		}
551 #endif
552 		INP_RLOCK(inp);
553 		/*
554 		 * Don't allow both user-specified and setsockopt() options,
555 		 * and don't allow packet lengths that would crash us.
556 		 */
557 		if ((hlen < sizeof (*ip))
558 		    || ((hlen > sizeof (*ip)) && inp->inp_options)
559 		    || (ntohs(ip->ip_len) != m->m_pkthdr.len)) {
560 			INP_RUNLOCK(inp);
561 			m_freem(m);
562 			return (EINVAL);
563 		}
564 		error = prison_check_ip4(inp->inp_cred, &ip->ip_src);
565 		if (error != 0) {
566 			INP_RUNLOCK(inp);
567 			m_freem(m);
568 			return (error);
569 		}
570 		/*
571 		 * Don't allow IP options which do not have the required
572 		 * structure as specified in section 3.1 of RFC 791 on
573 		 * pages 15-23.
574 		 */
575 		cp = (u_char *)(ip + 1);
576 		cnt = hlen - sizeof (struct ip);
577 		for (; cnt > 0; cnt -= optlen, cp += optlen) {
578 			opttype = cp[IPOPT_OPTVAL];
579 			if (opttype == IPOPT_EOL)
580 				break;
581 			if (opttype == IPOPT_NOP) {
582 				optlen = 1;
583 				continue;
584 			}
585 			if (cnt < IPOPT_OLEN + sizeof(u_char)) {
586 				INP_RUNLOCK(inp);
587 				m_freem(m);
588 				return (EINVAL);
589 			}
590 			optlen = cp[IPOPT_OLEN];
591 			if (optlen < IPOPT_OLEN + sizeof(u_char) ||
592 			    optlen > cnt) {
593 				INP_RUNLOCK(inp);
594 				m_freem(m);
595 				return (EINVAL);
596 			}
597 		}
598 		/*
599 		 * This doesn't allow the application to specify an IP ID of zero,
600 		 * but this limitation has been with us since the beginning.
601 		 */
602 		if (ip->ip_id == 0)
603 			ip_fillid(ip);
604 
605 		/*
606 		 * XXX prevent ip_output from overwriting header fields.
607 		 */
608 		flags |= IP_RAWOUTPUT;
609 		IPSTAT_INC(ips_rawout);
610 	}
611 
612 	if (inp->inp_flags & INP_ONESBCAST)
613 		flags |= IP_SENDONES;
614 
615 #ifdef MAC
616 	mac_inpcb_create_mbuf(inp, m);
617 #endif
618 
619 	NET_EPOCH_ENTER(et);
620 	error = ip_output(m, inp->inp_options, NULL, flags,
621 	    inp->inp_moptions, inp);
622 	NET_EPOCH_EXIT(et);
623 	INP_RUNLOCK(inp);
624 	return (error);
625 }
626 
627 /*
628  * Raw IP socket option processing.
629  *
630  * IMPORTANT NOTE regarding access control: Traditionally, raw sockets could
631  * only be created by a privileged process, and as such, socket option
632  * operations to manage system properties on any raw socket were allowed to
633  * take place without explicit additional access control checks.  However,
634  * raw sockets can now also be created within jails, and therefore explicit
635  * checks are now required.  Likewise, raw sockets can be used by a process
636  * after it gives up privilege, so some caution is required.  For options
637  * passed down to the IP layer via ip_ctloutput(), checks are assumed to be
638  * performed in ip_ctloutput() and therefore no check occurs here.
639  * Unilaterally checking priv_check() here breaks normal IP socket option
640  * operations on raw sockets.
641  *
642  * When adding new socket options here, make sure to add access control
643  * checks here as necessary.
644  *
645  * XXX-BZ inp locking?
646  */
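/*
 * Illustrative userland sketch (not part of this file; "s" is a placeholder
 * raw-socket descriptor): IP_HDRINCL, handled below, is typically enabled as
 * follows, after which rip_output() expects a complete IP header from the
 * process:
 *
 *	int on = 1;
 *	(void)setsockopt(s, IPPROTO_IP, IP_HDRINCL, &on, sizeof(on));
 */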
647 int
648 rip_ctloutput(struct socket *so, struct sockopt *sopt)
649 {
650 	struct	inpcb *inp = sotoinpcb(so);
651 	int	error, optval;
652 
653 	if (sopt->sopt_level != IPPROTO_IP) {
654 		if ((sopt->sopt_level == SOL_SOCKET) &&
655 		    (sopt->sopt_name == SO_SETFIB)) {
656 			inp->inp_inc.inc_fibnum = so->so_fibnum;
657 			return (0);
658 		}
659 		return (EINVAL);
660 	}
661 
662 	error = 0;
663 	switch (sopt->sopt_dir) {
664 	case SOPT_GET:
665 		switch (sopt->sopt_name) {
666 		case IP_HDRINCL:
667 			optval = inp->inp_flags & INP_HDRINCL;
668 			error = sooptcopyout(sopt, &optval, sizeof optval);
669 			break;
670 
671 		case IP_FW3:	/* generic ipfw v.3 functions */
672 		case IP_FW_ADD:	/* ADD actually returns the body... */
673 		case IP_FW_GET:
674 		case IP_FW_TABLE_GETSIZE:
675 		case IP_FW_TABLE_LIST:
676 		case IP_FW_NAT_GET_CONFIG:
677 		case IP_FW_NAT_GET_LOG:
678 			if (V_ip_fw_ctl_ptr != NULL)
679 				error = V_ip_fw_ctl_ptr(sopt);
680 			else
681 				error = ENOPROTOOPT;
682 			break;
683 
684 		case IP_DUMMYNET3:	/* generic dummynet v.3 functions */
685 		case IP_DUMMYNET_GET:
686 			if (ip_dn_ctl_ptr != NULL)
687 				error = ip_dn_ctl_ptr(sopt);
688 			else
689 				error = ENOPROTOOPT;
690 			break;
691 
692 		case MRT_INIT:
693 		case MRT_DONE:
694 		case MRT_ADD_VIF:
695 		case MRT_DEL_VIF:
696 		case MRT_ADD_MFC:
697 		case MRT_DEL_MFC:
698 		case MRT_VERSION:
699 		case MRT_ASSERT:
700 		case MRT_API_SUPPORT:
701 		case MRT_API_CONFIG:
702 		case MRT_ADD_BW_UPCALL:
703 		case MRT_DEL_BW_UPCALL:
704 			error = priv_check(curthread, PRIV_NETINET_MROUTE);
705 			if (error != 0)
706 				return (error);
707 			error = ip_mrouter_get ? ip_mrouter_get(so, sopt) :
708 				EOPNOTSUPP;
709 			break;
710 
711 		default:
712 			error = ip_ctloutput(so, sopt);
713 			break;
714 		}
715 		break;
716 
717 	case SOPT_SET:
718 		switch (sopt->sopt_name) {
719 		case IP_HDRINCL:
720 			error = sooptcopyin(sopt, &optval, sizeof optval,
721 					    sizeof optval);
722 			if (error)
723 				break;
724 			if (optval)
725 				inp->inp_flags |= INP_HDRINCL;
726 			else
727 				inp->inp_flags &= ~INP_HDRINCL;
728 			break;
729 
730 		case IP_FW3:	/* generic ipfw v.3 functions */
731 		case IP_FW_ADD:
732 		case IP_FW_DEL:
733 		case IP_FW_FLUSH:
734 		case IP_FW_ZERO:
735 		case IP_FW_RESETLOG:
736 		case IP_FW_TABLE_ADD:
737 		case IP_FW_TABLE_DEL:
738 		case IP_FW_TABLE_FLUSH:
739 		case IP_FW_NAT_CFG:
740 		case IP_FW_NAT_DEL:
741 			if (V_ip_fw_ctl_ptr != NULL)
742 				error = V_ip_fw_ctl_ptr(sopt);
743 			else
744 				error = ENOPROTOOPT;
745 			break;
746 
747 		case IP_DUMMYNET3:	/* generic dummynet v.3 functions */
748 		case IP_DUMMYNET_CONFIGURE:
749 		case IP_DUMMYNET_DEL:
750 		case IP_DUMMYNET_FLUSH:
751 			if (ip_dn_ctl_ptr != NULL)
752 				error = ip_dn_ctl_ptr(sopt);
753 			else
754 				error = ENOPROTOOPT;
755 			break;
756 
757 		case IP_RSVP_ON:
758 			error = priv_check(curthread, PRIV_NETINET_MROUTE);
759 			if (error != 0)
760 				return (error);
761 			error = ip_rsvp_init(so);
762 			break;
763 
764 		case IP_RSVP_OFF:
765 			error = priv_check(curthread, PRIV_NETINET_MROUTE);
766 			if (error != 0)
767 				return (error);
768 			error = ip_rsvp_done();
769 			break;
770 
771 		case IP_RSVP_VIF_ON:
772 		case IP_RSVP_VIF_OFF:
773 			error = priv_check(curthread, PRIV_NETINET_MROUTE);
774 			if (error != 0)
775 				return (error);
776 			error = ip_rsvp_vif ?
777 				ip_rsvp_vif(so, sopt) : EINVAL;
778 			break;
779 
780 		case MRT_INIT:
781 		case MRT_DONE:
782 		case MRT_ADD_VIF:
783 		case MRT_DEL_VIF:
784 		case MRT_ADD_MFC:
785 		case MRT_DEL_MFC:
786 		case MRT_VERSION:
787 		case MRT_ASSERT:
788 		case MRT_API_SUPPORT:
789 		case MRT_API_CONFIG:
790 		case MRT_ADD_BW_UPCALL:
791 		case MRT_DEL_BW_UPCALL:
792 			error = priv_check(curthread, PRIV_NETINET_MROUTE);
793 			if (error != 0)
794 				return (error);
795 			error = ip_mrouter_set ? ip_mrouter_set(so, sopt) :
796 					EOPNOTSUPP;
797 			break;
798 
799 		default:
800 			error = ip_ctloutput(so, sopt);
801 			break;
802 		}
803 		break;
804 	}
805 
806 	return (error);
807 }
808 
809 /*
810  * This function exists solely to receive the PRC_IFDOWN messages which are
811  * sent by if_down().  It looks for an ifaddr whose ifa_addr is sa, and calls
812  * in_ifadown() to remove all routes corresponding to that address.  It also
813  * receives the PRC_IFUP messages from if_up() and reinstalls the interface
814  * routes.
815  */
816 void
817 rip_ctlinput(int cmd, struct sockaddr *sa, void *vip)
818 {
819 	struct in_ifaddr *ia;
820 	struct ifnet *ifp;
821 	int err;
822 	int flags;
823 
824 	NET_EPOCH_ASSERT();
825 
826 	switch (cmd) {
827 	case PRC_IFDOWN:
828 		CK_STAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
829 			if (ia->ia_ifa.ifa_addr == sa
830 			    && (ia->ia_flags & IFA_ROUTE)) {
831 				ifa_ref(&ia->ia_ifa);
832 				/*
833 				 * in_scrubprefix() kills the interface route.
834 				 */
835 				in_scrubprefix(ia, 0);
836 				/*
837 				 * in_ifadown gets rid of all the rest of the
838 				 * routes.  This is not quite the right thing
839 				 * to do, but at least if we are running a
840 				 * routing process they will come back.
841 				 */
842 				in_ifadown(&ia->ia_ifa, 0);
843 				ifa_free(&ia->ia_ifa);
844 				break;
845 			}
846 		}
847 		break;
848 
849 	case PRC_IFUP:
850 		CK_STAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
851 			if (ia->ia_ifa.ifa_addr == sa)
852 				break;
853 		}
854 		if (ia == NULL || (ia->ia_flags & IFA_ROUTE))
855 			return;
856 		ifa_ref(&ia->ia_ifa);
857 		flags = RTF_UP;
858 		ifp = ia->ia_ifa.ifa_ifp;
859 
860 		if ((ifp->if_flags & IFF_LOOPBACK)
861 		    || (ifp->if_flags & IFF_POINTOPOINT))
862 			flags |= RTF_HOST;
863 
864 		err = ifa_del_loopback_route((struct ifaddr *)ia, sa);
865 
866 		rt_addrmsg(RTM_ADD, &ia->ia_ifa, ia->ia_ifp->if_fib);
867 		err = in_handle_ifaddr_route(RTM_ADD, ia);
868 		if (err == 0)
869 			ia->ia_flags |= IFA_ROUTE;
870 
871 		err = ifa_add_loopback_route((struct ifaddr *)ia, sa);
872 
873 		ifa_free(&ia->ia_ifa);
874 		break;
875 #if defined(IPSEC) || defined(IPSEC_SUPPORT)
876 	case PRC_MSGSIZE:
877 		if (IPSEC_ENABLED(ipv4))
878 			IPSEC_CTLINPUT(ipv4, cmd, sa, vip);
879 		break;
880 #endif
881 	}
882 }
883 
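/*
 * Attach a new raw PCB to a socket.  Requires PRIV_NETINET_RAW, reserves
 * rip_sendspace/rip_recvspace worth of socket buffering, marks the PCB as
 * IPv4, records the requested protocol and the default TTL, and inserts the
 * PCB into the raw hash.
 */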
884 static int
885 rip_attach(struct socket *so, int proto, struct thread *td)
886 {
887 	struct inpcb *inp;
888 	int error;
889 
890 	inp = sotoinpcb(so);
891 	KASSERT(inp == NULL, ("rip_attach: inp != NULL"));
892 
893 	error = priv_check(td, PRIV_NETINET_RAW);
894 	if (error)
895 		return (error);
896 	if (proto >= IPPROTO_MAX || proto < 0)
897 		return (EPROTONOSUPPORT);
898 	error = soreserve(so, rip_sendspace, rip_recvspace);
899 	if (error)
900 		return (error);
901 	INP_INFO_WLOCK(&V_ripcbinfo);
902 	error = in_pcballoc(so, &V_ripcbinfo);
903 	if (error) {
904 		INP_INFO_WUNLOCK(&V_ripcbinfo);
905 		return (error);
906 	}
907 	inp = (struct inpcb *)so->so_pcb;
908 	inp->inp_vflag |= INP_IPV4;
909 	inp->inp_ip_p = proto;
910 	inp->inp_ip_ttl = V_ip_defttl;
911 	rip_inshash(inp);
912 	INP_INFO_WUNLOCK(&V_ripcbinfo);
913 	INP_WUNLOCK(inp);
914 	return (0);
915 }
916 
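/*
 * Detach the raw PCB from its socket.  If this socket owned the multicast
 * routing or RSVP state, tear that down first, then free the PCB.
 */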
917 static void
918 rip_detach(struct socket *so)
919 {
920 	struct inpcb *inp;
921 
922 	inp = sotoinpcb(so);
923 	KASSERT(inp != NULL, ("rip_detach: inp == NULL"));
924 	KASSERT(inp->inp_faddr.s_addr == INADDR_ANY,
925 	    ("rip_detach: not closed"));
926 
927 	INP_INFO_WLOCK(&V_ripcbinfo);
928 	INP_WLOCK(inp);
929 	rip_delhash(inp);
930 	if (so == V_ip_mrouter && ip_mrouter_done)
931 		ip_mrouter_done();
932 	if (ip_rsvp_force_done)
933 		ip_rsvp_force_done(so);
934 	if (so == V_ip_rsvpd)
935 		ip_rsvp_done();
936 	in_pcbdetach(inp);
937 	in_pcbfree(inp);
938 	INP_INFO_WUNLOCK(&V_ripcbinfo);
939 }
940 
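/*
 * Common disconnect handling: clear the foreign address, move the PCB back
 * to the wildcard hash bucket, and mark the socket as no longer connected.
 */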
941 static void
942 rip_dodisconnect(struct socket *so, struct inpcb *inp)
943 {
944 	struct inpcbinfo *pcbinfo;
945 
946 	pcbinfo = inp->inp_pcbinfo;
947 	INP_INFO_WLOCK(pcbinfo);
948 	INP_WLOCK(inp);
949 	rip_delhash(inp);
950 	inp->inp_faddr.s_addr = INADDR_ANY;
951 	rip_inshash(inp);
952 	SOCK_LOCK(so);
953 	so->so_state &= ~SS_ISCONNECTED;
954 	SOCK_UNLOCK(so);
955 	INP_WUNLOCK(inp);
956 	INP_INFO_WUNLOCK(pcbinfo);
957 }
958 
959 static void
960 rip_abort(struct socket *so)
961 {
962 	struct inpcb *inp;
963 
964 	inp = sotoinpcb(so);
965 	KASSERT(inp != NULL, ("rip_abort: inp == NULL"));
966 
967 	rip_dodisconnect(so, inp);
968 }
969 
970 static void
971 rip_close(struct socket *so)
972 {
973 	struct inpcb *inp;
974 
975 	inp = sotoinpcb(so);
976 	KASSERT(inp != NULL, ("rip_close: inp == NULL"));
977 
978 	rip_dodisconnect(so, inp);
979 }
980 
981 static int
982 rip_disconnect(struct socket *so)
983 {
984 	struct inpcb *inp;
985 
986 	if ((so->so_state & SS_ISCONNECTED) == 0)
987 		return (ENOTCONN);
988 
989 	inp = sotoinpcb(so);
990 	KASSERT(inp != NULL, ("rip_disconnect: inp == NULL"));
991 
992 	rip_dodisconnect(so, inp);
993 	return (0);
994 }
995 
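/*
 * Bind a raw socket to a local address.  The address must be permitted by
 * the caller's prison and, unless INP_BINDANY is set, must be assigned to a
 * local interface; the PCB is then rehashed under the new local address.
 */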
996 static int
997 rip_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
998 {
999 	struct sockaddr_in *addr = (struct sockaddr_in *)nam;
1000 	struct inpcb *inp;
1001 	int error;
1002 
1003 	if (nam->sa_family != AF_INET)
1004 		return (EAFNOSUPPORT);
1005 	if (nam->sa_len != sizeof(*addr))
1006 		return (EINVAL);
1007 
1008 	error = prison_check_ip4(td->td_ucred, &addr->sin_addr);
1009 	if (error != 0)
1010 		return (error);
1011 
1012 	inp = sotoinpcb(so);
1013 	KASSERT(inp != NULL, ("rip_bind: inp == NULL"));
1014 
1015 	if (CK_STAILQ_EMPTY(&V_ifnet) ||
1016 	    (addr->sin_family != AF_INET && addr->sin_family != AF_IMPLINK) ||
1017 	    (addr->sin_addr.s_addr &&
1018 	     (inp->inp_flags & INP_BINDANY) == 0 &&
1019 	     ifa_ifwithaddr_check((struct sockaddr *)addr) == 0))
1020 		return (EADDRNOTAVAIL);
1021 
1022 	INP_INFO_WLOCK(&V_ripcbinfo);
1023 	INP_WLOCK(inp);
1024 	rip_delhash(inp);
1025 	inp->inp_laddr = addr->sin_addr;
1026 	rip_inshash(inp);
1027 	INP_WUNLOCK(inp);
1028 	INP_INFO_WUNLOCK(&V_ripcbinfo);
1029 	return (0);
1030 }
1031 
1032 static int
1033 rip_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
1034 {
1035 	struct sockaddr_in *addr = (struct sockaddr_in *)nam;
1036 	struct inpcb *inp;
1037 
1038 	if (nam->sa_len != sizeof(*addr))
1039 		return (EINVAL);
1040 	if (CK_STAILQ_EMPTY(&V_ifnet))
1041 		return (EADDRNOTAVAIL);
1042 	if (addr->sin_family != AF_INET && addr->sin_family != AF_IMPLINK)
1043 		return (EAFNOSUPPORT);
1044 
1045 	inp = sotoinpcb(so);
1046 	KASSERT(inp != NULL, ("rip_connect: inp == NULL"));
1047 
1048 	INP_INFO_WLOCK(&V_ripcbinfo);
1049 	INP_WLOCK(inp);
1050 	rip_delhash(inp);
1051 	inp->inp_faddr = addr->sin_addr;
1052 	rip_inshash(inp);
1053 	soisconnected(so);
1054 	INP_WUNLOCK(inp);
1055 	INP_INFO_WUNLOCK(&V_ripcbinfo);
1056 	return (0);
1057 }
1058 
1059 static int
1060 rip_shutdown(struct socket *so)
1061 {
1062 	struct inpcb *inp;
1063 
1064 	inp = sotoinpcb(so);
1065 	KASSERT(inp != NULL, ("rip_shutdown: inp == NULL"));
1066 
1067 	INP_WLOCK(inp);
1068 	socantsendmore(so);
1069 	INP_WUNLOCK(inp);
1070 	return (0);
1071 }
1072 
1073 static int
1074 rip_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
1075     struct mbuf *control, struct thread *td)
1076 {
1077 	struct inpcb *inp;
1078 	u_long dst;
1079 	int error;
1080 
1081 	inp = sotoinpcb(so);
1082 	KASSERT(inp != NULL, ("rip_send: inp == NULL"));
1083 
1084 	if (control != NULL) {
1085 		m_freem(control);
1086 		control = NULL;
1087 	}
1088 
1089 	/*
1090 	 * Note: 'dst' reads below are unlocked.
1091 	 */
1092 	if (so->so_state & SS_ISCONNECTED) {
1093 		if (nam) {
1094 			error = EISCONN;
1095 			goto release;
1096 		}
1097 		dst = inp->inp_faddr.s_addr;	/* Unlocked read. */
1098 	} else {
1099 		error = 0;
1100 		if (nam == NULL)
1101 			error = ENOTCONN;
1102 		else if (nam->sa_family != AF_INET)
1103 			error = EAFNOSUPPORT;
1104 		else if (nam->sa_len != sizeof(struct sockaddr_in))
1105 			error = EINVAL;
1106 		if (error != 0)
1107 			goto release;
1108 		dst = ((struct sockaddr_in *)nam)->sin_addr.s_addr;
1109 	}
1110 	return (rip_output(m, so, dst));
1111 
1112 release:
1113 	m_freem(m);
1114 	return (error);
1115 }
1116 #endif /* INET */
1117 
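/*
 * Sysctl handler that exports the list of raw IP PCBs as xinpcb structures.
 * Read-only; consumed by tools such as netstat(1).
 */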
1118 static int
1119 rip_pcblist(SYSCTL_HANDLER_ARGS)
1120 {
1121 	struct xinpgen xig;
1122 	struct epoch_tracker et;
1123 	struct inpcb *inp;
1124 	int error;
1125 
1126 	if (req->newptr != 0)
1127 		return (EPERM);
1128 
1129 	if (req->oldptr == 0) {
1130 		int n;
1131 
1132 		n = V_ripcbinfo.ipi_count;
1133 		n += imax(n / 8, 10);
1134 		req->oldidx = 2 * (sizeof xig) + n * sizeof(struct xinpcb);
1135 		return (0);
1136 	}
1137 
1138 	if ((error = sysctl_wire_old_buffer(req, 0)) != 0)
1139 		return (error);
1140 
1141 	bzero(&xig, sizeof(xig));
1142 	xig.xig_len = sizeof xig;
1143 	xig.xig_count = V_ripcbinfo.ipi_count;
1144 	xig.xig_gen = V_ripcbinfo.ipi_gencnt;
1145 	xig.xig_sogen = so_gencnt;
1146 	error = SYSCTL_OUT(req, &xig, sizeof xig);
1147 	if (error)
1148 		return (error);
1149 
1150 	NET_EPOCH_ENTER(et);
1151 	for (inp = CK_LIST_FIRST(V_ripcbinfo.ipi_listhead);
1152 	    inp != NULL;
1153 	    inp = CK_LIST_NEXT(inp, inp_list)) {
1154 		INP_RLOCK(inp);
1155 		if (inp->inp_gencnt <= xig.xig_gen &&
1156 		    cr_canseeinpcb(req->td->td_ucred, inp) == 0) {
1157 			struct xinpcb xi;
1158 
1159 			in_pcbtoxinpcb(inp, &xi);
1160 			INP_RUNLOCK(inp);
1161 			error = SYSCTL_OUT(req, &xi, sizeof xi);
1162 			if (error)
1163 				break;
1164 		} else
1165 			INP_RUNLOCK(inp);
1166 	}
1167 	NET_EPOCH_EXIT(et);
1168 
1169 	if (!error) {
1170 		/*
1171 		 * Give the user an updated idea of our state.  If the
1172 		 * generation differs from what we told her before, she knows
1173 		 * that something happened while we were processing this
1174 		 * request, and it might be necessary to retry.
1175 		 */
1176 		xig.xig_gen = V_ripcbinfo.ipi_gencnt;
1177 		xig.xig_sogen = so_gencnt;
1178 		xig.xig_count = V_ripcbinfo.ipi_count;
1179 		error = SYSCTL_OUT(req, &xig, sizeof xig);
1180 	}
1181 
1182 	return (error);
1183 }
1184 
1185 SYSCTL_PROC(_net_inet_raw, OID_AUTO/*XXX*/, pcblist,
1186     CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
1187     rip_pcblist, "S,xinpcb",
1188     "List of active raw IP sockets");
1189 
1190 #ifdef INET
1191 struct pr_usrreqs rip_usrreqs = {
1192 	.pru_abort =		rip_abort,
1193 	.pru_attach =		rip_attach,
1194 	.pru_bind =		rip_bind,
1195 	.pru_connect =		rip_connect,
1196 	.pru_control =		in_control,
1197 	.pru_detach =		rip_detach,
1198 	.pru_disconnect =	rip_disconnect,
1199 	.pru_peeraddr =		in_getpeeraddr,
1200 	.pru_send =		rip_send,
1201 	.pru_shutdown =		rip_shutdown,
1202 	.pru_sockaddr =		in_getsockaddr,
1203 	.pru_sosetlabel =	in_pcbsosetlabel,
1204 	.pru_close =		rip_close,
1205 };
1206 #endif /* INET */
1207