xref: /freebsd/sys/netinet/ip_input.c (revision aa64588d28258aef88cc33b8043112e8856948d0)
1 /*-
2  * Copyright (c) 1982, 1986, 1988, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 4. Neither the name of the University nor the names of its contributors
14  *    may be used to endorse or promote products derived from this software
15  *    without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  *	@(#)ip_input.c	8.2 (Berkeley) 1/4/94
30  */
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 #include "opt_bootp.h"
36 #include "opt_ipfw.h"
37 #include "opt_ipstealth.h"
38 #include "opt_ipsec.h"
39 #include "opt_route.h"
40 #include "opt_carp.h"
41 
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/callout.h>
45 #include <sys/mbuf.h>
46 #include <sys/malloc.h>
47 #include <sys/domain.h>
48 #include <sys/protosw.h>
49 #include <sys/socket.h>
50 #include <sys/time.h>
51 #include <sys/kernel.h>
52 #include <sys/lock.h>
53 #include <sys/rwlock.h>
54 #include <sys/syslog.h>
55 #include <sys/sysctl.h>
56 
57 #include <net/pfil.h>
58 #include <net/if.h>
59 #include <net/if_types.h>
60 #include <net/if_var.h>
61 #include <net/if_dl.h>
62 #include <net/route.h>
63 #include <net/netisr.h>
64 #include <net/vnet.h>
65 #include <net/flowtable.h>
66 
67 #include <netinet/in.h>
68 #include <netinet/in_systm.h>
69 #include <netinet/in_var.h>
70 #include <netinet/ip.h>
71 #include <netinet/in_pcb.h>
72 #include <netinet/ip_var.h>
73 #include <netinet/ip_fw.h>
74 #include <netinet/ip_icmp.h>
75 #include <netinet/ip_options.h>
76 #include <machine/in_cksum.h>
77 #ifdef DEV_CARP
78 #include <netinet/ip_carp.h>
79 #endif
80 #ifdef IPSEC
81 #include <netinet/ip_ipsec.h>
82 #endif /* IPSEC */
83 
84 #include <sys/socketvar.h>
85 
86 #include <security/mac/mac_framework.h>
87 
88 #ifdef CTASSERT
89 CTASSERT(sizeof(struct ip) == 20);
90 #endif
91 
92 struct	rwlock in_ifaddr_lock;
93 RW_SYSINIT(in_ifaddr_lock, &in_ifaddr_lock, "in_ifaddr_lock");
94 
95 VNET_DEFINE(int, rsvp_on);
96 
97 VNET_DEFINE(int, ipforwarding);
98 SYSCTL_VNET_INT(_net_inet_ip, IPCTL_FORWARDING, forwarding, CTLFLAG_RW,
99     &VNET_NAME(ipforwarding), 0,
100     "Enable IP forwarding between interfaces");
101 
102 static VNET_DEFINE(int, ipsendredirects) = 1;	/* XXX */
103 #define	V_ipsendredirects	VNET(ipsendredirects)
104 SYSCTL_VNET_INT(_net_inet_ip, IPCTL_SENDREDIRECTS, redirect, CTLFLAG_RW,
105     &VNET_NAME(ipsendredirects), 0,
106     "Enable sending IP redirects");
107 
108 VNET_DEFINE(int, ip_defttl) = IPDEFTTL;
109 SYSCTL_VNET_INT(_net_inet_ip, IPCTL_DEFTTL, ttl, CTLFLAG_RW,
110     &VNET_NAME(ip_defttl), 0,
111     "Maximum TTL on IP packets");
112 
113 static VNET_DEFINE(int, ip_keepfaith);
114 #define	V_ip_keepfaith		VNET(ip_keepfaith)
115 SYSCTL_VNET_INT(_net_inet_ip, IPCTL_KEEPFAITH, keepfaith, CTLFLAG_RW,
116     &VNET_NAME(ip_keepfaith), 0,
117     "Enable packet capture for FAITH IPv4->IPv6 translator daemon");
118 
119 static VNET_DEFINE(int, ip_sendsourcequench);
120 #define	V_ip_sendsourcequench	VNET(ip_sendsourcequench)
121 SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, sendsourcequench, CTLFLAG_RW,
122     &VNET_NAME(ip_sendsourcequench), 0,
123     "Enable the transmission of source quench packets");
124 
125 VNET_DEFINE(int, ip_do_randomid);
126 SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, random_id, CTLFLAG_RW,
127     &VNET_NAME(ip_do_randomid), 0,
128     "Assign random ip_id values");
129 
130 /*
131  * XXX - Setting ip_checkinterface mostly implements the receive side of
132  * the Strong ES model described in RFC 1122, but since the routing table
133  * and transmit implementation do not implement the Strong ES model,
134  * setting this to 1 results in an odd hybrid.
135  *
136  * XXX - ip_checkinterface currently must be disabled if you use ipnat
137  * to translate the destination address to another local interface.
138  *
139  * XXX - ip_checkinterface must be disabled if you add IP aliases
140  * to the loopback interface instead of the interface where the
141  * packets for those addresses are received.
142  */
143 static VNET_DEFINE(int, ip_checkinterface);
144 #define	V_ip_checkinterface	VNET(ip_checkinterface)
145 SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, check_interface, CTLFLAG_RW,
146     &VNET_NAME(ip_checkinterface), 0,
147     "Verify packet arrives on correct interface");
148 
149 VNET_DEFINE(struct pfil_head, inet_pfil_hook);	/* Packet filter hooks */
150 
151 static struct netisr_handler ip_nh = {
152 	.nh_name = "ip",
153 	.nh_handler = ip_input,
154 	.nh_proto = NETISR_IP,
155 	.nh_policy = NETISR_POLICY_FLOW,
156 };
157 
158 extern	struct domain inetdomain;
159 extern	struct protosw inetsw[];
160 u_char	ip_protox[IPPROTO_MAX];
161 VNET_DEFINE(struct in_ifaddrhead, in_ifaddrhead);  /* first inet address */
162 VNET_DEFINE(struct in_ifaddrhashhead *, in_ifaddrhashtbl); /* inet addr hash table  */
163 VNET_DEFINE(u_long, in_ifaddrhmask);		/* mask for hash table */
164 
165 VNET_DEFINE(struct ipstat, ipstat);
166 SYSCTL_VNET_STRUCT(_net_inet_ip, IPCTL_STATS, stats, CTLFLAG_RW,
167     &VNET_NAME(ipstat), ipstat,
168     "IP statistics (struct ipstat, netinet/ip_var.h)");
169 
170 static VNET_DEFINE(uma_zone_t, ipq_zone);
171 static VNET_DEFINE(TAILQ_HEAD(ipqhead, ipq), ipq[IPREASS_NHASH]);
172 static struct mtx ipqlock;
173 
174 #define	V_ipq_zone		VNET(ipq_zone)
175 #define	V_ipq			VNET(ipq)
176 
177 #define	IPQ_LOCK()	mtx_lock(&ipqlock)
178 #define	IPQ_UNLOCK()	mtx_unlock(&ipqlock)
179 #define	IPQ_LOCK_INIT()	mtx_init(&ipqlock, "ipqlock", NULL, MTX_DEF)
180 #define	IPQ_LOCK_ASSERT()	mtx_assert(&ipqlock, MA_OWNED)
181 
182 static void	maxnipq_update(void);
183 static void	ipq_zone_change(void *);
184 static void	ip_drain_locked(void);
185 
186 static VNET_DEFINE(int, maxnipq);  /* Administrative limit on # reass queues. */
187 static VNET_DEFINE(int, nipq);			/* Total # of reass queues */
188 #define	V_maxnipq		VNET(maxnipq)
189 #define	V_nipq			VNET(nipq)
190 SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, fragpackets, CTLFLAG_RD,
191     &VNET_NAME(nipq), 0,
192     "Current number of IPv4 fragment reassembly queue entries");
193 
194 static VNET_DEFINE(int, maxfragsperpacket);
195 #define	V_maxfragsperpacket	VNET(maxfragsperpacket)
196 SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, maxfragsperpacket, CTLFLAG_RW,
197     &VNET_NAME(maxfragsperpacket), 0,
198     "Maximum number of IPv4 fragments allowed per packet");
199 
200 struct callout	ipport_tick_callout;
201 
202 #ifdef IPCTL_DEFMTU
203 SYSCTL_INT(_net_inet_ip, IPCTL_DEFMTU, mtu, CTLFLAG_RW,
204     &ip_mtu, 0, "Default MTU");
205 #endif
206 
207 #ifdef IPSTEALTH
208 VNET_DEFINE(int, ipstealth);
209 SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, stealth, CTLFLAG_RW,
210     &VNET_NAME(ipstealth), 0,
211     "IP stealth mode, no TTL decrementation on forwarding");
212 #endif
213 
214 #ifdef FLOWTABLE
215 static VNET_DEFINE(int, ip_output_flowtable_size) = 2048;
216 VNET_DEFINE(struct flowtable *, ip_ft);
217 #define	V_ip_output_flowtable_size	VNET(ip_output_flowtable_size)
218 
219 SYSCTL_VNET_INT(_net_inet_ip, OID_AUTO, output_flowtable_size, CTLFLAG_RDTUN,
220     &VNET_NAME(ip_output_flowtable_size), 2048,
221     "Number of entries in the per-cpu output flow caches");
222 #endif
223 
224 VNET_DEFINE(int, fw_one_pass) = 1;
225 
226 static void	ip_freef(struct ipqhead *, struct ipq *);
227 
228 /*
229  * Kernel module interface for updating ipstat.  The argument is an index
230  * into ipstat treated as an array of u_long.  While this encodes the general
231  * layout of ipstat into the caller, it doesn't encode its location, so that
232  * future changes to add, for example, per-CPU stats support won't cause
233  * binary compatibility problems for kernel modules.
234  */
235 void
236 kmod_ipstat_inc(int statnum)
237 {
238 
239 	(*((u_long *)&V_ipstat + statnum))++;
240 }
241 
242 void
243 kmod_ipstat_dec(int statnum)
244 {
245 
246 	(*((u_long *)&V_ipstat + statnum))--;
247 }
248 
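/*
 * Example (sketch): a module bumps a counter by passing the u_long index
 * of the field within struct ipstat; ip_var.h is expected to provide a
 * wrapper along these lines.
 */
#if 0
#define	KMOD_IPSTAT_INC(name)						\
	kmod_ipstat_inc(offsetof(struct ipstat, name) / sizeof(u_long))

	KMOD_IPSTAT_INC(ips_total);	/* e.g. from a module's input path */
#endif
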
249 static int
250 sysctl_netinet_intr_queue_maxlen(SYSCTL_HANDLER_ARGS)
251 {
252 	int error, qlimit;
253 
254 	netisr_getqlimit(&ip_nh, &qlimit);
255 	error = sysctl_handle_int(oidp, &qlimit, 0, req);
256 	if (error || !req->newptr)
257 		return (error);
258 	if (qlimit < 1)
259 		return (EINVAL);
260 	return (netisr_setqlimit(&ip_nh, qlimit));
261 }
262 SYSCTL_PROC(_net_inet_ip, IPCTL_INTRQMAXLEN, intr_queue_maxlen,
263     CTLTYPE_INT|CTLFLAG_RW, 0, 0, sysctl_netinet_intr_queue_maxlen, "I",
264     "Maximum size of the IP input queue");
265 
266 static int
267 sysctl_netinet_intr_queue_drops(SYSCTL_HANDLER_ARGS)
268 {
269 	u_int64_t qdrops_long;
270 	int error, qdrops;
271 
272 	netisr_getqdrops(&ip_nh, &qdrops_long);
273 	qdrops = qdrops_long;
274 	error = sysctl_handle_int(oidp, &qdrops, 0, req);
275 	if (error || !req->newptr)
276 		return (error);
277 	if (qdrops != 0)
278 		return (EINVAL);
279 	netisr_clearqdrops(&ip_nh);
280 	return (0);
281 }
282 
283 SYSCTL_PROC(_net_inet_ip, IPCTL_INTRQDROPS, intr_queue_drops,
284     CTLTYPE_INT|CTLFLAG_RD, 0, 0, sysctl_netinet_intr_queue_drops, "I",
285     "Number of packets dropped from the IP input queue");
286 
287 /*
288  * IP initialization: fill in IP protocol switch table.
289  * All protocols not implemented in kernel go to raw IP protocol handler.
290  * All protocols not implemented in the kernel go to the raw IP protocol handler.
291 void
292 ip_init(void)
293 {
294 	struct protosw *pr;
295 	int i;
296 
297 	V_ip_id = time_second & 0xffff;
298 
299 	TAILQ_INIT(&V_in_ifaddrhead);
300 	V_in_ifaddrhashtbl = hashinit(INADDR_NHASH, M_IFADDR, &V_in_ifaddrhmask);
301 
302 	/* Initialize IP reassembly queue. */
303 	for (i = 0; i < IPREASS_NHASH; i++)
304 		TAILQ_INIT(&V_ipq[i]);
305 	V_maxnipq = nmbclusters / 32;
306 	V_maxfragsperpacket = 16;
307 	V_ipq_zone = uma_zcreate("ipq", sizeof(struct ipq), NULL, NULL, NULL,
308 	    NULL, UMA_ALIGN_PTR, 0);
309 	maxnipq_update();
310 
311 	/* Initialize packet filter hooks. */
312 	V_inet_pfil_hook.ph_type = PFIL_TYPE_AF;
313 	V_inet_pfil_hook.ph_af = AF_INET;
314 	if ((i = pfil_head_register(&V_inet_pfil_hook)) != 0)
315 		printf("%s: WARNING: unable to register pfil hook, "
316 			"error %d\n", __func__, i);
317 
318 #ifdef FLOWTABLE
319 	if (TUNABLE_INT_FETCH("net.inet.ip.output_flowtable_size",
320 		&V_ip_output_flowtable_size)) {
321 		if (V_ip_output_flowtable_size < 256)
322 			V_ip_output_flowtable_size = 256;
323 		if (!powerof2(V_ip_output_flowtable_size)) {
324 			printf("flowtable must be power of 2 size\n");
325 			V_ip_output_flowtable_size = 2048;
326 		}
327 	} else {
328 		/*
329 		 * round up to the next power of 2
330 		 */
331 		V_ip_output_flowtable_size = 1 << fls((1024 + maxusers * 64)-1);
332 	}
333 	V_ip_ft = flowtable_alloc("ipv4", V_ip_output_flowtable_size, FL_PCPU);
334 #endif
335 
336 	/* Skip initialization of globals for non-default instances. */
337 	if (!IS_DEFAULT_VNET(curvnet))
338 		return;
339 
340 	pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
341 	if (pr == NULL)
342 		panic("ip_init: PF_INET not found");
343 
344 	/* Initialize the entire ip_protox[] array to IPPROTO_RAW. */
345 	for (i = 0; i < IPPROTO_MAX; i++)
346 		ip_protox[i] = pr - inetsw;
347 	/*
348 	 * Cycle through IP protocols and put them into the appropriate place
349 	 * in ip_protox[].
350 	 */
351 	for (pr = inetdomain.dom_protosw;
352 	    pr < inetdomain.dom_protoswNPROTOSW; pr++)
353 		if (pr->pr_domain->dom_family == PF_INET &&
354 		    pr->pr_protocol && pr->pr_protocol != IPPROTO_RAW) {
355 			/* Be careful to only index valid IP protocols. */
356 			if (pr->pr_protocol < IPPROTO_MAX)
357 				ip_protox[pr->pr_protocol] = pr - inetsw;
358 		}
359 
360 	/* Start ipport_tick. */
361 	callout_init(&ipport_tick_callout, CALLOUT_MPSAFE);
362 	callout_reset(&ipport_tick_callout, 1, ipport_tick, NULL);
363 	EVENTHANDLER_REGISTER(shutdown_pre_sync, ip_fini, NULL,
364 		SHUTDOWN_PRI_DEFAULT);
365 	EVENTHANDLER_REGISTER(nmbclusters_change, ipq_zone_change,
366 		NULL, EVENTHANDLER_PRI_ANY);
367 
368 	/* Initialize various other remaining things. */
369 	IPQ_LOCK_INIT();
370 	netisr_register(&ip_nh);
371 }
372 
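/*
 * Example (sketch): after ip_init() the protocol dispatch at the bottom of
 * ip_input() is a plain array lookup; any protocol number that no inetsw[]
 * entry claimed still resolves to the raw IP slot.
 */
#if 0
	/* ip->ip_p == IPPROTO_TCP -> inetsw[ip_protox[IPPROTO_TCP]].pr_input, i.e. tcp_input */
	/* ip->ip_p == 253 (unassigned) -> raw IP slot, delivered to raw sockets */
	(*inetsw[ip_protox[ip->ip_p]].pr_input)(m, hlen);
#endif
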
373 #ifdef VIMAGE
374 void
375 ip_destroy(void)
376 {
377 
378 	/* Cleanup in_ifaddr hash table; should be empty. */
379 	hashdestroy(V_in_ifaddrhashtbl, M_IFADDR, V_in_ifaddrhmask);
380 
381 	IPQ_LOCK();
382 	ip_drain_locked();
383 	IPQ_UNLOCK();
384 
385 	uma_zdestroy(V_ipq_zone);
386 }
387 #endif
388 
389 void
390 ip_fini(void *xtp)
391 {
392 
393 	callout_stop(&ipport_tick_callout);
394 }
395 
396 /*
397  * Ip input routine.  Checksum and byte swap header.  If fragmented
398  * try to reassemble.  Process options.  Pass to next level.
399  */
400 void
401 ip_input(struct mbuf *m)
402 {
403 	struct ip *ip = NULL;
404 	struct in_ifaddr *ia = NULL;
405 	struct ifaddr *ifa;
406 	struct ifnet *ifp;
407 	int    checkif, hlen = 0;
408 	u_short sum;
409 	int dchg = 0;				/* dest changed after fw */
410 	struct in_addr odst;			/* original dst address */
411 
412 	M_ASSERTPKTHDR(m);
413 
414 	if (m->m_flags & M_FASTFWD_OURS) {
415 		/*
416 		 * Firewall or NAT changed destination to local.
417 		 * We expect ip_len and ip_off to be in host byte order.
418 		 */
419 		m->m_flags &= ~M_FASTFWD_OURS;
420 		/* Set up some basics that will be used later. */
421 		ip = mtod(m, struct ip *);
422 		hlen = ip->ip_hl << 2;
423 		goto ours;
424 	}
425 
426 	IPSTAT_INC(ips_total);
427 
428 	if (m->m_pkthdr.len < sizeof(struct ip))
429 		goto tooshort;
430 
431 	if (m->m_len < sizeof (struct ip) &&
432 	    (m = m_pullup(m, sizeof (struct ip))) == NULL) {
433 		IPSTAT_INC(ips_toosmall);
434 		return;
435 	}
436 	ip = mtod(m, struct ip *);
437 
438 	if (ip->ip_v != IPVERSION) {
439 		IPSTAT_INC(ips_badvers);
440 		goto bad;
441 	}
442 
443 	hlen = ip->ip_hl << 2;
444 	if (hlen < sizeof(struct ip)) {	/* minimum header length */
445 		IPSTAT_INC(ips_badhlen);
446 		goto bad;
447 	}
448 	if (hlen > m->m_len) {
449 		if ((m = m_pullup(m, hlen)) == NULL) {
450 			IPSTAT_INC(ips_badhlen);
451 			return;
452 		}
453 		ip = mtod(m, struct ip *);
454 	}
455 
456 	/* 127/8 must not appear on wire - RFC1122 */
457 	ifp = m->m_pkthdr.rcvif;
458 	if ((ntohl(ip->ip_dst.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET ||
459 	    (ntohl(ip->ip_src.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) {
460 		if ((ifp->if_flags & IFF_LOOPBACK) == 0) {
461 			IPSTAT_INC(ips_badaddr);
462 			goto bad;
463 		}
464 	}
465 
466 	if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
467 		sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
468 	} else {
469 		if (hlen == sizeof(struct ip)) {
470 			sum = in_cksum_hdr(ip);
471 		} else {
472 			sum = in_cksum(m, hlen);
473 		}
474 	}
475 	if (sum) {
476 		IPSTAT_INC(ips_badsum);
477 		goto bad;
478 	}
479 
480 #ifdef ALTQ
481 	if (altq_input != NULL && (*altq_input)(m, AF_INET) == 0)
482 		/* packet is dropped by traffic conditioner */
483 		return;
484 #endif
485 
486 	/*
487 	 * Convert fields to host representation.
488 	 */
489 	ip->ip_len = ntohs(ip->ip_len);
490 	if (ip->ip_len < hlen) {
491 		IPSTAT_INC(ips_badlen);
492 		goto bad;
493 	}
494 	ip->ip_off = ntohs(ip->ip_off);
495 
496 	/*
497 	 * Check that the amount of data in the buffers
498 	 * is at least as much as the IP header would have us expect.
499 	 * Trim mbufs if longer than we expect.
500 	 * Drop packet if shorter than we expect.
501 	 */
502 	if (m->m_pkthdr.len < ip->ip_len) {
503 tooshort:
504 		IPSTAT_INC(ips_tooshort);
505 		goto bad;
506 	}
507 	if (m->m_pkthdr.len > ip->ip_len) {
508 		if (m->m_len == m->m_pkthdr.len) {
509 			m->m_len = ip->ip_len;
510 			m->m_pkthdr.len = ip->ip_len;
511 		} else
512 			m_adj(m, ip->ip_len - m->m_pkthdr.len);
513 	}
514 #ifdef IPSEC
515 	/*
516 	 * Bypass packet filtering for packets from a tunnel (gif).
517 	 */
518 	if (ip_ipsec_filtertunnel(m))
519 		goto passin;
520 #endif /* IPSEC */
521 
522 	/*
523 	 * Run through list of hooks for input packets.
524 	 *
525 	 * NB: Beware of the destination address changing (e.g.
526 	 *     by NAT rewriting).  When this happens, tell
527 	 *     ip_forward to do the right thing.
528 	 */
529 
530 	/* Jump over all PFIL processing if hooks are not active. */
531 	if (!PFIL_HOOKED(&V_inet_pfil_hook))
532 		goto passin;
533 
534 	odst = ip->ip_dst;
535 	if (pfil_run_hooks(&V_inet_pfil_hook, &m, ifp, PFIL_IN, NULL) != 0)
536 		return;
537 	if (m == NULL)			/* consumed by filter */
538 		return;
539 
540 	ip = mtod(m, struct ip *);
541 	dchg = (odst.s_addr != ip->ip_dst.s_addr);
542 	ifp = m->m_pkthdr.rcvif;
543 
544 #ifdef IPFIREWALL_FORWARD
545 	if (m->m_flags & M_FASTFWD_OURS) {
546 		m->m_flags &= ~M_FASTFWD_OURS;
547 		goto ours;
548 	}
549 	if ((dchg = (m_tag_find(m, PACKET_TAG_IPFORWARD, NULL) != NULL)) != 0) {
550 		/*
551 		 * Directly ship the packet on.  This allows forwarding
552 		 * packets originally destined to us to some other directly
553 		 * connected host.
554 		 */
555 		ip_forward(m, dchg);
556 		return;
557 	}
558 #endif /* IPFIREWALL_FORWARD */
559 
560 passin:
561 	/*
562 	 * Process options and, if not destined for us,
563 	 * ship it on.  ip_dooptions returns 1 when an
564 	 * error was detected (causing an icmp message
565 	 * to be sent and the original packet to be freed).
566 	 */
567 	if (hlen > sizeof (struct ip) && ip_dooptions(m, 0))
568 		return;
569 
570 	/* Greedy RSVP: snatch any PATH packet of the RSVP protocol, no
571 	 * matter whether it is destined for another node or is a
572 	 * multicast one; RSVP wants it and prevents it from being
573 	 * forwarded anywhere else.  Also check that the RSVP daemon is
574 	 * running before grabbing the packet.
575 	 */
576 	if (V_rsvp_on && ip->ip_p==IPPROTO_RSVP)
577 		goto ours;
578 
579 	/*
580 	 * Check our list of addresses, to see if the packet is for us.
581 	 * If we don't have any addresses, assume any unicast packet
582 	 * we receive might be for us (and let the upper layers deal
583 	 * with it).
584 	 */
585 	if (TAILQ_EMPTY(&V_in_ifaddrhead) &&
586 	    (m->m_flags & (M_MCAST|M_BCAST)) == 0)
587 		goto ours;
588 
589 	/*
590 	 * Enable a consistency check between the destination address
591 	 * and the arrival interface for a unicast packet (the RFC 1122
592 	 * strong ES model) if IP forwarding is disabled and the packet
593 	 * is not locally generated and the packet is not subject to
594 	 * 'ipfw fwd'.
595 	 *
596 	 * XXX - Checking also should be disabled if the destination
597 	 * address is ipnat'ed to a different interface.
598 	 *
599 	 * XXX - Checking is incompatible with IP aliases added
600 	 * to the loopback interface instead of the interface where
601 	 * the packets are received.
602 	 *
603 	 * XXX - This is the case for carp vhost IPs as well so we
604 	 * insert a workaround. If the packet got here, we already
605 	 * checked with carp_iamatch() and carp_forus().
606 	 */
607 	checkif = V_ip_checkinterface && (V_ipforwarding == 0) &&
608 	    ifp != NULL && ((ifp->if_flags & IFF_LOOPBACK) == 0) &&
609 #ifdef DEV_CARP
610 	    !ifp->if_carp &&
611 #endif
612 	    (dchg == 0);
613 
614 	/*
615 	 * Check for exact addresses in the hash bucket.
616 	 */
617 	/* IN_IFADDR_RLOCK(); */
618 	LIST_FOREACH(ia, INADDR_HASH(ip->ip_dst.s_addr), ia_hash) {
619 		/*
620 		 * If the address matches, verify that the packet
621 		 * arrived via the correct interface if checking is
622 		 * enabled.
623 		 */
624 		if (IA_SIN(ia)->sin_addr.s_addr == ip->ip_dst.s_addr &&
625 		    (!checkif || ia->ia_ifp == ifp)) {
626 			ifa_ref(&ia->ia_ifa);
627 			/* IN_IFADDR_RUNLOCK(); */
628 			goto ours;
629 		}
630 	}
631 	/* IN_IFADDR_RUNLOCK(); */
632 
633 	/*
634 	 * Check for broadcast addresses.
635 	 *
636 	 * Only accept broadcast packets that arrive via the matching
637 	 * interface.  Reception of forwarded directed broadcasts would
638 	 * be handled via ip_forward() and ether_output() with the loopback
639 	 * into the stack for SIMPLEX interfaces handled by ether_output().
640 	 */
641 	if (ifp != NULL && ifp->if_flags & IFF_BROADCAST) {
642 		IF_ADDR_LOCK(ifp);
643 		TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
644 			if (ifa->ifa_addr->sa_family != AF_INET)
645 				continue;
646 			ia = ifatoia(ifa);
647 			if (satosin(&ia->ia_broadaddr)->sin_addr.s_addr ==
648 			    ip->ip_dst.s_addr) {
649 				ifa_ref(ifa);
650 				IF_ADDR_UNLOCK(ifp);
651 				goto ours;
652 			}
653 			if (ia->ia_netbroadcast.s_addr == ip->ip_dst.s_addr) {
654 				ifa_ref(ifa);
655 				IF_ADDR_UNLOCK(ifp);
656 				goto ours;
657 			}
658 #ifdef BOOTP_COMPAT
659 			if (IA_SIN(ia)->sin_addr.s_addr == INADDR_ANY) {
660 				ifa_ref(ifa);
661 				IF_ADDR_UNLOCK(ifp);
662 				goto ours;
663 			}
664 #endif
665 		}
666 		IF_ADDR_UNLOCK(ifp);
667 		ia = NULL;
668 	}
669 	/* RFC 3927 2.7: Do not forward datagrams for 169.254.0.0/16. */
670 	if (IN_LINKLOCAL(ntohl(ip->ip_dst.s_addr))) {
671 		IPSTAT_INC(ips_cantforward);
672 		m_freem(m);
673 		return;
674 	}
675 	if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
676 		if (V_ip_mrouter) {
677 			/*
678 			 * If we are acting as a multicast router, all
679 			 * incoming multicast packets are passed to the
680 			 * kernel-level multicast forwarding function.
681 			 * The packet is returned (relatively) intact; if
682 			 * ip_mforward() returns a non-zero value, the packet
683 			 * must be discarded, else it may be accepted below.
684 			 */
685 			if (ip_mforward && ip_mforward(ip, ifp, m, 0) != 0) {
686 				IPSTAT_INC(ips_cantforward);
687 				m_freem(m);
688 				return;
689 			}
690 
691 			/*
692 			 * The process-level routing daemon needs to receive
693 			 * all multicast IGMP packets, whether or not this
694 			 * host belongs to their destination groups.
695 			 */
696 			if (ip->ip_p == IPPROTO_IGMP)
697 				goto ours;
698 			IPSTAT_INC(ips_forward);
699 		}
700 		/*
701 		 * Assume the packet is for us, to avoid prematurely taking
702 		 * a lock on the in_multi hash. Protocols must perform
703 		 * their own filtering and update statistics accordingly.
704 		 */
705 		goto ours;
706 	}
707 	if (ip->ip_dst.s_addr == (u_long)INADDR_BROADCAST)
708 		goto ours;
709 	if (ip->ip_dst.s_addr == INADDR_ANY)
710 		goto ours;
711 
712 	/*
713 	 * FAITH (Firewall Aided Internet Translator)
714 	 */
715 	if (ifp && ifp->if_type == IFT_FAITH) {
716 		if (V_ip_keepfaith) {
717 			if (ip->ip_p == IPPROTO_TCP || ip->ip_p == IPPROTO_ICMP)
718 				goto ours;
719 		}
720 		m_freem(m);
721 		return;
722 	}
723 
724 	/*
725 	 * Not for us; forward if possible and desirable.
726 	 */
727 	if (V_ipforwarding == 0) {
728 		IPSTAT_INC(ips_cantforward);
729 		m_freem(m);
730 	} else {
731 #ifdef IPSEC
732 		if (ip_ipsec_fwd(m))
733 			goto bad;
734 #endif /* IPSEC */
735 		ip_forward(m, dchg);
736 	}
737 	return;
738 
739 ours:
740 #ifdef IPSTEALTH
741 	/*
742 	 * IPSTEALTH: Process non-routing options only
743 	 * if the packet is destined for us.
744 	 */
745 	if (V_ipstealth && hlen > sizeof (struct ip) && ip_dooptions(m, 1)) {
746 		if (ia != NULL)
747 			ifa_free(&ia->ia_ifa);
748 		return;
749 	}
750 #endif /* IPSTEALTH */
751 
752 	/* Count the packet in the ip address stats */
753 	if (ia != NULL) {
754 		ia->ia_ifa.if_ipackets++;
755 		ia->ia_ifa.if_ibytes += m->m_pkthdr.len;
756 		ifa_free(&ia->ia_ifa);
757 	}
758 
759 	/*
760 	 * Attempt reassembly; if it succeeds, proceed.
761 	 * ip_reass() will return a different mbuf.
762 	 */
763 	if (ip->ip_off & (IP_MF | IP_OFFMASK)) {
764 		m = ip_reass(m);
765 		if (m == NULL)
766 			return;
767 		ip = mtod(m, struct ip *);
768 		/* Get the header length of the reassembled packet */
769 		hlen = ip->ip_hl << 2;
770 	}
771 
772 	/*
773 	 * Further protocols expect the packet length to be w/o the
774 	 * IP header.
775 	 */
776 	ip->ip_len -= hlen;
777 
778 #ifdef IPSEC
779 	/*
780 	 * enforce IPsec policy checking if we are seeing last header.
781 	 * note that we do not visit this with protocols with pcb layer
782 	 * code - like udp/tcp/raw ip.
783 	 */
784 	if (ip_ipsec_input(m))
785 		goto bad;
786 #endif /* IPSEC */
787 
788 	/*
789 	 * Switch out to protocol's input routine.
790 	 */
791 	IPSTAT_INC(ips_delivered);
792 
793 	(*inetsw[ip_protox[ip->ip_p]].pr_input)(m, hlen);
794 	return;
795 bad:
796 	m_freem(m);
797 }
798 
799 /*
800  * After maxnipq has been updated, propagate the change to UMA.  The UMA zone
801  * max has slightly different semantics than the sysctl, for historical
802  * reasons.
803  */
804 static void
805 maxnipq_update(void)
806 {
807 
808 	/*
809 	 * -1 for unlimited allocation.
810 	 */
811 	if (V_maxnipq < 0)
812 		uma_zone_set_max(V_ipq_zone, 0);
813 	/*
814 	 * Positive number for specific bound.
815 	 */
816 	if (V_maxnipq > 0)
817 		uma_zone_set_max(V_ipq_zone, V_maxnipq);
818 	/*
819 	 * Zero specifies no further fragment queue allocation -- set the
820 	 * bound very low, but rely on implementation elsewhere to actually
821 	 * prevent allocation and reclaim current queues.
822 	 */
823 	if (V_maxnipq == 0)
824 		uma_zone_set_max(V_ipq_zone, 1);
825 }
826 
827 static void
828 ipq_zone_change(void *tag)
829 {
830 
831 	if (V_maxnipq > 0 && V_maxnipq < (nmbclusters / 32)) {
832 		V_maxnipq = nmbclusters / 32;
833 		maxnipq_update();
834 	}
835 }
836 
837 static int
838 sysctl_maxnipq(SYSCTL_HANDLER_ARGS)
839 {
840 	int error, i;
841 
842 	i = V_maxnipq;
843 	error = sysctl_handle_int(oidp, &i, 0, req);
844 	if (error || !req->newptr)
845 		return (error);
846 
847 	/*
848 	 * XXXRW: Might be a good idea to sanity check the argument and place
849 	 * an extreme upper bound.
850 	 */
851 	if (i < -1)
852 		return (EINVAL);
853 	V_maxnipq = i;
854 	maxnipq_update();
855 	return (0);
856 }
857 
858 SYSCTL_PROC(_net_inet_ip, OID_AUTO, maxfragpackets, CTLTYPE_INT|CTLFLAG_RW,
859     NULL, 0, sysctl_maxnipq, "I",
860     "Maximum number of IPv4 fragment reassembly queue entries");
861 
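/*
 * Example: how the administrative limit set through the sysctl above maps
 * onto the UMA zone, per maxnipq_update():
 *
 *	sysctl net.inet.ip.maxfragpackets=800	# bound the number of queues
 *	sysctl net.inet.ip.maxfragpackets=0	# stop accepting new fragments
 *	sysctl net.inet.ip.maxfragpackets=-1	# no limit
 */
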
862 /*
863  * Take incoming datagram fragment and try to reassemble it into
864  * whole datagram.  If the argument is the first fragment or one
865  * in between, the function will return NULL and store the mbuf
866  * in the fragment chain.  If the argument is the last fragment
867  * the packet will be reassembled and the pointer to the new
868  * mbuf returned for further processing.  Only m_tags attached
869  * to the first packet/fragment are preserved.
870  * The IP header is *NOT* adjusted out of iplen.
871  */
872 struct mbuf *
873 ip_reass(struct mbuf *m)
874 {
875 	struct ip *ip;
876 	struct mbuf *p, *q, *nq, *t;
877 	struct ipq *fp = NULL;
878 	struct ipqhead *head;
879 	int i, hlen, next;
880 	u_int8_t ecn, ecn0;
881 	u_short hash;
882 
883 	/* If maxnipq or maxfragsperpacket are 0, never accept fragments. */
884 	if (V_maxnipq == 0 || V_maxfragsperpacket == 0) {
885 		IPSTAT_INC(ips_fragments);
886 		IPSTAT_INC(ips_fragdropped);
887 		m_freem(m);
888 		return (NULL);
889 	}
890 
891 	ip = mtod(m, struct ip *);
892 	hlen = ip->ip_hl << 2;
893 
894 	hash = IPREASS_HASH(ip->ip_src.s_addr, ip->ip_id);
895 	head = &V_ipq[hash];
896 	IPQ_LOCK();
897 
898 	/*
899 	 * Look for queue of fragments
900 	 * of this datagram.
901 	 */
902 	TAILQ_FOREACH(fp, head, ipq_list)
903 		if (ip->ip_id == fp->ipq_id &&
904 		    ip->ip_src.s_addr == fp->ipq_src.s_addr &&
905 		    ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
906 #ifdef MAC
907 		    mac_ipq_match(m, fp) &&
908 #endif
909 		    ip->ip_p == fp->ipq_p)
910 			goto found;
911 
912 	fp = NULL;
913 
914 	/*
915 	 * Attempt to trim the number of allocated fragment queues if it
916 	 * exceeds the administrative limit.
917 	 */
918 	if ((V_nipq > V_maxnipq) && (V_maxnipq > 0)) {
919 		/*
920 		 * drop something from the tail of the current queue
921 		 * before proceeding further
922 		 */
923 		struct ipq *q = TAILQ_LAST(head, ipqhead);
924 		if (q == NULL) {   /* gak */
925 			for (i = 0; i < IPREASS_NHASH; i++) {
926 				struct ipq *r = TAILQ_LAST(&V_ipq[i], ipqhead);
927 				if (r) {
928 					IPSTAT_ADD(ips_fragtimeout,
929 					    r->ipq_nfrags);
930 					ip_freef(&V_ipq[i], r);
931 					break;
932 				}
933 			}
934 		} else {
935 			IPSTAT_ADD(ips_fragtimeout, q->ipq_nfrags);
936 			ip_freef(head, q);
937 		}
938 	}
939 
940 found:
941 	/*
942 	 * Adjust ip_len to not reflect header,
943 	 * convert offset of this to bytes.
944 	 */
945 	ip->ip_len -= hlen;
946 	if (ip->ip_off & IP_MF) {
947 		/*
948 		 * Make sure that fragments have a data length
949 		 * that's a non-zero multiple of 8 bytes.
950 		 */
951 		if (ip->ip_len == 0 || (ip->ip_len & 0x7) != 0) {
952 			IPSTAT_INC(ips_toosmall); /* XXX */
953 			goto dropfrag;
954 		}
955 		m->m_flags |= M_FRAG;
956 	} else
957 		m->m_flags &= ~M_FRAG;
958 	ip->ip_off <<= 3;
959 
960 
961 	/*
962 	 * Attempt reassembly; if it succeeds, proceed.
963 	 * ip_reass() will return a different mbuf.
964 	 */
965 	IPSTAT_INC(ips_fragments);
966 	m->m_pkthdr.header = ip;
967 
968 	/* Previous ip_reass() started here. */
969 	/*
970 	 * Presence of header sizes in mbufs
971 	 * would confuse code below.
972 	 */
973 	m->m_data += hlen;
974 	m->m_len -= hlen;
975 
976 	/*
977 	 * If first fragment to arrive, create a reassembly queue.
978 	 */
979 	if (fp == NULL) {
980 		fp = uma_zalloc(V_ipq_zone, M_NOWAIT);
981 		if (fp == NULL)
982 			goto dropfrag;
983 #ifdef MAC
984 		if (mac_ipq_init(fp, M_NOWAIT) != 0) {
985 			uma_zfree(V_ipq_zone, fp);
986 			fp = NULL;
987 			goto dropfrag;
988 		}
989 		mac_ipq_create(m, fp);
990 #endif
991 		TAILQ_INSERT_HEAD(head, fp, ipq_list);
992 		V_nipq++;
993 		fp->ipq_nfrags = 1;
994 		fp->ipq_ttl = IPFRAGTTL;
995 		fp->ipq_p = ip->ip_p;
996 		fp->ipq_id = ip->ip_id;
997 		fp->ipq_src = ip->ip_src;
998 		fp->ipq_dst = ip->ip_dst;
999 		fp->ipq_frags = m;
1000 		m->m_nextpkt = NULL;
1001 		goto done;
1002 	} else {
1003 		fp->ipq_nfrags++;
1004 #ifdef MAC
1005 		mac_ipq_update(m, fp);
1006 #endif
1007 	}
1008 
1009 #define GETIP(m)	((struct ip*)((m)->m_pkthdr.header))
1010 
1011 	/*
1012 	 * Handle ECN by comparing this segment with the first one;
1013 	 * if CE is set, do not lose CE.
1014 	 * drop if CE and not-ECT are mixed for the same packet.
1015 	 */
1016 	ecn = ip->ip_tos & IPTOS_ECN_MASK;
1017 	ecn0 = GETIP(fp->ipq_frags)->ip_tos & IPTOS_ECN_MASK;
1018 	if (ecn == IPTOS_ECN_CE) {
1019 		if (ecn0 == IPTOS_ECN_NOTECT)
1020 			goto dropfrag;
1021 		if (ecn0 != IPTOS_ECN_CE)
1022 			GETIP(fp->ipq_frags)->ip_tos |= IPTOS_ECN_CE;
1023 	}
1024 	if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT)
1025 		goto dropfrag;
1026 
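	/*
	 * Summary of the ECN merge above (ecn = this fragment,
	 * ecn0 = first fragment already queued):
	 *
	 *	CE     arriving on NotECT queue	-> drop fragment
	 *	NotECT arriving on CE/ECT queue	-> drop fragment
	 *	CE     arriving on ECT queue	-> mark first fragment CE
	 *	anything else			-> accept unchanged
	 */
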
1027 	/*
1028 	 * Find a segment which begins after this one does.
1029 	 */
1030 	for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt)
1031 		if (GETIP(q)->ip_off > ip->ip_off)
1032 			break;
1033 
1034 	/*
1035 	 * If there is a preceding segment, it may provide some of
1036 	 * our data already.  If so, drop the data from the incoming
1037 	 * segment.  If it provides all of our data, drop us, otherwise
1038 	 * stick new segment in the proper place.
1039 	 *
1040 	 * If some of the data is dropped from the preceding
1041 	 * segment, then its checksum is invalidated.
1042 	 */
1043 	if (p) {
1044 		i = GETIP(p)->ip_off + GETIP(p)->ip_len - ip->ip_off;
1045 		if (i > 0) {
1046 			if (i >= ip->ip_len)
1047 				goto dropfrag;
1048 			m_adj(m, i);
1049 			m->m_pkthdr.csum_flags = 0;
1050 			ip->ip_off += i;
1051 			ip->ip_len -= i;
1052 		}
1053 		m->m_nextpkt = p->m_nextpkt;
1054 		p->m_nextpkt = m;
1055 	} else {
1056 		m->m_nextpkt = fp->ipq_frags;
1057 		fp->ipq_frags = m;
1058 	}
1059 
1060 	/*
1061 	 * While we overlap succeeding segments trim them or,
1062 	 * if they are completely covered, dequeue them.
1063 	 */
1064 	for (; q != NULL && ip->ip_off + ip->ip_len > GETIP(q)->ip_off;
1065 	     q = nq) {
1066 		i = (ip->ip_off + ip->ip_len) - GETIP(q)->ip_off;
1067 		if (i < GETIP(q)->ip_len) {
1068 			GETIP(q)->ip_len -= i;
1069 			GETIP(q)->ip_off += i;
1070 			m_adj(q, i);
1071 			q->m_pkthdr.csum_flags = 0;
1072 			break;
1073 		}
1074 		nq = q->m_nextpkt;
1075 		m->m_nextpkt = nq;
1076 		IPSTAT_INC(ips_fragdropped);
1077 		fp->ipq_nfrags--;
1078 		m_freem(q);
1079 	}
1080 
1081 	/*
1082 	 * Check for complete reassembly and perform frag per packet
1083 	 * limiting.
1084 	 *
1085 	 * Frag limiting is performed here so that the nth frag has
1086 	 * a chance to complete the packet before we drop the packet.
1087 	 * As a result, n+1 frags are actually allowed per packet, but
1088 	 * only n will ever be stored. (n = maxfragsperpacket.)
1089 	 *
1090 	 */
1091 	next = 0;
1092 	for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt) {
1093 		if (GETIP(q)->ip_off != next) {
1094 			if (fp->ipq_nfrags > V_maxfragsperpacket) {
1095 				IPSTAT_ADD(ips_fragdropped, fp->ipq_nfrags);
1096 				ip_freef(head, fp);
1097 			}
1098 			goto done;
1099 		}
1100 		next += GETIP(q)->ip_len;
1101 	}
1102 	/* Make sure the last packet didn't have the IP_MF flag */
1103 	if (p->m_flags & M_FRAG) {
1104 		if (fp->ipq_nfrags > V_maxfragsperpacket) {
1105 			IPSTAT_ADD(ips_fragdropped, fp->ipq_nfrags);
1106 			ip_freef(head, fp);
1107 		}
1108 		goto done;
1109 	}
1110 
1111 	/*
1112 	 * Reassembly is complete.  Make sure the packet is a sane size.
1113 	 */
1114 	q = fp->ipq_frags;
1115 	ip = GETIP(q);
1116 	if (next + (ip->ip_hl << 2) > IP_MAXPACKET) {
1117 		IPSTAT_INC(ips_toolong);
1118 		IPSTAT_ADD(ips_fragdropped, fp->ipq_nfrags);
1119 		ip_freef(head, fp);
1120 		goto done;
1121 	}
1122 
1123 	/*
1124 	 * Concatenate fragments.
1125 	 */
1126 	m = q;
1127 	t = m->m_next;
1128 	m->m_next = NULL;
1129 	m_cat(m, t);
1130 	nq = q->m_nextpkt;
1131 	q->m_nextpkt = NULL;
1132 	for (q = nq; q != NULL; q = nq) {
1133 		nq = q->m_nextpkt;
1134 		q->m_nextpkt = NULL;
1135 		m->m_pkthdr.csum_flags &= q->m_pkthdr.csum_flags;
1136 		m->m_pkthdr.csum_data += q->m_pkthdr.csum_data;
1137 		m_cat(m, q);
1138 	}
1139 	/*
1140 	 * In order to do checksumming faster we do 'end-around carry' here
1141 	 * (and not in the for{} loop), though it implies we are not going to
1142 	 * reassemble more than 64k fragments.
1143 	 */
1144 	m->m_pkthdr.csum_data =
1145 	    (m->m_pkthdr.csum_data & 0xffff) + (m->m_pkthdr.csum_data >> 16);
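	/*
	 * Worked example of the fold above: with csum_data == 0x1fffd the
	 * end-around carry gives (0x1fffd & 0xffff) + (0x1fffd >> 16) =
	 * 0xfffd + 0x1 = 0xfffe, i.e. the carry out of bit 15 is folded
	 * back into the low 16 bits as one's complement addition requires.
	 */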
1146 #ifdef MAC
1147 	mac_ipq_reassemble(fp, m);
1148 	mac_ipq_destroy(fp);
1149 #endif
1150 
1151 	/*
1152 	 * Create header for new ip packet by modifying header of first
1153 	 * packet;  dequeue and discard fragment reassembly header.
1154 	 * Make header visible.
1155 	 */
1156 	ip->ip_len = (ip->ip_hl << 2) + next;
1157 	ip->ip_src = fp->ipq_src;
1158 	ip->ip_dst = fp->ipq_dst;
1159 	TAILQ_REMOVE(head, fp, ipq_list);
1160 	V_nipq--;
1161 	uma_zfree(V_ipq_zone, fp);
1162 	m->m_len += (ip->ip_hl << 2);
1163 	m->m_data -= (ip->ip_hl << 2);
1164 	/* some debugging cruft by sklower, below, will go away soon */
1165 	if (m->m_flags & M_PKTHDR)	/* XXX this should be done elsewhere */
1166 		m_fixhdr(m);
1167 	IPSTAT_INC(ips_reassembled);
1168 	IPQ_UNLOCK();
1169 	return (m);
1170 
1171 dropfrag:
1172 	IPSTAT_INC(ips_fragdropped);
1173 	if (fp != NULL)
1174 		fp->ipq_nfrags--;
1175 	m_freem(m);
1176 done:
1177 	IPQ_UNLOCK();
1178 	return (NULL);
1179 
1180 #undef GETIP
1181 }
1182 
1183 /*
1184  * Free a fragment reassembly header and all
1185  * associated datagrams.
1186  */
1187 static void
1188 ip_freef(struct ipqhead *fhp, struct ipq *fp)
1189 {
1190 	struct mbuf *q;
1191 
1192 	IPQ_LOCK_ASSERT();
1193 
1194 	while (fp->ipq_frags) {
1195 		q = fp->ipq_frags;
1196 		fp->ipq_frags = q->m_nextpkt;
1197 		m_freem(q);
1198 	}
1199 	TAILQ_REMOVE(fhp, fp, ipq_list);
1200 	uma_zfree(V_ipq_zone, fp);
1201 	V_nipq--;
1202 }
1203 
1204 /*
1205  * IP timer processing;
1206  * if a timer expires on a reassembly
1207  * queue, discard it.
1208  */
1209 void
1210 ip_slowtimo(void)
1211 {
1212 	VNET_ITERATOR_DECL(vnet_iter);
1213 	struct ipq *fp;
1214 	int i;
1215 
1216 	VNET_LIST_RLOCK_NOSLEEP();
1217 	IPQ_LOCK();
1218 	VNET_FOREACH(vnet_iter) {
1219 		CURVNET_SET(vnet_iter);
1220 		for (i = 0; i < IPREASS_NHASH; i++) {
1221 			for(fp = TAILQ_FIRST(&V_ipq[i]); fp;) {
1222 				struct ipq *fpp;
1223 
1224 				fpp = fp;
1225 				fp = TAILQ_NEXT(fp, ipq_list);
1226 				if(--fpp->ipq_ttl == 0) {
1227 					IPSTAT_ADD(ips_fragtimeout,
1228 					    fpp->ipq_nfrags);
1229 					ip_freef(&V_ipq[i], fpp);
1230 				}
1231 			}
1232 		}
1233 		/*
1234 		 * If we are over the maximum number of fragments
1235 		 * (due to the limit being lowered), drain off
1236 		 * enough to get down to the new limit.
1237 		 */
1238 		if (V_maxnipq >= 0 && V_nipq > V_maxnipq) {
1239 			for (i = 0; i < IPREASS_NHASH; i++) {
1240 				while (V_nipq > V_maxnipq &&
1241 				    !TAILQ_EMPTY(&V_ipq[i])) {
1242 					IPSTAT_ADD(ips_fragdropped,
1243 					    TAILQ_FIRST(&V_ipq[i])->ipq_nfrags);
1244 					ip_freef(&V_ipq[i],
1245 					    TAILQ_FIRST(&V_ipq[i]));
1246 				}
1247 			}
1248 		}
1249 		CURVNET_RESTORE();
1250 	}
1251 	IPQ_UNLOCK();
1252 	VNET_LIST_RUNLOCK_NOSLEEP();
1253 }
1254 
1255 /*
1256  * Drain off all datagram fragments.
1257  */
1258 static void
1259 ip_drain_locked(void)
1260 {
1261 	int     i;
1262 
1263 	IPQ_LOCK_ASSERT();
1264 
1265 	for (i = 0; i < IPREASS_NHASH; i++) {
1266 		while(!TAILQ_EMPTY(&V_ipq[i])) {
1267 			IPSTAT_ADD(ips_fragdropped,
1268 			    TAILQ_FIRST(&V_ipq[i])->ipq_nfrags);
1269 			ip_freef(&V_ipq[i], TAILQ_FIRST(&V_ipq[i]));
1270 		}
1271 	}
1272 }
1273 
1274 void
1275 ip_drain(void)
1276 {
1277 	VNET_ITERATOR_DECL(vnet_iter);
1278 
1279 	VNET_LIST_RLOCK_NOSLEEP();
1280 	IPQ_LOCK();
1281 	VNET_FOREACH(vnet_iter) {
1282 		CURVNET_SET(vnet_iter);
1283 		ip_drain_locked();
1284 		CURVNET_RESTORE();
1285 	}
1286 	IPQ_UNLOCK();
1287 	VNET_LIST_RUNLOCK_NOSLEEP();
1288 	in_rtqdrain();
1289 }
1290 
1291 /*
1292  * The protocol to be inserted into ip_protox[] must be already registered
1293  * in inetsw[], either statically or through pf_proto_register().
1294  */
1295 int
1296 ipproto_register(u_char ipproto)
1297 {
1298 	struct protosw *pr;
1299 
1300 	/* Sanity checks. */
1301 	if (ipproto == 0)
1302 		return (EPROTONOSUPPORT);
1303 
1304 	/*
1305 	 * The protocol slot must not be occupied by another protocol
1306 	 * already.  An index pointing to IPPROTO_RAW is unused.
1307 	 */
1308 	pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
1309 	if (pr == NULL)
1310 		return (EPFNOSUPPORT);
1311 	if (ip_protox[ipproto] != pr - inetsw)	/* IPPROTO_RAW */
1312 		return (EEXIST);
1313 
1314 	/* Find the protocol position in inetsw[] and set the index. */
1315 	for (pr = inetdomain.dom_protosw;
1316 	     pr < inetdomain.dom_protoswNPROTOSW; pr++) {
1317 		if (pr->pr_domain->dom_family == PF_INET &&
1318 		    pr->pr_protocol && pr->pr_protocol == ipproto) {
1319 			/* Be careful to only index valid IP protocols. */
1320 			if (pr->pr_protocol < IPPROTO_MAX) {
1321 				ip_protox[pr->pr_protocol] = pr - inetsw;
1322 				return (0);
1323 			} else
1324 				return (EINVAL);
1325 		}
1326 	}
1327 	return (EPROTONOSUPPORT);
1328 }
1329 
1330 int
1331 ipproto_unregister(u_char ipproto)
1332 {
1333 	struct protosw *pr;
1334 
1335 	/* Sanity checks. */
1336 	if (ipproto == 0)
1337 		return (EPROTONOSUPPORT);
1338 
1339 	/* Check if the protocol was indeed registered. */
1340 	pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
1341 	if (pr == NULL)
1342 		return (EPFNOSUPPORT);
1343 	if (ip_protox[ipproto] == pr - inetsw)  /* IPPROTO_RAW */
1344 		return (ENOENT);
1345 
1346 	/* Reset the protocol slot to IPPROTO_RAW. */
1347 	ip_protox[ipproto] = pr - inetsw;
1348 	return (0);
1349 }
1350 
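/*
 * Example (sketch): a loadable transport protocol typically registers its
 * protosw with the inet domain and then claims its ip_protox[] slot here.
 * The names foo_protosw and IPPROTO_FOO are purely illustrative.
 */
#if 0
	extern struct protosw foo_protosw;		/* hypothetical protosw */

	error = pf_proto_register(&inetdomain, &foo_protosw);
	if (error == 0)
		error = ipproto_register(IPPROTO_FOO);	/* hypothetical number */
	/* ... and on module unload, release the slot again: */
	(void)ipproto_unregister(IPPROTO_FOO);
#endif
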
1351 /*
1352  * Given address of next destination (final or next hop), return (referenced)
1353  * internet address info of interface to be used to get there.
1354  */
1355 struct in_ifaddr *
1356 ip_rtaddr(struct in_addr dst, u_int fibnum)
1357 {
1358 	struct route sro;
1359 	struct sockaddr_in *sin;
1360 	struct in_ifaddr *ia;
1361 
1362 	bzero(&sro, sizeof(sro));
1363 	sin = (struct sockaddr_in *)&sro.ro_dst;
1364 	sin->sin_family = AF_INET;
1365 	sin->sin_len = sizeof(*sin);
1366 	sin->sin_addr = dst;
1367 	in_rtalloc_ign(&sro, 0, fibnum);
1368 
1369 	if (sro.ro_rt == NULL)
1370 		return (NULL);
1371 
1372 	ia = ifatoia(sro.ro_rt->rt_ifa);
1373 	ifa_ref(&ia->ia_ifa);
1374 	RTFREE(sro.ro_rt);
1375 	return (ia);
1376 }
1377 
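/*
 * Example (sketch): callers own the reference returned by ip_rtaddr() and
 * must drop it with ifa_free() when done, as ip_forward() does below.
 * "dst" and "m" stand in for the caller's destination and packet.
 */
#if 0
	struct in_ifaddr *ia;

	ia = ip_rtaddr(dst, M_GETFIB(m));
	if (ia != NULL) {
		/* ... use ia->ia_ifp / ia->ia_ifa ... */
		ifa_free(&ia->ia_ifa);
	}
#endif
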
1378 u_char inetctlerrmap[PRC_NCMDS] = {
1379 	0,		0,		0,		0,
1380 	0,		EMSGSIZE,	EHOSTDOWN,	EHOSTUNREACH,
1381 	EHOSTUNREACH,	EHOSTUNREACH,	ECONNREFUSED,	ECONNREFUSED,
1382 	EMSGSIZE,	EHOSTUNREACH,	0,		0,
1383 	0,		0,		EHOSTUNREACH,	0,
1384 	ENOPROTOOPT,	ECONNREFUSED
1385 };
1386 
1387 /*
1388  * Forward a packet.  If some error occurs return the sender
1389  * an icmp packet.  Note we can't always generate a meaningful
1390  * icmp message because icmp doesn't have a large enough repertoire
1391  * of codes and types.
1392  *
1393  * If not forwarding, just drop the packet.  This could be confusing
1394  * if ipforwarding was zero but some routing protocol was advancing
1395  * us as a gateway to somewhere.  However, we must let the routing
1396  * protocol deal with that.
1397  *
1398  * The srcrt parameter indicates whether the packet is being forwarded
1399  * via a source route.
1400  */
1401 void
1402 ip_forward(struct mbuf *m, int srcrt)
1403 {
1404 	struct ip *ip = mtod(m, struct ip *);
1405 	struct in_ifaddr *ia;
1406 	struct mbuf *mcopy;
1407 	struct in_addr dest;
1408 	struct route ro;
1409 	int error, type = 0, code = 0, mtu = 0;
1410 
1411 	if (m->m_flags & (M_BCAST|M_MCAST) || in_canforward(ip->ip_dst) == 0) {
1412 		IPSTAT_INC(ips_cantforward);
1413 		m_freem(m);
1414 		return;
1415 	}
1416 #ifdef IPSTEALTH
1417 	if (!V_ipstealth) {
1418 #endif
1419 		if (ip->ip_ttl <= IPTTLDEC) {
1420 			icmp_error(m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS,
1421 			    0, 0);
1422 			return;
1423 		}
1424 #ifdef IPSTEALTH
1425 	}
1426 #endif
1427 
1428 	ia = ip_rtaddr(ip->ip_dst, M_GETFIB(m));
1429 #ifndef IPSEC
1430 	/*
1431 	 * 'ia' may be NULL if there is no route for this destination.
1432 	 * In the case of IPsec, don't discard it just yet, but pass it to
1433 	 * ip_output in case of outgoing IPsec policy.
1434 	 */
1435 	if (!srcrt && ia == NULL) {
1436 		icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_HOST, 0, 0);
1437 		return;
1438 	}
1439 #endif
1440 
1441 	/*
1442 	 * Save the IP header and at most 8 bytes of the payload,
1443 	 * in case we need to generate an ICMP message to the src.
1444 	 *
1445 	 * XXX this can be optimized a lot by saving the data in a local
1446 	 * buffer on the stack (72 bytes at most), and only allocating the
1447 	 * mbuf if really necessary. The vast majority of the packets
1448 	 * are forwarded without having to send an ICMP back (either
1449 	 * because unnecessary, or because rate limited), so we are
1450 	 * really wasting a lot of work here.
1451 	 *
1452 	 * We don't use m_copy() because it might return a reference
1453 	 * to a shared cluster. Both this function and ip_output()
1454 	 * assume exclusive access to the IP header in `m', so any
1455 	 * data in a cluster may change before we reach icmp_error().
1456 	 */
1457 	MGETHDR(mcopy, M_DONTWAIT, m->m_type);
1458 	if (mcopy != NULL && !m_dup_pkthdr(mcopy, m, M_DONTWAIT)) {
1459 		/*
1460 		 * It's probably ok if the pkthdr dup fails (because
1461 		 * the deep copy of the tag chain failed), but for now
1462 		 * be conservative and just discard the copy since
1463 		 * code below may some day want the tags.
1464 		 */
1465 		m_free(mcopy);
1466 		mcopy = NULL;
1467 	}
1468 	if (mcopy != NULL) {
1469 		mcopy->m_len = min(ip->ip_len, M_TRAILINGSPACE(mcopy));
1470 		mcopy->m_pkthdr.len = mcopy->m_len;
1471 		m_copydata(m, 0, mcopy->m_len, mtod(mcopy, caddr_t));
1472 	}
1473 
1474 #ifdef IPSTEALTH
1475 	if (!V_ipstealth) {
1476 #endif
1477 		ip->ip_ttl -= IPTTLDEC;
1478 #ifdef IPSTEALTH
1479 	}
1480 #endif
1481 
1482 	/*
1483 	 * If forwarding packet using same interface that it came in on,
1484 	 * perhaps should send a redirect to sender to shortcut a hop.
1485 	 * Only send redirect if source is sending directly to us,
1486 	 * and if packet was not source routed (or has any options).
1487 	 * Also, don't send redirect if forwarding using a default route
1488 	 * or a route modified by a redirect.
1489 	 */
1490 	dest.s_addr = 0;
1491 	if (!srcrt && V_ipsendredirects &&
1492 	    ia != NULL && ia->ia_ifp == m->m_pkthdr.rcvif) {
1493 		struct sockaddr_in *sin;
1494 		struct rtentry *rt;
1495 
1496 		bzero(&ro, sizeof(ro));
1497 		sin = (struct sockaddr_in *)&ro.ro_dst;
1498 		sin->sin_family = AF_INET;
1499 		sin->sin_len = sizeof(*sin);
1500 		sin->sin_addr = ip->ip_dst;
1501 		in_rtalloc_ign(&ro, 0, M_GETFIB(m));
1502 
1503 		rt = ro.ro_rt;
1504 
1505 		if (rt && (rt->rt_flags & (RTF_DYNAMIC|RTF_MODIFIED)) == 0 &&
1506 		    satosin(rt_key(rt))->sin_addr.s_addr != 0) {
1507 #define	RTA(rt)	((struct in_ifaddr *)(rt->rt_ifa))
1508 			u_long src = ntohl(ip->ip_src.s_addr);
1509 
1510 			if (RTA(rt) &&
1511 			    (src & RTA(rt)->ia_subnetmask) == RTA(rt)->ia_subnet) {
1512 				if (rt->rt_flags & RTF_GATEWAY)
1513 					dest.s_addr = satosin(rt->rt_gateway)->sin_addr.s_addr;
1514 				else
1515 					dest.s_addr = ip->ip_dst.s_addr;
1516 				/* Router requirements say to send only host redirects. */
1517 				type = ICMP_REDIRECT;
1518 				code = ICMP_REDIRECT_HOST;
1519 			}
1520 		}
1521 		if (rt)
1522 			RTFREE(rt);
1523 	}
1524 
1525 	/*
1526 	 * Try to cache the route MTU from ip_output so we can consider it for
1527 	 * the ICMP_UNREACH_NEEDFRAG "Next-Hop MTU" field described in RFC1191.
1528 	 */
1529 	bzero(&ro, sizeof(ro));
1530 
1531 	error = ip_output(m, NULL, &ro, IP_FORWARDING, NULL, NULL);
1532 
1533 	if (error == EMSGSIZE && ro.ro_rt)
1534 		mtu = ro.ro_rt->rt_rmx.rmx_mtu;
1535 	if (ro.ro_rt)
1536 		RTFREE(ro.ro_rt);
1537 
1538 	if (error)
1539 		IPSTAT_INC(ips_cantforward);
1540 	else {
1541 		IPSTAT_INC(ips_forward);
1542 		if (type)
1543 			IPSTAT_INC(ips_redirectsent);
1544 		else {
1545 			if (mcopy)
1546 				m_freem(mcopy);
1547 			if (ia != NULL)
1548 				ifa_free(&ia->ia_ifa);
1549 			return;
1550 		}
1551 	}
1552 	if (mcopy == NULL) {
1553 		if (ia != NULL)
1554 			ifa_free(&ia->ia_ifa);
1555 		return;
1556 	}
1557 
1558 	switch (error) {
1559 
1560 	case 0:				/* forwarded, but need redirect */
1561 		/* type, code set above */
1562 		break;
1563 
1564 	case ENETUNREACH:
1565 	case EHOSTUNREACH:
1566 	case ENETDOWN:
1567 	case EHOSTDOWN:
1568 	default:
1569 		type = ICMP_UNREACH;
1570 		code = ICMP_UNREACH_HOST;
1571 		break;
1572 
1573 	case EMSGSIZE:
1574 		type = ICMP_UNREACH;
1575 		code = ICMP_UNREACH_NEEDFRAG;
1576 
1577 #ifdef IPSEC
1578 		/*
1579 		 * If IPsec is configured for this path,
1580 		 * override any MTU value possibly set by ip_output.
1581 		 */
1582 		mtu = ip_ipsec_mtu(mcopy, mtu);
1583 #endif /* IPSEC */
1584 		/*
1585 		 * If the MTU was set before, make sure we are below the
1586 		 * interface MTU.
1587 		 * If the MTU wasn't set before use the interface mtu or
1588 		 * fall back to the next smaller mtu step compared to the
1589 		 * current packet size.
1590 		 */
1591 		if (mtu != 0) {
1592 			if (ia != NULL)
1593 				mtu = min(mtu, ia->ia_ifp->if_mtu);
1594 		} else {
1595 			if (ia != NULL)
1596 				mtu = ia->ia_ifp->if_mtu;
1597 			else
1598 				mtu = ip_next_mtu(ip->ip_len, 0);
1599 		}
1600 		IPSTAT_INC(ips_cantfrag);
1601 		break;
1602 
1603 	case ENOBUFS:
1604 		/*
1605 		 * As required by RFC 1812 (Requirements for IP Version 4
1606 		 * Routers), a router should not generate ICMP_SOURCEQUENCH.
1607 		 * Source quench could be a big problem under DoS attacks,
1608 		 * or if the underlying interface is rate-limited.
1609 		 * Those who need source quench packets may re-enable them
1610 		 * via the net.inet.ip.sendsourcequench sysctl.
1611 		 */
1612 		if (V_ip_sendsourcequench == 0) {
1613 			m_freem(mcopy);
1614 			if (ia != NULL)
1615 				ifa_free(&ia->ia_ifa);
1616 			return;
1617 		} else {
1618 			type = ICMP_SOURCEQUENCH;
1619 			code = 0;
1620 		}
1621 		break;
1622 
1623 	case EACCES:			/* ipfw denied packet */
1624 		m_freem(mcopy);
1625 		if (ia != NULL)
1626 			ifa_free(&ia->ia_ifa);
1627 		return;
1628 	}
1629 	if (ia != NULL)
1630 		ifa_free(&ia->ia_ifa);
1631 	icmp_error(mcopy, type, code, dest.s_addr, mtu);
1632 }
1633 
1634 void
1635 ip_savecontrol(struct inpcb *inp, struct mbuf **mp, struct ip *ip,
1636     struct mbuf *m)
1637 {
1638 
1639 	if (inp->inp_socket->so_options & (SO_BINTIME | SO_TIMESTAMP)) {
1640 		struct bintime bt;
1641 
1642 		bintime(&bt);
1643 		if (inp->inp_socket->so_options & SO_BINTIME) {
1644 			*mp = sbcreatecontrol((caddr_t) &bt, sizeof(bt),
1645 			SCM_BINTIME, SOL_SOCKET);
1646 			if (*mp)
1647 				mp = &(*mp)->m_next;
1648 		}
1649 		if (inp->inp_socket->so_options & SO_TIMESTAMP) {
1650 			struct timeval tv;
1651 
1652 			bintime2timeval(&bt, &tv);
1653 			*mp = sbcreatecontrol((caddr_t) &tv, sizeof(tv),
1654 				SCM_TIMESTAMP, SOL_SOCKET);
1655 			if (*mp)
1656 				mp = &(*mp)->m_next;
1657 		}
1658 	}
1659 	if (inp->inp_flags & INP_RECVDSTADDR) {
1660 		*mp = sbcreatecontrol((caddr_t) &ip->ip_dst,
1661 		    sizeof(struct in_addr), IP_RECVDSTADDR, IPPROTO_IP);
1662 		if (*mp)
1663 			mp = &(*mp)->m_next;
1664 	}
1665 	if (inp->inp_flags & INP_RECVTTL) {
1666 		*mp = sbcreatecontrol((caddr_t) &ip->ip_ttl,
1667 		    sizeof(u_char), IP_RECVTTL, IPPROTO_IP);
1668 		if (*mp)
1669 			mp = &(*mp)->m_next;
1670 	}
1671 #ifdef notyet
1672 	/* XXX
1673 	 * Moving these out of udp_input() made them even more broken
1674 	 * than they already were.
1675 	 */
1676 	/* options were tossed already */
1677 	if (inp->inp_flags & INP_RECVOPTS) {
1678 		*mp = sbcreatecontrol((caddr_t) opts_deleted_above,
1679 		    sizeof(struct in_addr), IP_RECVOPTS, IPPROTO_IP);
1680 		if (*mp)
1681 			mp = &(*mp)->m_next;
1682 	}
1683 	/* ip_srcroute doesn't do what we want here, need to fix */
1684 	if (inp->inp_flags & INP_RECVRETOPTS) {
1685 		*mp = sbcreatecontrol((caddr_t) ip_srcroute(m),
1686 		    sizeof(struct in_addr), IP_RECVRETOPTS, IPPROTO_IP);
1687 		if (*mp)
1688 			mp = &(*mp)->m_next;
1689 	}
1690 #endif
1691 	if (inp->inp_flags & INP_RECVIF) {
1692 		struct ifnet *ifp;
1693 		struct sdlbuf {
1694 			struct sockaddr_dl sdl;
1695 			u_char	pad[32];
1696 		} sdlbuf;
1697 		struct sockaddr_dl *sdp;
1698 		struct sockaddr_dl *sdl2 = &sdlbuf.sdl;
1699 
1700 		if (((ifp = m->m_pkthdr.rcvif)) &&
1701 		    (ifp->if_index && (ifp->if_index <= V_if_index))) {
1702 			sdp = (struct sockaddr_dl *)ifp->if_addr->ifa_addr;
1703 			/*
1704 			 * Change our mind and don't try to copy.
1705 			 */
1706 			if ((sdp->sdl_family != AF_LINK) ||
1707 			    (sdp->sdl_len > sizeof(sdlbuf))) {
1708 				goto makedummy;
1709 			}
1710 			bcopy(sdp, sdl2, sdp->sdl_len);
1711 		} else {
1712 makedummy:
1713 			sdl2->sdl_len
1714 				= offsetof(struct sockaddr_dl, sdl_data[0]);
1715 			sdl2->sdl_family = AF_LINK;
1716 			sdl2->sdl_index = 0;
1717 			sdl2->sdl_nlen = sdl2->sdl_alen = sdl2->sdl_slen = 0;
1718 		}
1719 		*mp = sbcreatecontrol((caddr_t) sdl2, sdl2->sdl_len,
1720 			IP_RECVIF, IPPROTO_IP);
1721 		if (*mp)
1722 			mp = &(*mp)->m_next;
1723 	}
1724 }
1725 
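/*
 * Example (sketch): how the control data built above shows up to a
 * userland receiver that enabled IP_RECVDSTADDR on a datagram socket;
 * "s" is an already-created socket and msg_control has been set up
 * by the caller.
 */
#if 0
	int on = 1;
	struct msghdr msg;
	struct cmsghdr *cmsg;
	struct in_addr dst;

	setsockopt(s, IPPROTO_IP, IP_RECVDSTADDR, &on, sizeof(on));
	/* ... recvmsg(s, &msg, 0) ... */
	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL;
	    cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == IPPROTO_IP &&
		    cmsg->cmsg_type == IP_RECVDSTADDR)
			memcpy(&dst, CMSG_DATA(cmsg), sizeof(dst));
	}
#endif
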
1726 /*
1727  * XXXRW: Multicast routing code in ip_mroute.c is generally MPSAFE, but the
1728  * ip_rsvp and ip_rsvp_on variables need to be interlocked with rsvp_on
1729  * locking.  This code remains in ip_input.c as ip_mroute.c is optionally
1730  * compiled.
1731  */
1732 static VNET_DEFINE(int, ip_rsvp_on);
1733 VNET_DEFINE(struct socket *, ip_rsvpd);
1734 
1735 #define	V_ip_rsvp_on		VNET(ip_rsvp_on)
1736 
1737 int
1738 ip_rsvp_init(struct socket *so)
1739 {
1740 
1741 	if (so->so_type != SOCK_RAW ||
1742 	    so->so_proto->pr_protocol != IPPROTO_RSVP)
1743 		return EOPNOTSUPP;
1744 
1745 	if (V_ip_rsvpd != NULL)
1746 		return EADDRINUSE;
1747 
1748 	V_ip_rsvpd = so;
1749 	/*
1750 	 * This may seem silly, but we need to be sure we don't over-increment
1751 	 * the RSVP counter, in case something slips up.
1752 	 */
1753 	if (!V_ip_rsvp_on) {
1754 		V_ip_rsvp_on = 1;
1755 		V_rsvp_on++;
1756 	}
1757 
1758 	return 0;
1759 }
1760 
1761 int
1762 ip_rsvp_done(void)
1763 {
1764 
1765 	V_ip_rsvpd = NULL;
1766 	/*
1767 	 * This may seem silly, but we need to be sure we don't over-decrement
1768 	 * the RSVP counter, in case something slips up.
1769 	 */
1770 	if (V_ip_rsvp_on) {
1771 		V_ip_rsvp_on = 0;
1772 		V_rsvp_on--;
1773 	}
1774 	return 0;
1775 }
1776 
1777 void
1778 rsvp_input(struct mbuf *m, int off)	/* XXX must fixup manually */
1779 {
1780 
1781 	if (rsvp_input_p) { /* call the real one if loaded */
1782 		rsvp_input_p(m, off);
1783 		return;
1784 	}
1785 
1786 	/* Can still get packets with rsvp_on = 0 if there is a local member
1787 	 * of the group to which the RSVP packet is addressed.  But in this
1788 	 * case we want to throw the packet away.
1789 	 */
1790 
1791 	if (!V_rsvp_on) {
1792 		m_freem(m);
1793 		return;
1794 	}
1795 
1796 	if (V_ip_rsvpd != NULL) {
1797 		rip_input(m, off);
1798 		return;
1799 	}
1800 	/* Drop the packet */
1801 	m_freem(m);
1802 }
1803