xref: /freebsd/sys/netinet/ip_input.c (revision 1ab64a6127a184af40fdb4c13a39019a76a57216)
1 /*-
2  * Copyright (c) 1982, 1986, 1988, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 4. Neither the name of the University nor the names of its contributors
14  *    may be used to endorse or promote products derived from this software
15  *    without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  *	@(#)ip_input.c	8.2 (Berkeley) 1/4/94
30  */
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 #include "opt_bootp.h"
36 #include "opt_ipfw.h"
37 #include "opt_ipstealth.h"
38 #include "opt_ipsec.h"
39 #include "opt_route.h"
40 #include "opt_rss.h"
41 
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/mbuf.h>
45 #include <sys/malloc.h>
46 #include <sys/domain.h>
47 #include <sys/protosw.h>
48 #include <sys/socket.h>
49 #include <sys/time.h>
50 #include <sys/kernel.h>
51 #include <sys/lock.h>
52 #include <sys/rwlock.h>
53 #include <sys/sdt.h>
54 #include <sys/syslog.h>
55 #include <sys/sysctl.h>
56 
57 #include <net/pfil.h>
58 #include <net/if.h>
59 #include <net/if_types.h>
60 #include <net/if_var.h>
61 #include <net/if_dl.h>
62 #include <net/route.h>
63 #include <net/netisr.h>
64 #include <net/rss_config.h>
65 #include <net/vnet.h>
66 
67 #include <netinet/in.h>
68 #include <netinet/in_kdtrace.h>
69 #include <netinet/in_systm.h>
70 #include <netinet/in_var.h>
71 #include <netinet/ip.h>
72 #include <netinet/in_pcb.h>
73 #include <netinet/ip_var.h>
74 #include <netinet/ip_fw.h>
75 #include <netinet/ip_icmp.h>
76 #include <netinet/ip_options.h>
77 #include <machine/in_cksum.h>
78 #include <netinet/ip_carp.h>
79 #ifdef IPSEC
80 #include <netinet/ip_ipsec.h>
81 #endif /* IPSEC */
82 #include <netinet/in_rss.h>
83 
84 #include <sys/socketvar.h>
85 
86 #include <security/mac/mac_framework.h>
87 
88 #ifdef CTASSERT
89 CTASSERT(sizeof(struct ip) == 20);
90 #endif
91 
92 struct	rwlock in_ifaddr_lock;
93 RW_SYSINIT(in_ifaddr_lock, &in_ifaddr_lock, "in_ifaddr_lock");
94 
95 VNET_DEFINE(int, rsvp_on);
96 
97 VNET_DEFINE(int, ipforwarding);
98 SYSCTL_INT(_net_inet_ip, IPCTL_FORWARDING, forwarding, CTLFLAG_VNET | CTLFLAG_RW,
99     &VNET_NAME(ipforwarding), 0,
100     "Enable IP forwarding between interfaces");
101 
102 static VNET_DEFINE(int, ipsendredirects) = 1;	/* XXX */
103 #define	V_ipsendredirects	VNET(ipsendredirects)
104 SYSCTL_INT(_net_inet_ip, IPCTL_SENDREDIRECTS, redirect, CTLFLAG_VNET | CTLFLAG_RW,
105     &VNET_NAME(ipsendredirects), 0,
106     "Enable sending IP redirects");
107 
108 /*
109  * XXX - Setting ip_checkinterface mostly implements the receive side of
110  * the Strong ES model described in RFC 1122, but since the routing table
111  * and transmit implementation do not implement the Strong ES model,
112  * setting this to 1 results in an odd hybrid.
113  *
114  * XXX - ip_checkinterface currently must be disabled if you use ipnat
115  * to translate the destination address to another local interface.
116  *
117  * XXX - ip_checkinterface must be disabled if you add IP aliases
118  * to the loopback interface instead of the interface where the
119  * packets for those addresses are received.
120  */
121 static VNET_DEFINE(int, ip_checkinterface);
122 #define	V_ip_checkinterface	VNET(ip_checkinterface)
123 SYSCTL_INT(_net_inet_ip, OID_AUTO, check_interface, CTLFLAG_VNET | CTLFLAG_RW,
124     &VNET_NAME(ip_checkinterface), 0,
125     "Verify packet arrives on correct interface");
126 
127 VNET_DEFINE(struct pfil_head, inet_pfil_hook);	/* Packet filter hooks */
128 
129 static struct netisr_handler ip_nh = {
130 	.nh_name = "ip",
131 	.nh_handler = ip_input,
132 	.nh_proto = NETISR_IP,
133 #ifdef	RSS
134 	.nh_m2cpuid = rss_soft_m2cpuid,
135 	.nh_policy = NETISR_POLICY_CPU,
136 	.nh_dispatch = NETISR_DISPATCH_HYBRID,
137 #else
138 	.nh_policy = NETISR_POLICY_FLOW,
139 #endif
140 };
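
/*
 * Illustrative sketch, not part of the original source: a producer such
 * as a link-layer input path hands an IPv4 packet to the handler above
 * by dispatching it to the NETISR_IP protocol.  The helper name is an
 * assumption for the example.
 */
#if 0
static void
example_ip_enqueue(struct mbuf *m)
{

	/* May be processed inline or queued, per the netisr policy above. */
	(void)netisr_dispatch(NETISR_IP, m);
}
#endif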
141 
142 #ifdef	RSS
143 /*
144  * Directly dispatched frames are currently assumed
145  * to have a flowid already calculated.
146  *
147  * It should likely have something that asserts it
148  * actually has valid flow details.
149  */
150 static struct netisr_handler ip_direct_nh = {
151 	.nh_name = "ip_direct",
152 	.nh_handler = ip_direct_input,
153 	.nh_proto = NETISR_IP_DIRECT,
154 	.nh_m2cpuid = rss_m2cpuid,
155 	.nh_policy = NETISR_POLICY_CPU,
156 	.nh_dispatch = NETISR_DISPATCH_HYBRID,
157 };
158 #endif
159 
160 extern	struct domain inetdomain;
161 extern	struct protosw inetsw[];
162 u_char	ip_protox[IPPROTO_MAX];
163 VNET_DEFINE(struct in_ifaddrhead, in_ifaddrhead);  /* first inet address */
164 VNET_DEFINE(struct in_ifaddrhashhead *, in_ifaddrhashtbl); /* inet addr hash table  */
165 VNET_DEFINE(u_long, in_ifaddrhmask);		/* mask for hash table */
166 
167 static VNET_DEFINE(uma_zone_t, ipq_zone);
168 static VNET_DEFINE(TAILQ_HEAD(ipqhead, ipq), ipq[IPREASS_NHASH]);
169 static struct mtx_padalign ipqlock[IPREASS_NHASH];
170 
171 #define	V_ipq_zone		VNET(ipq_zone)
172 #define	V_ipq			VNET(ipq)
173 
174 /*
175  * The ipqlock array is global, /not/ per-VNET.
176  */
177 #define	IPQ_LOCK(i)	mtx_lock(&ipqlock[(i)])
178 #define	IPQ_UNLOCK(i)	mtx_unlock(&ipqlock[(i)])
179 #define	IPQ_LOCK_INIT(i)	mtx_init(&ipqlock[(i)], "ipqlock", NULL, MTX_DEF)
180 #define	IPQ_LOCK_ASSERT(i)	mtx_assert(&ipqlock[(i)], MA_OWNED)
181 
182 static void	maxnipq_update(void);
183 static void	ipq_zone_change(void *);
184 static void	ip_drain_locked(void);
185 
186 static VNET_DEFINE(int, maxnipq);  /* Administrative limit on # reass queues. */
187 static VNET_DEFINE(int, nipq);			/* Total # of reass queues */
188 #define	V_maxnipq		VNET(maxnipq)
189 #define	V_nipq			VNET(nipq)
190 SYSCTL_INT(_net_inet_ip, OID_AUTO, fragpackets, CTLFLAG_VNET | CTLFLAG_RD,
191     &VNET_NAME(nipq), 0,
192     "Current number of IPv4 fragment reassembly queue entries");
193 
194 static VNET_DEFINE(int, maxfragsperpacket);
195 #define	V_maxfragsperpacket	VNET(maxfragsperpacket)
196 SYSCTL_INT(_net_inet_ip, OID_AUTO, maxfragsperpacket, CTLFLAG_VNET | CTLFLAG_RW,
197     &VNET_NAME(maxfragsperpacket), 0,
198     "Maximum number of IPv4 fragments allowed per packet");
199 
200 #ifdef IPCTL_DEFMTU
201 SYSCTL_INT(_net_inet_ip, IPCTL_DEFMTU, mtu, CTLFLAG_RW,
202     &ip_mtu, 0, "Default MTU");
203 #endif
204 
205 #ifdef IPSTEALTH
206 VNET_DEFINE(int, ipstealth);
207 SYSCTL_INT(_net_inet_ip, OID_AUTO, stealth, CTLFLAG_VNET | CTLFLAG_RW,
208     &VNET_NAME(ipstealth), 0,
209     "IP stealth mode, no TTL decrementation on forwarding");
210 #endif
211 
212 static void	ip_freef(struct ipqhead *, int, struct ipq *);
213 
214 /*
215  * IP statistics are stored in the "array" of counter(9)s.
216  */
217 VNET_PCPUSTAT_DEFINE(struct ipstat, ipstat);
218 VNET_PCPUSTAT_SYSINIT(ipstat);
219 SYSCTL_VNET_PCPUSTAT(_net_inet_ip, IPCTL_STATS, stats, struct ipstat, ipstat,
220     "IP statistics (struct ipstat, netinet/ip_var.h)");
221 
222 #ifdef VIMAGE
223 VNET_PCPUSTAT_SYSUNINIT(ipstat);
224 #endif /* VIMAGE */
225 
226 /*
227  * Kernel module interface for updating ipstat.  The argument is an index
228  * into ipstat treated as an array.
229  */
230 void
231 kmod_ipstat_inc(int statnum)
232 {
233 
234 	counter_u64_add(VNET(ipstat)[statnum], 1);
235 }
236 
237 void
238 kmod_ipstat_dec(int statnum)
239 {
240 
241 	counter_u64_add(VNET(ipstat)[statnum], -1);
242 }
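
/*
 * Illustrative sketch, not part of the original source: callers usually
 * avoid raw indices and derive them from struct ipstat layout.  The
 * wrapper below mirrors the offsetof-based convention that the
 * KMOD_IPSTAT_INC() macro in ip_var.h is assumed to use.
 */
#if 0
#define	EXAMPLE_IPSTAT_INC(name)					\
	kmod_ipstat_inc(offsetof(struct ipstat, name) / sizeof(uint64_t))

static void
example_count_forward(void)
{

	EXAMPLE_IPSTAT_INC(ips_forward);	/* one more forwarded packet */
}
#endif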
243 
244 static int
245 sysctl_netinet_intr_queue_maxlen(SYSCTL_HANDLER_ARGS)
246 {
247 	int error, qlimit;
248 
249 	netisr_getqlimit(&ip_nh, &qlimit);
250 	error = sysctl_handle_int(oidp, &qlimit, 0, req);
251 	if (error || !req->newptr)
252 		return (error);
253 	if (qlimit < 1)
254 		return (EINVAL);
255 	return (netisr_setqlimit(&ip_nh, qlimit));
256 }
257 SYSCTL_PROC(_net_inet_ip, IPCTL_INTRQMAXLEN, intr_queue_maxlen,
258     CTLTYPE_INT|CTLFLAG_RW, 0, 0, sysctl_netinet_intr_queue_maxlen, "I",
259     "Maximum size of the IP input queue");
260 
261 static int
262 sysctl_netinet_intr_queue_drops(SYSCTL_HANDLER_ARGS)
263 {
264 	u_int64_t qdrops_long;
265 	int error, qdrops;
266 
267 	netisr_getqdrops(&ip_nh, &qdrops_long);
268 	qdrops = qdrops_long;
269 	error = sysctl_handle_int(oidp, &qdrops, 0, req);
270 	if (error || !req->newptr)
271 		return (error);
272 	if (qdrops != 0)
273 		return (EINVAL);
274 	netisr_clearqdrops(&ip_nh);
275 	return (0);
276 }
277 
278 SYSCTL_PROC(_net_inet_ip, IPCTL_INTRQDROPS, intr_queue_drops,
279     CTLTYPE_INT|CTLFLAG_RD, 0, 0, sysctl_netinet_intr_queue_drops, "I",
280     "Number of packets dropped from the IP input queue");
281 
282 #ifdef	RSS
283 static int
284 sysctl_netinet_intr_direct_queue_maxlen(SYSCTL_HANDLER_ARGS)
285 {
286 	int error, qlimit;
287 
288 	netisr_getqlimit(&ip_direct_nh, &qlimit);
289 	error = sysctl_handle_int(oidp, &qlimit, 0, req);
290 	if (error || !req->newptr)
291 		return (error);
292 	if (qlimit < 1)
293 		return (EINVAL);
294 	return (netisr_setqlimit(&ip_direct_nh, qlimit));
295 }
296 SYSCTL_PROC(_net_inet_ip, IPCTL_INTRQMAXLEN, intr_direct_queue_maxlen,
297     CTLTYPE_INT|CTLFLAG_RW, 0, 0, sysctl_netinet_intr_direct_queue_maxlen, "I",
298     "Maximum size of the IP direct input queue");
299 
300 static int
301 sysctl_netinet_intr_direct_queue_drops(SYSCTL_HANDLER_ARGS)
302 {
303 	u_int64_t qdrops_long;
304 	int error, qdrops;
305 
306 	netisr_getqdrops(&ip_direct_nh, &qdrops_long);
307 	qdrops = qdrops_long;
308 	error = sysctl_handle_int(oidp, &qdrops, 0, req);
309 	if (error || !req->newptr)
310 		return (error);
311 	if (qdrops != 0)
312 		return (EINVAL);
313 	netisr_clearqdrops(&ip_direct_nh);
314 	return (0);
315 }
316 
317 SYSCTL_PROC(_net_inet_ip, IPCTL_INTRQDROPS, intr_direct_queue_drops,
318     CTLTYPE_INT|CTLFLAG_RD, 0, 0, sysctl_netinet_intr_direct_queue_drops, "I",
319     "Number of packets dropped from the IP direct input queue");
320 #endif	/* RSS */
321 
322 /*
323  * IP initialization: fill in IP protocol switch table.
324  * All protocols not implemented in kernel go to raw IP protocol handler.
325  */
326 void
327 ip_init(void)
328 {
329 	struct protosw *pr;
330 	int i;
331 
332 	TAILQ_INIT(&V_in_ifaddrhead);
333 	V_in_ifaddrhashtbl = hashinit(INADDR_NHASH, M_IFADDR, &V_in_ifaddrhmask);
334 
335 	/* Initialize IP reassembly queue. */
336 	for (i = 0; i < IPREASS_NHASH; i++)
337 		TAILQ_INIT(&V_ipq[i]);
338 	V_maxnipq = nmbclusters / 32;
339 	V_maxfragsperpacket = 16;
340 	V_ipq_zone = uma_zcreate("ipq", sizeof(struct ipq), NULL, NULL, NULL,
341 	    NULL, UMA_ALIGN_PTR, 0);
342 	maxnipq_update();
343 
344 	/* Initialize packet filter hooks. */
345 	V_inet_pfil_hook.ph_type = PFIL_TYPE_AF;
346 	V_inet_pfil_hook.ph_af = AF_INET;
347 	if ((i = pfil_head_register(&V_inet_pfil_hook)) != 0)
348 		printf("%s: WARNING: unable to register pfil hook, "
349 			"error %d\n", __func__, i);
350 
351 	/* Skip initialization of globals for non-default instances. */
352 	if (!IS_DEFAULT_VNET(curvnet))
353 		return;
354 
355 	pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
356 	if (pr == NULL)
357 		panic("ip_init: PF_INET not found");
358 
359 	/* Initialize the entire ip_protox[] array to IPPROTO_RAW. */
360 	for (i = 0; i < IPPROTO_MAX; i++)
361 		ip_protox[i] = pr - inetsw;
362 	/*
363 	 * Cycle through IP protocols and put them into the appropriate place
364 	 * in ip_protox[].
365 	 */
366 	for (pr = inetdomain.dom_protosw;
367 	    pr < inetdomain.dom_protoswNPROTOSW; pr++)
368 		if (pr->pr_domain->dom_family == PF_INET &&
369 		    pr->pr_protocol && pr->pr_protocol != IPPROTO_RAW) {
370 			/* Be careful to only index valid IP protocols. */
371 			if (pr->pr_protocol < IPPROTO_MAX)
372 				ip_protox[pr->pr_protocol] = pr - inetsw;
373 		}
374 
375 	EVENTHANDLER_REGISTER(nmbclusters_change, ipq_zone_change,
376 		NULL, EVENTHANDLER_PRI_ANY);
377 
378 	/* Initialize various other remaining things. */
379 	for (i = 0; i < IPREASS_NHASH; i++)
380 		IPQ_LOCK_INIT(i);
381 	netisr_register(&ip_nh);
382 #ifdef	RSS
383 	netisr_register(&ip_direct_nh);
384 #endif
385 }
386 
387 #ifdef VIMAGE
388 void
389 ip_destroy(void)
390 {
391 	int i;
392 
393 	if ((i = pfil_head_unregister(&V_inet_pfil_hook)) != 0)
394 		printf("%s: WARNING: unable to unregister pfil hook, "
395 		    "error %d\n", __func__, i);
396 
397 	/* Cleanup in_ifaddr hash table; should be empty. */
398 	hashdestroy(V_in_ifaddrhashtbl, M_IFADDR, V_in_ifaddrhmask);
399 
400 	ip_drain_locked();
401 
402 	uma_zdestroy(V_ipq_zone);
403 }
404 #endif
405 
406 #ifdef	RSS
407 /*
408  * IP direct input routine.
409  *
410  * This is called when reinjecting completed fragments where
411  * all of the previous checking and book-keeping has been done.
412  */
413 void
414 ip_direct_input(struct mbuf *m)
415 {
416 	struct ip *ip;
417 	int hlen;
418 
419 	ip = mtod(m, struct ip *);
420 	hlen = ip->ip_hl << 2;
421 
422 	IPSTAT_INC(ips_delivered);
423 	(*inetsw[ip_protox[ip->ip_p]].pr_input)(&m, &hlen, ip->ip_p);
424 	return;
425 }
426 #endif
427 
428 /*
429  * IP input routine.  Checksum and byte swap header.  If fragmented,
430  * try to reassemble.  Process options.  Pass to next level.
431  */
432 void
433 ip_input(struct mbuf *m)
434 {
435 	struct ip *ip = NULL;
436 	struct in_ifaddr *ia = NULL;
437 	struct ifaddr *ifa;
438 	struct ifnet *ifp;
439 	int    checkif, hlen = 0;
440 	uint16_t sum, ip_len;
441 	int dchg = 0;				/* dest changed after fw */
442 	struct in_addr odst;			/* original dst address */
443 
444 	M_ASSERTPKTHDR(m);
445 
446 	if (m->m_flags & M_FASTFWD_OURS) {
447 		m->m_flags &= ~M_FASTFWD_OURS;
448 		/* Set up some basics that will be used later. */
449 		ip = mtod(m, struct ip *);
450 		hlen = ip->ip_hl << 2;
451 		ip_len = ntohs(ip->ip_len);
452 		goto ours;
453 	}
454 
455 	IPSTAT_INC(ips_total);
456 
457 	if (m->m_pkthdr.len < sizeof(struct ip))
458 		goto tooshort;
459 
460 	if (m->m_len < sizeof (struct ip) &&
461 	    (m = m_pullup(m, sizeof (struct ip))) == NULL) {
462 		IPSTAT_INC(ips_toosmall);
463 		return;
464 	}
465 	ip = mtod(m, struct ip *);
466 
467 	if (ip->ip_v != IPVERSION) {
468 		IPSTAT_INC(ips_badvers);
469 		goto bad;
470 	}
471 
472 	hlen = ip->ip_hl << 2;
473 	if (hlen < sizeof(struct ip)) {	/* minimum header length */
474 		IPSTAT_INC(ips_badhlen);
475 		goto bad;
476 	}
477 	if (hlen > m->m_len) {
478 		if ((m = m_pullup(m, hlen)) == NULL) {
479 			IPSTAT_INC(ips_badhlen);
480 			return;
481 		}
482 		ip = mtod(m, struct ip *);
483 	}
484 
485 	IP_PROBE(receive, NULL, NULL, ip, m->m_pkthdr.rcvif, ip, NULL);
486 
487 	/* 127/8 must not appear on wire - RFC1122 */
488 	ifp = m->m_pkthdr.rcvif;
489 	if ((ntohl(ip->ip_dst.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET ||
490 	    (ntohl(ip->ip_src.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) {
491 		if ((ifp->if_flags & IFF_LOOPBACK) == 0) {
492 			IPSTAT_INC(ips_badaddr);
493 			goto bad;
494 		}
495 	}
496 
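	/*
	 * Validate the header checksum: trust CSUM_IP_VALID when the
	 * interface has already checked it, otherwise compute the sum
	 * here.  In either case a zero 'sum' means the header is good.
	 */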
497 	if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
498 		sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
499 	} else {
500 		if (hlen == sizeof(struct ip)) {
501 			sum = in_cksum_hdr(ip);
502 		} else {
503 			sum = in_cksum(m, hlen);
504 		}
505 	}
506 	if (sum) {
507 		IPSTAT_INC(ips_badsum);
508 		goto bad;
509 	}
510 
511 #ifdef ALTQ
512 	if (altq_input != NULL && (*altq_input)(m, AF_INET) == 0)
513 		/* packet is dropped by traffic conditioner */
514 		return;
515 #endif
516 
517 	ip_len = ntohs(ip->ip_len);
518 	if (ip_len < hlen) {
519 		IPSTAT_INC(ips_badlen);
520 		goto bad;
521 	}
522 
523 	/*
524 	 * Check that the amount of data in the buffers
525 	 * is at least as much as the IP header would have us expect.
526 	 * Trim mbufs if longer than we expect.
527 	 * Drop packet if shorter than we expect.
528 	 */
529 	if (m->m_pkthdr.len < ip_len) {
530 tooshort:
531 		IPSTAT_INC(ips_tooshort);
532 		goto bad;
533 	}
534 	if (m->m_pkthdr.len > ip_len) {
535 		if (m->m_len == m->m_pkthdr.len) {
536 			m->m_len = ip_len;
537 			m->m_pkthdr.len = ip_len;
538 		} else
539 			m_adj(m, ip_len - m->m_pkthdr.len);
540 	}
541 
542 #ifdef IPSEC
543 	/*
544 	 * Bypass packet filtering for packets previously handled by IPsec.
545 	 */
546 	if (ip_ipsec_filtertunnel(m))
547 		goto passin;
548 #endif /* IPSEC */
549 
550 	/*
551 	 * Run through list of hooks for input packets.
552 	 *
553 	 * NB: Beware of the destination address changing (e.g.
554 	 *     by NAT rewriting).  When this happens, tell
555 	 *     ip_forward to do the right thing.
556 	 */
557 
558 	/* Jump over all PFIL processing if hooks are not active. */
559 	if (!PFIL_HOOKED(&V_inet_pfil_hook))
560 		goto passin;
561 
562 	odst = ip->ip_dst;
563 	if (pfil_run_hooks(&V_inet_pfil_hook, &m, ifp, PFIL_IN, NULL) != 0)
564 		return;
565 	if (m == NULL)			/* consumed by filter */
566 		return;
567 
568 	ip = mtod(m, struct ip *);
569 	dchg = (odst.s_addr != ip->ip_dst.s_addr);
570 	ifp = m->m_pkthdr.rcvif;
571 
572 	if (m->m_flags & M_FASTFWD_OURS) {
573 		m->m_flags &= ~M_FASTFWD_OURS;
574 		goto ours;
575 	}
576 	if (m->m_flags & M_IP_NEXTHOP) {
577 		dchg = (m_tag_find(m, PACKET_TAG_IPFORWARD, NULL) != NULL);
578 		if (dchg != 0) {
579 			/*
580 			 * Directly ship the packet on.  This allows
581 			 * forwarding packets originally destined to us
582 			 * to some other directly connected host.
583 			 */
584 			ip_forward(m, 1);
585 			return;
586 		}
587 	}
588 passin:
589 
590 	/*
591 	 * Process options and, if not destined for us,
592 	 * ship it on.  ip_dooptions returns 1 when an
593 	 * error was detected (causing an icmp message
594 	 * to be sent and the original packet to be freed).
595 	 */
596 	if (hlen > sizeof (struct ip) && ip_dooptions(m, 0))
597 		return;
598 
599 	/*
600 	 * Greedy RSVP: snatch any PATH packet of the RSVP protocol, no
601 	 * matter whether it is destined for another node or is multicast;
602 	 * RSVP wants it and prevents it from being forwarded anywhere else.
603 	 * Also check that the RSVP daemon is running before grabbing the packet.
604 	 */
605 	if (V_rsvp_on && ip->ip_p == IPPROTO_RSVP)
606 		goto ours;
607 
608 	/*
609 	 * Check our list of addresses, to see if the packet is for us.
610 	 * If we don't have any addresses, assume any unicast packet
611 	 * we receive might be for us (and let the upper layers deal
612 	 * with it).
613 	 */
614 	if (TAILQ_EMPTY(&V_in_ifaddrhead) &&
615 	    (m->m_flags & (M_MCAST|M_BCAST)) == 0)
616 		goto ours;
617 
618 	/*
619 	 * Enable a consistency check between the destination address
620 	 * and the arrival interface for a unicast packet (the RFC 1122
621 	 * strong ES model) if IP forwarding is disabled and the packet
622 	 * is not locally generated and the packet is not subject to
623 	 * 'ipfw fwd'.
624 	 *
625 	 * XXX - Checking also should be disabled if the destination
626 	 * address is ipnat'ed to a different interface.
627 	 *
628 	 * XXX - Checking is incompatible with IP aliases added
629 	 * to the loopback interface instead of the interface where
630 	 * the packets are received.
631 	 *
632 	 * XXX - This is the case for carp vhost IPs as well so we
633 	 * insert a workaround. If the packet got here, we already
634 	 * checked with carp_iamatch() and carp_forus().
635 	 */
636 	checkif = V_ip_checkinterface && (V_ipforwarding == 0) &&
637 	    ifp != NULL && ((ifp->if_flags & IFF_LOOPBACK) == 0) &&
638 	    ifp->if_carp == NULL && (dchg == 0);
639 
640 	/*
641 	 * Check for exact addresses in the hash bucket.
642 	 */
643 	/* IN_IFADDR_RLOCK(); */
644 	LIST_FOREACH(ia, INADDR_HASH(ip->ip_dst.s_addr), ia_hash) {
645 		/*
646 		 * If the address matches, verify that the packet
647 		 * arrived via the correct interface if checking is
648 		 * enabled.
649 		 */
650 		if (IA_SIN(ia)->sin_addr.s_addr == ip->ip_dst.s_addr &&
651 		    (!checkif || ia->ia_ifp == ifp)) {
652 			counter_u64_add(ia->ia_ifa.ifa_ipackets, 1);
653 			counter_u64_add(ia->ia_ifa.ifa_ibytes,
654 			    m->m_pkthdr.len);
655 			/* IN_IFADDR_RUNLOCK(); */
656 			goto ours;
657 		}
658 	}
659 	/* IN_IFADDR_RUNLOCK(); */
660 
661 	/*
662 	 * Check for broadcast addresses.
663 	 *
664 	 * Only accept broadcast packets that arrive via the matching
665 	 * interface.  Reception of forwarded directed broadcasts is
666 	 * handled via ip_forward() and ether_output(); for SIMPLEX
667 	 * interfaces, ether_output() also loops a copy back into the stack.
668 	 */
669 	if (ifp != NULL && ifp->if_flags & IFF_BROADCAST) {
670 		IF_ADDR_RLOCK(ifp);
671 		TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
672 			if (ifa->ifa_addr->sa_family != AF_INET)
673 				continue;
674 			ia = ifatoia(ifa);
675 			if (satosin(&ia->ia_broadaddr)->sin_addr.s_addr ==
676 			    ip->ip_dst.s_addr) {
677 				counter_u64_add(ia->ia_ifa.ifa_ipackets, 1);
678 				counter_u64_add(ia->ia_ifa.ifa_ibytes,
679 				    m->m_pkthdr.len);
680 				IF_ADDR_RUNLOCK(ifp);
681 				goto ours;
682 			}
683 #ifdef BOOTP_COMPAT
684 			if (IA_SIN(ia)->sin_addr.s_addr == INADDR_ANY) {
685 				counter_u64_add(ia->ia_ifa.ifa_ipackets, 1);
686 				counter_u64_add(ia->ia_ifa.ifa_ibytes,
687 				    m->m_pkthdr.len);
688 				IF_ADDR_RUNLOCK(ifp);
689 				goto ours;
690 			}
691 #endif
692 		}
693 		IF_ADDR_RUNLOCK(ifp);
694 		ia = NULL;
695 	}
696 	/* RFC 3927 2.7: Do not forward datagrams for 169.254.0.0/16. */
697 	if (IN_LINKLOCAL(ntohl(ip->ip_dst.s_addr))) {
698 		IPSTAT_INC(ips_cantforward);
699 		m_freem(m);
700 		return;
701 	}
702 	if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
703 		if (V_ip_mrouter) {
704 			/*
705 			 * If we are acting as a multicast router, all
706 			 * incoming multicast packets are passed to the
707 			 * kernel-level multicast forwarding function.
708 			 * The packet is returned (relatively) intact; if
709 			 * ip_mforward() returns a non-zero value, the packet
710 			 * must be discarded, else it may be accepted below.
711 			 */
712 			if (ip_mforward && ip_mforward(ip, ifp, m, 0) != 0) {
713 				IPSTAT_INC(ips_cantforward);
714 				m_freem(m);
715 				return;
716 			}
717 
718 			/*
719 			 * The process-level routing daemon needs to receive
720 			 * all multicast IGMP packets, whether or not this
721 			 * host belongs to their destination groups.
722 			 */
723 			if (ip->ip_p == IPPROTO_IGMP)
724 				goto ours;
725 			IPSTAT_INC(ips_forward);
726 		}
727 		/*
728 		 * Assume the packet is for us, to avoid prematurely taking
729 		 * a lock on the in_multi hash. Protocols must perform
730 		 * their own filtering and update statistics accordingly.
731 		 */
732 		goto ours;
733 	}
734 	if (ip->ip_dst.s_addr == (u_long)INADDR_BROADCAST)
735 		goto ours;
736 	if (ip->ip_dst.s_addr == INADDR_ANY)
737 		goto ours;
738 
739 	/*
740 	 * Not for us; forward if possible and desirable.
741 	 */
742 	if (V_ipforwarding == 0) {
743 		IPSTAT_INC(ips_cantforward);
744 		m_freem(m);
745 	} else {
746 		ip_forward(m, dchg);
747 	}
748 	return;
749 
750 ours:
751 #ifdef IPSTEALTH
752 	/*
753 	 * IPSTEALTH: Process non-routing options only
754 	 * if the packet is destined for us.
755 	 */
756 	if (V_ipstealth && hlen > sizeof (struct ip) && ip_dooptions(m, 1))
757 		return;
758 #endif /* IPSTEALTH */
759 
760 	/*
761 	 * Attempt reassembly; if it succeeds, proceed.
762 	 * ip_reass() will return a different mbuf.
763 	 */
764 	if (ip->ip_off & htons(IP_MF | IP_OFFMASK)) {
765 		/* XXXGL: shouldn't we save & set m_flags? */
766 		m = ip_reass(m);
767 		if (m == NULL)
768 			return;
769 		ip = mtod(m, struct ip *);
770 		/* Get the header length of the reassembled packet */
771 		hlen = ip->ip_hl << 2;
772 	}
773 
774 #ifdef IPSEC
775 	/*
776 	 * Enforce IPsec policy checking if we are seeing the last header.
777 	 * Note that we do not visit this with protocols that have pcb
778 	 * layer code, like UDP/TCP/raw IP.
779 	 */
780 	if (ip_ipsec_input(m, ip->ip_p) != 0)
781 		goto bad;
782 #endif /* IPSEC */
783 
784 	/*
785 	 * Switch out to protocol's input routine.
786 	 */
787 	IPSTAT_INC(ips_delivered);
788 
789 	(*inetsw[ip_protox[ip->ip_p]].pr_input)(&m, &hlen, ip->ip_p);
790 	return;
791 bad:
792 	m_freem(m);
793 }
794 
795 /*
796  * After maxnipq has been updated, propagate the change to UMA.  The UMA zone
797  * max has slightly different semantics than the sysctl, for historical
798  * reasons.
799  */
800 static void
801 maxnipq_update(void)
802 {
803 
804 	/*
805 	 * -1 for unlimited allocation.
806 	 */
807 	if (V_maxnipq < 0)
808 		uma_zone_set_max(V_ipq_zone, 0);
809 	/*
810 	 * Positive number for specific bound.
811 	 */
812 	if (V_maxnipq > 0)
813 		uma_zone_set_max(V_ipq_zone, V_maxnipq);
814 	/*
815 	 * Zero specifies no further fragment queue allocation -- set the
816 	 * bound very low, but rely on implementation elsewhere to actually
817 	 * prevent allocation and reclaim current queues.
818 	 */
819 	if (V_maxnipq == 0)
820 		uma_zone_set_max(V_ipq_zone, 1);
821 }
822 
823 static void
824 ipq_zone_change(void *tag)
825 {
826 
827 	if (V_maxnipq > 0 && V_maxnipq < (nmbclusters / 32)) {
828 		V_maxnipq = nmbclusters / 32;
829 		maxnipq_update();
830 	}
831 }
832 
833 static int
834 sysctl_maxnipq(SYSCTL_HANDLER_ARGS)
835 {
836 	int error, i;
837 
838 	i = V_maxnipq;
839 	error = sysctl_handle_int(oidp, &i, 0, req);
840 	if (error || !req->newptr)
841 		return (error);
842 
843 	/*
844 	 * XXXRW: Might be a good idea to sanity check the argument and place
845 	 * an extreme upper bound.
846 	 */
847 	if (i < -1)
848 		return (EINVAL);
849 	V_maxnipq = i;
850 	maxnipq_update();
851 	return (0);
852 }
853 
854 SYSCTL_PROC(_net_inet_ip, OID_AUTO, maxfragpackets, CTLTYPE_INT|CTLFLAG_RW,
855     NULL, 0, sysctl_maxnipq, "I",
856     "Maximum number of IPv4 fragment reassembly queue entries");
857 
858 #define	M_IP_FRAG	M_PROTO9
859 
860 /*
861  * Attempt to purge something from the reassembly queue to make
862  * room.
863  *
864  * Must be called without any IPQ locks held, as it will attempt
865  * to lock each in turn.
866  *
867  * 'skip_bucket' is the bucket to skip over, or -1 to
868  * not skip any bucket.
869  *
870  * Returns the bucket being freed, or -1 for no action.
871  */
872 static int
873 ip_reass_purge_element(int skip_bucket)
874 {
875 	int i;
876 	struct ipq *r;
877 
878 	for (i = 0; i < IPREASS_NHASH; i++) {
879 		if (skip_bucket > -1 && i == skip_bucket)
880 			continue;
881 		IPQ_LOCK(i);
882 		r = TAILQ_LAST(&V_ipq[i], ipqhead);
883 		if (r) {
884 			IPSTAT_ADD(ips_fragtimeout,
885 			    r->ipq_nfrags);
886 			ip_freef(&V_ipq[i], i, r);
887 			IPQ_UNLOCK(i);
888 			return (i);
889 		}
890 		IPQ_UNLOCK(i);
891 	}
892 	return (-1);
893 }
894 
895 /*
896  * Take incoming datagram fragment and try to reassemble it into
897  * whole datagram.  If the argument is the first fragment or one
898  * in between, the function will return NULL and store the mbuf
899  * in the fragment chain.  If the argument is the last fragment
900  * the packet will be reassembled and the pointer to the new
901  * mbuf returned for further processing.  Only m_tags attached
902  * to the first packet/fragment are preserved.
903  * The IP header is *NOT* adjusted out of iplen.
904  */
905 struct mbuf *
906 ip_reass(struct mbuf *m)
907 {
908 	struct ip *ip;
909 	struct mbuf *p, *q, *nq, *t;
910 	struct ipq *fp = NULL;
911 	struct ipqhead *head;
912 	int i, hlen, next;
913 	u_int8_t ecn, ecn0;
914 	u_short hash;
915 #ifdef	RSS
916 	uint32_t rss_hash, rss_type;
917 #endif
918 	int do_purge = 0;
919 
920 	/* If maxnipq or maxfragsperpacket are 0, never accept fragments. */
921 	if (V_maxnipq == 0 || V_maxfragsperpacket == 0) {
922 		IPSTAT_INC(ips_fragments);
923 		IPSTAT_INC(ips_fragdropped);
924 		m_freem(m);
925 		return (NULL);
926 	}
927 
928 	ip = mtod(m, struct ip *);
929 	hlen = ip->ip_hl << 2;
930 
931 	hash = IPREASS_HASH(ip->ip_src.s_addr, ip->ip_id);
932 	head = &V_ipq[hash];
933 	IPQ_LOCK(hash);
934 
935 	/*
936 	 * Look for queue of fragments
937 	 * of this datagram.
938 	 */
939 	TAILQ_FOREACH(fp, head, ipq_list)
940 		if (ip->ip_id == fp->ipq_id &&
941 		    ip->ip_src.s_addr == fp->ipq_src.s_addr &&
942 		    ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
943 #ifdef MAC
944 		    mac_ipq_match(m, fp) &&
945 #endif
946 		    ip->ip_p == fp->ipq_p)
947 			goto found;
948 
949 	fp = NULL;
950 
951 	/*
952 	 * Attempt to trim the number of allocated fragment queues if it
953 	 * exceeds the administrative limit.
954 	 */
955 	if ((V_nipq > V_maxnipq) && (V_maxnipq > 0)) {
956 		/*
957 		 * Drop something from the tail of the current queue
958 		 * before proceeding further.
959 		 */
960 		struct ipq *q = TAILQ_LAST(head, ipqhead);
961 		if (q == NULL) {   /* gak */
962 			/*
963 			 * Defer doing this until later; when the
964 			 * lock is no longer held.
965 			 */
966 			do_purge = 1;
967 		} else {
968 			IPSTAT_ADD(ips_fragtimeout, q->ipq_nfrags);
969 			ip_freef(head, hash, q);
970 		}
971 	}
972 
973 found:
974 	/*
975 	 * Adjust ip_len to not reflect the header;
976 	 * convert the offset of this fragment to bytes.
977 	 */
978 	ip->ip_len = htons(ntohs(ip->ip_len) - hlen);
979 	if (ip->ip_off & htons(IP_MF)) {
980 		/*
981 		 * Make sure that fragments have a data length
982 		 * that's a non-zero multiple of 8 bytes.
983 		 */
984 		if (ip->ip_len == htons(0) || (ntohs(ip->ip_len) & 0x7) != 0) {
985 			IPSTAT_INC(ips_toosmall); /* XXX */
986 			goto dropfrag;
987 		}
988 		m->m_flags |= M_IP_FRAG;
989 	} else
990 		m->m_flags &= ~M_IP_FRAG;
991 	ip->ip_off = htons(ntohs(ip->ip_off) << 3);
992 
993 	/*
994 	 * Count the fragment and stash the IP header pointer for
995 	 * the reassembly code below.
996 	 */
997 	IPSTAT_INC(ips_fragments);
998 	m->m_pkthdr.PH_loc.ptr = ip;
999 
1000 	/* Previous ip_reass() started here. */
1001 	/*
1002 	 * The presence of the IP header in the mbuf data
1003 	 * would confuse the code below.
1004 	 */
1005 	m->m_data += hlen;
1006 	m->m_len -= hlen;
1007 
1008 	/*
1009 	 * If first fragment to arrive, create a reassembly queue.
1010 	 */
1011 	if (fp == NULL) {
1012 		fp = uma_zalloc(V_ipq_zone, M_NOWAIT);
1013 		if (fp == NULL)
1014 			goto dropfrag;
1015 #ifdef MAC
1016 		if (mac_ipq_init(fp, M_NOWAIT) != 0) {
1017 			uma_zfree(V_ipq_zone, fp);
1018 			fp = NULL;
1019 			goto dropfrag;
1020 		}
1021 		mac_ipq_create(m, fp);
1022 #endif
1023 		TAILQ_INSERT_HEAD(head, fp, ipq_list);
1024 		V_nipq++;
1025 		fp->ipq_nfrags = 1;
1026 		fp->ipq_ttl = IPFRAGTTL;
1027 		fp->ipq_p = ip->ip_p;
1028 		fp->ipq_id = ip->ip_id;
1029 		fp->ipq_src = ip->ip_src;
1030 		fp->ipq_dst = ip->ip_dst;
1031 		fp->ipq_frags = m;
1032 		m->m_nextpkt = NULL;
1033 		goto done;
1034 	} else {
1035 		fp->ipq_nfrags++;
1036 #ifdef MAC
1037 		mac_ipq_update(m, fp);
1038 #endif
1039 	}
1040 
1041 #define GETIP(m)	((struct ip*)((m)->m_pkthdr.PH_loc.ptr))
1042 
1043 	/*
1044 	 * Handle ECN by comparing this segment with the first one;
1045 	 * if CE is set, do not lose CE.
1046 	 * Drop if CE and not-ECT are mixed for the same packet.
1047 	 */
1048 	ecn = ip->ip_tos & IPTOS_ECN_MASK;
1049 	ecn0 = GETIP(fp->ipq_frags)->ip_tos & IPTOS_ECN_MASK;
1050 	if (ecn == IPTOS_ECN_CE) {
1051 		if (ecn0 == IPTOS_ECN_NOTECT)
1052 			goto dropfrag;
1053 		if (ecn0 != IPTOS_ECN_CE)
1054 			GETIP(fp->ipq_frags)->ip_tos |= IPTOS_ECN_CE;
1055 	}
1056 	if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT)
1057 		goto dropfrag;
1058 
1059 	/*
1060 	 * Find a segment which begins after this one does.
1061 	 */
1062 	for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt)
1063 		if (ntohs(GETIP(q)->ip_off) > ntohs(ip->ip_off))
1064 			break;
1065 
1066 	/*
1067 	 * If there is a preceding segment, it may provide some of
1068 	 * our data already.  If so, drop the data from the incoming
1069 	 * segment.  If it provides all of our data, drop us, otherwise
1070 	 * stick new segment in the proper place.
1071 	 *
1072 	 * If some of the data is dropped from the preceding
1073 	 * segment, then its checksum is invalidated.
1074 	 */
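	/*
	 * Worked example, illustrative only: if the preceding fragment
	 * covers bytes 0-23 (off 0, len 24) and this fragment starts at
	 * byte 16, then i = 0 + 24 - 16 = 8, so the first 8 bytes of the
	 * new fragment duplicate data already present and are trimmed.
	 */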
1075 	if (p) {
1076 		i = ntohs(GETIP(p)->ip_off) + ntohs(GETIP(p)->ip_len) -
1077 		    ntohs(ip->ip_off);
1078 		if (i > 0) {
1079 			if (i >= ntohs(ip->ip_len))
1080 				goto dropfrag;
1081 			m_adj(m, i);
1082 			m->m_pkthdr.csum_flags = 0;
1083 			ip->ip_off = htons(ntohs(ip->ip_off) + i);
1084 			ip->ip_len = htons(ntohs(ip->ip_len) - i);
1085 		}
1086 		m->m_nextpkt = p->m_nextpkt;
1087 		p->m_nextpkt = m;
1088 	} else {
1089 		m->m_nextpkt = fp->ipq_frags;
1090 		fp->ipq_frags = m;
1091 	}
1092 
1093 	/*
1094 	 * While we overlap succeeding segments, trim them or,
1095 	 * if they are completely covered, dequeue them.
1096 	 */
1097 	for (; q != NULL && ntohs(ip->ip_off) + ntohs(ip->ip_len) >
1098 	    ntohs(GETIP(q)->ip_off); q = nq) {
1099 		i = (ntohs(ip->ip_off) + ntohs(ip->ip_len)) -
1100 		    ntohs(GETIP(q)->ip_off);
1101 		if (i < ntohs(GETIP(q)->ip_len)) {
1102 			GETIP(q)->ip_len = htons(ntohs(GETIP(q)->ip_len) - i);
1103 			GETIP(q)->ip_off = htons(ntohs(GETIP(q)->ip_off) + i);
1104 			m_adj(q, i);
1105 			q->m_pkthdr.csum_flags = 0;
1106 			break;
1107 		}
1108 		nq = q->m_nextpkt;
1109 		m->m_nextpkt = nq;
1110 		IPSTAT_INC(ips_fragdropped);
1111 		fp->ipq_nfrags--;
1112 		m_freem(q);
1113 	}
1114 
1115 	/*
1116 	 * Check for complete reassembly and perform per-packet fragment
1117 	 * limiting.
1118 	 *
1119 	 * Frag limiting is performed here so that the nth frag has
1120 	 * a chance to complete the packet before we drop the packet.
1121 	 * As a result, n+1 frags are actually allowed per packet, but
1122 	 * only n will ever be stored. (n = maxfragsperpacket.)
1123 	 *
1124 	 */
1125 	next = 0;
1126 	for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt) {
1127 		if (ntohs(GETIP(q)->ip_off) != next) {
1128 			if (fp->ipq_nfrags > V_maxfragsperpacket) {
1129 				IPSTAT_ADD(ips_fragdropped, fp->ipq_nfrags);
1130 				ip_freef(head, hash, fp);
1131 			}
1132 			goto done;
1133 		}
1134 		next += ntohs(GETIP(q)->ip_len);
1135 	}
1136 	/* Make sure the last packet didn't have the IP_MF flag */
1137 	if (p->m_flags & M_IP_FRAG) {
1138 		if (fp->ipq_nfrags > V_maxfragsperpacket) {
1139 			IPSTAT_ADD(ips_fragdropped, fp->ipq_nfrags);
1140 			ip_freef(head, hash, fp);
1141 		}
1142 		goto done;
1143 	}
1144 
1145 	/*
1146 	 * Reassembly is complete.  Make sure the packet is a sane size.
1147 	 */
1148 	q = fp->ipq_frags;
1149 	ip = GETIP(q);
1150 	if (next + (ip->ip_hl << 2) > IP_MAXPACKET) {
1151 		IPSTAT_INC(ips_toolong);
1152 		IPSTAT_ADD(ips_fragdropped, fp->ipq_nfrags);
1153 		ip_freef(head, hash, fp);
1154 		goto done;
1155 	}
1156 
1157 	/*
1158 	 * Concatenate fragments.
1159 	 */
1160 	m = q;
1161 	t = m->m_next;
1162 	m->m_next = NULL;
1163 	m_cat(m, t);
1164 	nq = q->m_nextpkt;
1165 	q->m_nextpkt = NULL;
1166 	for (q = nq; q != NULL; q = nq) {
1167 		nq = q->m_nextpkt;
1168 		q->m_nextpkt = NULL;
1169 		m->m_pkthdr.csum_flags &= q->m_pkthdr.csum_flags;
1170 		m->m_pkthdr.csum_data += q->m_pkthdr.csum_data;
1171 		m_cat(m, q);
1172 	}
1173 	/*
1174 	 * In order to do checksumming faster we do 'end-around carry' here
1175 	 * (and not in for{} loop), though it implies we are not going to
1176 	 * reassemble more than 64k fragments.
1177 	 */
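	/*
	 * Worked example, illustrative only: a running sum of 0x2fffe
	 * folds as 0xfffe + 0x2 = 0x10000 on the first pass and as
	 * 0x0000 + 0x1 = 0x0001 on the second, leaving a proper 16-bit
	 * one's complement sum.
	 */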
1178 	while (m->m_pkthdr.csum_data & 0xffff0000)
1179 		m->m_pkthdr.csum_data = (m->m_pkthdr.csum_data & 0xffff) +
1180 		    (m->m_pkthdr.csum_data >> 16);
1181 #ifdef MAC
1182 	mac_ipq_reassemble(fp, m);
1183 	mac_ipq_destroy(fp);
1184 #endif
1185 
1186 	/*
1187 	 * Create the header for the new IP packet by modifying the first
1188 	 * packet's header; dequeue and discard the fragment reassembly
1189 	 * header.  Make the header visible.
1190 	 */
1191 	ip->ip_len = htons((ip->ip_hl << 2) + next);
1192 	ip->ip_src = fp->ipq_src;
1193 	ip->ip_dst = fp->ipq_dst;
1194 	TAILQ_REMOVE(head, fp, ipq_list);
1195 	V_nipq--;
1196 	uma_zfree(V_ipq_zone, fp);
1197 	m->m_len += (ip->ip_hl << 2);
1198 	m->m_data -= (ip->ip_hl << 2);
1199 	/* some debugging cruft by sklower, below, will go away soon */
1200 	if (m->m_flags & M_PKTHDR)	/* XXX this should be done elsewhere */
1201 		m_fixhdr(m);
1202 	IPSTAT_INC(ips_reassembled);
1203 	IPQ_UNLOCK(hash);
1204 
1205 	/*
1206 	 * Do the delayed purge to keep fragment counts under
1207 	 * the configured maximum.
1208 	 *
1209 	 * This is delayed so that it's not done with another IPQ bucket
1210 	 * lock held.
1211 	 *
1212 	 * Note that we pass in the bucket to /skip/ over, not
1213 	 * the bucket to /purge/.
1214 	 */
1215 	if (do_purge)
1216 		ip_reass_purge_element(hash);
1217 
1218 #ifdef	RSS
1219 	/*
1220 	 * Query the RSS layer for the flowid / flowtype for the
1221 	 * mbuf payload.
1222 	 *
1223 	 * For now, just assume we have to calculate a new one.
1224 	 * Later on we should check to see if the assigned flowid matches
1225 	 * what RSS wants for the given IP protocol and if so, just keep it.
1226 	 *
1227 	 * We then queue into the relevant netisr so it can be dispatched
1228 	 * to the correct CPU.
1229 	 *
1230 	 * Note - this may return 1, which means the flowid in the mbuf
1231 	 * is correct for the configured RSS hash types and can be used.
1232 	 */
1233 	if (rss_mbuf_software_hash_v4(m, 0, &rss_hash, &rss_type) == 0) {
1234 		m->m_pkthdr.flowid = rss_hash;
1235 		M_HASHTYPE_SET(m, rss_type);
1236 	}
1237 
1238 	/*
1239 	 * Queue/dispatch for reprocessing.
1240 	 *
1241 	 * Note: this is much slower than just handling the frame in the
1242 	 * current receive context.  It's likely worth investigating
1243 	 * why this is.
1244 	 */
1245 	netisr_dispatch(NETISR_IP_DIRECT, m);
1246 	return (NULL);
1247 #endif
1248 
1249 	/* Handle in-line */
1250 	return (m);
1251 
1252 dropfrag:
1253 	IPSTAT_INC(ips_fragdropped);
1254 	if (fp != NULL)
1255 		fp->ipq_nfrags--;
1256 	m_freem(m);
1257 done:
1258 	IPQ_UNLOCK(hash);
1259 	return (NULL);
1260 
1261 #undef GETIP
1262 }
1263 
1264 /*
1265  * Free a fragment reassembly header and all
1266  * associated datagrams.
1267  */
1268 static void
1269 ip_freef(struct ipqhead *fhp, int i, struct ipq *fp)
1270 {
1271 	struct mbuf *q;
1272 
1273 	IPQ_LOCK_ASSERT(i);
1274 
1275 	while (fp->ipq_frags) {
1276 		q = fp->ipq_frags;
1277 		fp->ipq_frags = q->m_nextpkt;
1278 		m_freem(q);
1279 	}
1280 	TAILQ_REMOVE(fhp, fp, ipq_list);
1281 	uma_zfree(V_ipq_zone, fp);
1282 	V_nipq--;
1283 }
1284 
1285 /*
1286  * IP timer processing;
1287  * if a timer expires on a reassembly
1288  * queue, discard it.
1289  */
1290 void
1291 ip_slowtimo(void)
1292 {
1293 	VNET_ITERATOR_DECL(vnet_iter);
1294 	struct ipq *fp;
1295 	int i;
1296 
1297 	VNET_LIST_RLOCK_NOSLEEP();
1298 	VNET_FOREACH(vnet_iter) {
1299 		CURVNET_SET(vnet_iter);
1300 		for (i = 0; i < IPREASS_NHASH; i++) {
1301 			IPQ_LOCK(i);
1302 			for (fp = TAILQ_FIRST(&V_ipq[i]); fp;) {
1303 				struct ipq *fpp;
1304 
1305 				fpp = fp;
1306 				fp = TAILQ_NEXT(fp, ipq_list);
1307 				if (--fpp->ipq_ttl == 0) {
1308 					IPSTAT_ADD(ips_fragtimeout,
1309 					    fpp->ipq_nfrags);
1310 					ip_freef(&V_ipq[i], i, fpp);
1311 				}
1312 			}
1313 			IPQ_UNLOCK(i);
1314 		}
1315 		/*
1316 		 * If we are over the maximum number of fragments
1317 		 * (due to the limit being lowered), drain off
1318 		 * enough to get down to the new limit.
1319 		 */
1320 		if (V_maxnipq >= 0 && V_nipq > V_maxnipq) {
1321 			for (i = 0; i < IPREASS_NHASH; i++) {
1322 				IPQ_LOCK(i);
1323 				while (V_nipq > V_maxnipq &&
1324 				    !TAILQ_EMPTY(&V_ipq[i])) {
1325 					IPSTAT_ADD(ips_fragdropped,
1326 					    TAILQ_FIRST(&V_ipq[i])->ipq_nfrags);
1327 					ip_freef(&V_ipq[i],
1328 					    i,
1329 					    TAILQ_FIRST(&V_ipq[i]));
1330 				}
1331 				IPQ_UNLOCK(i);
1332 			}
1333 		}
1334 		CURVNET_RESTORE();
1335 	}
1336 	VNET_LIST_RUNLOCK_NOSLEEP();
1337 }
1338 
1339 /*
1340  * Drain off all datagram fragments.
1341  *
1342  * Despite the name, call without any IPQ locks held.
1343  */
1344 static void
1345 ip_drain_locked(void)
1346 {
1347 	int     i;
1348 
1349 	for (i = 0; i < IPREASS_NHASH; i++) {
1350 		IPQ_LOCK(i);
1351 		while (!TAILQ_EMPTY(&V_ipq[i])) {
1352 			IPSTAT_ADD(ips_fragdropped,
1353 			    TAILQ_FIRST(&V_ipq[i])->ipq_nfrags);
1354 			ip_freef(&V_ipq[i], i, TAILQ_FIRST(&V_ipq[i]));
1355 		}
1356 		IPQ_UNLOCK(i);
1357 	}
1358 }
1359 
1360 void
1361 ip_drain(void)
1362 {
1363 	VNET_ITERATOR_DECL(vnet_iter);
1364 
1365 	VNET_LIST_RLOCK_NOSLEEP();
1366 	VNET_FOREACH(vnet_iter) {
1367 		CURVNET_SET(vnet_iter);
1368 		ip_drain_locked();
1369 		CURVNET_RESTORE();
1370 	}
1371 	VNET_LIST_RUNLOCK_NOSLEEP();
1372 }
1373 
1374 /*
1375  * The protocol to be inserted into ip_protox[] must be already registered
1376  * in inetsw[], either statically or through pf_proto_register().
1377  */
1378 int
1379 ipproto_register(short ipproto)
1380 {
1381 	struct protosw *pr;
1382 
1383 	/* Sanity checks. */
1384 	if (ipproto <= 0 || ipproto >= IPPROTO_MAX)
1385 		return (EPROTONOSUPPORT);
1386 
1387 	/*
1388 	 * The protocol slot must not be occupied by another protocol
1389 	 * already.  An index pointing to IPPROTO_RAW is unused.
1390 	 */
1391 	pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
1392 	if (pr == NULL)
1393 		return (EPFNOSUPPORT);
1394 	if (ip_protox[ipproto] != pr - inetsw)	/* IPPROTO_RAW */
1395 		return (EEXIST);
1396 
1397 	/* Find the protocol position in inetsw[] and set the index. */
1398 	for (pr = inetdomain.dom_protosw;
1399 	     pr < inetdomain.dom_protoswNPROTOSW; pr++) {
1400 		if (pr->pr_domain->dom_family == PF_INET &&
1401 		    pr->pr_protocol && pr->pr_protocol == ipproto) {
1402 			ip_protox[pr->pr_protocol] = pr - inetsw;
1403 			return (0);
1404 		}
1405 	}
1406 	return (EPROTONOSUPPORT);
1407 }
1408 
1409 int
1410 ipproto_unregister(short ipproto)
1411 {
1412 	struct protosw *pr;
1413 
1414 	/* Sanity checks. */
1415 	if (ipproto <= 0 || ipproto >= IPPROTO_MAX)
1416 		return (EPROTONOSUPPORT);
1417 
1418 	/* Check if the protocol was indeed registered. */
1419 	pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
1420 	if (pr == NULL)
1421 		return (EPFNOSUPPORT);
1422 	if (ip_protox[ipproto] == pr - inetsw)  /* IPPROTO_RAW */
1423 		return (ENOENT);
1424 
1425 	/* Reset the protocol slot to IPPROTO_RAW. */
1426 	ip_protox[ipproto] = pr - inetsw;
1427 	return (0);
1428 }
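
/*
 * Illustrative sketch, not part of the original source: a module
 * providing a new IP protocol registers a protosw entry with
 * pf_proto_register(9) and then points ip_protox[] at it via
 * ipproto_register() above.  The protocol number and input routine
 * below are assumptions for the example.
 */
#if 0
#define	EXAMPLE_IPPROTO	254	/* reserved for experimentation, RFC 3692 */

static int
example_input(struct mbuf **mp, int *offp, int proto)
{

	m_freem(*mp);		/* a real protocol would parse the packet */
	*mp = NULL;
	return (IPPROTO_DONE);
}

static struct protosw example_protosw = {
	.pr_type =	SOCK_RAW,
	.pr_domain =	&inetdomain,
	.pr_protocol =	EXAMPLE_IPPROTO,
	.pr_flags =	PR_ATOMIC|PR_ADDR,
	.pr_input =	example_input,
	.pr_usrreqs =	&rip_usrreqs
};

static int
example_attach(void)
{
	int error;

	error = pf_proto_register(PF_INET, &example_protosw);
	if (error != 0)
		return (error);
	return (ipproto_register(EXAMPLE_IPPROTO));
}
#endif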
1429 
1430 /*
1431  * Given address of next destination (final or next hop), return (referenced)
1432  * internet address info of interface to be used to get there.
1433  */
1434 struct in_ifaddr *
1435 ip_rtaddr(struct in_addr dst, u_int fibnum)
1436 {
1437 	struct route sro;
1438 	struct sockaddr_in *sin;
1439 	struct in_ifaddr *ia;
1440 
1441 	bzero(&sro, sizeof(sro));
1442 	sin = (struct sockaddr_in *)&sro.ro_dst;
1443 	sin->sin_family = AF_INET;
1444 	sin->sin_len = sizeof(*sin);
1445 	sin->sin_addr = dst;
1446 	in_rtalloc_ign(&sro, 0, fibnum);
1447 
1448 	if (sro.ro_rt == NULL)
1449 		return (NULL);
1450 
1451 	ia = ifatoia(sro.ro_rt->rt_ifa);
1452 	ifa_ref(&ia->ia_ifa);
1453 	RTFREE(sro.ro_rt);
1454 	return (ia);
1455 }
1456 
1457 u_char inetctlerrmap[PRC_NCMDS] = {
1458 	0,		0,		0,		0,
1459 	0,		EMSGSIZE,	EHOSTDOWN,	EHOSTUNREACH,
1460 	EHOSTUNREACH,	EHOSTUNREACH,	ECONNREFUSED,	ECONNREFUSED,
1461 	EMSGSIZE,	EHOSTUNREACH,	0,		0,
1462 	0,		0,		EHOSTUNREACH,	0,
1463 	ENOPROTOOPT,	ECONNREFUSED
1464 };
1465 
1466 /*
1467  * Forward a packet.  If some error occurs return the sender
1468  * an icmp packet.  Note we can't always generate a meaningful
1469  * icmp message because icmp doesn't have a large enough repertoire
1470  * of codes and types.
1471  *
1472  * If not forwarding, just drop the packet.  This could be confusing
1473  * if ipforwarding was zero but some routing protocol was advancing
1474  * us as a gateway to somewhere.  However, we must let the routing
1475  * protocol deal with that.
1476  *
1477  * The srcrt parameter indicates whether the packet is being forwarded
1478  * via a source route.
1479  */
1480 void
1481 ip_forward(struct mbuf *m, int srcrt)
1482 {
1483 	struct ip *ip = mtod(m, struct ip *);
1484 	struct in_ifaddr *ia;
1485 	struct mbuf *mcopy;
1486 	struct in_addr dest;
1487 	struct route ro;
1488 	int error, type = 0, code = 0, mtu = 0;
1489 
1490 	if (m->m_flags & (M_BCAST|M_MCAST) || in_canforward(ip->ip_dst) == 0) {
1491 		IPSTAT_INC(ips_cantforward);
1492 		m_freem(m);
1493 		return;
1494 	}
1495 #ifdef IPSEC
1496 	if (ip_ipsec_fwd(m) != 0) {
1497 		IPSTAT_INC(ips_cantforward);
1498 		m_freem(m);
1499 		return;
1500 	}
1501 #endif /* IPSEC */
1502 #ifdef IPSTEALTH
1503 	if (!V_ipstealth) {
1504 #endif
1505 		if (ip->ip_ttl <= IPTTLDEC) {
1506 			icmp_error(m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS,
1507 			    0, 0);
1508 			return;
1509 		}
1510 #ifdef IPSTEALTH
1511 	}
1512 #endif
1513 
1514 	ia = ip_rtaddr(ip->ip_dst, M_GETFIB(m));
1515 #ifndef IPSEC
1516 	/*
1517 	 * 'ia' may be NULL if there is no route for this destination.
1518 	 * In case of IPsec, don't discard it just yet, but pass it to
1519 	 * ip_output in case of outgoing IPsec policy.
1520 	 */
1521 	if (!srcrt && ia == NULL) {
1522 		icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_HOST, 0, 0);
1523 		return;
1524 	}
1525 #endif
1526 
1527 	/*
1528 	 * Save the IP header and at most 8 bytes of the payload,
1529 	 * in case we need to generate an ICMP message to the src.
1530 	 *
1531 	 * XXX this can be optimized a lot by saving the data in a local
1532 	 * buffer on the stack (72 bytes at most), and only allocating the
1533 	 * mbuf if really necessary. The vast majority of the packets
1534 	 * are forwarded without having to send an ICMP back (either
1535 	 * because unnecessary, or because rate limited), so we are
1536 	 * really wasting a lot of work here.
1537 	 *
1538 	 * We don't use m_copy() because it might return a reference
1539 	 * to a shared cluster. Both this function and ip_output()
1540 	 * assume exclusive access to the IP header in `m', so any
1541 	 * data in a cluster may change before we reach icmp_error().
1542 	 */
1543 	mcopy = m_gethdr(M_NOWAIT, m->m_type);
1544 	if (mcopy != NULL && !m_dup_pkthdr(mcopy, m, M_NOWAIT)) {
1545 		/*
1546 		 * It's probably ok if the pkthdr dup fails (because
1547 		 * the deep copy of the tag chain failed), but for now
1548 		 * be conservative and just discard the copy since
1549 		 * code below may some day want the tags.
1550 		 */
1551 		m_free(mcopy);
1552 		mcopy = NULL;
1553 	}
1554 	if (mcopy != NULL) {
1555 		mcopy->m_len = min(ntohs(ip->ip_len), M_TRAILINGSPACE(mcopy));
1556 		mcopy->m_pkthdr.len = mcopy->m_len;
1557 		m_copydata(m, 0, mcopy->m_len, mtod(mcopy, caddr_t));
1558 	}
1559 
1560 #ifdef IPSTEALTH
1561 	if (!V_ipstealth) {
1562 #endif
1563 		ip->ip_ttl -= IPTTLDEC;
1564 #ifdef IPSTEALTH
1565 	}
1566 #endif
1567 
1568 	/*
1569 	 * If forwarding a packet using the same interface it came in on,
1570 	 * we should perhaps send a redirect to the sender to shortcut a hop.
1571 	 * Only send redirect if source is sending directly to us,
1572 	 * and if packet was not source routed (or has any options).
1573 	 * Also, don't send redirect if forwarding using a default route
1574 	 * or a route modified by a redirect.
1575 	 */
1576 	dest.s_addr = 0;
1577 	if (!srcrt && V_ipsendredirects &&
1578 	    ia != NULL && ia->ia_ifp == m->m_pkthdr.rcvif) {
1579 		struct sockaddr_in *sin;
1580 		struct rtentry *rt;
1581 
1582 		bzero(&ro, sizeof(ro));
1583 		sin = (struct sockaddr_in *)&ro.ro_dst;
1584 		sin->sin_family = AF_INET;
1585 		sin->sin_len = sizeof(*sin);
1586 		sin->sin_addr = ip->ip_dst;
1587 		in_rtalloc_ign(&ro, 0, M_GETFIB(m));
1588 
1589 		rt = ro.ro_rt;
1590 
1591 		if (rt && (rt->rt_flags & (RTF_DYNAMIC|RTF_MODIFIED)) == 0 &&
1592 		    satosin(rt_key(rt))->sin_addr.s_addr != 0) {
1593 #define	RTA(rt)	((struct in_ifaddr *)(rt->rt_ifa))
1594 			u_long src = ntohl(ip->ip_src.s_addr);
1595 
1596 			if (RTA(rt) &&
1597 			    (src & RTA(rt)->ia_subnetmask) == RTA(rt)->ia_subnet) {
1598 				if (rt->rt_flags & RTF_GATEWAY)
1599 					dest.s_addr = satosin(rt->rt_gateway)->sin_addr.s_addr;
1600 				else
1601 					dest.s_addr = ip->ip_dst.s_addr;
1602 				/* Router Requirements says to send only host redirects. */
1603 				type = ICMP_REDIRECT;
1604 				code = ICMP_REDIRECT_HOST;
1605 			}
1606 		}
1607 		if (rt)
1608 			RTFREE(rt);
1609 	}
1610 
1611 	/*
1612 	 * Try to cache the route MTU from ip_output so we can consider it for
1613 	 * the ICMP_UNREACH_NEEDFRAG "Next-Hop MTU" field described in RFC1191.
1614 	 */
1615 	bzero(&ro, sizeof(ro));
1616 
1617 	error = ip_output(m, NULL, &ro, IP_FORWARDING, NULL, NULL);
1618 
1619 	if (error == EMSGSIZE && ro.ro_rt)
1620 		mtu = ro.ro_rt->rt_mtu;
1621 	RO_RTFREE(&ro);
1622 
1623 	if (error)
1624 		IPSTAT_INC(ips_cantforward);
1625 	else {
1626 		IPSTAT_INC(ips_forward);
1627 		if (type)
1628 			IPSTAT_INC(ips_redirectsent);
1629 		else {
1630 			if (mcopy)
1631 				m_freem(mcopy);
1632 			if (ia != NULL)
1633 				ifa_free(&ia->ia_ifa);
1634 			return;
1635 		}
1636 	}
1637 	if (mcopy == NULL) {
1638 		if (ia != NULL)
1639 			ifa_free(&ia->ia_ifa);
1640 		return;
1641 	}
1642 
1643 	switch (error) {
1644 
1645 	case 0:				/* forwarded, but need redirect */
1646 		/* type, code set above */
1647 		break;
1648 
1649 	case ENETUNREACH:
1650 	case EHOSTUNREACH:
1651 	case ENETDOWN:
1652 	case EHOSTDOWN:
1653 	default:
1654 		type = ICMP_UNREACH;
1655 		code = ICMP_UNREACH_HOST;
1656 		break;
1657 
1658 	case EMSGSIZE:
1659 		type = ICMP_UNREACH;
1660 		code = ICMP_UNREACH_NEEDFRAG;
1661 
1662 #ifdef IPSEC
1663 		/*
1664 		 * If IPsec is configured for this path,
1665 		 * override any MTU value possibly set by ip_output.
1666 		 */
1667 		mtu = ip_ipsec_mtu(mcopy, mtu);
1668 #endif /* IPSEC */
1669 		/*
1670 		 * If the MTU was set before, make sure we are below the
1671 		 * interface MTU.
1672 		 * If the MTU wasn't set before, use the interface MTU or
1673 		 * fall back to the next smaller MTU step compared to the
1674 		 * current packet size.
1675 		 */
1676 		if (mtu != 0) {
1677 			if (ia != NULL)
1678 				mtu = min(mtu, ia->ia_ifp->if_mtu);
1679 		} else {
1680 			if (ia != NULL)
1681 				mtu = ia->ia_ifp->if_mtu;
1682 			else
1683 				mtu = ip_next_mtu(ntohs(ip->ip_len), 0);
1684 		}
1685 		IPSTAT_INC(ips_cantfrag);
1686 		break;
1687 
1688 	case ENOBUFS:
1689 	case EACCES:			/* ipfw denied packet */
1690 		m_freem(mcopy);
1691 		if (ia != NULL)
1692 			ifa_free(&ia->ia_ifa);
1693 		return;
1694 	}
1695 	if (ia != NULL)
1696 		ifa_free(&ia->ia_ifa);
1697 	icmp_error(mcopy, type, code, dest.s_addr, mtu);
1698 }
1699 
1700 void
1701 ip_savecontrol(struct inpcb *inp, struct mbuf **mp, struct ip *ip,
1702     struct mbuf *m)
1703 {
1704 
1705 	if (inp->inp_socket->so_options & (SO_BINTIME | SO_TIMESTAMP)) {
1706 		struct bintime bt;
1707 
1708 		bintime(&bt);
1709 		if (inp->inp_socket->so_options & SO_BINTIME) {
1710 			*mp = sbcreatecontrol((caddr_t)&bt, sizeof(bt),
1711 			    SCM_BINTIME, SOL_SOCKET);
1712 			if (*mp)
1713 				mp = &(*mp)->m_next;
1714 		}
1715 		if (inp->inp_socket->so_options & SO_TIMESTAMP) {
1716 			struct timeval tv;
1717 
1718 			bintime2timeval(&bt, &tv);
1719 			*mp = sbcreatecontrol((caddr_t)&tv, sizeof(tv),
1720 			    SCM_TIMESTAMP, SOL_SOCKET);
1721 			if (*mp)
1722 				mp = &(*mp)->m_next;
1723 		}
1724 	}
1725 	if (inp->inp_flags & INP_RECVDSTADDR) {
1726 		*mp = sbcreatecontrol((caddr_t)&ip->ip_dst,
1727 		    sizeof(struct in_addr), IP_RECVDSTADDR, IPPROTO_IP);
1728 		if (*mp)
1729 			mp = &(*mp)->m_next;
1730 	}
1731 	if (inp->inp_flags & INP_RECVTTL) {
1732 		*mp = sbcreatecontrol((caddr_t)&ip->ip_ttl,
1733 		    sizeof(u_char), IP_RECVTTL, IPPROTO_IP);
1734 		if (*mp)
1735 			mp = &(*mp)->m_next;
1736 	}
1737 #ifdef notyet
1738 	/* XXX
1739 	 * Moving these out of udp_input() made them even more broken
1740 	 * than they already were.
1741 	 */
1742 	/* options were tossed already */
1743 	if (inp->inp_flags & INP_RECVOPTS) {
1744 		*mp = sbcreatecontrol((caddr_t)opts_deleted_above,
1745 		    sizeof(struct in_addr), IP_RECVOPTS, IPPROTO_IP);
1746 		if (*mp)
1747 			mp = &(*mp)->m_next;
1748 	}
1749 	/* ip_srcroute doesn't do what we want here, need to fix */
1750 	if (inp->inp_flags & INP_RECVRETOPTS) {
1751 		*mp = sbcreatecontrol((caddr_t)ip_srcroute(m),
1752 		    sizeof(struct in_addr), IP_RECVRETOPTS, IPPROTO_IP);
1753 		if (*mp)
1754 			mp = &(*mp)->m_next;
1755 	}
1756 #endif
1757 	if (inp->inp_flags & INP_RECVIF) {
1758 		struct ifnet *ifp;
1759 		struct sdlbuf {
1760 			struct sockaddr_dl sdl;
1761 			u_char	pad[32];
1762 		} sdlbuf;
1763 		struct sockaddr_dl *sdp;
1764 		struct sockaddr_dl *sdl2 = &sdlbuf.sdl;
1765 
1766 		if ((ifp = m->m_pkthdr.rcvif) &&
1767 		    ifp->if_index && ifp->if_index <= V_if_index) {
1768 			sdp = (struct sockaddr_dl *)ifp->if_addr->ifa_addr;
1769 			/*
1770 			 * Change our mind and don't try to copy.
1771 			 */
1772 			if (sdp->sdl_family != AF_LINK ||
1773 			    sdp->sdl_len > sizeof(sdlbuf)) {
1774 				goto makedummy;
1775 			}
1776 			bcopy(sdp, sdl2, sdp->sdl_len);
1777 		} else {
1778 makedummy:
1779 			sdl2->sdl_len =
1780 			    offsetof(struct sockaddr_dl, sdl_data[0]);
1781 			sdl2->sdl_family = AF_LINK;
1782 			sdl2->sdl_index = 0;
1783 			sdl2->sdl_nlen = sdl2->sdl_alen = sdl2->sdl_slen = 0;
1784 		}
1785 		*mp = sbcreatecontrol((caddr_t)sdl2, sdl2->sdl_len,
1786 		    IP_RECVIF, IPPROTO_IP);
1787 		if (*mp)
1788 			mp = &(*mp)->m_next;
1789 	}
1790 	if (inp->inp_flags & INP_RECVTOS) {
1791 		*mp = sbcreatecontrol((caddr_t)&ip->ip_tos,
1792 		    sizeof(u_char), IP_RECVTOS, IPPROTO_IP);
1793 		if (*mp)
1794 			mp = &(*mp)->m_next;
1795 	}
1796 
1797 	if (inp->inp_flags2 & INP_RECVFLOWID) {
1798 		uint32_t flowid, flow_type;
1799 
1800 		flowid = m->m_pkthdr.flowid;
1801 		flow_type = M_HASHTYPE_GET(m);
1802 
1803 		/*
1804 		 * XXX should handle the failure of one or the
1805 		 * other - don't populate both?
1806 		 */
1807 		*mp = sbcreatecontrol((caddr_t) &flowid,
1808 		    sizeof(uint32_t), IP_FLOWID, IPPROTO_IP);
1809 		if (*mp)
1810 			mp = &(*mp)->m_next;
1811 		*mp = sbcreatecontrol((caddr_t) &flow_type,
1812 		    sizeof(uint32_t), IP_FLOWTYPE, IPPROTO_IP);
1813 		if (*mp)
1814 			mp = &(*mp)->m_next;
1815 	}
1816 
1817 #ifdef	RSS
1818 	if (inp->inp_flags2 & INP_RECVRSSBUCKETID) {
1819 		uint32_t flowid, flow_type;
1820 		uint32_t rss_bucketid;
1821 
1822 		flowid = m->m_pkthdr.flowid;
1823 		flow_type = M_HASHTYPE_GET(m);
1824 
1825 		if (rss_hash2bucket(flowid, flow_type, &rss_bucketid) == 0) {
1826 			*mp = sbcreatecontrol((caddr_t) &rss_bucketid,
1827 			   sizeof(uint32_t), IP_RSSBUCKETID, IPPROTO_IP);
1828 			if (*mp)
1829 				mp = &(*mp)->m_next;
1830 		}
1831 	}
1832 #endif
1833 }
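
/*
 * Illustrative userland sketch, not part of the original source: the
 * control messages built above are requested with setsockopt(2) and
 * read from the msg_control buffer after recvmsg(2).  Error handling
 * is trimmed for brevity.
 */
#if 0
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>

static void
example_recv_dstaddr(int s)
{
	struct msghdr msg;
	struct iovec iov;
	struct cmsghdr *cm;
	struct in_addr dst;
	char data[2048], cbuf[CMSG_SPACE(sizeof(struct in_addr))];
	int on = 1;

	setsockopt(s, IPPROTO_IP, IP_RECVDSTADDR, &on, sizeof(on));
	memset(&msg, 0, sizeof(msg));
	iov.iov_base = data;
	iov.iov_len = sizeof(data);
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);
	if (recvmsg(s, &msg, 0) < 0)
		return;
	for (cm = CMSG_FIRSTHDR(&msg); cm != NULL;
	    cm = CMSG_NXTHDR(&msg, cm)) {
		if (cm->cmsg_level == IPPROTO_IP &&
		    cm->cmsg_type == IP_RECVDSTADDR) {
			memcpy(&dst, CMSG_DATA(cm), sizeof(dst));
			printf("dst %s\n", inet_ntoa(dst));
		}
	}
}
#endif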
1834 
1835 /*
1836  * XXXRW: Multicast routing code in ip_mroute.c is generally MPSAFE, but the
1837  * ip_rsvp and ip_rsvp_on variables need to be interlocked with rsvp_on
1838  * locking.  This code remains in ip_input.c as ip_mroute.c is optionally
1839  * compiled.
1840  */
1841 static VNET_DEFINE(int, ip_rsvp_on);
1842 VNET_DEFINE(struct socket *, ip_rsvpd);
1843 
1844 #define	V_ip_rsvp_on		VNET(ip_rsvp_on)
1845 
1846 int
1847 ip_rsvp_init(struct socket *so)
1848 {
1849 
1850 	if (so->so_type != SOCK_RAW ||
1851 	    so->so_proto->pr_protocol != IPPROTO_RSVP)
1852 		return EOPNOTSUPP;
1853 
1854 	if (V_ip_rsvpd != NULL)
1855 		return EADDRINUSE;
1856 
1857 	V_ip_rsvpd = so;
1858 	/*
1859 	 * This may seem silly, but we need to be sure we don't over-increment
1860 	 * the RSVP counter, in case something slips up.
1861 	 */
1862 	if (!V_ip_rsvp_on) {
1863 		V_ip_rsvp_on = 1;
1864 		V_rsvp_on++;
1865 	}
1866 
1867 	return 0;
1868 }
1869 
1870 int
1871 ip_rsvp_done(void)
1872 {
1873 
1874 	V_ip_rsvpd = NULL;
1875 	/*
1876 	 * This may seem silly, but we need to be sure we don't over-decrement
1877 	 * the RSVP counter, in case something slips up.
1878 	 */
1879 	if (V_ip_rsvp_on) {
1880 		V_ip_rsvp_on = 0;
1881 		V_rsvp_on--;
1882 	}
1883 	return 0;
1884 }
1885 
1886 int
1887 rsvp_input(struct mbuf **mp, int *offp, int proto)
1888 {
1889 	struct mbuf *m;
1890 
1891 	m = *mp;
1892 	*mp = NULL;
1893 
1894 	if (rsvp_input_p) { /* call the real one if loaded */
1895 		*mp = m;
1896 		rsvp_input_p(mp, offp, proto);
1897 		return (IPPROTO_DONE);
1898 	}
1899 
1900 	/* Can still get packets with rsvp_on = 0 if there is a local member
1901 	 * of the group to which the RSVP packet is addressed.  But in this
1902 	 * case we want to throw the packet away.
1903 	 */
1904 
1905 	if (!V_rsvp_on) {
1906 		m_freem(m);
1907 		return (IPPROTO_DONE);
1908 	}
1909 
1910 	if (V_ip_rsvpd != NULL) {
1911 		*mp = m;
1912 		rip_input(mp, offp, proto);
1913 		return (IPPROTO_DONE);
1914 	}
1915 	/* Drop the packet */
1916 	m_freem(m);
1917 	return (IPPROTO_DONE);
1918 }
1919