/*-
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002 - 2008 Henning Brauer
 * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 *	$OpenBSD: pf.c,v 1.634 2009/02/27 12:37:45 henning Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_bpf.h"
#include "opt_pf.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/hash.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/mbuf.h>
#include <sys/md5.h>
#include <sys/random.h>
#include <sys/refcount.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/ucred.h>

#include <net/if.h>
#include <net/if_types.h>
#include <net/route.h>
#include <net/radix_mpath.h>
#include <net/vnet.h>

#include <net/pfvar.h>
#include <net/pf_mtag.h>
#include <net/if_pflog.h>
#include <net/if_pfsync.h>

#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_fw.h>
#include <netinet/ip_icmp.h>
#include <netinet/icmp_var.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>

#include <netpfil/ipfw/ip_fw_private.h> /* XXX: only for DIR_IN/DIR_OUT */

#ifdef INET6
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/nd6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/in6_pcb.h>
#endif /* INET6 */

#include <machine/in_cksum.h>
#include <security/mac/mac_framework.h>

#define	DPFPRINTF(n, x)	if (V_pf_status.debug >= (n)) printf x
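
/*
 * Usage sketch (added comment, not from the original source): the
 * second argument must be a parenthesized printf() argument list,
 * since the macro pastes it in verbatim, e.g.
 *
 *	DPFPRINTF(PF_DEBUG_MISC,
 *	    ("pf: state expired after %u seconds\n", timeout));
 */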

/*
 * Global variables
 */

/* state tables */
VNET_DEFINE(struct pf_altqqueue,	 pf_altqs[2]);
VNET_DEFINE(struct pf_palist,		 pf_pabuf);
VNET_DEFINE(struct pf_altqqueue *,	 pf_altqs_active);
VNET_DEFINE(struct pf_altqqueue *,	 pf_altqs_inactive);
VNET_DEFINE(struct pf_status,		 pf_status);

VNET_DEFINE(u_int32_t,			 ticket_altqs_active);
VNET_DEFINE(u_int32_t,			 ticket_altqs_inactive);
VNET_DEFINE(int,			 altqs_inactive_open);
VNET_DEFINE(u_int32_t,			 ticket_pabuf);

VNET_DEFINE(MD5_CTX,			 pf_tcp_secret_ctx);
#define	V_pf_tcp_secret_ctx		 VNET(pf_tcp_secret_ctx)
VNET_DEFINE(u_char,			 pf_tcp_secret[16]);
#define	V_pf_tcp_secret			 VNET(pf_tcp_secret)
VNET_DEFINE(int,			 pf_tcp_secret_init);
#define	V_pf_tcp_secret_init		 VNET(pf_tcp_secret_init)
VNET_DEFINE(int,			 pf_tcp_iss_off);
#define	V_pf_tcp_iss_off		 VNET(pf_tcp_iss_off)

/*
 * Queue for pf_intr() sends.
 */
static MALLOC_DEFINE(M_PFTEMP, "pf_temp", "pf(4) temporary allocations");
struct pf_send_entry {
	STAILQ_ENTRY(pf_send_entry)	pfse_next;
	struct mbuf			*pfse_m;
	enum {
		PFSE_IP,
		PFSE_IP6,
		PFSE_ICMP,
		PFSE_ICMP6,
	}				pfse_type;
	union {
		struct route		ro;
		struct {
			int		type;
			int		code;
			int		mtu;
		} icmpopts;
	} u;
#define	pfse_ro		u.ro
#define	pfse_icmp_type	u.icmpopts.type
#define	pfse_icmp_code	u.icmpopts.code
#define	pfse_icmp_mtu	u.icmpopts.mtu
};

STAILQ_HEAD(pf_send_head, pf_send_entry);
static VNET_DEFINE(struct pf_send_head, pf_sendqueue);
#define	V_pf_sendqueue	VNET(pf_sendqueue)

static struct mtx pf_sendqueue_mtx;
#define	PF_SENDQ_LOCK()		mtx_lock(&pf_sendqueue_mtx)
#define	PF_SENDQ_UNLOCK()	mtx_unlock(&pf_sendqueue_mtx)
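
/*
 * Usage sketch (illustrative only, mirroring what the ICMP error
 * paths in this file do; m0/type/code are hypothetical caller
 * variables): allocate an entry, fill in the mbuf and the
 * type-specific fields, and hand it to pf_send(), which queues it
 * and schedules the software interrupt drained by pf_intr():
 *
 *	struct pf_send_entry *pfse;
 *
 *	pfse = malloc(sizeof(*pfse), M_PFTEMP, M_NOWAIT);
 *	if (pfse == NULL)
 *		return;			// best effort: drop on failure
 *	pfse->pfse_type = PFSE_ICMP;
 *	pfse->pfse_m = m0;		// reply mbuf prepared by caller
 *	pfse->pfse_icmp_type = type;
 *	pfse->pfse_icmp_code = code;
 *	pf_send(pfse);
 */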

/*
 * Queue for pf_overload_task() tasks.
 */
struct pf_overload_entry {
	SLIST_ENTRY(pf_overload_entry)	next;
	struct pf_addr  		addr;
	sa_family_t			af;
	uint8_t				dir;
	struct pf_rule  		*rule;
};

SLIST_HEAD(pf_overload_head, pf_overload_entry);
static VNET_DEFINE(struct pf_overload_head, pf_overloadqueue);
#define V_pf_overloadqueue	VNET(pf_overloadqueue)
static VNET_DEFINE(struct task, pf_overloadtask);
#define	V_pf_overloadtask	VNET(pf_overloadtask)

static struct mtx pf_overloadqueue_mtx;
#define	PF_OVERLOADQ_LOCK()	mtx_lock(&pf_overloadqueue_mtx)
#define	PF_OVERLOADQ_UNLOCK()	mtx_unlock(&pf_overloadqueue_mtx)

VNET_DEFINE(struct pf_rulequeue, pf_unlinked_rules);
struct mtx pf_unlnkdrules_mtx;

static VNET_DEFINE(uma_zone_t,	pf_sources_z);
#define	V_pf_sources_z	VNET(pf_sources_z)
static VNET_DEFINE(uma_zone_t,	pf_mtag_z);
#define	V_pf_mtag_z	VNET(pf_mtag_z)
VNET_DEFINE(uma_zone_t,	 pf_state_z);
VNET_DEFINE(uma_zone_t,	 pf_state_key_z);

VNET_DEFINE(uint64_t, pf_stateid[MAXCPU]);
#define	PFID_CPUBITS	8
#define	PFID_CPUSHIFT	(sizeof(uint64_t) * NBBY - PFID_CPUBITS)
#define	PFID_CPUMASK	((uint64_t)((1 << PFID_CPUBITS) - 1) << PFID_CPUSHIFT)
#define	PFID_MAXID	(~PFID_CPUMASK)
CTASSERT((1 << PFID_CPUBITS) > MAXCPU);
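
/*
 * Illustration (added comment, not original code): a state ID is a
 * 64-bit value whose top PFID_CPUBITS (8) bits hold the CPU that
 * allocated it and whose low 56 bits hold a per-CPU counter, so
 * pf_state_insert() composes it as
 *
 *	id = counter | ((uint64_t)curcpu << PFID_CPUSHIFT);
 *
 * e.g. counter 0x2a allocated on CPU 3 yields 0x030000000000002a.
 * PFID_MAXID is the largest counter value that fits below the CPU
 * bits, and the CTASSERT above guarantees every CPU number fits.
 */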

static void		 pf_src_tree_remove_state(struct pf_state *);
static void		 pf_init_threshold(struct pf_threshold *, u_int32_t,
			    u_int32_t);
static void		 pf_add_threshold(struct pf_threshold *);
static int		 pf_check_threshold(struct pf_threshold *);

static void		 pf_change_ap(struct pf_addr *, u_int16_t *,
			    u_int16_t *, u_int16_t *, struct pf_addr *,
			    u_int16_t, u_int8_t, sa_family_t);
static int		 pf_modulate_sack(struct mbuf *, int, struct pf_pdesc *,
			    struct tcphdr *, struct pf_state_peer *);
static void		 pf_change_icmp(struct pf_addr *, u_int16_t *,
			    struct pf_addr *, struct pf_addr *, u_int16_t,
			    u_int16_t *, u_int16_t *, u_int16_t *,
			    u_int16_t *, u_int8_t, sa_family_t);
static void		 pf_send_tcp(struct mbuf *,
			    const struct pf_rule *, sa_family_t,
			    const struct pf_addr *, const struct pf_addr *,
			    u_int16_t, u_int16_t, u_int32_t, u_int32_t,
			    u_int8_t, u_int16_t, u_int16_t, u_int8_t, int,
			    u_int16_t, struct ifnet *);
static void		 pf_send_icmp(struct mbuf *, u_int8_t, u_int8_t,
			    sa_family_t, struct pf_rule *);
static void		 pf_detach_state(struct pf_state *);
static int		 pf_state_key_attach(struct pf_state_key *,
			    struct pf_state_key *, struct pf_state *);
static void		 pf_state_key_detach(struct pf_state *, int);
static int		 pf_state_key_ctor(void *, int, void *, int);
static u_int32_t	 pf_tcp_iss(struct pf_pdesc *);
static int		 pf_test_rule(struct pf_rule **, struct pf_state **,
			    int, struct pfi_kif *, struct mbuf *, int,
			    struct pf_pdesc *, struct pf_rule **,
			    struct pf_ruleset **, struct inpcb *);
static int		 pf_create_state(struct pf_rule *, struct pf_rule *,
			    struct pf_rule *, struct pf_pdesc *,
			    struct pf_src_node *, struct pf_state_key *,
			    struct pf_state_key *, struct mbuf *, int,
			    u_int16_t, u_int16_t, int *, struct pfi_kif *,
			    struct pf_state **, int, u_int16_t, u_int16_t,
			    int);
static int		 pf_test_fragment(struct pf_rule **, int,
			    struct pfi_kif *, struct mbuf *, void *,
			    struct pf_pdesc *, struct pf_rule **,
			    struct pf_ruleset **);
static int		 pf_tcp_track_full(struct pf_state_peer *,
			    struct pf_state_peer *, struct pf_state **,
			    struct pfi_kif *, struct mbuf *, int,
			    struct pf_pdesc *, u_short *, int *);
static int		 pf_tcp_track_sloppy(struct pf_state_peer *,
			    struct pf_state_peer *, struct pf_state **,
			    struct pf_pdesc *, u_short *);
static int		 pf_test_state_tcp(struct pf_state **, int,
			    struct pfi_kif *, struct mbuf *, int,
			    void *, struct pf_pdesc *, u_short *);
static int		 pf_test_state_udp(struct pf_state **, int,
			    struct pfi_kif *, struct mbuf *, int,
			    void *, struct pf_pdesc *);
static int		 pf_test_state_icmp(struct pf_state **, int,
			    struct pfi_kif *, struct mbuf *, int,
			    void *, struct pf_pdesc *, u_short *);
static int		 pf_test_state_other(struct pf_state **, int,
			    struct pfi_kif *, struct mbuf *, struct pf_pdesc *);
static u_int8_t		 pf_get_wscale(struct mbuf *, int, u_int16_t,
			    sa_family_t);
static u_int16_t	 pf_get_mss(struct mbuf *, int, u_int16_t,
			    sa_family_t);
static u_int16_t	 pf_calc_mss(struct pf_addr *, sa_family_t,
				int, u_int16_t);
static void		 pf_set_rt_ifp(struct pf_state *,
			    struct pf_addr *);
static int		 pf_check_proto_cksum(struct mbuf *, int, int,
			    u_int8_t, sa_family_t);
static void		 pf_print_state_parts(struct pf_state *,
			    struct pf_state_key *, struct pf_state_key *);
static int		 pf_addr_wrap_neq(struct pf_addr_wrap *,
			    struct pf_addr_wrap *);
static struct pf_state	*pf_find_state(struct pfi_kif *,
			    struct pf_state_key_cmp *, u_int);
static int		 pf_src_connlimit(struct pf_state **);
static void		 pf_overload_task(void *c, int pending);
static int		 pf_insert_src_node(struct pf_src_node **,
			    struct pf_rule *, struct pf_addr *, sa_family_t);
static u_int		 pf_purge_expired_states(u_int, int);
static void		 pf_purge_unlinked_rules(void);
static int		 pf_mtag_init(void *, int, int);
static void		 pf_mtag_free(struct m_tag *);
#ifdef INET
static void		 pf_route(struct mbuf **, struct pf_rule *, int,
			    struct ifnet *, struct pf_state *,
			    struct pf_pdesc *);
#endif /* INET */
#ifdef INET6
static void		 pf_change_a6(struct pf_addr *, u_int16_t *,
			    struct pf_addr *, u_int8_t);
static void		 pf_route6(struct mbuf **, struct pf_rule *, int,
			    struct ifnet *, struct pf_state *,
			    struct pf_pdesc *);
#endif /* INET6 */

int in4_cksum(struct mbuf *m, u_int8_t nxt, int off, int len);

VNET_DECLARE(int, pf_end_threads);

VNET_DEFINE(struct pf_limit, pf_limits[PF_LIMIT_MAX]);

#define	PACKET_LOOPED(pd)	((pd)->pf_mtag &&			\
				 (pd)->pf_mtag->flags & PF_PACKET_LOOPED)

#define	STATE_LOOKUP(i, k, d, s, pd)					\
	do {								\
		(s) = pf_find_state((i), (k), (d));			\
		if ((s) == NULL || (s)->timeout == PFTM_PURGE)		\
			return (PF_DROP);				\
		if (PACKET_LOOPED(pd))					\
			return (PF_PASS);				\
		if ((d) == PF_OUT &&					\
		    (((s)->rule.ptr->rt == PF_ROUTETO &&		\
		    (s)->rule.ptr->direction == PF_OUT) ||		\
		    ((s)->rule.ptr->rt == PF_REPLYTO &&			\
		    (s)->rule.ptr->direction == PF_IN)) &&		\
		    (s)->rt_kif != NULL &&				\
		    (s)->rt_kif != (i))					\
			return (PF_PASS);				\
	} while (0)

#define	BOUND_IFACE(r, k) \
	((r)->rule_flag & PFRULE_IFBOUND) ? (k) : V_pfi_all

#define	STATE_INC_COUNTERS(s)				\
	do {						\
		s->rule.ptr->states_cur++;		\
		s->rule.ptr->states_tot++;		\
		if (s->anchor.ptr != NULL) {		\
			s->anchor.ptr->states_cur++;	\
			s->anchor.ptr->states_tot++;	\
		}					\
		if (s->nat_rule.ptr != NULL) {		\
			s->nat_rule.ptr->states_cur++;	\
			s->nat_rule.ptr->states_tot++;	\
		}					\
	} while (0)

#define	STATE_DEC_COUNTERS(s)				\
	do {						\
		if (s->nat_rule.ptr != NULL)		\
			s->nat_rule.ptr->states_cur--;	\
		if (s->anchor.ptr != NULL)		\
			s->anchor.ptr->states_cur--;	\
		s->rule.ptr->states_cur--;		\
	} while (0)

static MALLOC_DEFINE(M_PFHASH, "pf_hash", "pf(4) hash header structures");
VNET_DEFINE(struct pf_keyhash *, pf_keyhash);
VNET_DEFINE(struct pf_idhash *, pf_idhash);
VNET_DEFINE(u_long, pf_hashmask);
VNET_DEFINE(struct pf_srchash *, pf_srchash);
VNET_DEFINE(u_long, pf_srchashmask);

SYSCTL_NODE(_net, OID_AUTO, pf, CTLFLAG_RW, 0, "pf(4)");

VNET_DEFINE(u_long, pf_hashsize);
#define	V_pf_hashsize	VNET(pf_hashsize)
SYSCTL_VNET_UINT(_net_pf, OID_AUTO, states_hashsize, CTLFLAG_RDTUN,
    &VNET_NAME(pf_hashsize), 0, "Size of pf(4) states hashtable");

VNET_DEFINE(u_long, pf_srchashsize);
#define	V_pf_srchashsize	VNET(pf_srchashsize)
SYSCTL_VNET_UINT(_net_pf, OID_AUTO, source_nodes_hashsize, CTLFLAG_RDTUN,
    &VNET_NAME(pf_srchashsize), 0, "Size of pf(4) source nodes hashtable");

VNET_DEFINE(void *, pf_swi_cookie);

VNET_DEFINE(uint32_t, pf_hashseed);
#define	V_pf_hashseed	VNET(pf_hashseed)

static __inline uint32_t
pf_hashkey(struct pf_state_key *sk)
{
	uint32_t h;

	h = jenkins_hash32((uint32_t *)sk,
	    sizeof(struct pf_state_key_cmp)/sizeof(uint32_t),
	    V_pf_hashseed);

	return (h & V_pf_hashmask);
}

static __inline uint32_t
pf_hashsrc(struct pf_addr *addr, sa_family_t af)
{
	uint32_t h;

	switch (af) {
	case AF_INET:
		h = jenkins_hash32((uint32_t *)&addr->v4,
		    sizeof(addr->v4)/sizeof(uint32_t), V_pf_hashseed);
		break;
	case AF_INET6:
		h = jenkins_hash32((uint32_t *)&addr->v6,
		    sizeof(addr->v6)/sizeof(uint32_t), V_pf_hashseed);
		break;
	default:
		panic("%s: unknown address family %u", __func__, af);
	}

	return (h & V_pf_srchashmask);
}

#ifdef INET6
void
pf_addrcpy(struct pf_addr *dst, struct pf_addr *src, sa_family_t af)
{
	switch (af) {
#ifdef INET
	case AF_INET:
		dst->addr32[0] = src->addr32[0];
		break;
#endif /* INET */
	case AF_INET6:
		dst->addr32[0] = src->addr32[0];
		dst->addr32[1] = src->addr32[1];
		dst->addr32[2] = src->addr32[2];
		dst->addr32[3] = src->addr32[3];
		break;
	}
}
#endif /* INET6 */

static void
pf_init_threshold(struct pf_threshold *threshold,
    u_int32_t limit, u_int32_t seconds)
{
	threshold->limit = limit * PF_THRESHOLD_MULT;
	threshold->seconds = seconds;
	threshold->count = 0;
	threshold->last = time_uptime;
}

static void
pf_add_threshold(struct pf_threshold *threshold)
{
	u_int32_t t = time_uptime, diff = t - threshold->last;

	if (diff >= threshold->seconds)
		threshold->count = 0;
	else
		threshold->count -= threshold->count * diff /
		    threshold->seconds;
	threshold->count += PF_THRESHOLD_MULT;
	threshold->last = t;
}

static int
pf_check_threshold(struct pf_threshold *threshold)
{
	return (threshold->count > threshold->limit);
}
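
/*
 * Worked example of the fixed-point arithmetic above (assuming
 * PF_THRESHOLD_MULT is 1000, as defined in pfvar.h): a rule with
 * "max-src-conn-rate 100/10" yields limit = 100 * 1000 and
 * seconds = 10. Each new connection adds 1000 to the count after
 * pf_add_threshold() first ages it linearly: 4 seconds into the
 * 10-second window the count loses count * 4 / 10. The check then
 * trips once the decayed count exceeds 100 * 1000, i.e. once more
 * than about 100 connections land within the sliding window.
 */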

static int
pf_src_connlimit(struct pf_state **state)
{
	struct pf_overload_entry *pfoe;
	int bad = 0;

	PF_STATE_LOCK_ASSERT(*state);

	(*state)->src_node->conn++;
	(*state)->src.tcp_est = 1;
	pf_add_threshold(&(*state)->src_node->conn_rate);

	if ((*state)->rule.ptr->max_src_conn &&
	    (*state)->rule.ptr->max_src_conn <
	    (*state)->src_node->conn) {
		V_pf_status.lcounters[LCNT_SRCCONN]++;
		bad++;
	}

	if ((*state)->rule.ptr->max_src_conn_rate.limit &&
	    pf_check_threshold(&(*state)->src_node->conn_rate)) {
		V_pf_status.lcounters[LCNT_SRCCONNRATE]++;
		bad++;
	}

	if (!bad)
		return (0);

	/* Kill this state. */
	(*state)->timeout = PFTM_PURGE;
	(*state)->src.state = (*state)->dst.state = TCPS_CLOSED;

	if ((*state)->rule.ptr->overload_tbl == NULL)
		return (1);

	/* Schedule overloading and flushing task. */
	pfoe = malloc(sizeof(*pfoe), M_PFTEMP, M_NOWAIT);
	if (pfoe == NULL)
		return (1);	/* too bad :( */

	bcopy(&(*state)->src_node->addr, &pfoe->addr, sizeof(pfoe->addr));
	pfoe->af = (*state)->key[PF_SK_WIRE]->af;
	pfoe->rule = (*state)->rule.ptr;
	pfoe->dir = (*state)->direction;
	PF_OVERLOADQ_LOCK();
	SLIST_INSERT_HEAD(&V_pf_overloadqueue, pfoe, next);
	PF_OVERLOADQ_UNLOCK();
	taskqueue_enqueue(taskqueue_swi, &V_pf_overloadtask);

	return (1);
}

static void
pf_overload_task(void *c, int pending)
{
	struct pf_overload_head queue;
	struct pfr_addr p;
	struct pf_overload_entry *pfoe, *pfoe1;
	uint32_t killed = 0;

	PF_OVERLOADQ_LOCK();
	queue = *(struct pf_overload_head *)c;
	SLIST_INIT((struct pf_overload_head *)c);
	PF_OVERLOADQ_UNLOCK();

	bzero(&p, sizeof(p));
	SLIST_FOREACH(pfoe, &queue, next) {
		V_pf_status.lcounters[LCNT_OVERLOAD_TABLE]++;
		if (V_pf_status.debug >= PF_DEBUG_MISC) {
			printf("%s: blocking address ", __func__);
			pf_print_host(&pfoe->addr, 0, pfoe->af);
			printf("\n");
		}

		p.pfra_af = pfoe->af;
		switch (pfoe->af) {
#ifdef INET
		case AF_INET:
			p.pfra_net = 32;
			p.pfra_ip4addr = pfoe->addr.v4;
			break;
#endif
#ifdef INET6
		case AF_INET6:
			p.pfra_net = 128;
			p.pfra_ip6addr = pfoe->addr.v6;
			break;
#endif
		}

		PF_RULES_WLOCK();
		pfr_insert_kentry(pfoe->rule->overload_tbl, &p, time_second);
		PF_RULES_WUNLOCK();
	}

	/*
	 * Remove those entries that don't need flushing.
	 */
	SLIST_FOREACH_SAFE(pfoe, &queue, next, pfoe1)
		if (pfoe->rule->flush == 0) {
			SLIST_REMOVE(&queue, pfoe, pf_overload_entry, next);
			free(pfoe, M_PFTEMP);
		} else
			V_pf_status.lcounters[LCNT_OVERLOAD_FLUSH]++;

	/* If nothing to flush, return. */
	if (SLIST_EMPTY(&queue))
		return;

	for (int i = 0; i <= V_pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];
		struct pf_state_key *sk;
		struct pf_state *s;

		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
		    sk = s->key[PF_SK_WIRE];
		    SLIST_FOREACH(pfoe, &queue, next)
			if (sk->af == pfoe->af &&
			    ((pfoe->rule->flush & PF_FLUSH_GLOBAL) ||
			    pfoe->rule == s->rule.ptr) &&
			    ((pfoe->dir == PF_OUT &&
			    PF_AEQ(&pfoe->addr, &sk->addr[1], sk->af)) ||
			    (pfoe->dir == PF_IN &&
			    PF_AEQ(&pfoe->addr, &sk->addr[0], sk->af)))) {
				s->timeout = PFTM_PURGE;
				s->src.state = s->dst.state = TCPS_CLOSED;
				killed++;
			}
		}
		PF_HASHROW_UNLOCK(ih);
	}
	SLIST_FOREACH_SAFE(pfoe, &queue, next, pfoe1)
		free(pfoe, M_PFTEMP);
	if (V_pf_status.debug >= PF_DEBUG_MISC)
		printf("%s: %u states killed\n", __func__, killed);
}

/*
 * Can return locked on failure (when returnlocked is set), so
 * that the caller can consistently allocate and insert a new one.
 */
struct pf_src_node *
pf_find_src_node(struct pf_addr *src, struct pf_rule *rule, sa_family_t af,
	int returnlocked)
{
	struct pf_srchash *sh;
	struct pf_src_node *n;

	V_pf_status.scounters[SCNT_SRC_NODE_SEARCH]++;

	sh = &V_pf_srchash[pf_hashsrc(src, af)];
	PF_HASHROW_LOCK(sh);
	LIST_FOREACH(n, &sh->nodes, entry)
		if (n->rule.ptr == rule && n->af == af &&
		    ((af == AF_INET && n->addr.v4.s_addr == src->v4.s_addr) ||
		    (af == AF_INET6 && bcmp(&n->addr, src, sizeof(*src)) == 0)))
			break;
	if (n != NULL || returnlocked == 0)
		PF_HASHROW_UNLOCK(sh);

	return (n);
}

static int
pf_insert_src_node(struct pf_src_node **sn, struct pf_rule *rule,
    struct pf_addr *src, sa_family_t af)
{

	KASSERT((rule->rule_flag & PFRULE_RULESRCTRACK ||
	    rule->rpool.opts & PF_POOL_STICKYADDR),
	    ("%s for non-tracking rule %p", __func__, rule));

	if (*sn == NULL)
		*sn = pf_find_src_node(src, rule, af, 1);

	if (*sn == NULL) {
		struct pf_srchash *sh = &V_pf_srchash[pf_hashsrc(src, af)];

		PF_HASHROW_ASSERT(sh);

		if (!rule->max_src_nodes ||
		    rule->src_nodes < rule->max_src_nodes)
			(*sn) = uma_zalloc(V_pf_sources_z, M_NOWAIT | M_ZERO);
		else
			V_pf_status.lcounters[LCNT_SRCNODES]++;
		if ((*sn) == NULL) {
			PF_HASHROW_UNLOCK(sh);
			return (-1);
		}

		pf_init_threshold(&(*sn)->conn_rate,
		    rule->max_src_conn_rate.limit,
		    rule->max_src_conn_rate.seconds);

		(*sn)->af = af;
		(*sn)->rule.ptr = rule;
		PF_ACPY(&(*sn)->addr, src, af);
		LIST_INSERT_HEAD(&sh->nodes, *sn, entry);
		(*sn)->creation = time_uptime;
		(*sn)->ruletype = rule->action;
		if ((*sn)->rule.ptr != NULL)
			(*sn)->rule.ptr->src_nodes++;
		PF_HASHROW_UNLOCK(sh);
		V_pf_status.scounters[SCNT_SRC_NODE_INSERT]++;
		V_pf_status.src_nodes++;
	} else {
		if (rule->max_src_states &&
		    (*sn)->states >= rule->max_src_states) {
			V_pf_status.lcounters[LCNT_SRCSTATES]++;
			return (-1);
		}
	}
	return (0);
}

static void
pf_remove_src_node(struct pf_src_node *src)
{
	struct pf_srchash *sh;

	sh = &V_pf_srchash[pf_hashsrc(&src->addr, src->af)];
	PF_HASHROW_LOCK(sh);
	LIST_REMOVE(src, entry);
	PF_HASHROW_UNLOCK(sh);

	V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
	V_pf_status.src_nodes--;

	uma_zfree(V_pf_sources_z, src);
}

/* Data storage structures initialization. */
void
pf_initialize()
{
	struct pf_keyhash	*kh;
	struct pf_idhash	*ih;
	struct pf_srchash	*sh;
	u_int i;

	TUNABLE_ULONG_FETCH("net.pf.states_hashsize", &V_pf_hashsize);
	if (V_pf_hashsize == 0 || !powerof2(V_pf_hashsize))
		V_pf_hashsize = PF_HASHSIZ;
	TUNABLE_ULONG_FETCH("net.pf.source_nodes_hashsize", &V_pf_srchashsize);
	if (V_pf_srchashsize == 0 || !powerof2(V_pf_srchashsize))
		V_pf_srchashsize = PF_HASHSIZ / 4;

	V_pf_hashseed = arc4random();

	/* States and state keys storage. */
	V_pf_state_z = uma_zcreate("pf states", sizeof(struct pf_state),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	V_pf_limits[PF_LIMIT_STATES].zone = V_pf_state_z;
	uma_zone_set_max(V_pf_state_z, PFSTATE_HIWAT);
	uma_zone_set_warning(V_pf_state_z, "PF states limit reached");

	V_pf_state_key_z = uma_zcreate("pf state keys",
	    sizeof(struct pf_state_key), pf_state_key_ctor, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	V_pf_keyhash = malloc(V_pf_hashsize * sizeof(struct pf_keyhash),
	    M_PFHASH, M_WAITOK | M_ZERO);
	V_pf_idhash = malloc(V_pf_hashsize * sizeof(struct pf_idhash),
	    M_PFHASH, M_WAITOK | M_ZERO);
	V_pf_hashmask = V_pf_hashsize - 1;
	for (i = 0, kh = V_pf_keyhash, ih = V_pf_idhash; i <= V_pf_hashmask;
	    i++, kh++, ih++) {
		mtx_init(&kh->lock, "pf_keyhash", NULL, MTX_DEF);
		mtx_init(&ih->lock, "pf_idhash", NULL, MTX_DEF);
	}

	/* Source nodes. */
	V_pf_sources_z = uma_zcreate("pf source nodes",
	    sizeof(struct pf_src_node), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    0);
	V_pf_limits[PF_LIMIT_SRC_NODES].zone = V_pf_sources_z;
	uma_zone_set_max(V_pf_sources_z, PFSNODE_HIWAT);
	uma_zone_set_warning(V_pf_sources_z, "PF source nodes limit reached");
	V_pf_srchash = malloc(V_pf_srchashsize * sizeof(struct pf_srchash),
	    M_PFHASH, M_WAITOK | M_ZERO);
	V_pf_srchashmask = V_pf_srchashsize - 1;
	for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask; i++, sh++)
		mtx_init(&sh->lock, "pf_srchash", NULL, MTX_DEF);

	/* ALTQ */
	TAILQ_INIT(&V_pf_altqs[0]);
	TAILQ_INIT(&V_pf_altqs[1]);
	TAILQ_INIT(&V_pf_pabuf);
	V_pf_altqs_active = &V_pf_altqs[0];
	V_pf_altqs_inactive = &V_pf_altqs[1];

	/* Mbuf tags */
	V_pf_mtag_z = uma_zcreate("pf mtags", sizeof(struct m_tag) +
	    sizeof(struct pf_mtag), NULL, NULL, pf_mtag_init, NULL,
	    UMA_ALIGN_PTR, 0);

	/* Send & overload+flush queues. */
	STAILQ_INIT(&V_pf_sendqueue);
	SLIST_INIT(&V_pf_overloadqueue);
	TASK_INIT(&V_pf_overloadtask, 0, pf_overload_task, &V_pf_overloadqueue);
	mtx_init(&pf_sendqueue_mtx, "pf send queue", NULL, MTX_DEF);
	mtx_init(&pf_overloadqueue_mtx, "pf overload/flush queue", NULL,
	    MTX_DEF);

	/* Unlinked, but may be referenced rules. */
	TAILQ_INIT(&V_pf_unlinked_rules);
	mtx_init(&pf_unlnkdrules_mtx, "pf unlinked rules", NULL, MTX_DEF);
}

void
pf_cleanup()
{
	struct pf_keyhash	*kh;
	struct pf_idhash	*ih;
	struct pf_srchash	*sh;
	struct pf_send_entry	*pfse, *next;
	u_int i;

	for (i = 0, kh = V_pf_keyhash, ih = V_pf_idhash; i <= V_pf_hashmask;
	    i++, kh++, ih++) {
		KASSERT(LIST_EMPTY(&kh->keys), ("%s: key hash not empty",
		    __func__));
		KASSERT(LIST_EMPTY(&ih->states), ("%s: id hash not empty",
		    __func__));
		mtx_destroy(&kh->lock);
		mtx_destroy(&ih->lock);
	}
	free(V_pf_keyhash, M_PFHASH);
	free(V_pf_idhash, M_PFHASH);

	for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask; i++, sh++) {
		KASSERT(LIST_EMPTY(&sh->nodes),
		    ("%s: source node hash not empty", __func__));
		mtx_destroy(&sh->lock);
	}
	free(V_pf_srchash, M_PFHASH);

	STAILQ_FOREACH_SAFE(pfse, &V_pf_sendqueue, pfse_next, next) {
		m_freem(pfse->pfse_m);
		free(pfse, M_PFTEMP);
	}

	mtx_destroy(&pf_sendqueue_mtx);
	mtx_destroy(&pf_overloadqueue_mtx);
	mtx_destroy(&pf_unlnkdrules_mtx);

	uma_zdestroy(V_pf_mtag_z);
	uma_zdestroy(V_pf_sources_z);
	uma_zdestroy(V_pf_state_z);
	uma_zdestroy(V_pf_state_key_z);
}

static int
pf_mtag_init(void *mem, int size, int how)
{
	struct m_tag *t;

	t = (struct m_tag *)mem;
	t->m_tag_cookie = MTAG_ABI_COMPAT;
	t->m_tag_id = PACKET_TAG_PF;
	t->m_tag_len = sizeof(struct pf_mtag);
	t->m_tag_free = pf_mtag_free;

	return (0);
}

static void
pf_mtag_free(struct m_tag *t)
{

	uma_zfree(V_pf_mtag_z, t);
}

struct pf_mtag *
pf_get_mtag(struct mbuf *m)
{
	struct m_tag *mtag;

	if ((mtag = m_tag_find(m, PACKET_TAG_PF, NULL)) != NULL)
		return ((struct pf_mtag *)(mtag + 1));

	mtag = uma_zalloc(V_pf_mtag_z, M_NOWAIT);
	if (mtag == NULL)
		return (NULL);
	bzero(mtag + 1, sizeof(struct pf_mtag));
	m_tag_prepend(m, mtag);

	return ((struct pf_mtag *)(mtag + 1));
}
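
/*
 * Note (added): the pf_mtag payload is laid out immediately after
 * the m_tag header in the same UMA allocation, hence the
 * (mtag + 1) pointer arithmetic above; a NULL return means the
 * M_NOWAIT allocation failed and the caller must cope.
 */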

static int
pf_state_key_attach(struct pf_state_key *skw, struct pf_state_key *sks,
    struct pf_state *s)
{
	struct pf_keyhash	*kh;
	struct pf_state_key	*sk, *cur;
	struct pf_state		*si, *olds = NULL;
	int idx;

	KASSERT(s->refs == 0, ("%s: state not pristine", __func__));
	KASSERT(s->key[PF_SK_WIRE] == NULL, ("%s: state has key", __func__));
	KASSERT(s->key[PF_SK_STACK] == NULL, ("%s: state has key", __func__));

	/*
	 * First run: start with wire key.
	 */
	sk = skw;
	idx = PF_SK_WIRE;

keyattach:
	kh = &V_pf_keyhash[pf_hashkey(sk)];

	PF_HASHROW_LOCK(kh);
	LIST_FOREACH(cur, &kh->keys, entry)
		if (bcmp(cur, sk, sizeof(struct pf_state_key_cmp)) == 0)
			break;

	if (cur != NULL) {
		/* Key exists. Check for same kif, if none, add to key. */
		TAILQ_FOREACH(si, &cur->states[idx], key_list[idx]) {
			struct pf_idhash *ih = &V_pf_idhash[PF_IDHASH(si)];

			PF_HASHROW_LOCK(ih);
			if (si->kif == s->kif &&
			    si->direction == s->direction) {
				if (sk->proto == IPPROTO_TCP &&
				    si->src.state >= TCPS_FIN_WAIT_2 &&
				    si->dst.state >= TCPS_FIN_WAIT_2) {
					si->src.state = si->dst.state =
					    TCPS_CLOSED;
					/* Unlink later or cur can go away. */
					pf_ref_state(si);
					olds = si;
				} else {
					if (V_pf_status.debug >= PF_DEBUG_MISC) {
						printf("pf: %s key attach "
						    "failed on %s: ",
						    (idx == PF_SK_WIRE) ?
						    "wire" : "stack",
						    s->kif->pfik_name);
						pf_print_state_parts(s,
						    (idx == PF_SK_WIRE) ?
						    sk : NULL,
						    (idx == PF_SK_STACK) ?
						    sk : NULL);
						printf(", existing: ");
						pf_print_state_parts(si,
						    (idx == PF_SK_WIRE) ?
						    sk : NULL,
						    (idx == PF_SK_STACK) ?
						    sk : NULL);
						printf("\n");
					}
					PF_HASHROW_UNLOCK(ih);
					PF_HASHROW_UNLOCK(kh);
					uma_zfree(V_pf_state_key_z, sk);
					if (idx == PF_SK_STACK)
						pf_detach_state(s);
					return (-1);	/* collision! */
				}
			}
			PF_HASHROW_UNLOCK(ih);
		}
		uma_zfree(V_pf_state_key_z, sk);
		s->key[idx] = cur;
	} else {
		LIST_INSERT_HEAD(&kh->keys, sk, entry);
		s->key[idx] = sk;
	}

stateattach:
	/* List is sorted, if-bound states before floating. */
	if (s->kif == V_pfi_all)
		TAILQ_INSERT_TAIL(&s->key[idx]->states[idx], s, key_list[idx]);
	else
		TAILQ_INSERT_HEAD(&s->key[idx]->states[idx], s, key_list[idx]);

	/*
	 * Attach done. Now decide whether (and how) we
	 * should attach a second key.
	 */
	if (sks == skw) {
		s->key[PF_SK_STACK] = s->key[PF_SK_WIRE];
		idx = PF_SK_STACK;
		sks = NULL;
		goto stateattach;
	} else if (sks != NULL) {
		PF_HASHROW_UNLOCK(kh);
		if (olds) {
			pf_unlink_state(olds, 0);
			pf_release_state(olds);
			olds = NULL;
		}
		/*
		 * Continue attaching with stack key.
		 */
		sk = sks;
		idx = PF_SK_STACK;
		sks = NULL;
		goto keyattach;
	} else
		PF_HASHROW_UNLOCK(kh);

	if (olds) {
		pf_unlink_state(olds, 0);
		pf_release_state(olds);
	}

	KASSERT(s->key[PF_SK_WIRE] != NULL && s->key[PF_SK_STACK] != NULL,
	    ("%s failure", __func__));

	return (0);
}

static void
pf_detach_state(struct pf_state *s)
{
	struct pf_state_key *sks = s->key[PF_SK_STACK];
	struct pf_keyhash *kh;

	if (sks != NULL) {
		kh = &V_pf_keyhash[pf_hashkey(sks)];
		PF_HASHROW_LOCK(kh);
		if (s->key[PF_SK_STACK] != NULL)
			pf_state_key_detach(s, PF_SK_STACK);
		/*
		 * If both point to the same key, we are done.
		 */
		if (sks == s->key[PF_SK_WIRE]) {
			pf_state_key_detach(s, PF_SK_WIRE);
			PF_HASHROW_UNLOCK(kh);
			return;
		}
		PF_HASHROW_UNLOCK(kh);
	}

	if (s->key[PF_SK_WIRE] != NULL) {
		kh = &V_pf_keyhash[pf_hashkey(s->key[PF_SK_WIRE])];
		PF_HASHROW_LOCK(kh);
		if (s->key[PF_SK_WIRE] != NULL)
			pf_state_key_detach(s, PF_SK_WIRE);
		PF_HASHROW_UNLOCK(kh);
	}
}

static void
pf_state_key_detach(struct pf_state *s, int idx)
{
	struct pf_state_key *sk = s->key[idx];
#ifdef INVARIANTS
	struct pf_keyhash *kh = &V_pf_keyhash[pf_hashkey(sk)];

	PF_HASHROW_ASSERT(kh);
#endif
	TAILQ_REMOVE(&sk->states[idx], s, key_list[idx]);
	s->key[idx] = NULL;

	if (TAILQ_EMPTY(&sk->states[0]) && TAILQ_EMPTY(&sk->states[1])) {
		LIST_REMOVE(sk, entry);
		uma_zfree(V_pf_state_key_z, sk);
	}
}

static int
pf_state_key_ctor(void *mem, int size, void *arg, int flags)
{
	struct pf_state_key *sk = mem;

	bzero(sk, sizeof(struct pf_state_key_cmp));
	TAILQ_INIT(&sk->states[PF_SK_WIRE]);
	TAILQ_INIT(&sk->states[PF_SK_STACK]);

	return (0);
}

struct pf_state_key *
pf_state_key_setup(struct pf_pdesc *pd, struct pf_addr *saddr,
	struct pf_addr *daddr, u_int16_t sport, u_int16_t dport)
{
	struct pf_state_key *sk;

	sk = uma_zalloc(V_pf_state_key_z, M_NOWAIT);
	if (sk == NULL)
		return (NULL);

	PF_ACPY(&sk->addr[pd->sidx], saddr, pd->af);
	PF_ACPY(&sk->addr[pd->didx], daddr, pd->af);
	sk->port[pd->sidx] = sport;
	sk->port[pd->didx] = dport;
	sk->proto = pd->proto;
	sk->af = pd->af;

	return (sk);
}

struct pf_state_key *
pf_state_key_clone(struct pf_state_key *orig)
{
	struct pf_state_key *sk;

	sk = uma_zalloc(V_pf_state_key_z, M_NOWAIT);
	if (sk == NULL)
		return (NULL);

	bcopy(orig, sk, sizeof(struct pf_state_key_cmp));

	return (sk);
}

int
pf_state_insert(struct pfi_kif *kif, struct pf_state_key *skw,
    struct pf_state_key *sks, struct pf_state *s)
{
	struct pf_idhash *ih;
	struct pf_state *cur;

	KASSERT(TAILQ_EMPTY(&sks->states[0]) && TAILQ_EMPTY(&sks->states[1]),
	    ("%s: sks not pristine", __func__));
	KASSERT(TAILQ_EMPTY(&skw->states[0]) && TAILQ_EMPTY(&skw->states[1]),
	    ("%s: skw not pristine", __func__));
	KASSERT(s->refs == 0, ("%s: state not pristine", __func__));

	s->kif = kif;

	if (s->id == 0 && s->creatorid == 0) {
		/* XXX: should be atomic, but probability of collision low */
		if ((s->id = V_pf_stateid[curcpu]++) == PFID_MAXID)
			V_pf_stateid[curcpu] = 1;
		s->id |= (uint64_t)curcpu << PFID_CPUSHIFT;
		s->id = htobe64(s->id);
		s->creatorid = V_pf_status.hostid;
	}

	if (pf_state_key_attach(skw, sks, s))
		return (-1);

	ih = &V_pf_idhash[PF_IDHASH(s)];
	PF_HASHROW_LOCK(ih);
	LIST_FOREACH(cur, &ih->states, entry)
		if (cur->id == s->id && cur->creatorid == s->creatorid)
			break;

	if (cur != NULL) {
		PF_HASHROW_UNLOCK(ih);
		if (V_pf_status.debug >= PF_DEBUG_MISC) {
			printf("pf: state insert failed: "
			    "id: %016llx creatorid: %08x\n",
			    (unsigned long long)be64toh(s->id),
			    ntohl(s->creatorid));
		}
		pf_detach_state(s);
		return (-1);
	}
	LIST_INSERT_HEAD(&ih->states, s, entry);
	/* One for keys, one for ID hash. */
	refcount_init(&s->refs, 2);

	V_pf_status.fcounters[FCNT_STATE_INSERT]++;
	if (pfsync_insert_state_ptr != NULL)
		pfsync_insert_state_ptr(s);

	/* Returns locked. */
	return (0);
}

/*
 * Find state by ID: returns with locked row on success.
 */
struct pf_state *
pf_find_state_byid(uint64_t id, uint32_t creatorid)
{
	struct pf_idhash *ih;
	struct pf_state *s;

	V_pf_status.fcounters[FCNT_STATE_SEARCH]++;

	ih = &V_pf_idhash[(be64toh(id) % (V_pf_hashmask + 1))];

	PF_HASHROW_LOCK(ih);
	LIST_FOREACH(s, &ih->states, entry)
		if (s->id == id && s->creatorid == creatorid)
			break;

	if (s == NULL)
		PF_HASHROW_UNLOCK(ih);

	return (s);
}

/*
 * Find state by key.
 * Returns with ID hash slot locked on success.
 */
static struct pf_state *
pf_find_state(struct pfi_kif *kif, struct pf_state_key_cmp *key, u_int dir)
{
	struct pf_keyhash	*kh;
	struct pf_state_key	*sk;
	struct pf_state		*s;
	int idx;

	V_pf_status.fcounters[FCNT_STATE_SEARCH]++;

	kh = &V_pf_keyhash[pf_hashkey((struct pf_state_key *)key)];

	PF_HASHROW_LOCK(kh);
	LIST_FOREACH(sk, &kh->keys, entry)
		if (bcmp(sk, key, sizeof(struct pf_state_key_cmp)) == 0)
			break;
	if (sk == NULL) {
		PF_HASHROW_UNLOCK(kh);
		return (NULL);
	}

	idx = (dir == PF_IN ? PF_SK_WIRE : PF_SK_STACK);

	/* List is sorted, if-bound states before floating ones. */
	TAILQ_FOREACH(s, &sk->states[idx], key_list[idx])
		if (s->kif == V_pfi_all || s->kif == kif) {
			PF_STATE_LOCK(s);
			PF_HASHROW_UNLOCK(kh);
			if (s->timeout == PFTM_UNLINKED) {
				/*
				 * State is being processed by
				 * pf_unlink_state() in another thread.
				 */
				PF_STATE_UNLOCK(s);
				return (NULL);
			}
			return (s);
		}
	PF_HASHROW_UNLOCK(kh);

	return (NULL);
}

struct pf_state *
pf_find_state_all(struct pf_state_key_cmp *key, u_int dir, int *more)
{
	struct pf_keyhash	*kh;
	struct pf_state_key	*sk;
	struct pf_state		*s, *ret = NULL;
	int			 idx, inout = 0;

	V_pf_status.fcounters[FCNT_STATE_SEARCH]++;

	kh = &V_pf_keyhash[pf_hashkey((struct pf_state_key *)key)];

	PF_HASHROW_LOCK(kh);
	LIST_FOREACH(sk, &kh->keys, entry)
		if (bcmp(sk, key, sizeof(struct pf_state_key_cmp)) == 0)
			break;
	if (sk == NULL) {
		PF_HASHROW_UNLOCK(kh);
		return (NULL);
	}
	switch (dir) {
	case PF_IN:
		idx = PF_SK_WIRE;
		break;
	case PF_OUT:
		idx = PF_SK_STACK;
		break;
	case PF_INOUT:
		idx = PF_SK_WIRE;
		inout = 1;
		break;
	default:
		panic("%s: dir %u", __func__, dir);
	}
second_run:
	TAILQ_FOREACH(s, &sk->states[idx], key_list[idx]) {
		if (more == NULL) {
			PF_HASHROW_UNLOCK(kh);
			return (s);
		}

		if (ret)
			(*more)++;
		else
			ret = s;
	}
	if (inout == 1) {
		inout = 0;
		idx = PF_SK_STACK;
		goto second_run;
	}
	PF_HASHROW_UNLOCK(kh);

	return (ret);
}

/* END state table stuff */

static void
pf_send(struct pf_send_entry *pfse)
{

	PF_SENDQ_LOCK();
	STAILQ_INSERT_TAIL(&V_pf_sendqueue, pfse, pfse_next);
	PF_SENDQ_UNLOCK();
	swi_sched(V_pf_swi_cookie, 0);
}

void
pf_intr(void *v)
{
	struct pf_send_head queue;
	struct pf_send_entry *pfse, *next;

	CURVNET_SET((struct vnet *)v);

	PF_SENDQ_LOCK();
	queue = V_pf_sendqueue;
	STAILQ_INIT(&V_pf_sendqueue);
	PF_SENDQ_UNLOCK();

	STAILQ_FOREACH_SAFE(pfse, &queue, pfse_next, next) {
		switch (pfse->pfse_type) {
#ifdef INET
		case PFSE_IP:
			ip_output(pfse->pfse_m, NULL, NULL, 0, NULL, NULL);
			break;
		case PFSE_ICMP:
			icmp_error(pfse->pfse_m, pfse->pfse_icmp_type,
			    pfse->pfse_icmp_code, 0, pfse->pfse_icmp_mtu);
			break;
#endif /* INET */
#ifdef INET6
		case PFSE_IP6:
			ip6_output(pfse->pfse_m, NULL, NULL, 0, NULL, NULL,
			    NULL);
			break;
		case PFSE_ICMP6:
			icmp6_error(pfse->pfse_m, pfse->pfse_icmp_type,
			    pfse->pfse_icmp_code, pfse->pfse_icmp_mtu);
			break;
#endif /* INET6 */
		default:
			panic("%s: unknown type", __func__);
		}
		free(pfse, M_PFTEMP);
	}
	CURVNET_RESTORE();
}

void
pf_purge_thread(void *v)
{
	u_int idx = 0;

	CURVNET_SET((struct vnet *)v);

	for (;;) {
		PF_RULES_RLOCK();
		rw_sleep(pf_purge_thread, &pf_rules_lock, 0, "pftm", hz / 10);

		if (V_pf_end_threads) {
			/*
			 * To clean up all kifs and rules we need
			 * two runs: the first clears reference flags,
			 * so that pf_purge_expired_states() doesn't
			 * raise them again, and the second run frees.
			 */
			PF_RULES_RUNLOCK();
			pf_purge_unlinked_rules();
			pfi_kif_purge();

			/*
			 * Now purge everything.
			 */
			pf_purge_expired_states(0, V_pf_hashmask);
			pf_purge_expired_fragments();
			pf_purge_expired_src_nodes();

			/*
			 * Now all kifs & rules should be unreferenced,
			 * thus should be successfully freed.
			 */
			pf_purge_unlinked_rules();
			pfi_kif_purge();

			/*
			 * Announce success and exit.
			 */
			PF_RULES_RLOCK();
			V_pf_end_threads++;
			PF_RULES_RUNLOCK();
			wakeup(pf_purge_thread);
			kproc_exit(0);
		}
		PF_RULES_RUNLOCK();

		/* Process 1/interval fraction of the state table every run. */
		idx = pf_purge_expired_states(idx, V_pf_hashmask /
			    (V_pf_default_rule.timeout[PFTM_INTERVAL] * 10));

		/* Purge other expired types every PFTM_INTERVAL seconds. */
		if (idx == 0) {
			/*
			 * Order is important:
			 * - states and src nodes reference rules
			 * - states and rules reference kifs
			 */
			pf_purge_expired_fragments();
			pf_purge_expired_src_nodes();
			pf_purge_unlinked_rules();
			pfi_kif_purge();
		}
	}
	/* not reached */
	CURVNET_RESTORE();
}
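
/*
 * Timing sketch (illustrative): the thread wakes roughly every
 * hz / 10 ticks, i.e. about ten times a second. With the default
 * PFTM_INTERVAL of 10 seconds that is ~100 wakeups per purge
 * interval, and each wakeup walks V_pf_hashmask / 100 ID hash
 * rows, so the whole state table is scanned about once per
 * interval without one long lock-holding sweep.
 */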

u_int32_t
pf_state_expires(const struct pf_state *state)
{
	u_int32_t	timeout;
	u_int32_t	start;
	u_int32_t	end;
	u_int32_t	states;

	/* handle all PFTM_* > PFTM_MAX here */
	if (state->timeout == PFTM_PURGE)
		return (time_uptime);
	if (state->timeout == PFTM_UNTIL_PACKET)
		return (0);
	KASSERT(state->timeout != PFTM_UNLINKED,
	    ("pf_state_expires: timeout == PFTM_UNLINKED"));
	KASSERT((state->timeout < PFTM_MAX),
	    ("pf_state_expires: timeout > PFTM_MAX"));
	timeout = state->rule.ptr->timeout[state->timeout];
	if (!timeout)
		timeout = V_pf_default_rule.timeout[state->timeout];
	start = state->rule.ptr->timeout[PFTM_ADAPTIVE_START];
	if (start) {
		end = state->rule.ptr->timeout[PFTM_ADAPTIVE_END];
		states = state->rule.ptr->states_cur;	/* XXXGL */
	} else {
		start = V_pf_default_rule.timeout[PFTM_ADAPTIVE_START];
		end = V_pf_default_rule.timeout[PFTM_ADAPTIVE_END];
		states = V_pf_status.states;
	}
	if (end && states > start && start < end) {
		if (states < end)
			return (state->expire + timeout * (end - states) /
			    (end - start));
		else
			return (time_uptime);
	}
	return (state->expire + timeout);
}
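
/*
 * Worked example of the adaptive scaling above (numbers are
 * illustrative): with adaptive.start = 6000, adaptive.end = 12000
 * and a base timeout of 24 hours, 9000 tracked states scale the
 * timeout by (12000 - 9000) / (12000 - 6000) = 50%, i.e. 12 hours;
 * at or beyond 12000 states the state expires immediately.
 */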

void
pf_purge_expired_src_nodes()
{
	struct pf_srchash	*sh;
	struct pf_src_node	*cur, *next;
	int i;

	for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask; i++, sh++) {
	    PF_HASHROW_LOCK(sh);
	    LIST_FOREACH_SAFE(cur, &sh->nodes, entry, next)
		if (cur->states <= 0 && cur->expire <= time_uptime) {
			if (cur->rule.ptr != NULL)
				cur->rule.ptr->src_nodes--;
			LIST_REMOVE(cur, entry);
			V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
			V_pf_status.src_nodes--;
			uma_zfree(V_pf_sources_z, cur);
		} else if (cur->rule.ptr != NULL)
			cur->rule.ptr->rule_flag |= PFRULE_REFS;
	    PF_HASHROW_UNLOCK(sh);
	}
}

static void
pf_src_tree_remove_state(struct pf_state *s)
{
	u_int32_t timeout;

	if (s->src_node != NULL) {
		if (s->src.tcp_est)
			--s->src_node->conn;
		if (--s->src_node->states <= 0) {
			timeout = s->rule.ptr->timeout[PFTM_SRC_NODE];
			if (!timeout)
				timeout =
				    V_pf_default_rule.timeout[PFTM_SRC_NODE];
			s->src_node->expire = time_uptime + timeout;
		}
	}
	if (s->nat_src_node != s->src_node && s->nat_src_node != NULL) {
		if (--s->nat_src_node->states <= 0) {
			timeout = s->rule.ptr->timeout[PFTM_SRC_NODE];
			if (!timeout)
				timeout =
				    V_pf_default_rule.timeout[PFTM_SRC_NODE];
			s->nat_src_node->expire = time_uptime + timeout;
		}
	}
	s->src_node = s->nat_src_node = NULL;
}

/*
 * Unlink and potentially free a state. The function may be
 * called with ID hash row locked, but always returns
 * unlocked, since it needs to go through key hash locking.
 */
int
pf_unlink_state(struct pf_state *s, u_int flags)
{
	struct pf_idhash *ih = &V_pf_idhash[PF_IDHASH(s)];

	if ((flags & PF_ENTER_LOCKED) == 0)
		PF_HASHROW_LOCK(ih);
	else
		PF_HASHROW_ASSERT(ih);

	if (s->timeout == PFTM_UNLINKED) {
		/*
		 * State is being processed by
		 * pf_unlink_state() in another thread.
		 */
		PF_HASHROW_UNLOCK(ih);
		return (0);	/* XXXGL: undefined actually */
	}

	if (s->src.state == PF_TCPS_PROXY_DST) {
		/* XXX wire key the right one? */
		pf_send_tcp(NULL, s->rule.ptr, s->key[PF_SK_WIRE]->af,
		    &s->key[PF_SK_WIRE]->addr[1],
		    &s->key[PF_SK_WIRE]->addr[0],
		    s->key[PF_SK_WIRE]->port[1],
		    s->key[PF_SK_WIRE]->port[0],
		    s->src.seqhi, s->src.seqlo + 1,
		    TH_RST|TH_ACK, 0, 0, 0, 1, s->tag, NULL);
	}

	LIST_REMOVE(s, entry);
	pf_src_tree_remove_state(s);

	if (pfsync_delete_state_ptr != NULL)
		pfsync_delete_state_ptr(s);

	--s->rule.ptr->states_cur;
	if (s->nat_rule.ptr != NULL)
		--s->nat_rule.ptr->states_cur;
	if (s->anchor.ptr != NULL)
		--s->anchor.ptr->states_cur;

	s->timeout = PFTM_UNLINKED;

	PF_HASHROW_UNLOCK(ih);

	pf_detach_state(s);
	refcount_release(&s->refs);

	return (pf_release_state(s));
}

void
pf_free_state(struct pf_state *cur)
{

	KASSERT(cur->refs == 0, ("%s: %p has refs", __func__, cur));
	KASSERT(cur->timeout == PFTM_UNLINKED, ("%s: timeout %u", __func__,
	    cur->timeout));

	pf_normalize_tcp_cleanup(cur);
	uma_zfree(V_pf_state_z, cur);
	V_pf_status.fcounters[FCNT_STATE_REMOVALS]++;
}

/*
 * Called only from pf_purge_thread(), thus serialized.
 */
static u_int
pf_purge_expired_states(u_int i, int maxcheck)
{
	struct pf_idhash *ih;
	struct pf_state *s;

	V_pf_status.states = uma_zone_get_cur(V_pf_state_z);

	/*
	 * Go through hash and unlink states that expire now.
	 */
	while (maxcheck > 0) {

		ih = &V_pf_idhash[i];
relock:
		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			if (pf_state_expires(s) <= time_uptime) {
				V_pf_status.states -=
				    pf_unlink_state(s, PF_ENTER_LOCKED);
				goto relock;
			}
			s->rule.ptr->rule_flag |= PFRULE_REFS;
			if (s->nat_rule.ptr != NULL)
				s->nat_rule.ptr->rule_flag |= PFRULE_REFS;
			if (s->anchor.ptr != NULL)
				s->anchor.ptr->rule_flag |= PFRULE_REFS;
			s->kif->pfik_flags |= PFI_IFLAG_REFS;
			if (s->rt_kif)
				s->rt_kif->pfik_flags |= PFI_IFLAG_REFS;
		}
		PF_HASHROW_UNLOCK(ih);

		/* Return when we hit end of hash. */
		if (++i > V_pf_hashmask) {
			V_pf_status.states = uma_zone_get_cur(V_pf_state_z);
			return (0);
		}

		maxcheck--;
	}

	V_pf_status.states = uma_zone_get_cur(V_pf_state_z);

	return (i);
}

static void
pf_purge_unlinked_rules()
{
	struct pf_rulequeue tmpq;
	struct pf_rule *r, *r1;

	/*
	 * If we have an overload task pending, we'd better
	 * skip purging this time: there is a tiny probability
	 * that the task still references an already unlinked
	 * rule.
	 */
	PF_OVERLOADQ_LOCK();
	if (!SLIST_EMPTY(&V_pf_overloadqueue)) {
		PF_OVERLOADQ_UNLOCK();
		return;
	}
	PF_OVERLOADQ_UNLOCK();

	/*
	 * Do naive mark-and-sweep garbage collecting of old rules.
	 * Reference flag is raised by pf_purge_expired_states()
	 * and pf_purge_expired_src_nodes().
	 *
	 * To avoid LOR between PF_UNLNKDRULES_LOCK/PF_RULES_WLOCK,
	 * use a temporary queue.
	 */
	TAILQ_INIT(&tmpq);
	PF_UNLNKDRULES_LOCK();
	TAILQ_FOREACH_SAFE(r, &V_pf_unlinked_rules, entries, r1) {
		if (!(r->rule_flag & PFRULE_REFS)) {
			TAILQ_REMOVE(&V_pf_unlinked_rules, r, entries);
			TAILQ_INSERT_TAIL(&tmpq, r, entries);
		} else
			r->rule_flag &= ~PFRULE_REFS;
	}
	PF_UNLNKDRULES_UNLOCK();

	if (!TAILQ_EMPTY(&tmpq)) {
		PF_RULES_WLOCK();
		TAILQ_FOREACH_SAFE(r, &tmpq, entries, r1) {
			TAILQ_REMOVE(&tmpq, r, entries);
			pf_free_rule(r);
		}
		PF_RULES_WUNLOCK();
	}
}

void
pf_print_host(struct pf_addr *addr, u_int16_t p, sa_family_t af)
{
	switch (af) {
#ifdef INET
	case AF_INET: {
		u_int32_t a = ntohl(addr->addr32[0]);
		printf("%u.%u.%u.%u", (a>>24)&255, (a>>16)&255,
		    (a>>8)&255, a&255);
		if (p) {
			p = ntohs(p);
			printf(":%u", p);
		}
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		u_int16_t b;
		u_int8_t i, curstart, curend, maxstart, maxend;
		curstart = curend = maxstart = maxend = 255;
		for (i = 0; i < 8; i++) {
			if (!addr->addr16[i]) {
				if (curstart == 255)
					curstart = i;
				curend = i;
			} else {
				if ((curend - curstart) >
				    (maxend - maxstart)) {
					maxstart = curstart;
					maxend = curend;
				}
				curstart = curend = 255;
			}
		}
		if ((curend - curstart) >
		    (maxend - maxstart)) {
			maxstart = curstart;
			maxend = curend;
		}
		for (i = 0; i < 8; i++) {
			if (i >= maxstart && i <= maxend) {
				if (i == 0)
					printf(":");
				if (i == maxend)
					printf(":");
			} else {
				b = ntohs(addr->addr16[i]);
				printf("%x", b);
				if (i < 7)
					printf(":");
			}
		}
		if (p) {
			p = ntohs(p);
			printf("[%u]", p);
		}
		break;
	}
#endif /* INET6 */
	}
}

void
pf_print_state(struct pf_state *s)
{
	pf_print_state_parts(s, NULL, NULL);
}

static void
pf_print_state_parts(struct pf_state *s,
    struct pf_state_key *skwp, struct pf_state_key *sksp)
{
	struct pf_state_key *skw, *sks;
	u_int8_t proto, dir;

	/* Do our best to fill these, but they're skipped if NULL */
	skw = skwp ? skwp : (s ? s->key[PF_SK_WIRE] : NULL);
	sks = sksp ? sksp : (s ? s->key[PF_SK_STACK] : NULL);
	proto = skw ? skw->proto : (sks ? sks->proto : 0);
	dir = s ? s->direction : 0;

	switch (proto) {
	case IPPROTO_IPV4:
		printf("IPv4");
		break;
	case IPPROTO_IPV6:
		printf("IPv6");
		break;
	case IPPROTO_TCP:
		printf("TCP");
		break;
	case IPPROTO_UDP:
		printf("UDP");
		break;
	case IPPROTO_ICMP:
		printf("ICMP");
		break;
	case IPPROTO_ICMPV6:
		printf("ICMPv6");
		break;
	default:
		printf("%u", skw->proto);
		break;
	}
	switch (dir) {
	case PF_IN:
		printf(" in");
		break;
	case PF_OUT:
		printf(" out");
		break;
	}
	if (skw) {
		printf(" wire: ");
		pf_print_host(&skw->addr[0], skw->port[0], skw->af);
		printf(" ");
		pf_print_host(&skw->addr[1], skw->port[1], skw->af);
	}
	if (sks) {
		printf(" stack: ");
		if (sks != skw) {
			pf_print_host(&sks->addr[0], sks->port[0], sks->af);
			printf(" ");
			pf_print_host(&sks->addr[1], sks->port[1], sks->af);
		} else
			printf("-");
	}
	if (s) {
		if (proto == IPPROTO_TCP) {
			printf(" [lo=%u high=%u win=%u modulator=%u",
			    s->src.seqlo, s->src.seqhi,
			    s->src.max_win, s->src.seqdiff);
			if (s->src.wscale && s->dst.wscale)
				printf(" wscale=%u",
				    s->src.wscale & PF_WSCALE_MASK);
			printf("]");
			printf(" [lo=%u high=%u win=%u modulator=%u",
			    s->dst.seqlo, s->dst.seqhi,
			    s->dst.max_win, s->dst.seqdiff);
			if (s->src.wscale && s->dst.wscale)
				printf(" wscale=%u",
				s->dst.wscale & PF_WSCALE_MASK);
			printf("]");
		}
		printf(" %u:%u", s->src.state, s->dst.state);
	}
}

void
pf_print_flags(u_int8_t f)
{
	if (f)
		printf(" ");
	if (f & TH_FIN)
		printf("F");
	if (f & TH_SYN)
		printf("S");
	if (f & TH_RST)
		printf("R");
	if (f & TH_PUSH)
		printf("P");
	if (f & TH_ACK)
		printf("A");
	if (f & TH_URG)
		printf("U");
	if (f & TH_ECE)
		printf("E");
	if (f & TH_CWR)
		printf("W");
}

#define	PF_SET_SKIP_STEPS(i)					\
	do {							\
		while (head[i] != cur) {			\
			head[i]->skip[i].ptr = cur;		\
			head[i] = TAILQ_NEXT(head[i], entries);	\
		}						\
	} while (0)

void
pf_calc_skip_steps(struct pf_rulequeue *rules)
{
	struct pf_rule *cur, *prev, *head[PF_SKIP_COUNT];
	int i;

	cur = TAILQ_FIRST(rules);
	prev = cur;
	for (i = 0; i < PF_SKIP_COUNT; ++i)
		head[i] = cur;
	while (cur != NULL) {

		if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
			PF_SET_SKIP_STEPS(PF_SKIP_IFP);
		if (cur->direction != prev->direction)
			PF_SET_SKIP_STEPS(PF_SKIP_DIR);
		if (cur->af != prev->af)
			PF_SET_SKIP_STEPS(PF_SKIP_AF);
		if (cur->proto != prev->proto)
			PF_SET_SKIP_STEPS(PF_SKIP_PROTO);
		if (cur->src.neg != prev->src.neg ||
		    pf_addr_wrap_neq(&cur->src.addr, &prev->src.addr))
			PF_SET_SKIP_STEPS(PF_SKIP_SRC_ADDR);
		if (cur->src.port[0] != prev->src.port[0] ||
		    cur->src.port[1] != prev->src.port[1] ||
		    cur->src.port_op != prev->src.port_op)
			PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT);
		if (cur->dst.neg != prev->dst.neg ||
		    pf_addr_wrap_neq(&cur->dst.addr, &prev->dst.addr))
			PF_SET_SKIP_STEPS(PF_SKIP_DST_ADDR);
		if (cur->dst.port[0] != prev->dst.port[0] ||
		    cur->dst.port[1] != prev->dst.port[1] ||
		    cur->dst.port_op != prev->dst.port_op)
			PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT);

		prev = cur;
		cur = TAILQ_NEXT(cur, entries);
	}
	for (i = 0; i < PF_SKIP_COUNT; ++i)
		PF_SET_SKIP_STEPS(i);
}
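
/*
 * Illustration (hypothetical ruleset, not from the source): given
 *
 *	1: pass on em0 proto tcp from 10.0.0.0/8
 *	2: pass on em0 proto tcp from 172.16.0.0/12
 *	3: pass on em1 proto udp from 10.0.0.0/8
 *
 * rules 1 and 2 share interface and protocol, so rule 1's
 * skip[PF_SKIP_IFP] and skip[PF_SKIP_PROTO] point at rule 3: when
 * a packet fails the interface test at rule 1, evaluation jumps
 * straight past rule 2 instead of re-testing a criterion that is
 * known not to change before rule 3.
 */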
1855 
1856 static int
1857 pf_addr_wrap_neq(struct pf_addr_wrap *aw1, struct pf_addr_wrap *aw2)
1858 {
1859 	if (aw1->type != aw2->type)
1860 		return (1);
1861 	switch (aw1->type) {
1862 	case PF_ADDR_ADDRMASK:
1863 	case PF_ADDR_RANGE:
1864 		if (PF_ANEQ(&aw1->v.a.addr, &aw2->v.a.addr, 0))
1865 			return (1);
1866 		if (PF_ANEQ(&aw1->v.a.mask, &aw2->v.a.mask, 0))
1867 			return (1);
1868 		return (0);
1869 	case PF_ADDR_DYNIFTL:
1870 		return (aw1->p.dyn->pfid_kt != aw2->p.dyn->pfid_kt);
1871 	case PF_ADDR_NOROUTE:
1872 	case PF_ADDR_URPFFAILED:
1873 		return (0);
1874 	case PF_ADDR_TABLE:
1875 		return (aw1->p.tbl != aw2->p.tbl);
1876 	default:
1877 		printf("invalid address type: %d\n", aw1->type);
1878 		return (1);
1879 	}
1880 }
1881 
1882 u_int16_t
1883 pf_cksum_fixup(u_int16_t cksum, u_int16_t old, u_int16_t new, u_int8_t udp)
1884 {
1885 	u_int32_t	l;
1886 
1887 	if (udp && !cksum)
1888 		return (0x0000);
1889 	l = cksum + old - new;
1890 	l = (l >> 16) + (l & 65535);
1891 	l = l & 65535;
1892 	if (udp && !l)
1893 		return (0xFFFF);
1894 	return (l);
1895 }
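
/*
 * Worked example (illustrative, not compiled): rewriting one 16-bit
 * field updates the stored one's complement checksum incrementally, in
 * the spirit of RFC 1624, instead of recomputing it over the whole
 * packet.
 */
#if 0
	u_int16_t sum = 0x1234;		/* checksum as stored in the header */

	/* Rewrite a port from 80 (0x0050) to 8080 (0x1f90). */
	sum = pf_cksum_fixup(sum, 0x0050, 0x1f90, 0);
	/* 0x1234 + 0x0050 - 0x1f90 = 0xfffff2f4, which folds to 0xf2f3. */
#endif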
1896 
1897 static void
1898 pf_change_ap(struct pf_addr *a, u_int16_t *p, u_int16_t *ic, u_int16_t *pc,
1899     struct pf_addr *an, u_int16_t pn, u_int8_t u, sa_family_t af)
1900 {
1901 	struct pf_addr	ao;
1902 	u_int16_t	po = *p;
1903 
1904 	PF_ACPY(&ao, a, af);
1905 	PF_ACPY(a, an, af);
1906 
1907 	*p = pn;
1908 
1909 	switch (af) {
1910 #ifdef INET
1911 	case AF_INET:
1912 		*ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
1913 		    ao.addr16[0], an->addr16[0], 0),
1914 		    ao.addr16[1], an->addr16[1], 0);
1916 		*pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc,
1917 		    ao.addr16[0], an->addr16[0], u),
1918 		    ao.addr16[1], an->addr16[1], u),
1919 		    po, pn, u);
1920 		break;
1921 #endif /* INET */
1922 #ifdef INET6
1923 	case AF_INET6:
1924 		*pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
1925 		    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
1926 		    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc,
1927 		    ao.addr16[0], an->addr16[0], u),
1928 		    ao.addr16[1], an->addr16[1], u),
1929 		    ao.addr16[2], an->addr16[2], u),
1930 		    ao.addr16[3], an->addr16[3], u),
1931 		    ao.addr16[4], an->addr16[4], u),
1932 		    ao.addr16[5], an->addr16[5], u),
1933 		    ao.addr16[6], an->addr16[6], u),
1934 		    ao.addr16[7], an->addr16[7], u),
1935 		    po, pn, u);
1936 		break;
1937 #endif /* INET6 */
1938 	}
1939 }
1940 
1941 
/* Changes a u_int32_t.  Uses a void * so there are no alignment restrictions */
1943 void
1944 pf_change_a(void *a, u_int16_t *c, u_int32_t an, u_int8_t u)
1945 {
1946 	u_int32_t	ao;
1947 
1948 	memcpy(&ao, a, sizeof(ao));
1949 	memcpy(a, &an, sizeof(u_int32_t));
1950 	*c = pf_cksum_fixup(pf_cksum_fixup(*c, ao / 65536, an / 65536, u),
1951 	    ao % 65536, an % 65536, u);
1952 }
1953 
1954 #ifdef INET6
1955 static void
1956 pf_change_a6(struct pf_addr *a, u_int16_t *c, struct pf_addr *an, u_int8_t u)
1957 {
1958 	struct pf_addr	ao;
1959 
1960 	PF_ACPY(&ao, a, AF_INET6);
1961 	PF_ACPY(a, an, AF_INET6);
1962 
1963 	*c = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
1964 	    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
1965 	    pf_cksum_fixup(pf_cksum_fixup(*c,
1966 	    ao.addr16[0], an->addr16[0], u),
1967 	    ao.addr16[1], an->addr16[1], u),
1968 	    ao.addr16[2], an->addr16[2], u),
1969 	    ao.addr16[3], an->addr16[3], u),
1970 	    ao.addr16[4], an->addr16[4], u),
1971 	    ao.addr16[5], an->addr16[5], u),
1972 	    ao.addr16[6], an->addr16[6], u),
1973 	    ao.addr16[7], an->addr16[7], u);
1974 }
1975 #endif /* INET6 */
1976 
1977 static void
1978 pf_change_icmp(struct pf_addr *ia, u_int16_t *ip, struct pf_addr *oa,
1979     struct pf_addr *na, u_int16_t np, u_int16_t *pc, u_int16_t *h2c,
1980     u_int16_t *ic, u_int16_t *hc, u_int8_t u, sa_family_t af)
1981 {
1982 	struct pf_addr	oia, ooa;
1983 
1984 	PF_ACPY(&oia, ia, af);
1985 	if (oa)
1986 		PF_ACPY(&ooa, oa, af);
1987 
1988 	/* Change inner protocol port, fix inner protocol checksum. */
1989 	if (ip != NULL) {
1990 		u_int16_t	oip = *ip;
1991 		u_int32_t	opc;
1992 
1993 		if (pc != NULL)
1994 			opc = *pc;
1995 		*ip = np;
1996 		if (pc != NULL)
1997 			*pc = pf_cksum_fixup(*pc, oip, *ip, u);
1998 		*ic = pf_cksum_fixup(*ic, oip, *ip, 0);
1999 		if (pc != NULL)
2000 			*ic = pf_cksum_fixup(*ic, opc, *pc, 0);
2001 	}
2002 	/* Change inner ip address, fix inner ip and icmp checksums. */
2003 	PF_ACPY(ia, na, af);
2004 	switch (af) {
2005 #ifdef INET
2006 	case AF_INET: {
2007 		u_int32_t	 oh2c = *h2c;
2008 
2009 		*h2c = pf_cksum_fixup(pf_cksum_fixup(*h2c,
2010 		    oia.addr16[0], ia->addr16[0], 0),
2011 		    oia.addr16[1], ia->addr16[1], 0);
2012 		*ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
2013 		    oia.addr16[0], ia->addr16[0], 0),
2014 		    oia.addr16[1], ia->addr16[1], 0);
2015 		*ic = pf_cksum_fixup(*ic, oh2c, *h2c, 0);
2016 		break;
2017 	}
2018 #endif /* INET */
2019 #ifdef INET6
2020 	case AF_INET6:
2021 		*ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2022 		    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2023 		    pf_cksum_fixup(pf_cksum_fixup(*ic,
2024 		    oia.addr16[0], ia->addr16[0], u),
2025 		    oia.addr16[1], ia->addr16[1], u),
2026 		    oia.addr16[2], ia->addr16[2], u),
2027 		    oia.addr16[3], ia->addr16[3], u),
2028 		    oia.addr16[4], ia->addr16[4], u),
2029 		    oia.addr16[5], ia->addr16[5], u),
2030 		    oia.addr16[6], ia->addr16[6], u),
2031 		    oia.addr16[7], ia->addr16[7], u);
2032 		break;
2033 #endif /* INET6 */
2034 	}
2035 	/* Outer ip address, fix outer ip or icmpv6 checksum, if necessary. */
2036 	if (oa) {
2037 		PF_ACPY(oa, na, af);
2038 		switch (af) {
2039 #ifdef INET
2040 		case AF_INET:
2041 			*hc = pf_cksum_fixup(pf_cksum_fixup(*hc,
2042 			    ooa.addr16[0], oa->addr16[0], 0),
2043 			    ooa.addr16[1], oa->addr16[1], 0);
2044 			break;
2045 #endif /* INET */
2046 #ifdef INET6
2047 		case AF_INET6:
2048 			*ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2049 			    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2050 			    pf_cksum_fixup(pf_cksum_fixup(*ic,
2051 			    ooa.addr16[0], oa->addr16[0], u),
2052 			    ooa.addr16[1], oa->addr16[1], u),
2053 			    ooa.addr16[2], oa->addr16[2], u),
2054 			    ooa.addr16[3], oa->addr16[3], u),
2055 			    ooa.addr16[4], oa->addr16[4], u),
2056 			    ooa.addr16[5], oa->addr16[5], u),
2057 			    ooa.addr16[6], oa->addr16[6], u),
2058 			    ooa.addr16[7], oa->addr16[7], u);
2059 			break;
2060 #endif /* INET6 */
2061 		}
2062 	}
2063 }
2064 
2066 /*
2067  * Need to modulate the sequence numbers in the TCP SACK option
2068  * (credits to Krzysztof Pfaff for report and patch)
2069  */
2070 static int
2071 pf_modulate_sack(struct mbuf *m, int off, struct pf_pdesc *pd,
2072     struct tcphdr *th, struct pf_state_peer *dst)
2073 {
2074 	int hlen = (th->th_off << 2) - sizeof(*th), thoptlen = hlen;
2075 	u_int8_t opts[TCP_MAXOLEN], *opt = opts;
2076 	int copyback = 0, i, olen;
2077 	struct sackblk sack;
2078 
2079 #define	TCPOLEN_SACKLEN	(TCPOLEN_SACK + 2)
2080 	if (hlen < TCPOLEN_SACKLEN ||
2081 	    !pf_pull_hdr(m, off + sizeof(*th), opts, hlen, NULL, NULL, pd->af))
		return (0);
2083 
2084 	while (hlen >= TCPOLEN_SACKLEN) {
2085 		olen = opt[1];
2086 		switch (*opt) {
2087 		case TCPOPT_EOL:	/* FALLTHROUGH */
2088 		case TCPOPT_NOP:
2089 			opt++;
2090 			hlen--;
2091 			break;
2092 		case TCPOPT_SACK:
2093 			if (olen > hlen)
2094 				olen = hlen;
2095 			if (olen >= TCPOLEN_SACKLEN) {
2096 				for (i = 2; i + TCPOLEN_SACK <= olen;
2097 				    i += TCPOLEN_SACK) {
2098 					memcpy(&sack, &opt[i], sizeof(sack));
2099 					pf_change_a(&sack.start, &th->th_sum,
2100 					    htonl(ntohl(sack.start) -
2101 					    dst->seqdiff), 0);
2102 					pf_change_a(&sack.end, &th->th_sum,
2103 					    htonl(ntohl(sack.end) -
2104 					    dst->seqdiff), 0);
2105 					memcpy(&opt[i], &sack, sizeof(sack));
2106 				}
2107 				copyback = 1;
2108 			}
2109 			/* FALLTHROUGH */
2110 		default:
2111 			if (olen < 2)
2112 				olen = 2;
2113 			hlen -= olen;
2114 			opt += olen;
2115 		}
2116 	}
2117 
2118 	if (copyback)
2119 		m_copyback(m, off + sizeof(*th), thoptlen, (caddr_t)opts);
2120 	return (copyback);
2121 }
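
/*
 * Illustrative sketch (not compiled): each SACK edge is shifted by the
 * same seqdiff that pf applies to th_seq, so both ends keep seeing a
 * consistent sequence space.  With dst->seqdiff == 1000, a block
 * covering [5000, 6000) leaves on the wire covering [4000, 5000); the
 * pf_change_a() calls above also fold each rewrite into th_sum.
 */
#if 0
	sack.start = htonl(ntohl(sack.start) - dst->seqdiff);
	sack.end = htonl(ntohl(sack.end) - dst->seqdiff);
#endif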
2122 
2123 static void
2124 pf_send_tcp(struct mbuf *replyto, const struct pf_rule *r, sa_family_t af,
2125     const struct pf_addr *saddr, const struct pf_addr *daddr,
2126     u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack,
2127     u_int8_t flags, u_int16_t win, u_int16_t mss, u_int8_t ttl, int tag,
2128     u_int16_t rtag, struct ifnet *ifp)
2129 {
2130 	struct pf_send_entry *pfse;
2131 	struct mbuf	*m;
2132 	int		 len, tlen;
2133 #ifdef INET
2134 	struct ip	*h = NULL;
2135 #endif /* INET */
2136 #ifdef INET6
2137 	struct ip6_hdr	*h6 = NULL;
2138 #endif /* INET6 */
2139 	struct tcphdr	*th;
2140 	char		*opt;
2141 	struct pf_mtag  *pf_mtag;
2142 
2143 	len = 0;
2144 	th = NULL;
2145 
	/* maximum segment size TCP option */
2147 	tlen = sizeof(struct tcphdr);
2148 	if (mss)
2149 		tlen += 4;
2150 
2151 	switch (af) {
2152 #ifdef INET
2153 	case AF_INET:
2154 		len = sizeof(struct ip) + tlen;
2155 		break;
2156 #endif /* INET */
2157 #ifdef INET6
2158 	case AF_INET6:
2159 		len = sizeof(struct ip6_hdr) + tlen;
2160 		break;
2161 #endif /* INET6 */
2162 	default:
2163 		panic("%s: unsupported af %d", __func__, af);
2164 	}
2165 
2166 	/* Allocate outgoing queue entry, mbuf and mbuf tag. */
2167 	pfse = malloc(sizeof(*pfse), M_PFTEMP, M_NOWAIT);
2168 	if (pfse == NULL)
2169 		return;
2170 	m = m_gethdr(M_NOWAIT, MT_DATA);
2171 	if (m == NULL) {
2172 		free(pfse, M_PFTEMP);
2173 		return;
2174 	}
2175 #ifdef MAC
2176 	mac_netinet_firewall_send(m);
2177 #endif
2178 	if ((pf_mtag = pf_get_mtag(m)) == NULL) {
2179 		free(pfse, M_PFTEMP);
2180 		m_freem(m);
2181 		return;
2182 	}
2183 	if (tag)
2184 		m->m_flags |= M_SKIP_FIREWALL;
2185 	pf_mtag->tag = rtag;
2186 
2187 	if (r != NULL && r->rtableid >= 0)
2188 		M_SETFIB(m, r->rtableid);
2189 
2190 #ifdef ALTQ
2191 	if (r != NULL && r->qid) {
2192 		pf_mtag->qid = r->qid;
2193 
2194 		/* add hints for ecn */
2195 		pf_mtag->hdr = mtod(m, struct ip *);
2196 	}
2197 #endif /* ALTQ */
2198 	m->m_data += max_linkhdr;
2199 	m->m_pkthdr.len = m->m_len = len;
2200 	m->m_pkthdr.rcvif = NULL;
2201 	bzero(m->m_data, len);
2202 	switch (af) {
2203 #ifdef INET
2204 	case AF_INET:
2205 		h = mtod(m, struct ip *);
2206 
2207 		/* IP header fields included in the TCP checksum */
2208 		h->ip_p = IPPROTO_TCP;
2209 		h->ip_len = htons(tlen);
2210 		h->ip_src.s_addr = saddr->v4.s_addr;
2211 		h->ip_dst.s_addr = daddr->v4.s_addr;
2212 
2213 		th = (struct tcphdr *)((caddr_t)h + sizeof(struct ip));
2214 		break;
2215 #endif /* INET */
2216 #ifdef INET6
2217 	case AF_INET6:
2218 		h6 = mtod(m, struct ip6_hdr *);
2219 
2220 		/* IP header fields included in the TCP checksum */
2221 		h6->ip6_nxt = IPPROTO_TCP;
2222 		h6->ip6_plen = htons(tlen);
2223 		memcpy(&h6->ip6_src, &saddr->v6, sizeof(struct in6_addr));
2224 		memcpy(&h6->ip6_dst, &daddr->v6, sizeof(struct in6_addr));
2225 
2226 		th = (struct tcphdr *)((caddr_t)h6 + sizeof(struct ip6_hdr));
2227 		break;
2228 #endif /* INET6 */
2229 	}
2230 
2231 	/* TCP header */
2232 	th->th_sport = sport;
2233 	th->th_dport = dport;
2234 	th->th_seq = htonl(seq);
2235 	th->th_ack = htonl(ack);
2236 	th->th_off = tlen >> 2;
2237 	th->th_flags = flags;
2238 	th->th_win = htons(win);
2239 
2240 	if (mss) {
2241 		opt = (char *)(th + 1);
2242 		opt[0] = TCPOPT_MAXSEG;
2243 		opt[1] = 4;
2244 		HTONS(mss);
2245 		bcopy((caddr_t)&mss, (caddr_t)(opt + 2), 2);
2246 	}
2247 
2248 	switch (af) {
2249 #ifdef INET
2250 	case AF_INET:
2251 		/* TCP checksum */
2252 		th->th_sum = in_cksum(m, len);
2253 
2254 		/* Finish the IP header */
2255 		h->ip_v = 4;
2256 		h->ip_hl = sizeof(*h) >> 2;
2257 		h->ip_tos = IPTOS_LOWDELAY;
2258 		h->ip_off = htons(V_path_mtu_discovery ? IP_DF : 0);
2259 		h->ip_len = htons(len);
2260 		h->ip_ttl = ttl ? ttl : V_ip_defttl;
2261 		h->ip_sum = 0;
2262 
2263 		pfse->pfse_type = PFSE_IP;
2264 		break;
2265 #endif /* INET */
2266 #ifdef INET6
2267 	case AF_INET6:
2268 		/* TCP checksum */
2269 		th->th_sum = in6_cksum(m, IPPROTO_TCP,
2270 		    sizeof(struct ip6_hdr), tlen);
2271 
2272 		h6->ip6_vfc |= IPV6_VERSION;
2273 		h6->ip6_hlim = IPV6_DEFHLIM;
2274 
2275 		pfse->pfse_type = PFSE_IP6;
2276 		break;
2277 #endif /* INET6 */
2278 	}
2279 	pfse->pfse_m = m;
2280 	pf_send(pfse);
2281 }
2282 
2283 static void
2284 pf_send_icmp(struct mbuf *m, u_int8_t type, u_int8_t code, sa_family_t af,
2285     struct pf_rule *r)
2286 {
2287 	struct pf_send_entry *pfse;
2288 	struct mbuf *m0;
2289 	struct pf_mtag *pf_mtag;
2290 
2291 	/* Allocate outgoing queue entry, mbuf and mbuf tag. */
2292 	pfse = malloc(sizeof(*pfse), M_PFTEMP, M_NOWAIT);
2293 	if (pfse == NULL)
2294 		return;
2295 
2296 	if ((m0 = m_copypacket(m, M_NOWAIT)) == NULL) {
2297 		free(pfse, M_PFTEMP);
2298 		return;
2299 	}
2300 
	if ((pf_mtag = pf_get_mtag(m0)) == NULL) {
		free(pfse, M_PFTEMP);
		m_freem(m0);
		return;
	}
2305 	/* XXX: revisit */
2306 	m0->m_flags |= M_SKIP_FIREWALL;
2307 
2308 	if (r->rtableid >= 0)
2309 		M_SETFIB(m0, r->rtableid);
2310 
2311 #ifdef ALTQ
2312 	if (r->qid) {
2313 		pf_mtag->qid = r->qid;
2314 		/* add hints for ecn */
2315 		pf_mtag->hdr = mtod(m0, struct ip *);
2316 	}
2317 #endif /* ALTQ */
2318 
2319 	switch (af) {
2320 #ifdef INET
2321 	case AF_INET:
2322 		pfse->pfse_type = PFSE_ICMP;
2323 		break;
2324 #endif /* INET */
2325 #ifdef INET6
2326 	case AF_INET6:
2327 		pfse->pfse_type = PFSE_ICMP6;
2328 		break;
2329 #endif /* INET6 */
2330 	}
2331 	pfse->pfse_m = m0;
2332 	pfse->pfse_icmp_type = type;
2333 	pfse->pfse_icmp_code = code;
2334 	pf_send(pfse);
2335 }
2336 
2337 /*
2338  * Return 1 if the addresses a and b match (with mask m), otherwise return 0.
 * If n is 0, they match if they are equal. If n is nonzero, they match if
 * they are different.
2341  */
2342 int
2343 pf_match_addr(u_int8_t n, struct pf_addr *a, struct pf_addr *m,
2344     struct pf_addr *b, sa_family_t af)
2345 {
2346 	int	match = 0;
2347 
2348 	switch (af) {
2349 #ifdef INET
2350 	case AF_INET:
2351 		if ((a->addr32[0] & m->addr32[0]) ==
2352 		    (b->addr32[0] & m->addr32[0]))
2353 			match++;
2354 		break;
2355 #endif /* INET */
2356 #ifdef INET6
2357 	case AF_INET6:
2358 		if (((a->addr32[0] & m->addr32[0]) ==
2359 		     (b->addr32[0] & m->addr32[0])) &&
2360 		    ((a->addr32[1] & m->addr32[1]) ==
2361 		     (b->addr32[1] & m->addr32[1])) &&
2362 		    ((a->addr32[2] & m->addr32[2]) ==
2363 		     (b->addr32[2] & m->addr32[2])) &&
2364 		    ((a->addr32[3] & m->addr32[3]) ==
2365 		     (b->addr32[3] & m->addr32[3])))
2366 			match++;
2367 		break;
2368 #endif /* INET6 */
2369 	}
2370 	if (match) {
2371 		if (n)
2372 			return (0);
2373 		else
2374 			return (1);
2375 	} else {
2376 		if (n)
2377 			return (1);
2378 		else
2379 			return (0);
2380 	}
2381 }
2382 
2383 /*
2384  * Return 1 if b <= a <= e, otherwise return 0.
2385  */
2386 int
2387 pf_match_addr_range(struct pf_addr *b, struct pf_addr *e,
2388     struct pf_addr *a, sa_family_t af)
2389 {
2390 	switch (af) {
2391 #ifdef INET
2392 	case AF_INET:
2393 		if ((a->addr32[0] < b->addr32[0]) ||
2394 		    (a->addr32[0] > e->addr32[0]))
2395 			return (0);
2396 		break;
2397 #endif /* INET */
2398 #ifdef INET6
2399 	case AF_INET6: {
2400 		int	i;
2401 
2402 		/* check a >= b */
2403 		for (i = 0; i < 4; ++i)
2404 			if (a->addr32[i] > b->addr32[i])
2405 				break;
2406 			else if (a->addr32[i] < b->addr32[i])
2407 				return (0);
2408 		/* check a <= e */
2409 		for (i = 0; i < 4; ++i)
2410 			if (a->addr32[i] < e->addr32[i])
2411 				break;
2412 			else if (a->addr32[i] > e->addr32[i])
2413 				return (0);
2414 		break;
2415 	}
2416 #endif /* INET6 */
2417 	}
2418 	return (1);
2419 }
2420 
2421 static int
2422 pf_match(u_int8_t op, u_int32_t a1, u_int32_t a2, u_int32_t p)
2423 {
2424 	switch (op) {
2425 	case PF_OP_IRG:
2426 		return ((p > a1) && (p < a2));
2427 	case PF_OP_XRG:
2428 		return ((p < a1) || (p > a2));
2429 	case PF_OP_RRG:
2430 		return ((p >= a1) && (p <= a2));
2431 	case PF_OP_EQ:
2432 		return (p == a1);
2433 	case PF_OP_NE:
2434 		return (p != a1);
2435 	case PF_OP_LT:
2436 		return (p < a1);
2437 	case PF_OP_LE:
2438 		return (p <= a1);
2439 	case PF_OP_GT:
2440 		return (p > a1);
2441 	case PF_OP_GE:
2442 		return (p >= a1);
2443 	}
2444 	return (0); /* never reached */
2445 }
2446 
2447 int
2448 pf_match_port(u_int8_t op, u_int16_t a1, u_int16_t a2, u_int16_t p)
2449 {
2450 	NTOHS(a1);
2451 	NTOHS(a2);
2452 	NTOHS(p);
2453 	return (pf_match(op, a1, a2, p));
2454 }
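
/*
 * Usage sketch (not compiled): a rule like "port 1024:65535" is stored
 * as op PF_OP_RRG with both bounds in network byte order, so testing a
 * packet's source port looks like this:
 */
#if 0
	if (pf_match_port(PF_OP_RRG, htons(1024), htons(65535),
	    th->th_sport))
		; /* sport lies within the inclusive range */
#endif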
2455 
2456 static int
2457 pf_match_uid(u_int8_t op, uid_t a1, uid_t a2, uid_t u)
2458 {
2459 	if (u == UID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
2460 		return (0);
2461 	return (pf_match(op, a1, a2, u));
2462 }
2463 
2464 static int
2465 pf_match_gid(u_int8_t op, gid_t a1, gid_t a2, gid_t g)
2466 {
2467 	if (g == GID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
2468 		return (0);
2469 	return (pf_match(op, a1, a2, g));
2470 }
2471 
2472 int
2473 pf_match_tag(struct mbuf *m, struct pf_rule *r, int *tag, int mtag)
2474 {
2475 	if (*tag == -1)
2476 		*tag = mtag;
2477 
2478 	return ((!r->match_tag_not && r->match_tag == *tag) ||
2479 	    (r->match_tag_not && r->match_tag != *tag));
2480 }
2481 
2482 int
2483 pf_tag_packet(struct mbuf *m, struct pf_pdesc *pd, int tag)
2484 {
2485 
2486 	KASSERT(tag > 0, ("%s: tag %d", __func__, tag));
2487 
2488 	if (pd->pf_mtag == NULL && ((pd->pf_mtag = pf_get_mtag(m)) == NULL))
2489 		return (ENOMEM);
2490 
2491 	pd->pf_mtag->tag = tag;
2492 
2493 	return (0);
2494 }
2495 
2496 #define	PF_ANCHOR_STACKSIZE	32
2497 struct pf_anchor_stackframe {
2498 	struct pf_ruleset	*rs;
2499 	struct pf_rule		*r;	/* XXX: + match bit */
2500 	struct pf_anchor	*child;
2501 };
2502 
2503 /*
 * XXX: We rely on malloc(9) returning pointer-aligned addresses.
2505  */
2506 #define	PF_ANCHORSTACK_MATCH	0x00000001
2507 #define	PF_ANCHORSTACK_MASK	(PF_ANCHORSTACK_MATCH)
2508 
2509 #define	PF_ANCHOR_MATCH(f)	((uintptr_t)(f)->r & PF_ANCHORSTACK_MATCH)
2510 #define	PF_ANCHOR_RULE(f)	(struct pf_rule *)			\
2511 				((uintptr_t)(f)->r & ~PF_ANCHORSTACK_MASK)
2512 #define	PF_ANCHOR_SET_MATCH(f)	do { (f)->r = (void *) 			\
2513 				((uintptr_t)(f)->r | PF_ANCHORSTACK_MATCH);  \
2514 } while (0)
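
/*
 * Illustrative sketch (not compiled): a struct pf_rule pointer from
 * malloc(9) is always at least 2-byte aligned, so bit 0 is free to
 * carry the "matched" flag and the real pointer is recovered by
 * masking the bit back off.  Here r is any rule pointer.
 */
#if 0
	struct pf_anchor_stackframe f;

	f.r = r;			/* bit 0 clear, plain pointer */
	PF_ANCHOR_SET_MATCH(&f);	/* bit 0 now set */
	KASSERT(PF_ANCHOR_MATCH(&f), ("match bit lost"));
	KASSERT(PF_ANCHOR_RULE(&f) == r, ("pointer mangled"));
#endif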
2515 
2516 void
2517 pf_step_into_anchor(struct pf_anchor_stackframe *stack, int *depth,
2518     struct pf_ruleset **rs, int n, struct pf_rule **r, struct pf_rule **a,
2519     int *match)
2520 {
2521 	struct pf_anchor_stackframe	*f;
2522 
2523 	PF_RULES_RASSERT();
2524 
2525 	if (match)
2526 		*match = 0;
2527 	if (*depth >= PF_ANCHOR_STACKSIZE) {
2528 		printf("%s: anchor stack overflow on %s\n",
2529 		    __func__, (*r)->anchor->name);
2530 		*r = TAILQ_NEXT(*r, entries);
2531 		return;
2532 	} else if (*depth == 0 && a != NULL)
2533 		*a = *r;
2534 	f = stack + (*depth)++;
2535 	f->rs = *rs;
2536 	f->r = *r;
2537 	if ((*r)->anchor_wildcard) {
2538 		struct pf_anchor_node *parent = &(*r)->anchor->children;
2539 
2540 		if ((f->child = RB_MIN(pf_anchor_node, parent)) == NULL) {
2541 			*r = NULL;
2542 			return;
2543 		}
2544 		*rs = &f->child->ruleset;
2545 	} else {
2546 		f->child = NULL;
2547 		*rs = &(*r)->anchor->ruleset;
2548 	}
2549 	*r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
2550 }
2551 
2552 int
2553 pf_step_out_of_anchor(struct pf_anchor_stackframe *stack, int *depth,
2554     struct pf_ruleset **rs, int n, struct pf_rule **r, struct pf_rule **a,
2555     int *match)
2556 {
2557 	struct pf_anchor_stackframe	*f;
2558 	struct pf_rule *fr;
2559 	int quick = 0;
2560 
2561 	PF_RULES_RASSERT();
2562 
2563 	do {
2564 		if (*depth <= 0)
2565 			break;
2566 		f = stack + *depth - 1;
2567 		fr = PF_ANCHOR_RULE(f);
2568 		if (f->child != NULL) {
2569 			struct pf_anchor_node *parent;
2570 
2571 			/*
			 * This block traverses a wildcard anchor.
2574 			 */
2575 			parent = &fr->anchor->children;
2576 			if (match != NULL && *match) {
2577 				/*
2578 				 * If any of "*" matched, then
2579 				 * "foo/ *" matched, mark frame
2580 				 * appropriately.
2581 				 */
2582 				PF_ANCHOR_SET_MATCH(f);
2583 				*match = 0;
2584 			}
2585 			f->child = RB_NEXT(pf_anchor_node, parent, f->child);
2586 			if (f->child != NULL) {
2587 				*rs = &f->child->ruleset;
2588 				*r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
2589 				if (*r == NULL)
2590 					continue;
2591 				else
2592 					break;
2593 			}
2594 		}
2595 		(*depth)--;
2596 		if (*depth == 0 && a != NULL)
2597 			*a = NULL;
2598 		*rs = f->rs;
2599 		if (PF_ANCHOR_MATCH(f) || (match != NULL && *match))
2600 			quick = fr->quick;
2601 		*r = TAILQ_NEXT(fr, entries);
2602 	} while (*r == NULL);
2603 
2604 	return (quick);
2605 }
2606 
2607 #ifdef INET6
2608 void
2609 pf_poolmask(struct pf_addr *naddr, struct pf_addr *raddr,
2610     struct pf_addr *rmask, struct pf_addr *saddr, sa_family_t af)
2611 {
2612 	switch (af) {
2613 #ifdef INET
2614 	case AF_INET:
2615 		naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
2616 		((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]);
2617 		break;
2618 #endif /* INET */
2619 	case AF_INET6:
2620 		naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
2621 		((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]);
2622 		naddr->addr32[1] = (raddr->addr32[1] & rmask->addr32[1]) |
2623 		((rmask->addr32[1] ^ 0xffffffff ) & saddr->addr32[1]);
2624 		naddr->addr32[2] = (raddr->addr32[2] & rmask->addr32[2]) |
2625 		((rmask->addr32[2] ^ 0xffffffff ) & saddr->addr32[2]);
2626 		naddr->addr32[3] = (raddr->addr32[3] & rmask->addr32[3]) |
2627 		((rmask->addr32[3] ^ 0xffffffff ) & saddr->addr32[3]);
2628 		break;
2629 	}
2630 }
2631 
2632 void
2633 pf_addr_inc(struct pf_addr *addr, sa_family_t af)
2634 {
2635 	switch (af) {
2636 #ifdef INET
2637 	case AF_INET:
2638 		addr->addr32[0] = htonl(ntohl(addr->addr32[0]) + 1);
2639 		break;
2640 #endif /* INET */
2641 	case AF_INET6:
2642 		if (addr->addr32[3] == 0xffffffff) {
2643 			addr->addr32[3] = 0;
2644 			if (addr->addr32[2] == 0xffffffff) {
2645 				addr->addr32[2] = 0;
2646 				if (addr->addr32[1] == 0xffffffff) {
2647 					addr->addr32[1] = 0;
2648 					addr->addr32[0] =
2649 					    htonl(ntohl(addr->addr32[0]) + 1);
2650 				} else
2651 					addr->addr32[1] =
2652 					    htonl(ntohl(addr->addr32[1]) + 1);
2653 			} else
2654 				addr->addr32[2] =
2655 				    htonl(ntohl(addr->addr32[2]) + 1);
2656 		} else
2657 			addr->addr32[3] =
2658 			    htonl(ntohl(addr->addr32[3]) + 1);
2659 		break;
2660 	}
2661 }
2662 #endif /* INET6 */
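
/*
 * Illustrative sketch (not compiled): pf_addr_inc() treats an IPv6
 * address as one 128-bit big-endian counter, rippling the carry from
 * the lowest 32-bit word upward.
 */
#if 0
	struct pf_addr a;

	memset(&a, 0, sizeof(a));
	a.addr32[3] = htonl(0xffffffff);	/* ::ffff:ffff */
	pf_addr_inc(&a, AF_INET6);		/* now ::1:0:0 */
#endif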
2663 
2664 int
2665 pf_socket_lookup(int direction, struct pf_pdesc *pd, struct mbuf *m)
2666 {
2667 	struct pf_addr		*saddr, *daddr;
2668 	u_int16_t		 sport, dport;
2669 	struct inpcbinfo	*pi;
2670 	struct inpcb		*inp;
2671 
2672 	pd->lookup.uid = UID_MAX;
2673 	pd->lookup.gid = GID_MAX;
2674 
2675 	switch (pd->proto) {
2676 	case IPPROTO_TCP:
2677 		if (pd->hdr.tcp == NULL)
2678 			return (-1);
2679 		sport = pd->hdr.tcp->th_sport;
2680 		dport = pd->hdr.tcp->th_dport;
2681 		pi = &V_tcbinfo;
2682 		break;
2683 	case IPPROTO_UDP:
2684 		if (pd->hdr.udp == NULL)
2685 			return (-1);
2686 		sport = pd->hdr.udp->uh_sport;
2687 		dport = pd->hdr.udp->uh_dport;
2688 		pi = &V_udbinfo;
2689 		break;
2690 	default:
2691 		return (-1);
2692 	}
2693 	if (direction == PF_IN) {
2694 		saddr = pd->src;
2695 		daddr = pd->dst;
2696 	} else {
2697 		u_int16_t	p;
2698 
2699 		p = sport;
2700 		sport = dport;
2701 		dport = p;
2702 		saddr = pd->dst;
2703 		daddr = pd->src;
2704 	}
2705 	switch (pd->af) {
2706 #ifdef INET
2707 	case AF_INET:
2708 		inp = in_pcblookup_mbuf(pi, saddr->v4, sport, daddr->v4,
2709 		    dport, INPLOOKUP_RLOCKPCB, NULL, m);
2710 		if (inp == NULL) {
2711 			inp = in_pcblookup_mbuf(pi, saddr->v4, sport,
2712 			   daddr->v4, dport, INPLOOKUP_WILDCARD |
2713 			   INPLOOKUP_RLOCKPCB, NULL, m);
2714 			if (inp == NULL)
2715 				return (-1);
2716 		}
2717 		break;
2718 #endif /* INET */
2719 #ifdef INET6
2720 	case AF_INET6:
2721 		inp = in6_pcblookup_mbuf(pi, &saddr->v6, sport, &daddr->v6,
2722 		    dport, INPLOOKUP_RLOCKPCB, NULL, m);
2723 		if (inp == NULL) {
2724 			inp = in6_pcblookup_mbuf(pi, &saddr->v6, sport,
2725 			    &daddr->v6, dport, INPLOOKUP_WILDCARD |
2726 			    INPLOOKUP_RLOCKPCB, NULL, m);
2727 			if (inp == NULL)
2728 				return (-1);
2729 		}
2730 		break;
2731 #endif /* INET6 */
2732 
2733 	default:
2734 		return (-1);
2735 	}
2736 	INP_RLOCK_ASSERT(inp);
2737 	pd->lookup.uid = inp->inp_cred->cr_uid;
2738 	pd->lookup.gid = inp->inp_cred->cr_groups[0];
2739 	INP_RUNLOCK(inp);
2740 
2741 	return (1);
2742 }
2743 
2744 static u_int8_t
2745 pf_get_wscale(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af)
2746 {
2747 	int		 hlen;
2748 	u_int8_t	 hdr[60];
2749 	u_int8_t	*opt, optlen;
2750 	u_int8_t	 wscale = 0;
2751 
2752 	hlen = th_off << 2;		/* hlen <= sizeof(hdr) */
2753 	if (hlen <= sizeof(struct tcphdr))
2754 		return (0);
2755 	if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af))
2756 		return (0);
2757 	opt = hdr + sizeof(struct tcphdr);
2758 	hlen -= sizeof(struct tcphdr);
2759 	while (hlen >= 3) {
2760 		switch (*opt) {
2761 		case TCPOPT_EOL:
2762 		case TCPOPT_NOP:
2763 			++opt;
2764 			--hlen;
2765 			break;
2766 		case TCPOPT_WINDOW:
2767 			wscale = opt[2];
2768 			if (wscale > TCP_MAX_WINSHIFT)
2769 				wscale = TCP_MAX_WINSHIFT;
2770 			wscale |= PF_WSCALE_FLAG;
2771 			/* FALLTHROUGH */
2772 		default:
2773 			optlen = opt[1];
2774 			if (optlen < 2)
2775 				optlen = 2;
2776 			hlen -= optlen;
2777 			opt += optlen;
2778 			break;
2779 		}
2780 	}
2781 	return (wscale);
2782 }
2783 
2784 static u_int16_t
2785 pf_get_mss(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af)
2786 {
2787 	int		 hlen;
2788 	u_int8_t	 hdr[60];
2789 	u_int8_t	*opt, optlen;
2790 	u_int16_t	 mss = V_tcp_mssdflt;
2791 
2792 	hlen = th_off << 2;	/* hlen <= sizeof(hdr) */
2793 	if (hlen <= sizeof(struct tcphdr))
2794 		return (0);
2795 	if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af))
2796 		return (0);
2797 	opt = hdr + sizeof(struct tcphdr);
2798 	hlen -= sizeof(struct tcphdr);
2799 	while (hlen >= TCPOLEN_MAXSEG) {
2800 		switch (*opt) {
2801 		case TCPOPT_EOL:
2802 		case TCPOPT_NOP:
2803 			++opt;
2804 			--hlen;
2805 			break;
2806 		case TCPOPT_MAXSEG:
2807 			bcopy((caddr_t)(opt + 2), (caddr_t)&mss, 2);
2808 			NTOHS(mss);
2809 			/* FALLTHROUGH */
2810 		default:
2811 			optlen = opt[1];
2812 			if (optlen < 2)
2813 				optlen = 2;
2814 			hlen -= optlen;
2815 			opt += optlen;
2816 			break;
2817 		}
2818 	}
2819 	return (mss);
2820 }
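
/*
 * Worked example (illustrative): in a SYN carrying "NOP, NOP, MSS 1460"
 * the option bytes after the fixed TCP header are
 *
 *	01 01 02 04 05 b4
 *
 * The loop above steps over the two one-byte NOPs, then copies the
 * two-byte value 0x05b4 (1460) out of the four-byte MSS option.
 */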
2821 
2822 static u_int16_t
2823 pf_calc_mss(struct pf_addr *addr, sa_family_t af, int rtableid, u_int16_t offer)
2824 {
2825 #ifdef INET
2826 	struct sockaddr_in	*dst;
2827 	struct route		 ro;
2828 #endif /* INET */
2829 #ifdef INET6
2830 	struct sockaddr_in6	*dst6;
2831 	struct route_in6	 ro6;
2832 #endif /* INET6 */
2833 	struct rtentry		*rt = NULL;
2834 	int			 hlen = 0;
2835 	u_int16_t		 mss = V_tcp_mssdflt;
2836 
2837 	switch (af) {
2838 #ifdef INET
2839 	case AF_INET:
2840 		hlen = sizeof(struct ip);
2841 		bzero(&ro, sizeof(ro));
2842 		dst = (struct sockaddr_in *)&ro.ro_dst;
2843 		dst->sin_family = AF_INET;
2844 		dst->sin_len = sizeof(*dst);
2845 		dst->sin_addr = addr->v4;
2846 		in_rtalloc_ign(&ro, 0, rtableid);
2847 		rt = ro.ro_rt;
2848 		break;
2849 #endif /* INET */
2850 #ifdef INET6
2851 	case AF_INET6:
2852 		hlen = sizeof(struct ip6_hdr);
2853 		bzero(&ro6, sizeof(ro6));
2854 		dst6 = (struct sockaddr_in6 *)&ro6.ro_dst;
2855 		dst6->sin6_family = AF_INET6;
2856 		dst6->sin6_len = sizeof(*dst6);
2857 		dst6->sin6_addr = addr->v6;
2858 		in6_rtalloc_ign(&ro6, 0, rtableid);
2859 		rt = ro6.ro_rt;
2860 		break;
2861 #endif /* INET6 */
2862 	}
2863 
2864 	if (rt && rt->rt_ifp) {
2865 		mss = rt->rt_ifp->if_mtu - hlen - sizeof(struct tcphdr);
2866 		mss = max(V_tcp_mssdflt, mss);
2867 		RTFREE(rt);
2868 	}
2869 	mss = min(mss, offer);
2870 	mss = max(mss, 64);		/* sanity - at least max opt space */
2871 	return (mss);
2872 }
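
/*
 * Worked example (illustrative): for a route out an interface with a
 * 1500-byte MTU, the IPv4 branch above yields 1500 - 20 - 20 = 1460.
 * The result is then raised to at least V_tcp_mssdflt, clamped to the
 * peer's offer and floored at 64 bytes.
 */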
2873 
2874 static void
2875 pf_set_rt_ifp(struct pf_state *s, struct pf_addr *saddr)
2876 {
2877 	struct pf_rule *r = s->rule.ptr;
2878 	struct pf_src_node *sn = NULL;
2879 
2880 	s->rt_kif = NULL;
2881 	if (!r->rt || r->rt == PF_FASTROUTE)
2882 		return;
2883 	switch (s->key[PF_SK_WIRE]->af) {
2884 #ifdef INET
2885 	case AF_INET:
2886 		pf_map_addr(AF_INET, r, saddr, &s->rt_addr, NULL, &sn);
2887 		s->rt_kif = r->rpool.cur->kif;
2888 		break;
2889 #endif /* INET */
2890 #ifdef INET6
2891 	case AF_INET6:
2892 		pf_map_addr(AF_INET6, r, saddr, &s->rt_addr, NULL, &sn);
2893 		s->rt_kif = r->rpool.cur->kif;
2894 		break;
2895 #endif /* INET6 */
2896 	}
2897 }
2898 
2899 static u_int32_t
2900 pf_tcp_iss(struct pf_pdesc *pd)
2901 {
2902 	MD5_CTX ctx;
2903 	u_int32_t digest[4];
2904 
2905 	if (V_pf_tcp_secret_init == 0) {
2906 		read_random(&V_pf_tcp_secret, sizeof(V_pf_tcp_secret));
2907 		MD5Init(&V_pf_tcp_secret_ctx);
2908 		MD5Update(&V_pf_tcp_secret_ctx, V_pf_tcp_secret,
2909 		    sizeof(V_pf_tcp_secret));
2910 		V_pf_tcp_secret_init = 1;
2911 	}
2912 
2913 	ctx = V_pf_tcp_secret_ctx;
2914 
2915 	MD5Update(&ctx, (char *)&pd->hdr.tcp->th_sport, sizeof(u_short));
2916 	MD5Update(&ctx, (char *)&pd->hdr.tcp->th_dport, sizeof(u_short));
2917 	if (pd->af == AF_INET6) {
2918 		MD5Update(&ctx, (char *)&pd->src->v6, sizeof(struct in6_addr));
2919 		MD5Update(&ctx, (char *)&pd->dst->v6, sizeof(struct in6_addr));
2920 	} else {
2921 		MD5Update(&ctx, (char *)&pd->src->v4, sizeof(struct in_addr));
2922 		MD5Update(&ctx, (char *)&pd->dst->v4, sizeof(struct in_addr));
2923 	}
2924 	MD5Final((u_char *)digest, &ctx);
2925 	V_pf_tcp_iss_off += 4096;
2926 #define	ISN_RANDOM_INCREMENT (4096 - 1)
2927 	return (digest[0] + (arc4random() & ISN_RANDOM_INCREMENT) +
2928 	    V_pf_tcp_iss_off);
2929 #undef	ISN_RANDOM_INCREMENT
2930 }
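
/*
 * Illustrative note: the ISS above is MD5(secret, sport, dport, src,
 * dst) plus a per-call step of 4096 and a random increment below 4096,
 * so it is unpredictable to off-path hosts yet monotonically
 * increasing across successive states for the same connection tuple.
 */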
2931 
2932 static int
2933 pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction,
2934     struct pfi_kif *kif, struct mbuf *m, int off, struct pf_pdesc *pd,
2935     struct pf_rule **am, struct pf_ruleset **rsm, struct inpcb *inp)
2936 {
2937 	struct pf_rule		*nr = NULL;
2938 	struct pf_addr		* const saddr = pd->src;
2939 	struct pf_addr		* const daddr = pd->dst;
2940 	sa_family_t		 af = pd->af;
2941 	struct pf_rule		*r, *a = NULL;
2942 	struct pf_ruleset	*ruleset = NULL;
2943 	struct pf_src_node	*nsn = NULL;
2944 	struct tcphdr		*th = pd->hdr.tcp;
2945 	struct pf_state_key	*sk = NULL, *nk = NULL;
2946 	u_short			 reason;
2947 	int			 rewrite = 0, hdrlen = 0;
2948 	int			 tag = -1, rtableid = -1;
2949 	int			 asd = 0;
2950 	int			 match = 0;
2951 	int			 state_icmp = 0;
2952 	u_int16_t		 sport = 0, dport = 0;
2953 	u_int16_t		 bproto_sum = 0, bip_sum = 0;
2954 	u_int8_t		 icmptype = 0, icmpcode = 0;
2955 	struct pf_anchor_stackframe	anchor_stack[PF_ANCHOR_STACKSIZE];
2956 
2957 	PF_RULES_RASSERT();
2958 
2959 	if (inp != NULL) {
2960 		INP_LOCK_ASSERT(inp);
2961 		pd->lookup.uid = inp->inp_cred->cr_uid;
2962 		pd->lookup.gid = inp->inp_cred->cr_groups[0];
2963 		pd->lookup.done = 1;
2964 	}
2965 
2966 	switch (pd->proto) {
2967 	case IPPROTO_TCP:
2968 		sport = th->th_sport;
2969 		dport = th->th_dport;
2970 		hdrlen = sizeof(*th);
2971 		break;
2972 	case IPPROTO_UDP:
2973 		sport = pd->hdr.udp->uh_sport;
2974 		dport = pd->hdr.udp->uh_dport;
2975 		hdrlen = sizeof(*pd->hdr.udp);
2976 		break;
2977 #ifdef INET
2978 	case IPPROTO_ICMP:
2979 		if (pd->af != AF_INET)
2980 			break;
2981 		sport = dport = pd->hdr.icmp->icmp_id;
2982 		hdrlen = sizeof(*pd->hdr.icmp);
2983 		icmptype = pd->hdr.icmp->icmp_type;
2984 		icmpcode = pd->hdr.icmp->icmp_code;
2985 
2986 		if (icmptype == ICMP_UNREACH ||
2987 		    icmptype == ICMP_SOURCEQUENCH ||
2988 		    icmptype == ICMP_REDIRECT ||
2989 		    icmptype == ICMP_TIMXCEED ||
2990 		    icmptype == ICMP_PARAMPROB)
2991 			state_icmp++;
2992 		break;
2993 #endif /* INET */
2994 #ifdef INET6
2995 	case IPPROTO_ICMPV6:
2996 		if (af != AF_INET6)
2997 			break;
2998 		sport = dport = pd->hdr.icmp6->icmp6_id;
2999 		hdrlen = sizeof(*pd->hdr.icmp6);
3000 		icmptype = pd->hdr.icmp6->icmp6_type;
3001 		icmpcode = pd->hdr.icmp6->icmp6_code;
3002 
3003 		if (icmptype == ICMP6_DST_UNREACH ||
3004 		    icmptype == ICMP6_PACKET_TOO_BIG ||
3005 		    icmptype == ICMP6_TIME_EXCEEDED ||
3006 		    icmptype == ICMP6_PARAM_PROB)
3007 			state_icmp++;
3008 		break;
3009 #endif /* INET6 */
3010 	default:
3011 		sport = dport = hdrlen = 0;
3012 		break;
3013 	}
3014 
3015 	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
3016 
3017 	/* check packet for BINAT/NAT/RDR */
3018 	if ((nr = pf_get_translation(pd, m, off, direction, kif, &nsn, &sk,
3019 	    &nk, saddr, daddr, sport, dport, anchor_stack)) != NULL) {
3020 		KASSERT(sk != NULL, ("%s: null sk", __func__));
3021 		KASSERT(nk != NULL, ("%s: null nk", __func__));
3022 
3023 		if (pd->ip_sum)
3024 			bip_sum = *pd->ip_sum;
3025 
3026 		switch (pd->proto) {
3027 		case IPPROTO_TCP:
3028 			bproto_sum = th->th_sum;
3029 			pd->proto_sum = &th->th_sum;
3030 
3031 			if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) ||
3032 			    nk->port[pd->sidx] != sport) {
3033 				pf_change_ap(saddr, &th->th_sport, pd->ip_sum,
3034 				    &th->th_sum, &nk->addr[pd->sidx],
3035 				    nk->port[pd->sidx], 0, af);
3036 				pd->sport = &th->th_sport;
3037 				sport = th->th_sport;
3038 			}
3039 
3040 			if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) ||
3041 			    nk->port[pd->didx] != dport) {
3042 				pf_change_ap(daddr, &th->th_dport, pd->ip_sum,
3043 				    &th->th_sum, &nk->addr[pd->didx],
3044 				    nk->port[pd->didx], 0, af);
3045 				dport = th->th_dport;
3046 				pd->dport = &th->th_dport;
3047 			}
3048 			rewrite++;
3049 			break;
3050 		case IPPROTO_UDP:
3051 			bproto_sum = pd->hdr.udp->uh_sum;
3052 			pd->proto_sum = &pd->hdr.udp->uh_sum;
3053 
3054 			if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) ||
3055 			    nk->port[pd->sidx] != sport) {
3056 				pf_change_ap(saddr, &pd->hdr.udp->uh_sport,
3057 				    pd->ip_sum, &pd->hdr.udp->uh_sum,
3058 				    &nk->addr[pd->sidx],
3059 				    nk->port[pd->sidx], 1, af);
3060 				sport = pd->hdr.udp->uh_sport;
3061 				pd->sport = &pd->hdr.udp->uh_sport;
3062 			}
3063 
3064 			if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) ||
3065 			    nk->port[pd->didx] != dport) {
3066 				pf_change_ap(daddr, &pd->hdr.udp->uh_dport,
3067 				    pd->ip_sum, &pd->hdr.udp->uh_sum,
3068 				    &nk->addr[pd->didx],
3069 				    nk->port[pd->didx], 1, af);
3070 				dport = pd->hdr.udp->uh_dport;
3071 				pd->dport = &pd->hdr.udp->uh_dport;
3072 			}
3073 			rewrite++;
3074 			break;
3075 #ifdef INET
3076 		case IPPROTO_ICMP:
3077 			nk->port[0] = nk->port[1];
3078 			if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET))
3079 				pf_change_a(&saddr->v4.s_addr, pd->ip_sum,
3080 				    nk->addr[pd->sidx].v4.s_addr, 0);
3081 
3082 			if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET))
3083 				pf_change_a(&daddr->v4.s_addr, pd->ip_sum,
3084 				    nk->addr[pd->didx].v4.s_addr, 0);
3085 
3086 			if (nk->port[1] != pd->hdr.icmp->icmp_id) {
3087 				pd->hdr.icmp->icmp_cksum = pf_cksum_fixup(
3088 				    pd->hdr.icmp->icmp_cksum, sport,
3089 				    nk->port[1], 0);
3090 				pd->hdr.icmp->icmp_id = nk->port[1];
3091 				pd->sport = &pd->hdr.icmp->icmp_id;
3092 			}
3093 			m_copyback(m, off, ICMP_MINLEN, (caddr_t)pd->hdr.icmp);
3094 			break;
3095 #endif /* INET */
3096 #ifdef INET6
3097 		case IPPROTO_ICMPV6:
3098 			nk->port[0] = nk->port[1];
3099 			if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET6))
3100 				pf_change_a6(saddr, &pd->hdr.icmp6->icmp6_cksum,
3101 				    &nk->addr[pd->sidx], 0);
3102 
3103 			if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET6))
3104 				pf_change_a6(daddr, &pd->hdr.icmp6->icmp6_cksum,
3105 				    &nk->addr[pd->didx], 0);
3106 			rewrite++;
3107 			break;
#endif /* INET6 */
3109 		default:
3110 			switch (af) {
3111 #ifdef INET
3112 			case AF_INET:
3113 				if (PF_ANEQ(saddr,
3114 				    &nk->addr[pd->sidx], AF_INET))
3115 					pf_change_a(&saddr->v4.s_addr,
3116 					    pd->ip_sum,
3117 					    nk->addr[pd->sidx].v4.s_addr, 0);
3118 
3119 				if (PF_ANEQ(daddr,
3120 				    &nk->addr[pd->didx], AF_INET))
3121 					pf_change_a(&daddr->v4.s_addr,
3122 					    pd->ip_sum,
3123 					    nk->addr[pd->didx].v4.s_addr, 0);
3124 				break;
3125 #endif /* INET */
3126 #ifdef INET6
3127 			case AF_INET6:
3128 				if (PF_ANEQ(saddr,
3129 				    &nk->addr[pd->sidx], AF_INET6))
3130 					PF_ACPY(saddr, &nk->addr[pd->sidx], af);
3131 
3132 				if (PF_ANEQ(daddr,
3133 				    &nk->addr[pd->didx], AF_INET6))
					PF_ACPY(daddr, &nk->addr[pd->didx], af);
3135 				break;
#endif /* INET6 */
3137 			}
3138 			break;
3139 		}
3140 		if (nr->natpass)
3141 			r = NULL;
3142 		pd->nat_rule = nr;
3143 	}
3144 
3145 	while (r != NULL) {
3146 		r->evaluations++;
3147 		if (pfi_kif_match(r->kif, kif) == r->ifnot)
3148 			r = r->skip[PF_SKIP_IFP].ptr;
3149 		else if (r->direction && r->direction != direction)
3150 			r = r->skip[PF_SKIP_DIR].ptr;
3151 		else if (r->af && r->af != af)
3152 			r = r->skip[PF_SKIP_AF].ptr;
3153 		else if (r->proto && r->proto != pd->proto)
3154 			r = r->skip[PF_SKIP_PROTO].ptr;
3155 		else if (PF_MISMATCHAW(&r->src.addr, saddr, af,
3156 		    r->src.neg, kif, M_GETFIB(m)))
3157 			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
3158 		/* tcp/udp only. port_op always 0 in other cases */
3159 		else if (r->src.port_op && !pf_match_port(r->src.port_op,
3160 		    r->src.port[0], r->src.port[1], sport))
3161 			r = r->skip[PF_SKIP_SRC_PORT].ptr;
3162 		else if (PF_MISMATCHAW(&r->dst.addr, daddr, af,
3163 		    r->dst.neg, NULL, M_GETFIB(m)))
3164 			r = r->skip[PF_SKIP_DST_ADDR].ptr;
3165 		/* tcp/udp only. port_op always 0 in other cases */
3166 		else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
3167 		    r->dst.port[0], r->dst.port[1], dport))
3168 			r = r->skip[PF_SKIP_DST_PORT].ptr;
3169 		/* icmp only. type always 0 in other cases */
3170 		else if (r->type && r->type != icmptype + 1)
3171 			r = TAILQ_NEXT(r, entries);
		/* icmp only. code always 0 in other cases */
3173 		else if (r->code && r->code != icmpcode + 1)
3174 			r = TAILQ_NEXT(r, entries);
		else if (r->tos && r->tos != pd->tos)
3176 			r = TAILQ_NEXT(r, entries);
3177 		else if (r->rule_flag & PFRULE_FRAGMENT)
3178 			r = TAILQ_NEXT(r, entries);
3179 		else if (pd->proto == IPPROTO_TCP &&
3180 		    (r->flagset & th->th_flags) != r->flags)
3181 			r = TAILQ_NEXT(r, entries);
3182 		/* tcp/udp only. uid.op always 0 in other cases */
3183 		else if (r->uid.op && (pd->lookup.done || (pd->lookup.done =
3184 		    pf_socket_lookup(direction, pd, m), 1)) &&
3185 		    !pf_match_uid(r->uid.op, r->uid.uid[0], r->uid.uid[1],
3186 		    pd->lookup.uid))
3187 			r = TAILQ_NEXT(r, entries);
3188 		/* tcp/udp only. gid.op always 0 in other cases */
3189 		else if (r->gid.op && (pd->lookup.done || (pd->lookup.done =
3190 		    pf_socket_lookup(direction, pd, m), 1)) &&
3191 		    !pf_match_gid(r->gid.op, r->gid.gid[0], r->gid.gid[1],
3192 		    pd->lookup.gid))
3193 			r = TAILQ_NEXT(r, entries);
3194 		else if (r->prob &&
3195 		    r->prob <= arc4random())
3196 			r = TAILQ_NEXT(r, entries);
3197 		else if (r->match_tag && !pf_match_tag(m, r, &tag,
3198 		    pd->pf_mtag ? pd->pf_mtag->tag : 0))
3199 			r = TAILQ_NEXT(r, entries);
3200 		else if (r->os_fingerprint != PF_OSFP_ANY &&
3201 		    (pd->proto != IPPROTO_TCP || !pf_osfp_match(
3202 		    pf_osfp_fingerprint(pd, m, off, th),
3203 		    r->os_fingerprint)))
3204 			r = TAILQ_NEXT(r, entries);
3205 		else {
3206 			if (r->tag)
3207 				tag = r->tag;
3208 			if (r->rtableid >= 0)
3209 				rtableid = r->rtableid;
3210 			if (r->anchor == NULL) {
3211 				match = 1;
3212 				*rm = r;
3213 				*am = a;
3214 				*rsm = ruleset;
3215 				if ((*rm)->quick)
3216 					break;
3217 				r = TAILQ_NEXT(r, entries);
3218 			} else
3219 				pf_step_into_anchor(anchor_stack, &asd,
3220 				    &ruleset, PF_RULESET_FILTER, &r, &a,
3221 				    &match);
3222 		}
3223 		if (r == NULL && pf_step_out_of_anchor(anchor_stack, &asd,
3224 		    &ruleset, PF_RULESET_FILTER, &r, &a, &match))
3225 			break;
3226 	}
3227 	r = *rm;
3228 	a = *am;
3229 	ruleset = *rsm;
3230 
3231 	REASON_SET(&reason, PFRES_MATCH);
3232 
3233 	if (r->log || (nr != NULL && nr->log)) {
3234 		if (rewrite)
3235 			m_copyback(m, off, hdrlen, pd->hdr.any);
3236 		PFLOG_PACKET(kif, m, af, direction, reason, r->log ? r : nr, a,
3237 		    ruleset, pd, 1);
3238 	}
3239 
3240 	if ((r->action == PF_DROP) &&
3241 	    ((r->rule_flag & PFRULE_RETURNRST) ||
3242 	    (r->rule_flag & PFRULE_RETURNICMP) ||
3243 	    (r->rule_flag & PFRULE_RETURN))) {
3244 		/* undo NAT changes, if they have taken place */
3245 		if (nr != NULL) {
3246 			PF_ACPY(saddr, &sk->addr[pd->sidx], af);
3247 			PF_ACPY(daddr, &sk->addr[pd->didx], af);
3248 			if (pd->sport)
3249 				*pd->sport = sk->port[pd->sidx];
3250 			if (pd->dport)
3251 				*pd->dport = sk->port[pd->didx];
3252 			if (pd->proto_sum)
3253 				*pd->proto_sum = bproto_sum;
3254 			if (pd->ip_sum)
3255 				*pd->ip_sum = bip_sum;
3256 			m_copyback(m, off, hdrlen, pd->hdr.any);
3257 		}
3258 		if (pd->proto == IPPROTO_TCP &&
3259 		    ((r->rule_flag & PFRULE_RETURNRST) ||
3260 		    (r->rule_flag & PFRULE_RETURN)) &&
3261 		    !(th->th_flags & TH_RST)) {
3262 			u_int32_t	 ack = ntohl(th->th_seq) + pd->p_len;
3263 			int		 len = 0;
3264 #ifdef INET
3265 			struct ip	*h4;
3266 #endif
3267 #ifdef INET6
3268 			struct ip6_hdr	*h6;
3269 #endif
3270 
3271 			switch (af) {
3272 #ifdef INET
3273 			case AF_INET:
3274 				h4 = mtod(m, struct ip *);
3275 				len = ntohs(h4->ip_len) - off;
3276 				break;
3277 #endif
3278 #ifdef INET6
3279 			case AF_INET6:
3280 				h6 = mtod(m, struct ip6_hdr *);
3281 				len = ntohs(h6->ip6_plen) - (off - sizeof(*h6));
3282 				break;
3283 #endif
3284 			}
3285 
3286 			if (pf_check_proto_cksum(m, off, len, IPPROTO_TCP, af))
3287 				REASON_SET(&reason, PFRES_PROTCKSUM);
3288 			else {
3289 				if (th->th_flags & TH_SYN)
3290 					ack++;
3291 				if (th->th_flags & TH_FIN)
3292 					ack++;
3293 				pf_send_tcp(m, r, af, pd->dst,
3294 				    pd->src, th->th_dport, th->th_sport,
3295 				    ntohl(th->th_ack), ack, TH_RST|TH_ACK, 0, 0,
3296 				    r->return_ttl, 1, 0, kif->pfik_ifp);
3297 			}
3298 		} else if (pd->proto != IPPROTO_ICMP && af == AF_INET &&
3299 		    r->return_icmp)
3300 			pf_send_icmp(m, r->return_icmp >> 8,
3301 			    r->return_icmp & 255, af, r);
3302 		else if (pd->proto != IPPROTO_ICMPV6 && af == AF_INET6 &&
3303 		    r->return_icmp6)
3304 			pf_send_icmp(m, r->return_icmp6 >> 8,
3305 			    r->return_icmp6 & 255, af, r);
3306 	}
3307 
3308 	if (r->action == PF_DROP)
3309 		goto cleanup;
3310 
3311 	if (tag > 0 && pf_tag_packet(m, pd, tag)) {
3312 		REASON_SET(&reason, PFRES_MEMORY);
3313 		goto cleanup;
3314 	}
3315 	if (rtableid >= 0)
3316 		M_SETFIB(m, rtableid);
3317 
3318 	if (!state_icmp && (r->keep_state || nr != NULL ||
3319 	    (pd->flags & PFDESC_TCP_NORM))) {
3320 		int action;
3321 		action = pf_create_state(r, nr, a, pd, nsn, nk, sk, m, off,
3322 		    sport, dport, &rewrite, kif, sm, tag, bproto_sum, bip_sum,
3323 		    hdrlen);
3324 		if (action != PF_PASS)
3325 			return (action);
3326 	} else {
3327 		if (sk != NULL)
3328 			uma_zfree(V_pf_state_key_z, sk);
3329 		if (nk != NULL)
3330 			uma_zfree(V_pf_state_key_z, nk);
3331 	}
3332 
3333 	/* copy back packet headers if we performed NAT operations */
3334 	if (rewrite)
3335 		m_copyback(m, off, hdrlen, pd->hdr.any);
3336 
3337 	if (*sm != NULL && !((*sm)->state_flags & PFSTATE_NOSYNC) &&
3338 	    direction == PF_OUT &&
3339 	    pfsync_defer_ptr != NULL && pfsync_defer_ptr(*sm, m))
3340 		/*
		 * We want the state created, but we don't
		 * want to send this yet, in case a partner
		 * firewall has to learn about it first to
		 * allow replies through it.
3345 		 */
3346 		return (PF_DEFER);
3347 
3348 	return (PF_PASS);
3349 
3350 cleanup:
3351 	if (sk != NULL)
3352 		uma_zfree(V_pf_state_key_z, sk);
3353 	if (nk != NULL)
3354 		uma_zfree(V_pf_state_key_z, nk);
3355 	return (PF_DROP);
3356 }
3357 
3358 static int
3359 pf_create_state(struct pf_rule *r, struct pf_rule *nr, struct pf_rule *a,
3360     struct pf_pdesc *pd, struct pf_src_node *nsn, struct pf_state_key *nk,
3361     struct pf_state_key *sk, struct mbuf *m, int off, u_int16_t sport,
3362     u_int16_t dport, int *rewrite, struct pfi_kif *kif, struct pf_state **sm,
3363     int tag, u_int16_t bproto_sum, u_int16_t bip_sum, int hdrlen)
3364 {
3365 	struct pf_state		*s = NULL;
3366 	struct pf_src_node	*sn = NULL;
3367 	struct tcphdr		*th = pd->hdr.tcp;
3368 	u_int16_t		 mss = V_tcp_mssdflt;
3369 	u_short			 reason;
3370 
3371 	/* check maximums */
3372 	if (r->max_states && (r->states_cur >= r->max_states)) {
3373 		V_pf_status.lcounters[LCNT_STATES]++;
3374 		REASON_SET(&reason, PFRES_MAXSTATES);
3375 		return (PF_DROP);
3376 	}
3377 	/* src node for filter rule */
3378 	if ((r->rule_flag & PFRULE_SRCTRACK ||
3379 	    r->rpool.opts & PF_POOL_STICKYADDR) &&
3380 	    pf_insert_src_node(&sn, r, pd->src, pd->af) != 0) {
3381 		REASON_SET(&reason, PFRES_SRCLIMIT);
3382 		goto csfailed;
3383 	}
3384 	/* src node for translation rule */
3385 	if (nr != NULL && (nr->rpool.opts & PF_POOL_STICKYADDR) &&
3386 	    pf_insert_src_node(&nsn, nr, &sk->addr[pd->sidx], pd->af)) {
3387 		REASON_SET(&reason, PFRES_SRCLIMIT);
3388 		goto csfailed;
3389 	}
3390 	s = uma_zalloc(V_pf_state_z, M_NOWAIT | M_ZERO);
3391 	if (s == NULL) {
3392 		REASON_SET(&reason, PFRES_MEMORY);
3393 		goto csfailed;
3394 	}
3395 	s->rule.ptr = r;
3396 	s->nat_rule.ptr = nr;
3397 	s->anchor.ptr = a;
3398 	STATE_INC_COUNTERS(s);
3399 	if (r->allow_opts)
3400 		s->state_flags |= PFSTATE_ALLOWOPTS;
3401 	if (r->rule_flag & PFRULE_STATESLOPPY)
3402 		s->state_flags |= PFSTATE_SLOPPY;
3403 	s->log = r->log & PF_LOG_ALL;
3404 	s->sync_state = PFSYNC_S_NONE;
3405 	if (nr != NULL)
3406 		s->log |= nr->log & PF_LOG_ALL;
3407 	switch (pd->proto) {
3408 	case IPPROTO_TCP:
3409 		s->src.seqlo = ntohl(th->th_seq);
3410 		s->src.seqhi = s->src.seqlo + pd->p_len + 1;
3411 		if ((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN &&
3412 		    r->keep_state == PF_STATE_MODULATE) {
3413 			/* Generate sequence number modulator */
3414 			if ((s->src.seqdiff = pf_tcp_iss(pd) - s->src.seqlo) ==
3415 			    0)
3416 				s->src.seqdiff = 1;
3417 			pf_change_a(&th->th_seq, &th->th_sum,
3418 			    htonl(s->src.seqlo + s->src.seqdiff), 0);
3419 			*rewrite = 1;
3420 		} else
3421 			s->src.seqdiff = 0;
3422 		if (th->th_flags & TH_SYN) {
3423 			s->src.seqhi++;
3424 			s->src.wscale = pf_get_wscale(m, off,
3425 			    th->th_off, pd->af);
3426 		}
3427 		s->src.max_win = MAX(ntohs(th->th_win), 1);
3428 		if (s->src.wscale & PF_WSCALE_MASK) {
3429 			/* Remove scale factor from initial window */
3430 			int win = s->src.max_win;
3431 			win += 1 << (s->src.wscale & PF_WSCALE_MASK);
3432 			s->src.max_win = (win - 1) >>
3433 			    (s->src.wscale & PF_WSCALE_MASK);
3434 		}
3435 		if (th->th_flags & TH_FIN)
3436 			s->src.seqhi++;
3437 		s->dst.seqhi = 1;
3438 		s->dst.max_win = 1;
3439 		s->src.state = TCPS_SYN_SENT;
3440 		s->dst.state = TCPS_CLOSED;
3441 		s->timeout = PFTM_TCP_FIRST_PACKET;
3442 		break;
3443 	case IPPROTO_UDP:
3444 		s->src.state = PFUDPS_SINGLE;
3445 		s->dst.state = PFUDPS_NO_TRAFFIC;
3446 		s->timeout = PFTM_UDP_FIRST_PACKET;
3447 		break;
3448 	case IPPROTO_ICMP:
3449 #ifdef INET6
3450 	case IPPROTO_ICMPV6:
3451 #endif
3452 		s->timeout = PFTM_ICMP_FIRST_PACKET;
3453 		break;
3454 	default:
3455 		s->src.state = PFOTHERS_SINGLE;
3456 		s->dst.state = PFOTHERS_NO_TRAFFIC;
3457 		s->timeout = PFTM_OTHER_FIRST_PACKET;
3458 	}
3459 
3460 	s->creation = time_uptime;
3461 	s->expire = time_uptime;
3462 
3463 	if (sn != NULL) {
3464 		s->src_node = sn;
3465 		s->src_node->states++;
3466 	}
3467 	if (nsn != NULL) {
3468 		/* XXX We only modify one side for now. */
3469 		PF_ACPY(&nsn->raddr, &nk->addr[1], pd->af);
3470 		s->nat_src_node = nsn;
3471 		s->nat_src_node->states++;
3472 	}
3473 	if (pd->proto == IPPROTO_TCP) {
3474 		if ((pd->flags & PFDESC_TCP_NORM) && pf_normalize_tcp_init(m,
3475 		    off, pd, th, &s->src, &s->dst)) {
3476 			REASON_SET(&reason, PFRES_MEMORY);
3477 			pf_src_tree_remove_state(s);
3478 			STATE_DEC_COUNTERS(s);
3479 			uma_zfree(V_pf_state_z, s);
3480 			return (PF_DROP);
3481 		}
3482 		if ((pd->flags & PFDESC_TCP_NORM) && s->src.scrub &&
3483 		    pf_normalize_tcp_stateful(m, off, pd, &reason, th, s,
3484 		    &s->src, &s->dst, rewrite)) {
3485 			/* This really shouldn't happen!!! */
3486 			DPFPRINTF(PF_DEBUG_URGENT,
3487 			    ("pf_normalize_tcp_stateful failed on first pkt"));
3488 			pf_normalize_tcp_cleanup(s);
3489 			pf_src_tree_remove_state(s);
3490 			STATE_DEC_COUNTERS(s);
3491 			uma_zfree(V_pf_state_z, s);
3492 			return (PF_DROP);
3493 		}
3494 	}
3495 	s->direction = pd->dir;
3496 
3497 	/*
	 * sk/nk could have already been set up by pf_get_translation().
3499 	 */
3500 	if (nr == NULL) {
3501 		KASSERT((sk == NULL && nk == NULL), ("%s: nr %p sk %p, nk %p",
3502 		    __func__, nr, sk, nk));
3503 		sk = pf_state_key_setup(pd, pd->src, pd->dst, sport, dport);
3504 		if (sk == NULL)
3505 			goto csfailed;
3506 		nk = sk;
3507 	} else
3508 		KASSERT((sk != NULL && nk != NULL), ("%s: nr %p sk %p, nk %p",
3509 		    __func__, nr, sk, nk));
3510 
3511 	/* Swap sk/nk for PF_OUT. */
3512 	if (pf_state_insert(BOUND_IFACE(r, kif),
3513 	    (pd->dir == PF_IN) ? sk : nk,
3514 	    (pd->dir == PF_IN) ? nk : sk, s)) {
3515 		if (pd->proto == IPPROTO_TCP)
3516 			pf_normalize_tcp_cleanup(s);
3517 		REASON_SET(&reason, PFRES_STATEINS);
3518 		pf_src_tree_remove_state(s);
3519 		STATE_DEC_COUNTERS(s);
3520 		uma_zfree(V_pf_state_z, s);
3521 		return (PF_DROP);
3522 	} else
3523 		*sm = s;
3524 
3525 	pf_set_rt_ifp(s, pd->src);	/* needs s->state_key set */
3526 	if (tag > 0)
3527 		s->tag = tag;
3528 	if (pd->proto == IPPROTO_TCP && (th->th_flags & (TH_SYN|TH_ACK)) ==
3529 	    TH_SYN && r->keep_state == PF_STATE_SYNPROXY) {
3530 		s->src.state = PF_TCPS_PROXY_SRC;
3531 		/* undo NAT changes, if they have taken place */
3532 		if (nr != NULL) {
3533 			struct pf_state_key *skt = s->key[PF_SK_WIRE];
3534 			if (pd->dir == PF_OUT)
3535 				skt = s->key[PF_SK_STACK];
3536 			PF_ACPY(pd->src, &skt->addr[pd->sidx], pd->af);
3537 			PF_ACPY(pd->dst, &skt->addr[pd->didx], pd->af);
3538 			if (pd->sport)
3539 				*pd->sport = skt->port[pd->sidx];
3540 			if (pd->dport)
3541 				*pd->dport = skt->port[pd->didx];
3542 			if (pd->proto_sum)
3543 				*pd->proto_sum = bproto_sum;
3544 			if (pd->ip_sum)
3545 				*pd->ip_sum = bip_sum;
3546 			m_copyback(m, off, hdrlen, pd->hdr.any);
3547 		}
3548 		s->src.seqhi = htonl(arc4random());
3549 		/* Find mss option */
3550 		int rtid = M_GETFIB(m);
3551 		mss = pf_get_mss(m, off, th->th_off, pd->af);
3552 		mss = pf_calc_mss(pd->src, pd->af, rtid, mss);
3553 		mss = pf_calc_mss(pd->dst, pd->af, rtid, mss);
3554 		s->src.mss = mss;
3555 		pf_send_tcp(NULL, r, pd->af, pd->dst, pd->src, th->th_dport,
3556 		    th->th_sport, s->src.seqhi, ntohl(th->th_seq) + 1,
3557 		    TH_SYN|TH_ACK, 0, s->src.mss, 0, 1, 0, NULL);
3558 		REASON_SET(&reason, PFRES_SYNPROXY);
3559 		return (PF_SYNPROXY_DROP);
3560 	}
3561 
3562 	return (PF_PASS);
3563 
3564 csfailed:
3565 	if (sk != NULL)
3566 		uma_zfree(V_pf_state_key_z, sk);
3567 	if (nk != NULL)
3568 		uma_zfree(V_pf_state_key_z, nk);
3569 
3570 	if (sn != NULL && sn->states == 0 && sn->expire == 0)
3571 		pf_remove_src_node(sn);
3572 
3573 	if (nsn != sn && nsn != NULL && nsn->states == 0 && nsn->expire == 0)
3574 		pf_remove_src_node(nsn);
3575 
3576 	return (PF_DROP);
3577 }
3578 
3579 static int
3580 pf_test_fragment(struct pf_rule **rm, int direction, struct pfi_kif *kif,
3581     struct mbuf *m, void *h, struct pf_pdesc *pd, struct pf_rule **am,
3582     struct pf_ruleset **rsm)
3583 {
3584 	struct pf_rule		*r, *a = NULL;
3585 	struct pf_ruleset	*ruleset = NULL;
3586 	sa_family_t		 af = pd->af;
3587 	u_short			 reason;
3588 	int			 tag = -1;
3589 	int			 asd = 0;
3590 	int			 match = 0;
3591 	struct pf_anchor_stackframe	anchor_stack[PF_ANCHOR_STACKSIZE];
3592 
3593 	PF_RULES_RASSERT();
3594 
3595 	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
3596 	while (r != NULL) {
3597 		r->evaluations++;
3598 		if (pfi_kif_match(r->kif, kif) == r->ifnot)
3599 			r = r->skip[PF_SKIP_IFP].ptr;
3600 		else if (r->direction && r->direction != direction)
3601 			r = r->skip[PF_SKIP_DIR].ptr;
3602 		else if (r->af && r->af != af)
3603 			r = r->skip[PF_SKIP_AF].ptr;
3604 		else if (r->proto && r->proto != pd->proto)
3605 			r = r->skip[PF_SKIP_PROTO].ptr;
3606 		else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
3607 		    r->src.neg, kif, M_GETFIB(m)))
3608 			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
3609 		else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
3610 		    r->dst.neg, NULL, M_GETFIB(m)))
3611 			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else if (r->tos && r->tos != pd->tos)
3613 			r = TAILQ_NEXT(r, entries);
3614 		else if (r->os_fingerprint != PF_OSFP_ANY)
3615 			r = TAILQ_NEXT(r, entries);
3616 		else if (pd->proto == IPPROTO_UDP &&
3617 		    (r->src.port_op || r->dst.port_op))
3618 			r = TAILQ_NEXT(r, entries);
3619 		else if (pd->proto == IPPROTO_TCP &&
3620 		    (r->src.port_op || r->dst.port_op || r->flagset))
3621 			r = TAILQ_NEXT(r, entries);
3622 		else if ((pd->proto == IPPROTO_ICMP ||
3623 		    pd->proto == IPPROTO_ICMPV6) &&
3624 		    (r->type || r->code))
3625 			r = TAILQ_NEXT(r, entries);
3626 		else if (r->prob && r->prob <=
3627 		    (arc4random() % (UINT_MAX - 1) + 1))
3628 			r = TAILQ_NEXT(r, entries);
3629 		else if (r->match_tag && !pf_match_tag(m, r, &tag,
3630 		    pd->pf_mtag ? pd->pf_mtag->tag : 0))
3631 			r = TAILQ_NEXT(r, entries);
3632 		else {
3633 			if (r->anchor == NULL) {
3634 				match = 1;
3635 				*rm = r;
3636 				*am = a;
3637 				*rsm = ruleset;
3638 				if ((*rm)->quick)
3639 					break;
3640 				r = TAILQ_NEXT(r, entries);
3641 			} else
3642 				pf_step_into_anchor(anchor_stack, &asd,
3643 				    &ruleset, PF_RULESET_FILTER, &r, &a,
3644 				    &match);
3645 		}
3646 		if (r == NULL && pf_step_out_of_anchor(anchor_stack, &asd,
3647 		    &ruleset, PF_RULESET_FILTER, &r, &a, &match))
3648 			break;
3649 	}
3650 	r = *rm;
3651 	a = *am;
3652 	ruleset = *rsm;
3653 
3654 	REASON_SET(&reason, PFRES_MATCH);
3655 
3656 	if (r->log)
3657 		PFLOG_PACKET(kif, m, af, direction, reason, r, a, ruleset, pd,
3658 		    1);
3659 
3660 	if (r->action != PF_PASS)
3661 		return (PF_DROP);
3662 
3663 	if (tag > 0 && pf_tag_packet(m, pd, tag)) {
3664 		REASON_SET(&reason, PFRES_MEMORY);
3665 		return (PF_DROP);
3666 	}
3667 
3668 	return (PF_PASS);
3669 }
3670 
3671 static int
3672 pf_tcp_track_full(struct pf_state_peer *src, struct pf_state_peer *dst,
3673 	struct pf_state **state, struct pfi_kif *kif, struct mbuf *m, int off,
3674 	struct pf_pdesc *pd, u_short *reason, int *copyback)
3675 {
3676 	struct tcphdr		*th = pd->hdr.tcp;
3677 	u_int16_t		 win = ntohs(th->th_win);
3678 	u_int32_t		 ack, end, seq, orig_seq;
3679 	u_int8_t		 sws, dws;
3680 	int			 ackskew;
3681 
3682 	if (src->wscale && dst->wscale && !(th->th_flags & TH_SYN)) {
3683 		sws = src->wscale & PF_WSCALE_MASK;
3684 		dws = dst->wscale & PF_WSCALE_MASK;
3685 	} else
3686 		sws = dws = 0;
3687 
3688 	/*
3689 	 * Sequence tracking algorithm from Guido van Rooij's paper:
3690 	 *   http://www.madison-gurkha.com/publications/tcp_filtering/
3691 	 *	tcp_filtering.ps
3692 	 */
3693 
3694 	orig_seq = seq = ntohl(th->th_seq);
3695 	if (src->seqlo == 0) {
3696 		/* First packet from this end. Set its state */
3697 
3698 		if ((pd->flags & PFDESC_TCP_NORM || dst->scrub) &&
3699 		    src->scrub == NULL) {
3700 			if (pf_normalize_tcp_init(m, off, pd, th, src, dst)) {
3701 				REASON_SET(reason, PFRES_MEMORY);
3702 				return (PF_DROP);
3703 			}
3704 		}
3705 
3706 		/* Deferred generation of sequence number modulator */
3707 		if (dst->seqdiff && !src->seqdiff) {
3708 			/* use random iss for the TCP server */
3709 			while ((src->seqdiff = arc4random() - seq) == 0)
3710 				;
3711 			ack = ntohl(th->th_ack) - dst->seqdiff;
3712 			pf_change_a(&th->th_seq, &th->th_sum, htonl(seq +
3713 			    src->seqdiff), 0);
3714 			pf_change_a(&th->th_ack, &th->th_sum, htonl(ack), 0);
3715 			*copyback = 1;
3716 		} else {
3717 			ack = ntohl(th->th_ack);
3718 		}
3719 
3720 		end = seq + pd->p_len;
3721 		if (th->th_flags & TH_SYN) {
3722 			end++;
3723 			if (dst->wscale & PF_WSCALE_FLAG) {
3724 				src->wscale = pf_get_wscale(m, off, th->th_off,
3725 				    pd->af);
3726 				if (src->wscale & PF_WSCALE_FLAG) {
3727 					/* Remove scale factor from initial
3728 					 * window */
3729 					sws = src->wscale & PF_WSCALE_MASK;
3730 					win = ((u_int32_t)win + (1 << sws) - 1)
3731 					    >> sws;
3732 					dws = dst->wscale & PF_WSCALE_MASK;
3733 				} else {
3734 					/* fixup other window */
3735 					dst->max_win <<= dst->wscale &
3736 					    PF_WSCALE_MASK;
3737 					/* in case of a retrans SYN|ACK */
3738 					dst->wscale = 0;
3739 				}
3740 			}
3741 		}
3742 		if (th->th_flags & TH_FIN)
3743 			end++;
3744 
3745 		src->seqlo = seq;
3746 		if (src->state < TCPS_SYN_SENT)
3747 			src->state = TCPS_SYN_SENT;
3748 
3749 		/*
3750 		 * May need to slide the window (seqhi may have been set by
3751 		 * the crappy stack check or if we picked up the connection
3752 		 * after establishment)
3753 		 */
3754 		if (src->seqhi == 1 ||
3755 		    SEQ_GEQ(end + MAX(1, dst->max_win << dws), src->seqhi))
3756 			src->seqhi = end + MAX(1, dst->max_win << dws);
3757 		if (win > src->max_win)
3758 			src->max_win = win;
3759 
3760 	} else {
3761 		ack = ntohl(th->th_ack) - dst->seqdiff;
3762 		if (src->seqdiff) {
3763 			/* Modulate sequence numbers */
3764 			pf_change_a(&th->th_seq, &th->th_sum, htonl(seq +
3765 			    src->seqdiff), 0);
3766 			pf_change_a(&th->th_ack, &th->th_sum, htonl(ack), 0);
3767 			*copyback = 1;
3768 		}
3769 		end = seq + pd->p_len;
3770 		if (th->th_flags & TH_SYN)
3771 			end++;
3772 		if (th->th_flags & TH_FIN)
3773 			end++;
3774 	}
3775 
3776 	if ((th->th_flags & TH_ACK) == 0) {
3777 		/* Let it pass through the ack skew check */
3778 		ack = dst->seqlo;
3779 	} else if ((ack == 0 &&
3780 	    (th->th_flags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) ||
3781 	    /* broken tcp stacks do not set ack */
3782 	    (dst->state < TCPS_SYN_SENT)) {
3783 		/*
3784 		 * Many stacks (ours included) will set the ACK number in a
3785 		 * FIN|ACK if the SYN times out -- no sequence to ACK.
3786 		 */
3787 		ack = dst->seqlo;
3788 	}
3789 
3790 	if (seq == end) {
3791 		/* Ease sequencing restrictions on no data packets */
3792 		seq = src->seqlo;
3793 		end = seq;
3794 	}
3795 
3796 	ackskew = dst->seqlo - ack;
3797 
3798 
3799 	/*
3800 	 * Need to demodulate the sequence numbers in any TCP SACK options
3801 	 * (Selective ACK). We could optionally validate the SACK values
3802 	 * against the current ACK window, either forwards or backwards, but
3803 	 * I'm not confident that SACK has been implemented properly
3804 	 * everywhere. It wouldn't surprise me if several stacks accidently
3805 	 * everywhere. It wouldn't surprise me if several stacks accidentally
3806 	 * any security implications of bad SACKing unless the target stack
3807 	 * doesn't validate the option length correctly. Someone trying to
3808 	 * spoof into a TCP connection won't bother blindly sending SACK
3809 	 * options anyway.
3810 	 */
3811 	if (dst->seqdiff && (th->th_off << 2) > sizeof(struct tcphdr)) {
3812 		if (pf_modulate_sack(m, off, pd, th, dst))
3813 			*copyback = 1;
3814 	}
3815 
3816 
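	/*
	 * Derived bounds, for the reader: with ackskew = dst->seqlo - ack,
	 * the two ackskew tests below allow the ACK to run at most
	 * MAXACKWINDOW (0xffff + 1500 = 67035) octets ahead of dst->seqlo
	 * and at most MAXACKWINDOW << sws octets behind it.
	 */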
3817 #define	MAXACKWINDOW (0xffff + 1500)	/* 1500 is an arbitrary fudge factor */
3818 	if (SEQ_GEQ(src->seqhi, end) &&
3819 	    /* Last octet inside other's window space */
3820 	    SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) &&
3821 	    /* Retrans: not more than one window back */
3822 	    (ackskew >= -MAXACKWINDOW) &&
3823 	    /* Acking not more than one reassembled fragment backwards */
3824 	    (ackskew <= (MAXACKWINDOW << sws)) &&
3825 	    /* Acking not more than one window forward */
3826 	    ((th->th_flags & TH_RST) == 0 || orig_seq == src->seqlo ||
3827 	    (orig_seq == src->seqlo + 1) || (orig_seq + 1 == src->seqlo) ||
3828 	    (pd->flags & PFDESC_IP_REAS) == 0)) {
3829 	    /* Require an exact/+1 sequence match on resets when possible */
3830 
3831 		if (dst->scrub || src->scrub) {
3832 			if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
3833 			    *state, src, dst, copyback))
3834 				return (PF_DROP);
3835 		}
3836 
3837 		/* update max window */
3838 		if (src->max_win < win)
3839 			src->max_win = win;
3840 		/* synchronize sequencing */
3841 		if (SEQ_GT(end, src->seqlo))
3842 			src->seqlo = end;
3843 		/* slide the window of what the other end can send */
3844 		if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
3845 			dst->seqhi = ack + MAX((win << sws), 1);
3846 
3847 
3848 		/* update states */
3849 		if (th->th_flags & TH_SYN)
3850 			if (src->state < TCPS_SYN_SENT)
3851 				src->state = TCPS_SYN_SENT;
3852 		if (th->th_flags & TH_FIN)
3853 			if (src->state < TCPS_CLOSING)
3854 				src->state = TCPS_CLOSING;
3855 		if (th->th_flags & TH_ACK) {
3856 			if (dst->state == TCPS_SYN_SENT) {
3857 				dst->state = TCPS_ESTABLISHED;
3858 				if (src->state == TCPS_ESTABLISHED &&
3859 				    (*state)->src_node != NULL &&
3860 				    pf_src_connlimit(state)) {
3861 					REASON_SET(reason, PFRES_SRCLIMIT);
3862 					return (PF_DROP);
3863 				}
3864 			} else if (dst->state == TCPS_CLOSING)
3865 				dst->state = TCPS_FIN_WAIT_2;
3866 		}
3867 		if (th->th_flags & TH_RST)
3868 			src->state = dst->state = TCPS_TIME_WAIT;
3869 
3870 		/* update expire time */
3871 		(*state)->expire = time_uptime;
3872 		if (src->state >= TCPS_FIN_WAIT_2 &&
3873 		    dst->state >= TCPS_FIN_WAIT_2)
3874 			(*state)->timeout = PFTM_TCP_CLOSED;
3875 		else if (src->state >= TCPS_CLOSING &&
3876 		    dst->state >= TCPS_CLOSING)
3877 			(*state)->timeout = PFTM_TCP_FIN_WAIT;
3878 		else if (src->state < TCPS_ESTABLISHED ||
3879 		    dst->state < TCPS_ESTABLISHED)
3880 			(*state)->timeout = PFTM_TCP_OPENING;
3881 		else if (src->state >= TCPS_CLOSING ||
3882 		    dst->state >= TCPS_CLOSING)
3883 			(*state)->timeout = PFTM_TCP_CLOSING;
3884 		else
3885 			(*state)->timeout = PFTM_TCP_ESTABLISHED;
3886 
3887 		/* Fall through to PASS packet */
3888 
3889 	} else if ((dst->state < TCPS_SYN_SENT ||
3890 		dst->state >= TCPS_FIN_WAIT_2 ||
3891 		src->state >= TCPS_FIN_WAIT_2) &&
3892 	    SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) &&
3893 	    /* Within a window forward of the originating packet */
3894 	    SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW)) {
3895 	    /* Within a window backward of the originating packet */
3896 
3897 		/*
3898 		 * This currently handles three situations:
3899 		 *  1) Stupid stacks will shotgun SYNs before their peer
3900 		 *     replies.
3901 		 *  2) When PF catches an already established stream (the
3902 		 *     firewall rebooted, the state table was flushed, routes
3903 		 *     changed...)
3904 		 *  3) Packets get funky immediately after the connection
3905 		 *     closes (this should catch Solaris spurious ACK|FINs
3906 		 *     that web servers like to spew after a close)
3907 		 *
3908 		 * This must be a little more careful than the above code
3909 		 * since packet floods will also be caught here. We don't
3910 		 * update the TTL here to mitigate the damage of a packet
3911 		 * flood and so the same code can handle awkward establishment
3912 		 * and a loosened connection close.
3913 		 * In the establishment case, a correct peer response will
3914 		 * validate the connection, go through the normal state code
3915 		 * and keep updating the state TTL.
3916 		 */
3917 
3918 		if (V_pf_status.debug >= PF_DEBUG_MISC) {
3919 			printf("pf: loose state match: ");
3920 			pf_print_state(*state);
3921 			pf_print_flags(th->th_flags);
3922 			printf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
3923 			    "pkts=%llu:%llu dir=%s,%s\n", seq, orig_seq, ack,
3924 			    pd->p_len, ackskew, (unsigned long long)(*state)->packets[0],
3925 			    (unsigned long long)(*state)->packets[1],
3926 			    pd->dir == PF_IN ? "in" : "out",
3927 			    pd->dir == (*state)->direction ? "fwd" : "rev");
3928 		}
3929 
3930 		if (dst->scrub || src->scrub) {
3931 			if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
3932 			    *state, src, dst, copyback))
3933 				return (PF_DROP);
3934 		}
3935 
3936 		/* update max window */
3937 		if (src->max_win < win)
3938 			src->max_win = win;
3939 		/* synchronize sequencing */
3940 		if (SEQ_GT(end, src->seqlo))
3941 			src->seqlo = end;
3942 		/* slide the window of what the other end can send */
3943 		if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
3944 			dst->seqhi = ack + MAX((win << sws), 1);
3945 
3946 		/*
3947 		 * Cannot set dst->seqhi here since this could be a shotgunned
3948 		 * SYN and not an already established connection.
3949 		 */
3950 
3951 		if (th->th_flags & TH_FIN)
3952 			if (src->state < TCPS_CLOSING)
3953 				src->state = TCPS_CLOSING;
3954 		if (th->th_flags & TH_RST)
3955 			src->state = dst->state = TCPS_TIME_WAIT;
3956 
3957 		/* Fall through to PASS packet */
3958 
3959 	} else {
3960 		if ((*state)->dst.state == TCPS_SYN_SENT &&
3961 		    (*state)->src.state == TCPS_SYN_SENT) {
3962 			/* Send RST for state mismatches during handshake */
3963 			if (!(th->th_flags & TH_RST))
3964 				pf_send_tcp(NULL, (*state)->rule.ptr, pd->af,
3965 				    pd->dst, pd->src, th->th_dport,
3966 				    th->th_sport, ntohl(th->th_ack), 0,
3967 				    TH_RST, 0, 0,
3968 				    (*state)->rule.ptr->return_ttl, 1, 0,
3969 				    kif->pfik_ifp);
3970 			src->seqlo = 0;
3971 			src->seqhi = 1;
3972 			src->max_win = 1;
3973 		} else if (V_pf_status.debug >= PF_DEBUG_MISC) {
3974 			printf("pf: BAD state: ");
3975 			pf_print_state(*state);
3976 			pf_print_flags(th->th_flags);
3977 			printf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
3978 			    "pkts=%llu:%llu dir=%s,%s\n",
3979 			    seq, orig_seq, ack, pd->p_len, ackskew,
3980 			    (unsigned long long)(*state)->packets[0],
3981 			    (unsigned long long)(*state)->packets[1],
3982 			    pd->dir == PF_IN ? "in" : "out",
3983 			    pd->dir == (*state)->direction ? "fwd" : "rev");
3984 			printf("pf: State failure on: %c %c %c %c | %c %c\n",
3985 			    SEQ_GEQ(src->seqhi, end) ? ' ' : '1',
3986 			    SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) ?
3987 			    ' ': '2',
3988 			    (ackskew >= -MAXACKWINDOW) ? ' ' : '3',
3989 			    (ackskew <= (MAXACKWINDOW << sws)) ? ' ' : '4',
3990 			    SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) ?' ' :'5',
3991 			    SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW) ?' ' :'6');
3992 		}
3993 		REASON_SET(reason, PFRES_BADSTATE);
3994 		return (PF_DROP);
3995 	}
3996 
3997 	return (PF_PASS);
3998 }
3999 
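/*
 * Sloppy tracking: a flags-only state machine without the sequence
 * window checks of pf_tcp_track_full().  Selected per rule for
 * asymmetric setups where only one half of the connection is visible,
 * e.g. (illustrative pf.conf):
 *	pass in proto tcp to port 80 keep state (sloppy)
 */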
4000 static int
4001 pf_tcp_track_sloppy(struct pf_state_peer *src, struct pf_state_peer *dst,
4002 	struct pf_state **state, struct pf_pdesc *pd, u_short *reason)
4003 {
4004 	struct tcphdr		*th = pd->hdr.tcp;
4005 
4006 	if (th->th_flags & TH_SYN)
4007 		if (src->state < TCPS_SYN_SENT)
4008 			src->state = TCPS_SYN_SENT;
4009 	if (th->th_flags & TH_FIN)
4010 		if (src->state < TCPS_CLOSING)
4011 			src->state = TCPS_CLOSING;
4012 	if (th->th_flags & TH_ACK) {
4013 		if (dst->state == TCPS_SYN_SENT) {
4014 			dst->state = TCPS_ESTABLISHED;
4015 			if (src->state == TCPS_ESTABLISHED &&
4016 			    (*state)->src_node != NULL &&
4017 			    pf_src_connlimit(state)) {
4018 				REASON_SET(reason, PFRES_SRCLIMIT);
4019 				return (PF_DROP);
4020 			}
4021 		} else if (dst->state == TCPS_CLOSING) {
4022 			dst->state = TCPS_FIN_WAIT_2;
4023 		} else if (src->state == TCPS_SYN_SENT &&
4024 		    dst->state < TCPS_SYN_SENT) {
4025 			/*
4026 			 * Handle a special sloppy case where we only see one
4027 			 * half of the connection. If there is an ACK after
4028 			 * the initial SYN without ever seeing a packet from
4029 			 * the destination, set the connection to established.
4030 			 */
4031 			dst->state = src->state = TCPS_ESTABLISHED;
4032 			if ((*state)->src_node != NULL &&
4033 			    pf_src_connlimit(state)) {
4034 				REASON_SET(reason, PFRES_SRCLIMIT);
4035 				return (PF_DROP);
4036 			}
4037 		} else if (src->state == TCPS_CLOSING &&
4038 		    dst->state == TCPS_ESTABLISHED &&
4039 		    dst->seqlo == 0) {
4040 			/*
4041 			 * Handle the closing of half connections where we
4042 			 * don't see the full bidirectional FIN/ACK+ACK
4043 			 * handshake.
4044 			 */
4045 			dst->state = TCPS_CLOSING;
4046 		}
4047 	}
4048 	if (th->th_flags & TH_RST)
4049 		src->state = dst->state = TCPS_TIME_WAIT;
4050 
4051 	/* update expire time */
4052 	(*state)->expire = time_uptime;
4053 	if (src->state >= TCPS_FIN_WAIT_2 &&
4054 	    dst->state >= TCPS_FIN_WAIT_2)
4055 		(*state)->timeout = PFTM_TCP_CLOSED;
4056 	else if (src->state >= TCPS_CLOSING &&
4057 	    dst->state >= TCPS_CLOSING)
4058 		(*state)->timeout = PFTM_TCP_FIN_WAIT;
4059 	else if (src->state < TCPS_ESTABLISHED ||
4060 	    dst->state < TCPS_ESTABLISHED)
4061 		(*state)->timeout = PFTM_TCP_OPENING;
4062 	else if (src->state >= TCPS_CLOSING ||
4063 	    dst->state >= TCPS_CLOSING)
4064 		(*state)->timeout = PFTM_TCP_CLOSING;
4065 	else
4066 		(*state)->timeout = PFTM_TCP_ESTABLISHED;
4067 
4068 	return (PF_PASS);
4069 }
4070 
4071 static int
4072 pf_test_state_tcp(struct pf_state **state, int direction, struct pfi_kif *kif,
4073     struct mbuf *m, int off, void *h, struct pf_pdesc *pd,
4074     u_short *reason)
4075 {
4076 	struct pf_state_key_cmp	 key;
4077 	struct tcphdr		*th = pd->hdr.tcp;
4078 	int			 copyback = 0;
4079 	struct pf_state_peer	*src, *dst;
4080 	struct pf_state_key	*sk;
4081 
4082 	bzero(&key, sizeof(key));
4083 	key.af = pd->af;
4084 	key.proto = IPPROTO_TCP;
4085 	if (direction == PF_IN)	{	/* wire side, straight */
4086 		PF_ACPY(&key.addr[0], pd->src, key.af);
4087 		PF_ACPY(&key.addr[1], pd->dst, key.af);
4088 		key.port[0] = th->th_sport;
4089 		key.port[1] = th->th_dport;
4090 	} else {			/* stack side, reverse */
4091 		PF_ACPY(&key.addr[1], pd->src, key.af);
4092 		PF_ACPY(&key.addr[0], pd->dst, key.af);
4093 		key.port[1] = th->th_sport;
4094 		key.port[0] = th->th_dport;
4095 	}
4096 
4097 	STATE_LOOKUP(kif, &key, direction, *state, pd);
4098 
4099 	if (direction == (*state)->direction) {
4100 		src = &(*state)->src;
4101 		dst = &(*state)->dst;
4102 	} else {
4103 		src = &(*state)->dst;
4104 		dst = &(*state)->src;
4105 	}
4106 
4107 	sk = (*state)->key[pd->didx];
4108 
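	/*
	 * synproxy overview ("synproxy state" in pf.conf): pf first
	 * completes the three-way handshake with the client itself
	 * (PF_TCPS_PROXY_SRC), then performs its own handshake with the
	 * server (PF_TCPS_PROXY_DST), and finally splices the two halves
	 * together by recording the ISN offsets in src/dst seqdiff.
	 */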
4109 	if ((*state)->src.state == PF_TCPS_PROXY_SRC) {
4110 		if (direction != (*state)->direction) {
4111 			REASON_SET(reason, PFRES_SYNPROXY);
4112 			return (PF_SYNPROXY_DROP);
4113 		}
4114 		if (th->th_flags & TH_SYN) {
4115 			if (ntohl(th->th_seq) != (*state)->src.seqlo) {
4116 				REASON_SET(reason, PFRES_SYNPROXY);
4117 				return (PF_DROP);
4118 			}
4119 			pf_send_tcp(NULL, (*state)->rule.ptr, pd->af, pd->dst,
4120 			    pd->src, th->th_dport, th->th_sport,
4121 			    (*state)->src.seqhi, ntohl(th->th_seq) + 1,
4122 			    TH_SYN|TH_ACK, 0, (*state)->src.mss, 0, 1, 0, NULL);
4123 			REASON_SET(reason, PFRES_SYNPROXY);
4124 			return (PF_SYNPROXY_DROP);
4125 		} else if (!(th->th_flags & TH_ACK) ||
4126 		    (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
4127 		    (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) {
4128 			REASON_SET(reason, PFRES_SYNPROXY);
4129 			return (PF_DROP);
4130 		} else if ((*state)->src_node != NULL &&
4131 		    pf_src_connlimit(state)) {
4132 			REASON_SET(reason, PFRES_SRCLIMIT);
4133 			return (PF_DROP);
4134 		} else
4135 			(*state)->src.state = PF_TCPS_PROXY_DST;
4136 	}
4137 	if ((*state)->src.state == PF_TCPS_PROXY_DST) {
4138 		if (direction == (*state)->direction) {
4139 			if (((th->th_flags & (TH_SYN|TH_ACK)) != TH_ACK) ||
4140 			    (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
4141 			    (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) {
4142 				REASON_SET(reason, PFRES_SYNPROXY);
4143 				return (PF_DROP);
4144 			}
4145 			(*state)->src.max_win = MAX(ntohs(th->th_win), 1);
4146 			if ((*state)->dst.seqhi == 1)
4147 				(*state)->dst.seqhi = htonl(arc4random());
4148 			pf_send_tcp(NULL, (*state)->rule.ptr, pd->af,
4149 			    &sk->addr[pd->sidx], &sk->addr[pd->didx],
4150 			    sk->port[pd->sidx], sk->port[pd->didx],
4151 			    (*state)->dst.seqhi, 0, TH_SYN, 0,
4152 			    (*state)->src.mss, 0, 0, (*state)->tag, NULL);
4153 			REASON_SET(reason, PFRES_SYNPROXY);
4154 			return (PF_SYNPROXY_DROP);
4155 		} else if (((th->th_flags & (TH_SYN|TH_ACK)) !=
4156 		    (TH_SYN|TH_ACK)) ||
4157 		    (ntohl(th->th_ack) != (*state)->dst.seqhi + 1)) {
4158 			REASON_SET(reason, PFRES_SYNPROXY);
4159 			return (PF_DROP);
4160 		} else {
4161 			(*state)->dst.max_win = MAX(ntohs(th->th_win), 1);
4162 			(*state)->dst.seqlo = ntohl(th->th_seq);
4163 			pf_send_tcp(NULL, (*state)->rule.ptr, pd->af, pd->dst,
4164 			    pd->src, th->th_dport, th->th_sport,
4165 			    ntohl(th->th_ack), ntohl(th->th_seq) + 1,
4166 			    TH_ACK, (*state)->src.max_win, 0, 0, 0,
4167 			    (*state)->tag, NULL);
4168 			pf_send_tcp(NULL, (*state)->rule.ptr, pd->af,
4169 			    &sk->addr[pd->sidx], &sk->addr[pd->didx],
4170 			    sk->port[pd->sidx], sk->port[pd->didx],
4171 			    (*state)->src.seqhi + 1, (*state)->src.seqlo + 1,
4172 			    TH_ACK, (*state)->dst.max_win, 0, 0, 1, 0, NULL);
4173 			(*state)->src.seqdiff = (*state)->dst.seqhi -
4174 			    (*state)->src.seqlo;
4175 			(*state)->dst.seqdiff = (*state)->src.seqhi -
4176 			    (*state)->dst.seqlo;
4177 			(*state)->src.seqhi = (*state)->src.seqlo +
4178 			    (*state)->dst.max_win;
4179 			(*state)->dst.seqhi = (*state)->dst.seqlo +
4180 			    (*state)->src.max_win;
4181 			(*state)->src.wscale = (*state)->dst.wscale = 0;
4182 			(*state)->src.state = (*state)->dst.state =
4183 			    TCPS_ESTABLISHED;
4184 			REASON_SET(reason, PFRES_SYNPROXY);
4185 			return (PF_SYNPROXY_DROP);
4186 		}
4187 	}
4188 
4189 	if (((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN) &&
4190 	    dst->state >= TCPS_FIN_WAIT_2 &&
4191 	    src->state >= TCPS_FIN_WAIT_2) {
4192 		if (V_pf_status.debug >= PF_DEBUG_MISC) {
4193 			printf("pf: state reuse ");
4194 			pf_print_state(*state);
4195 			pf_print_flags(th->th_flags);
4196 			printf("\n");
4197 		}
4198 		/* XXX make sure it's the same direction ?? */
4199 		(*state)->src.state = (*state)->dst.state = TCPS_CLOSED;
4200 		pf_unlink_state(*state, PF_ENTER_LOCKED);
4201 		*state = NULL;
4202 		return (PF_DROP);
4203 	}
4204 
4205 	if ((*state)->state_flags & PFSTATE_SLOPPY) {
4206 		if (pf_tcp_track_sloppy(src, dst, state, pd, reason) == PF_DROP)
4207 			return (PF_DROP);
4208 	} else {
4209 		if (pf_tcp_track_full(src, dst, state, kif, m, off, pd, reason,
4210 		    &copyback) == PF_DROP)
4211 			return (PF_DROP);
4212 	}
4213 
4214 	/* translate source/destination address, if necessary */
4215 	if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
4216 		struct pf_state_key *nk = (*state)->key[pd->didx];
4217 
4218 		if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) ||
4219 		    nk->port[pd->sidx] != th->th_sport)
4220 			pf_change_ap(pd->src, &th->th_sport, pd->ip_sum,
4221 			    &th->th_sum, &nk->addr[pd->sidx],
4222 			    nk->port[pd->sidx], 0, pd->af);
4223 
4224 		if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) ||
4225 		    nk->port[pd->didx] != th->th_dport)
4226 			pf_change_ap(pd->dst, &th->th_dport, pd->ip_sum,
4227 			    &th->th_sum, &nk->addr[pd->didx],
4228 			    nk->port[pd->didx], 0, pd->af);
4229 		copyback = 1;
4230 	}
4231 
4232 	/* Copyback sequence modulation or stateful scrub changes if needed */
4233 	if (copyback)
4234 		m_copyback(m, off, sizeof(*th), (caddr_t)th);
4235 
4236 	return (PF_PASS);
4237 }
4238 
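/*
 * UDP has no handshake, so the "state machine" is synthetic: a peer
 * becomes PFUDPS_SINGLE once it has sent a packet and the pair is
 * promoted to PFUDPS_MULTIPLE when both directions have been seen;
 * this only selects between the single and multiple idle timeouts.
 */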
4239 static int
4240 pf_test_state_udp(struct pf_state **state, int direction, struct pfi_kif *kif,
4241     struct mbuf *m, int off, void *h, struct pf_pdesc *pd)
4242 {
4243 	struct pf_state_peer	*src, *dst;
4244 	struct pf_state_key_cmp	 key;
4245 	struct udphdr		*uh = pd->hdr.udp;
4246 
4247 	bzero(&key, sizeof(key));
4248 	key.af = pd->af;
4249 	key.proto = IPPROTO_UDP;
4250 	if (direction == PF_IN)	{	/* wire side, straight */
4251 		PF_ACPY(&key.addr[0], pd->src, key.af);
4252 		PF_ACPY(&key.addr[1], pd->dst, key.af);
4253 		key.port[0] = uh->uh_sport;
4254 		key.port[1] = uh->uh_dport;
4255 	} else {			/* stack side, reverse */
4256 		PF_ACPY(&key.addr[1], pd->src, key.af);
4257 		PF_ACPY(&key.addr[0], pd->dst, key.af);
4258 		key.port[1] = uh->uh_sport;
4259 		key.port[0] = uh->uh_dport;
4260 	}
4261 
4262 	STATE_LOOKUP(kif, &key, direction, *state, pd);
4263 
4264 	if (direction == (*state)->direction) {
4265 		src = &(*state)->src;
4266 		dst = &(*state)->dst;
4267 	} else {
4268 		src = &(*state)->dst;
4269 		dst = &(*state)->src;
4270 	}
4271 
4272 	/* update states */
4273 	if (src->state < PFUDPS_SINGLE)
4274 		src->state = PFUDPS_SINGLE;
4275 	if (dst->state == PFUDPS_SINGLE)
4276 		dst->state = PFUDPS_MULTIPLE;
4277 
4278 	/* update expire time */
4279 	(*state)->expire = time_uptime;
4280 	if (src->state == PFUDPS_MULTIPLE && dst->state == PFUDPS_MULTIPLE)
4281 		(*state)->timeout = PFTM_UDP_MULTIPLE;
4282 	else
4283 		(*state)->timeout = PFTM_UDP_SINGLE;
4284 
4285 	/* translate source/destination address, if necessary */
4286 	if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
4287 		struct pf_state_key *nk = (*state)->key[pd->didx];
4288 
4289 		if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) ||
4290 		    nk->port[pd->sidx] != uh->uh_sport)
4291 			pf_change_ap(pd->src, &uh->uh_sport, pd->ip_sum,
4292 			    &uh->uh_sum, &nk->addr[pd->sidx],
4293 			    nk->port[pd->sidx], 1, pd->af);
4294 
4295 		if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) ||
4296 		    nk->port[pd->didx] != uh->uh_dport)
4297 			pf_change_ap(pd->dst, &uh->uh_dport, pd->ip_sum,
4298 			    &uh->uh_sum, &nk->addr[pd->didx],
4299 			    nk->port[pd->didx], 1, pd->af);
4300 		m_copyback(m, off, sizeof(*uh), (caddr_t)uh);
4301 	}
4302 
4303 	return (PF_PASS);
4304 }
4305 
4306 static int
4307 pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif,
4308     struct mbuf *m, int off, void *h, struct pf_pdesc *pd, u_short *reason)
4309 {
4310 	struct pf_addr  *saddr = pd->src, *daddr = pd->dst;
4311 	u_int16_t	 icmpid = 0, *icmpsum;
4312 	u_int8_t	 icmptype;
4313 	int		 state_icmp = 0;
4314 	struct pf_state_key_cmp key;
4315 
4316 	bzero(&key, sizeof(key));
4317 	switch (pd->proto) {
4318 #ifdef INET
4319 	case IPPROTO_ICMP:
4320 		icmptype = pd->hdr.icmp->icmp_type;
4321 		icmpid = pd->hdr.icmp->icmp_id;
4322 		icmpsum = &pd->hdr.icmp->icmp_cksum;
4323 
4324 		if (icmptype == ICMP_UNREACH ||
4325 		    icmptype == ICMP_SOURCEQUENCH ||
4326 		    icmptype == ICMP_REDIRECT ||
4327 		    icmptype == ICMP_TIMXCEED ||
4328 		    icmptype == ICMP_PARAMPROB)
4329 			state_icmp++;
4330 		break;
4331 #endif /* INET */
4332 #ifdef INET6
4333 	case IPPROTO_ICMPV6:
4334 		icmptype = pd->hdr.icmp6->icmp6_type;
4335 		icmpid = pd->hdr.icmp6->icmp6_id;
4336 		icmpsum = &pd->hdr.icmp6->icmp6_cksum;
4337 
4338 		if (icmptype == ICMP6_DST_UNREACH ||
4339 		    icmptype == ICMP6_PACKET_TOO_BIG ||
4340 		    icmptype == ICMP6_TIME_EXCEEDED ||
4341 		    icmptype == ICMP6_PARAM_PROB)
4342 			state_icmp++;
4343 		break;
4344 #endif /* INET6 */
4345 	}
4346 
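	/*
	 * state_icmp is set for error messages only.  Queries and replies
	 * (echo, timestamp, ...) are matched below against an ICMP state
	 * keyed on the ICMP id; errors are instead matched against the
	 * state of the TCP/UDP/ICMP packet quoted in their payload.
	 */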
4347 	if (!state_icmp) {
4348 
4349 		/*
4350 		 * ICMP query/reply message not related to a TCP/UDP packet.
4351 		 * Search for an ICMP state.
4352 		 */
4353 		key.af = pd->af;
4354 		key.proto = pd->proto;
4355 		key.port[0] = key.port[1] = icmpid;
4356 		if (direction == PF_IN)	{	/* wire side, straight */
4357 			PF_ACPY(&key.addr[0], pd->src, key.af);
4358 			PF_ACPY(&key.addr[1], pd->dst, key.af);
4359 		} else {			/* stack side, reverse */
4360 			PF_ACPY(&key.addr[1], pd->src, key.af);
4361 			PF_ACPY(&key.addr[0], pd->dst, key.af);
4362 		}
4363 
4364 		STATE_LOOKUP(kif, &key, direction, *state, pd);
4365 
4366 		(*state)->expire = time_uptime;
4367 		(*state)->timeout = PFTM_ICMP_ERROR_REPLY;
4368 
4369 		/* translate source/destination address, if necessary */
4370 		if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
4371 			struct pf_state_key *nk = (*state)->key[pd->didx];
4372 
4373 			switch (pd->af) {
4374 #ifdef INET
4375 			case AF_INET:
4376 				if (PF_ANEQ(pd->src,
4377 				    &nk->addr[pd->sidx], AF_INET))
4378 					pf_change_a(&saddr->v4.s_addr,
4379 					    pd->ip_sum,
4380 					    nk->addr[pd->sidx].v4.s_addr, 0);
4381 
4382 				if (PF_ANEQ(pd->dst, &nk->addr[pd->didx],
4383 				    AF_INET))
4384 					pf_change_a(&daddr->v4.s_addr,
4385 					    pd->ip_sum,
4386 					    nk->addr[pd->didx].v4.s_addr, 0);
4387 
4388 				if (nk->port[0] !=
4389 				    pd->hdr.icmp->icmp_id) {
4390 					pd->hdr.icmp->icmp_cksum =
4391 					    pf_cksum_fixup(
4392 					    pd->hdr.icmp->icmp_cksum, icmpid,
4393 					    nk->port[pd->sidx], 0);
4394 					pd->hdr.icmp->icmp_id =
4395 					    nk->port[pd->sidx];
4396 				}
4397 
4398 				m_copyback(m, off, ICMP_MINLEN,
4399 				    (caddr_t )pd->hdr.icmp);
4400 				break;
4401 #endif /* INET */
4402 #ifdef INET6
4403 			case AF_INET6:
4404 				if (PF_ANEQ(pd->src,
4405 				    &nk->addr[pd->sidx], AF_INET6))
4406 					pf_change_a6(saddr,
4407 					    &pd->hdr.icmp6->icmp6_cksum,
4408 					    &nk->addr[pd->sidx], 0);
4409 
4410 				if (PF_ANEQ(pd->dst,
4411 				    &nk->addr[pd->didx], AF_INET6))
4412 					pf_change_a6(daddr,
4413 					    &pd->hdr.icmp6->icmp6_cksum,
4414 					    &nk->addr[pd->didx], 0);
4415 
4416 				m_copyback(m, off, sizeof(struct icmp6_hdr),
4417 				    (caddr_t )pd->hdr.icmp6);
4418 				break;
4419 #endif /* INET6 */
4420 			}
4421 		}
4422 		return (PF_PASS);
4423 
4424 	} else {
4425 		/*
4426 		 * ICMP error message in response to a TCP/UDP packet.
4427 		 * Extract the inner TCP/UDP header and search for that state.
4428 		 */
4429 
4430 		struct pf_pdesc	pd2;
4431 		bzero(&pd2, sizeof pd2);
4432 #ifdef INET
4433 		struct ip	h2;
4434 #endif /* INET */
4435 #ifdef INET6
4436 		struct ip6_hdr	h2_6;
4437 		int		terminal = 0;
4438 #endif /* INET6 */
4439 		int		ipoff2 = 0;
4440 		int		off2 = 0;
4441 
4442 		pd2.af = pd->af;
4443 		/* Payload packet is from the opposite direction. */
4444 		pd2.sidx = (direction == PF_IN) ? 1 : 0;
4445 		pd2.didx = (direction == PF_IN) ? 0 : 1;
4446 		switch (pd->af) {
4447 #ifdef INET
4448 		case AF_INET:
4449 			/* offset of h2 in mbuf chain */
4450 			ipoff2 = off + ICMP_MINLEN;
4451 
4452 			if (!pf_pull_hdr(m, ipoff2, &h2, sizeof(h2),
4453 			    NULL, reason, pd2.af)) {
4454 				DPFPRINTF(PF_DEBUG_MISC,
4455 				    ("pf: ICMP error message too short "
4456 				    "(ip)\n"));
4457 				return (PF_DROP);
4458 			}
4459 			/*
4460 			 * ICMP error messages don't refer to non-first
4461 			 * fragments
4462 			 */
4463 			if (h2.ip_off & htons(IP_OFFMASK)) {
4464 				REASON_SET(reason, PFRES_FRAG);
4465 				return (PF_DROP);
4466 			}
4467 
4468 			/* offset of protocol header that follows h2 */
4469 			off2 = ipoff2 + (h2.ip_hl << 2);
4470 
4471 			pd2.proto = h2.ip_p;
4472 			pd2.src = (struct pf_addr *)&h2.ip_src;
4473 			pd2.dst = (struct pf_addr *)&h2.ip_dst;
4474 			pd2.ip_sum = &h2.ip_sum;
4475 			break;
4476 #endif /* INET */
4477 #ifdef INET6
4478 		case AF_INET6:
4479 			ipoff2 = off + sizeof(struct icmp6_hdr);
4480 
4481 			if (!pf_pull_hdr(m, ipoff2, &h2_6, sizeof(h2_6),
4482 			    NULL, reason, pd2.af)) {
4483 				DPFPRINTF(PF_DEBUG_MISC,
4484 				    ("pf: ICMP error message too short "
4485 				    "(ip6)\n"));
4486 				return (PF_DROP);
4487 			}
4488 			pd2.proto = h2_6.ip6_nxt;
4489 			pd2.src = (struct pf_addr *)&h2_6.ip6_src;
4490 			pd2.dst = (struct pf_addr *)&h2_6.ip6_dst;
4491 			pd2.ip_sum = NULL;
4492 			off2 = ipoff2 + sizeof(h2_6);
4493 			do {
4494 				switch (pd2.proto) {
4495 				case IPPROTO_FRAGMENT:
4496 					/*
4497 					 * ICMPv6 error messages for
4498 					 * non-first fragments
4499 					 */
4500 					REASON_SET(reason, PFRES_FRAG);
4501 					return (PF_DROP);
4502 				case IPPROTO_AH:
4503 				case IPPROTO_HOPOPTS:
4504 				case IPPROTO_ROUTING:
4505 				case IPPROTO_DSTOPTS: {
4506 					/* get next header and header length */
4507 					struct ip6_ext opt6;
4508 
4509 					if (!pf_pull_hdr(m, off2, &opt6,
4510 					    sizeof(opt6), NULL, reason,
4511 					    pd2.af)) {
4512 						DPFPRINTF(PF_DEBUG_MISC,
4513 						    ("pf: ICMPv6 short opt\n"));
4514 						return (PF_DROP);
4515 					}
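					/*
					 * ip6e_len counts 32-bit units
					 * (minus 2) for AH, but 8-octet
					 * units (minus 1) for the other
					 * extension headers.
					 */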
4516 					if (pd2.proto == IPPROTO_AH)
4517 						off2 += (opt6.ip6e_len + 2) * 4;
4518 					else
4519 						off2 += (opt6.ip6e_len + 1) * 8;
4520 					pd2.proto = opt6.ip6e_nxt;
4521 					/* advance to the next header */
4522 					break;
4523 				}
4524 				default:
4525 					terminal++;
4526 					break;
4527 				}
4528 			} while (!terminal);
4529 			break;
4530 #endif /* INET6 */
4531 		}
4532 
4533 		switch (pd2.proto) {
4534 		case IPPROTO_TCP: {
4535 			struct tcphdr		 th;
4536 			u_int32_t		 seq;
4537 			struct pf_state_peer	*src, *dst;
4538 			u_int8_t		 dws;
4539 			int			 copyback = 0;
4540 
4541 			/*
4542 			 * Only the first 8 bytes of the TCP header are
4543 			 * guaranteed to be present (RFC 792).  Don't access
4544 			 * TCP header fields after th_seq; no ackskew test is possible.
4545 			 */
4546 			if (!pf_pull_hdr(m, off2, &th, 8, NULL, reason,
4547 			    pd2.af)) {
4548 				DPFPRINTF(PF_DEBUG_MISC,
4549 				    ("pf: ICMP error message too short "
4550 				    "(tcp)\n"));
4551 				return (PF_DROP);
4552 			}
4553 
4554 			key.af = pd2.af;
4555 			key.proto = IPPROTO_TCP;
4556 			PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
4557 			PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
4558 			key.port[pd2.sidx] = th.th_sport;
4559 			key.port[pd2.didx] = th.th_dport;
4560 
4561 			STATE_LOOKUP(kif, &key, direction, *state, pd);
4562 
4563 			if (direction == (*state)->direction) {
4564 				src = &(*state)->dst;
4565 				dst = &(*state)->src;
4566 			} else {
4567 				src = &(*state)->src;
4568 				dst = &(*state)->dst;
4569 			}
4570 
4571 			if (src->wscale && dst->wscale)
4572 				dws = dst->wscale & PF_WSCALE_MASK;
4573 			else
4574 				dws = 0;
4575 
4576 			/* Demodulate sequence number */
4577 			seq = ntohl(th.th_seq) - src->seqdiff;
4578 			if (src->seqdiff) {
4579 				pf_change_a(&th.th_seq, icmpsum,
4580 				    htonl(seq), 0);
4581 				copyback = 1;
4582 			}
4583 
4584 			if (!((*state)->state_flags & PFSTATE_SLOPPY) &&
4585 			    (!SEQ_GEQ(src->seqhi, seq) ||
4586 			    !SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)))) {
4587 				if (V_pf_status.debug >= PF_DEBUG_MISC) {
4588 					printf("pf: BAD ICMP %d:%d ",
4589 					    icmptype, pd->hdr.icmp->icmp_code);
4590 					pf_print_host(pd->src, 0, pd->af);
4591 					printf(" -> ");
4592 					pf_print_host(pd->dst, 0, pd->af);
4593 					printf(" state: ");
4594 					pf_print_state(*state);
4595 					printf(" seq=%u\n", seq);
4596 				}
4597 				REASON_SET(reason, PFRES_BADSTATE);
4598 				return (PF_DROP);
4599 			} else {
4600 				if (V_pf_status.debug >= PF_DEBUG_MISC) {
4601 					printf("pf: OK ICMP %d:%d ",
4602 					    icmptype, pd->hdr.icmp->icmp_code);
4603 					pf_print_host(pd->src, 0, pd->af);
4604 					printf(" -> ");
4605 					pf_print_host(pd->dst, 0, pd->af);
4606 					printf(" state: ");
4607 					pf_print_state(*state);
4608 					printf(" seq=%u\n", seq);
4609 				}
4610 			}
4611 
4612 			/* translate source/destination address, if necessary */
4613 			if ((*state)->key[PF_SK_WIRE] !=
4614 			    (*state)->key[PF_SK_STACK]) {
4615 				struct pf_state_key *nk =
4616 				    (*state)->key[pd->didx];
4617 
4618 				if (PF_ANEQ(pd2.src,
4619 				    &nk->addr[pd2.sidx], pd2.af) ||
4620 				    nk->port[pd2.sidx] != th.th_sport)
4621 					pf_change_icmp(pd2.src, &th.th_sport,
4622 					    daddr, &nk->addr[pd2.sidx],
4623 					    nk->port[pd2.sidx], NULL,
4624 					    pd2.ip_sum, icmpsum,
4625 					    pd->ip_sum, 0, pd2.af);
4626 
4627 				if (PF_ANEQ(pd2.dst,
4628 				    &nk->addr[pd2.didx], pd2.af) ||
4629 				    nk->port[pd2.didx] != th.th_dport)
4630 					pf_change_icmp(pd2.dst, &th.th_dport,
4631 					    NULL, /* XXX Inbound NAT? */
4632 					    &nk->addr[pd2.didx],
4633 					    nk->port[pd2.didx], NULL,
4634 					    pd2.ip_sum, icmpsum,
4635 					    pd->ip_sum, 0, pd2.af);
4636 				copyback = 1;
4637 			}
4638 
4639 			if (copyback) {
4640 				switch (pd2.af) {
4641 #ifdef INET
4642 				case AF_INET:
4643 					m_copyback(m, off, ICMP_MINLEN,
4644 					    (caddr_t )pd->hdr.icmp);
4645 					m_copyback(m, ipoff2, sizeof(h2),
4646 					    (caddr_t )&h2);
4647 					break;
4648 #endif /* INET */
4649 #ifdef INET6
4650 				case AF_INET6:
4651 					m_copyback(m, off,
4652 					    sizeof(struct icmp6_hdr),
4653 					    (caddr_t )pd->hdr.icmp6);
4654 					m_copyback(m, ipoff2, sizeof(h2_6),
4655 					    (caddr_t )&h2_6);
4656 					break;
4657 #endif /* INET6 */
4658 				}
4659 				m_copyback(m, off2, 8, (caddr_t)&th);
4660 			}
4661 
4662 			return (PF_PASS);
4663 			break;
4664 		}
4665 		case IPPROTO_UDP: {
4666 			struct udphdr		uh;
4667 
4668 			if (!pf_pull_hdr(m, off2, &uh, sizeof(uh),
4669 			    NULL, reason, pd2.af)) {
4670 				DPFPRINTF(PF_DEBUG_MISC,
4671 				    ("pf: ICMP error message too short "
4672 				    "(udp)\n"));
4673 				return (PF_DROP);
4674 			}
4675 
4676 			key.af = pd2.af;
4677 			key.proto = IPPROTO_UDP;
4678 			PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
4679 			PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
4680 			key.port[pd2.sidx] = uh.uh_sport;
4681 			key.port[pd2.didx] = uh.uh_dport;
4682 
4683 			STATE_LOOKUP(kif, &key, direction, *state, pd);
4684 
4685 			/* translate source/destination address, if necessary */
4686 			if ((*state)->key[PF_SK_WIRE] !=
4687 			    (*state)->key[PF_SK_STACK]) {
4688 				struct pf_state_key *nk =
4689 				    (*state)->key[pd->didx];
4690 
4691 				if (PF_ANEQ(pd2.src,
4692 				    &nk->addr[pd2.sidx], pd2.af) ||
4693 				    nk->port[pd2.sidx] != uh.uh_sport)
4694 					pf_change_icmp(pd2.src, &uh.uh_sport,
4695 					    daddr, &nk->addr[pd2.sidx],
4696 					    nk->port[pd2.sidx], &uh.uh_sum,
4697 					    pd2.ip_sum, icmpsum,
4698 					    pd->ip_sum, 1, pd2.af);
4699 
4700 				if (PF_ANEQ(pd2.dst,
4701 				    &nk->addr[pd2.didx], pd2.af) ||
4702 				    nk->port[pd2.didx] != uh.uh_dport)
4703 					pf_change_icmp(pd2.dst, &uh.uh_dport,
4704 					    NULL, /* XXX Inbound NAT? */
4705 					    &nk->addr[pd2.didx],
4706 					    nk->port[pd2.didx], &uh.uh_sum,
4707 					    pd2.ip_sum, icmpsum,
4708 					    pd->ip_sum, 1, pd2.af);
4709 
4710 				switch (pd2.af) {
4711 #ifdef INET
4712 				case AF_INET:
4713 					m_copyback(m, off, ICMP_MINLEN,
4714 					    (caddr_t )pd->hdr.icmp);
4715 					m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
4716 					break;
4717 #endif /* INET */
4718 #ifdef INET6
4719 				case AF_INET6:
4720 					m_copyback(m, off,
4721 					    sizeof(struct icmp6_hdr),
4722 					    (caddr_t )pd->hdr.icmp6);
4723 					m_copyback(m, ipoff2, sizeof(h2_6),
4724 					    (caddr_t )&h2_6);
4725 					break;
4726 #endif /* INET6 */
4727 				}
4728 				m_copyback(m, off2, sizeof(uh), (caddr_t)&uh);
4729 			}
4730 			return (PF_PASS);
4731 			break;
4732 		}
4733 #ifdef INET
4734 		case IPPROTO_ICMP: {
4735 			struct icmp		iih;
4736 
4737 			if (!pf_pull_hdr(m, off2, &iih, ICMP_MINLEN,
4738 			    NULL, reason, pd2.af)) {
4739 				DPFPRINTF(PF_DEBUG_MISC,
4740 				    ("pf: ICMP error message too short "
4741 				    "(icmp)\n"));
4742 				return (PF_DROP);
4743 			}
4744 
4745 			key.af = pd2.af;
4746 			key.proto = IPPROTO_ICMP;
4747 			PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
4748 			PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
4749 			key.port[0] = key.port[1] = iih.icmp_id;
4750 
4751 			STATE_LOOKUP(kif, &key, direction, *state, pd);
4752 
4753 			/* translate source/destination address, if necessary */
4754 			if ((*state)->key[PF_SK_WIRE] !=
4755 			    (*state)->key[PF_SK_STACK]) {
4756 				struct pf_state_key *nk =
4757 				    (*state)->key[pd->didx];
4758 
4759 				if (PF_ANEQ(pd2.src,
4760 				    &nk->addr[pd2.sidx], pd2.af) ||
4761 				    nk->port[pd2.sidx] != iih.icmp_id)
4762 					pf_change_icmp(pd2.src, &iih.icmp_id,
4763 					    daddr, &nk->addr[pd2.sidx],
4764 					    nk->port[pd2.sidx], NULL,
4765 					    pd2.ip_sum, icmpsum,
4766 					    pd->ip_sum, 0, AF_INET);
4767 
4768 				if (PF_ANEQ(pd2.dst,
4769 				    &nk->addr[pd2.didx], pd2.af) ||
4770 				    nk->port[pd2.didx] != iih.icmp_id)
4771 					pf_change_icmp(pd2.dst, &iih.icmp_id,
4772 					    NULL, /* XXX Inbound NAT? */
4773 					    &nk->addr[pd2.didx],
4774 					    nk->port[pd2.didx], NULL,
4775 					    pd2.ip_sum, icmpsum,
4776 					    pd->ip_sum, 0, AF_INET);
4777 
4778 				m_copyback(m, off, ICMP_MINLEN, (caddr_t)pd->hdr.icmp);
4779 				m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
4780 				m_copyback(m, off2, ICMP_MINLEN, (caddr_t)&iih);
4781 			}
4782 			return (PF_PASS);
4783 			break;
4784 		}
4785 #endif /* INET */
4786 #ifdef INET6
4787 		case IPPROTO_ICMPV6: {
4788 			struct icmp6_hdr	iih;
4789 
4790 			if (!pf_pull_hdr(m, off2, &iih,
4791 			    sizeof(struct icmp6_hdr), NULL, reason, pd2.af)) {
4792 				DPFPRINTF(PF_DEBUG_MISC,
4793 				    ("pf: ICMP error message too short "
4794 				    "(icmp6)\n"));
4795 				return (PF_DROP);
4796 			}
4797 
4798 			key.af = pd2.af;
4799 			key.proto = IPPROTO_ICMPV6;
4800 			PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
4801 			PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
4802 			key.port[0] = key.port[1] = iih.icmp6_id;
4803 
4804 			STATE_LOOKUP(kif, &key, direction, *state, pd);
4805 
4806 			/* translate source/destination address, if necessary */
4807 			if ((*state)->key[PF_SK_WIRE] !=
4808 			    (*state)->key[PF_SK_STACK]) {
4809 				struct pf_state_key *nk =
4810 				    (*state)->key[pd->didx];
4811 
4812 				if (PF_ANEQ(pd2.src,
4813 				    &nk->addr[pd2.sidx], pd2.af) ||
4814 				    nk->port[pd2.sidx] != iih.icmp6_id)
4815 					pf_change_icmp(pd2.src, &iih.icmp6_id,
4816 					    daddr, &nk->addr[pd2.sidx],
4817 					    nk->port[pd2.sidx], NULL,
4818 					    pd2.ip_sum, icmpsum,
4819 					    pd->ip_sum, 0, AF_INET6);
4820 
4821 				if (PF_ANEQ(pd2.dst,
4822 				    &nk->addr[pd2.didx], pd2.af) ||
4823 				    nk->port[pd2.didx] != iih.icmp6_id)
4824 					pf_change_icmp(pd2.dst, &iih.icmp6_id,
4825 					    NULL, /* XXX Inbound NAT? */
4826 					    &nk->addr[pd2.didx],
4827 					    nk->port[pd2.didx], NULL,
4828 					    pd2.ip_sum, icmpsum,
4829 					    pd->ip_sum, 0, AF_INET6);
4830 
4831 				m_copyback(m, off, sizeof(struct icmp6_hdr),
4832 				    (caddr_t)pd->hdr.icmp6);
4833 				m_copyback(m, ipoff2, sizeof(h2_6), (caddr_t)&h2_6);
4834 				m_copyback(m, off2, sizeof(struct icmp6_hdr),
4835 				    (caddr_t)&iih);
4836 			}
4837 			return (PF_PASS);
4838 			break;
4839 		}
4840 #endif /* INET6 */
4841 		default: {
4842 			key.af = pd2.af;
4843 			key.proto = pd2.proto;
4844 			PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
4845 			PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
4846 			key.port[0] = key.port[1] = 0;
4847 
4848 			STATE_LOOKUP(kif, &key, direction, *state, pd);
4849 
4850 			/* translate source/destination address, if necessary */
4851 			if ((*state)->key[PF_SK_WIRE] !=
4852 			    (*state)->key[PF_SK_STACK]) {
4853 				struct pf_state_key *nk =
4854 				    (*state)->key[pd->didx];
4855 
4856 				if (PF_ANEQ(pd2.src,
4857 				    &nk->addr[pd2.sidx], pd2.af))
4858 					pf_change_icmp(pd2.src, NULL, daddr,
4859 					    &nk->addr[pd2.sidx], 0, NULL,
4860 					    pd2.ip_sum, icmpsum,
4861 					    pd->ip_sum, 0, pd2.af);
4862 
4863 				if (PF_ANEQ(pd2.dst,
4864 				    &nk->addr[pd2.didx], pd2.af))
4865 					pf_change_icmp(pd2.dst, NULL,
4866 					    NULL, /* XXX Inbound NAT? */
4867 					    &nk->addr[pd2.didx], 0, NULL,
4868 					    pd2.ip_sum, icmpsum,
4869 					    pd->ip_sum, 0, pd2.af);
4870 
4871 				switch (pd2.af) {
4872 #ifdef INET
4873 				case AF_INET:
4874 					m_copyback(m, off, ICMP_MINLEN,
4875 					    (caddr_t)pd->hdr.icmp);
4876 					m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
4877 					break;
4878 #endif /* INET */
4879 #ifdef INET6
4880 				case AF_INET6:
4881 					m_copyback(m, off,
4882 					    sizeof(struct icmp6_hdr),
4883 					    (caddr_t )pd->hdr.icmp6);
4884 					m_copyback(m, ipoff2, sizeof(h2_6),
4885 					    (caddr_t )&h2_6);
4886 					break;
4887 #endif /* INET6 */
4888 				}
4889 			}
4890 			return (PF_PASS);
4891 			break;
4892 		}
4893 		}
4894 	}
4895 }
4896 
4897 static int
4898 pf_test_state_other(struct pf_state **state, int direction, struct pfi_kif *kif,
4899     struct mbuf *m, struct pf_pdesc *pd)
4900 {
4901 	struct pf_state_peer	*src, *dst;
4902 	struct pf_state_key_cmp	 key;
4903 
4904 	bzero(&key, sizeof(key));
4905 	key.af = pd->af;
4906 	key.proto = pd->proto;
4907 	if (direction == PF_IN)	{
4908 		PF_ACPY(&key.addr[0], pd->src, key.af);
4909 		PF_ACPY(&key.addr[1], pd->dst, key.af);
4910 		key.port[0] = key.port[1] = 0;
4911 	} else {
4912 		PF_ACPY(&key.addr[1], pd->src, key.af);
4913 		PF_ACPY(&key.addr[0], pd->dst, key.af);
4914 		key.port[1] = key.port[0] = 0;
4915 	}
4916 
4917 	STATE_LOOKUP(kif, &key, direction, *state, pd);
4918 
4919 	if (direction == (*state)->direction) {
4920 		src = &(*state)->src;
4921 		dst = &(*state)->dst;
4922 	} else {
4923 		src = &(*state)->dst;
4924 		dst = &(*state)->src;
4925 	}
4926 
4927 	/* update states */
4928 	if (src->state < PFOTHERS_SINGLE)
4929 		src->state = PFOTHERS_SINGLE;
4930 	if (dst->state == PFOTHERS_SINGLE)
4931 		dst->state = PFOTHERS_MULTIPLE;
4932 
4933 	/* update expire time */
4934 	(*state)->expire = time_uptime;
4935 	if (src->state == PFOTHERS_MULTIPLE && dst->state == PFOTHERS_MULTIPLE)
4936 		(*state)->timeout = PFTM_OTHER_MULTIPLE;
4937 	else
4938 		(*state)->timeout = PFTM_OTHER_SINGLE;
4939 
4940 	/* translate source/destination address, if necessary */
4941 	if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
4942 		struct pf_state_key *nk = (*state)->key[pd->didx];
4943 
4944 		KASSERT(nk, ("%s: nk is null", __func__));
4945 		KASSERT(pd, ("%s: pd is null", __func__));
4946 		KASSERT(pd->src, ("%s: pd->src is null", __func__));
4947 		KASSERT(pd->dst, ("%s: pd->dst is null", __func__));
4948 		switch (pd->af) {
4949 #ifdef INET
4950 		case AF_INET:
4951 			if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET))
4952 				pf_change_a(&pd->src->v4.s_addr,
4953 				    pd->ip_sum,
4954 				    nk->addr[pd->sidx].v4.s_addr,
4955 				    0);
4956 
4957 
4958 			if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET))
4959 				pf_change_a(&pd->dst->v4.s_addr,
4960 				    pd->ip_sum,
4961 				    nk->addr[pd->didx].v4.s_addr,
4962 				    0);
4963 
4964 			break;
4965 #endif /* INET */
4966 #ifdef INET6
4967 		case AF_INET6:
4968 			if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET6))
4969 				PF_ACPY(pd->src, &nk->addr[pd->sidx], pd->af);
4970 
4971 			if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET6))
4972 				PF_ACPY(pd->dst, &nk->addr[pd->didx], pd->af);
4973 #endif /* INET6 */
4974 		}
4975 	}
4976 	return (PF_PASS);
4977 }
4978 
4979 /*
4980  * ipoff and off are measured from the start of the mbuf chain.
4981  * h must be at "ipoff" on the mbuf chain.
4982  */
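/*
 * Hypothetical usage sketch (not a caller from this file):
 *
 *	struct tcphdr th;
 *	u_short action, reason;
 *
 *	if (pf_pull_hdr(m, off, &th, sizeof(th), &action, &reason,
 *	    AF_INET) == NULL)
 *		return (action);
 *
 * On failure, the action/reason outputs (when non-NULL) tell the caller
 * whether to pass a trailing fragment or drop a short packet.
 */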
4983 void *
4984 pf_pull_hdr(struct mbuf *m, int off, void *p, int len,
4985     u_short *actionp, u_short *reasonp, sa_family_t af)
4986 {
4987 	switch (af) {
4988 #ifdef INET
4989 	case AF_INET: {
4990 		struct ip	*h = mtod(m, struct ip *);
4991 		u_int16_t	 fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
4992 
4993 		if (fragoff) {
4994 			if (fragoff >= len)
4995 				ACTION_SET(actionp, PF_PASS);
4996 			else {
4997 				ACTION_SET(actionp, PF_DROP);
4998 				REASON_SET(reasonp, PFRES_FRAG);
4999 			}
5000 			return (NULL);
5001 		}
5002 		if (m->m_pkthdr.len < off + len ||
5003 		    ntohs(h->ip_len) < off + len) {
5004 			ACTION_SET(actionp, PF_DROP);
5005 			REASON_SET(reasonp, PFRES_SHORT);
5006 			return (NULL);
5007 		}
5008 		break;
5009 	}
5010 #endif /* INET */
5011 #ifdef INET6
5012 	case AF_INET6: {
5013 		struct ip6_hdr	*h = mtod(m, struct ip6_hdr *);
5014 
5015 		if (m->m_pkthdr.len < off + len ||
5016 		    (ntohs(h->ip6_plen) + sizeof(struct ip6_hdr)) <
5017 		    (unsigned)(off + len)) {
5018 			ACTION_SET(actionp, PF_DROP);
5019 			REASON_SET(reasonp, PFRES_SHORT);
5020 			return (NULL);
5021 		}
5022 		break;
5023 	}
5024 #endif /* INET6 */
5025 	}
5026 	m_copydata(m, off, len, p);
5027 	return (p);
5028 }
5029 
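/*
 * Return 1 if addr is routable in the given rtable.  When an input kif
 * is supplied this doubles as a loose uRPF check: at least one matching
 * route (or, with RADIX_MPATH, one of the equal-cost next hops) must
 * point back out the interface the packet arrived on.
 */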
5030 int
5031 pf_routable(struct pf_addr *addr, sa_family_t af, struct pfi_kif *kif,
5032     int rtableid)
5033 {
5034 #ifdef RADIX_MPATH
5035 	struct radix_node_head	*rnh;
5036 #endif
5037 	struct sockaddr_in	*dst;
5038 	int			 ret = 1;
5039 	int			 check_mpath;
5040 #ifdef INET6
5041 	struct sockaddr_in6	*dst6;
5042 	struct route_in6	 ro;
5043 #else
5044 	struct route		 ro;
5045 #endif
5046 	struct radix_node	*rn;
5047 	struct rtentry		*rt;
5048 	struct ifnet		*ifp;
5049 
5050 	check_mpath = 0;
5051 #ifdef RADIX_MPATH
5052 	/* XXX: stick to table 0 for now */
5053 	rnh = rt_tables_get_rnh(0, af);
5054 	if (rnh != NULL && rn_mpath_capable(rnh))
5055 		check_mpath = 1;
5056 #endif
5057 	bzero(&ro, sizeof(ro));
5058 	switch (af) {
5059 	case AF_INET:
5060 		dst = satosin(&ro.ro_dst);
5061 		dst->sin_family = AF_INET;
5062 		dst->sin_len = sizeof(*dst);
5063 		dst->sin_addr = addr->v4;
5064 		break;
5065 #ifdef INET6
5066 	case AF_INET6:
5067 		/*
5068 		 * Skip check for addresses with embedded interface scope,
5069 		 * as they would always match anyway.
5070 		 */
5071 		if (IN6_IS_SCOPE_EMBED(&addr->v6))
5072 			goto out;
5073 		dst6 = (struct sockaddr_in6 *)&ro.ro_dst;
5074 		dst6->sin6_family = AF_INET6;
5075 		dst6->sin6_len = sizeof(*dst6);
5076 		dst6->sin6_addr = addr->v6;
5077 		break;
5078 #endif /* INET6 */
5079 	default:
5080 		return (0);
5081 	}
5082 
5083 	/* Skip checks for ipsec interfaces */
5084 	if (kif != NULL && kif->pfik_ifp->if_type == IFT_ENC)
5085 		goto out;
5086 
5087 	switch (af) {
5088 #ifdef INET6
5089 	case AF_INET6:
5090 		in6_rtalloc_ign(&ro, 0, rtableid);
5091 		break;
5092 #endif
5093 #ifdef INET
5094 	case AF_INET:
5095 		in_rtalloc_ign((struct route *)&ro, 0, rtableid);
5096 		break;
5097 #endif
5098 	default:
5099 		rtalloc_ign((struct route *)&ro, 0);	/* No/default FIB. */
5100 		break;
5101 	}
5102 
5103 	if (ro.ro_rt != NULL) {
5104 		/* No interface given, this is a no-route check */
5105 		if (kif == NULL)
5106 			goto out;
5107 
5108 		if (kif->pfik_ifp == NULL) {
5109 			ret = 0;
5110 			goto out;
5111 		}
5112 
5113 		/* Perform uRPF check if passed input interface */
5114 		ret = 0;
5115 		rn = (struct radix_node *)ro.ro_rt;
5116 		do {
5117 			rt = (struct rtentry *)rn;
5118 			ifp = rt->rt_ifp;
5119 
5120 			if (kif->pfik_ifp == ifp)
5121 				ret = 1;
5122 #ifdef RADIX_MPATH
5123 			rn = rn_mpath_next(rn);
5124 #endif
5125 		} while (check_mpath == 1 && rn != NULL && ret == 0);
5126 	} else
5127 		ret = 0;
5128 out:
5129 	if (ro.ro_rt != NULL)
5130 		RTFREE(ro.ro_rt);
5131 	return (ret);
5132 }
5133 
5134 #ifdef INET
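/*
 * Implement the rule's route option for IPv4: dup-to transmits a copy,
 * reply-to acts only on traffic flowing against the rule's direction,
 * fastroute does a plain FIB lookup, and route-to takes the next hop
 * from the rule's address pool, e.g. (illustrative pf.conf):
 *	pass in route-to (em1 192.0.2.1) from 10.0.0.0/8 to any
 */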
5135 static void
5136 pf_route(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
5137     struct pf_state *s, struct pf_pdesc *pd)
5138 {
5139 	struct mbuf		*m0, *m1;
5140 	struct sockaddr_in	dst;
5141 	struct ip		*ip;
5142 	struct ifnet		*ifp = NULL;
5143 	struct pf_addr		 naddr;
5144 	struct pf_src_node	*sn = NULL;
5145 	int			 error = 0;
5146 	uint16_t		 ip_len, ip_off;
5147 
5148 	KASSERT(m && *m && r && oifp, ("%s: invalid parameters", __func__));
5149 	KASSERT(dir == PF_IN || dir == PF_OUT, ("%s: invalid direction",
5150 	    __func__));
5151 
5152 	if ((pd->pf_mtag == NULL &&
5153 	    ((pd->pf_mtag = pf_get_mtag(*m)) == NULL)) ||
5154 	    pd->pf_mtag->routed++ > 3) {
5155 		m0 = *m;
5156 		*m = NULL;
5157 		goto bad_locked;
5158 	}
5159 
5160 	if (r->rt == PF_DUPTO) {
5161 		if ((m0 = m_dup(*m, M_NOWAIT)) == NULL) {
5162 			if (s)
5163 				PF_STATE_UNLOCK(s);
5164 			return;
5165 		}
5166 	} else {
5167 		if ((r->rt == PF_REPLYTO) == (r->direction == dir)) {
5168 			if (s)
5169 				PF_STATE_UNLOCK(s);
5170 			return;
5171 		}
5172 		m0 = *m;
5173 	}
5174 
5175 	ip = mtod(m0, struct ip *);
5176 
5177 	bzero(&dst, sizeof(dst));
5178 	dst.sin_family = AF_INET;
5179 	dst.sin_len = sizeof(dst);
5180 	dst.sin_addr = ip->ip_dst;
5181 
5182 	if (r->rt == PF_FASTROUTE) {
5183 		struct rtentry *rt;
5184 
5185 		if (s)
5186 			PF_STATE_UNLOCK(s);
5187 		rt = rtalloc1_fib(sintosa(&dst), 0, 0, M_GETFIB(m0));
5188 		if (rt == NULL) {
5190 			KMOD_IPSTAT_INC(ips_noroute);
5191 			error = EHOSTUNREACH;
5192 			goto bad;
5193 		}
5194 
5195 		ifp = rt->rt_ifp;
5196 		rt->rt_rmx.rmx_pksent++;
5197 
5198 		if (rt->rt_flags & RTF_GATEWAY)
5199 			bcopy(satosin(rt->rt_gateway), &dst, sizeof(dst));
5200 		RTFREE_LOCKED(rt);
5201 	} else {
5202 		if (TAILQ_EMPTY(&r->rpool.list)) {
5203 			DPFPRINTF(PF_DEBUG_URGENT,
5204 			    ("%s: TAILQ_EMPTY(&r->rpool.list)\n", __func__));
5205 			goto bad_locked;
5206 		}
5207 		if (s == NULL) {
5208 			pf_map_addr(AF_INET, r, (struct pf_addr *)&ip->ip_src,
5209 			    &naddr, NULL, &sn);
5210 			if (!PF_AZERO(&naddr, AF_INET))
5211 				dst.sin_addr.s_addr = naddr.v4.s_addr;
5212 			ifp = r->rpool.cur->kif ?
5213 			    r->rpool.cur->kif->pfik_ifp : NULL;
5214 		} else {
5215 			if (!PF_AZERO(&s->rt_addr, AF_INET))
5216 				dst.sin_addr.s_addr =
5217 				    s->rt_addr.v4.s_addr;
5218 			ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
5219 			PF_STATE_UNLOCK(s);
5220 		}
5221 	}
5222 	if (ifp == NULL)
5223 		goto bad;
5224 
5225 	if (oifp != ifp) {
5226 		if (pf_test(PF_OUT, ifp, &m0, NULL) != PF_PASS)
5227 			goto bad;
5228 		else if (m0 == NULL)
5229 			goto done;
5230 		if (m0->m_len < sizeof(struct ip)) {
5231 			DPFPRINTF(PF_DEBUG_URGENT,
5232 			    ("%s: m0->m_len < sizeof(struct ip)\n", __func__));
5233 			goto bad;
5234 		}
5235 		ip = mtod(m0, struct ip *);
5236 	}
5237 
5238 	if (ifp->if_flags & IFF_LOOPBACK)
5239 		m0->m_flags |= M_SKIP_FIREWALL;
5240 
5241 	ip_len = ntohs(ip->ip_len);
5242 	ip_off = ntohs(ip->ip_off);
5243 
5244 	/* Copied from FreeBSD 10.0-CURRENT ip_output. */
5245 	m0->m_pkthdr.csum_flags |= CSUM_IP;
5246 	if (m0->m_pkthdr.csum_flags & CSUM_DELAY_DATA & ~ifp->if_hwassist) {
5247 		in_delayed_cksum(m0);
5248 		m0->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
5249 	}
5250 #ifdef SCTP
5251 	if (m0->m_pkthdr.csum_flags & CSUM_SCTP & ~ifp->if_hwassist) {
5252 		sctp_delayed_cksum(m0, (uint32_t)(ip->ip_hl << 2));
5253 		m0->m_pkthdr.csum_flags &= ~CSUM_SCTP;
5254 	}
5255 #endif
5256 
5257 	/*
5258 	 * If small enough for interface, or the interface will take
5259 	 * care of the fragmentation for us, we can just send directly.
5260 	 */
5261 	if (ip_len <= ifp->if_mtu ||
5262 	    (m0->m_pkthdr.csum_flags & ifp->if_hwassist & CSUM_TSO) != 0 ||
5263 	    ((ip_off & IP_DF) == 0 && (ifp->if_hwassist & CSUM_FRAGMENT))) {
5264 		ip->ip_sum = 0;
5265 		if (m0->m_pkthdr.csum_flags & CSUM_IP & ~ifp->if_hwassist) {
5266 			ip->ip_sum = in_cksum(m0, ip->ip_hl << 2);
5267 			m0->m_pkthdr.csum_flags &= ~CSUM_IP;
5268 		}
5269 		m0->m_flags &= ~(M_PROTOFLAGS);
5270 		error = (*ifp->if_output)(ifp, m0, sintosa(&dst), NULL);
5271 		goto done;
5272 	}
5273 
5274 	/* Balk when DF bit is set or the interface didn't support TSO. */
5275 	/* Balk when the DF bit is set or the interface doesn't support TSO. */
5276 		error = EMSGSIZE;
5277 		KMOD_IPSTAT_INC(ips_cantfrag);
5278 		if (r->rt != PF_DUPTO) {
5279 			icmp_error(m0, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0,
5280 			    ifp->if_mtu);
5281 			goto done;
5282 		} else
5283 			goto bad;
5284 	}
5285 
5286 	error = ip_fragment(ip, &m0, ifp->if_mtu, ifp->if_hwassist);
5287 	if (error)
5288 		goto bad;
5289 
5290 	for (; m0; m0 = m1) {
5291 		m1 = m0->m_nextpkt;
5292 		m0->m_nextpkt = NULL;
5293 		if (error == 0) {
5294 			m0->m_flags &= ~(M_PROTOFLAGS);
5295 			error = (*ifp->if_output)(ifp, m0, sintosa(&dst), NULL);
5296 		} else
5297 			m_freem(m0);
5298 	}
5299 
5300 	if (error == 0)
5301 		KMOD_IPSTAT_INC(ips_fragmented);
5302 
5303 done:
5304 	if (r->rt != PF_DUPTO)
5305 		*m = NULL;
5306 	return;
5307 
5308 bad_locked:
5309 	if (s)
5310 		PF_STATE_UNLOCK(s);
5311 bad:
5312 	m_freem(m0);
5313 	goto done;
5314 }
5315 #endif /* INET */
5316 
5317 #ifdef INET6
5318 static void
5319 pf_route6(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp,
5320     struct pf_state *s, struct pf_pdesc *pd)
5321 {
5322 	struct mbuf		*m0;
5323 	struct sockaddr_in6	dst;
5324 	struct ip6_hdr		*ip6;
5325 	struct ifnet		*ifp = NULL;
5326 	struct pf_addr		 naddr;
5327 	struct pf_src_node	*sn = NULL;
5328 
5329 	KASSERT(m && *m && r && oifp, ("%s: invalid parameters", __func__));
5330 	KASSERT(dir == PF_IN || dir == PF_OUT, ("%s: invalid direction",
5331 	    __func__));
5332 
5333 	if ((pd->pf_mtag == NULL &&
5334 	    ((pd->pf_mtag = pf_get_mtag(*m)) == NULL)) ||
5335 	    pd->pf_mtag->routed++ > 3) {
5336 		m0 = *m;
5337 		*m = NULL;
5338 		goto bad_locked;
5339 	}
5340 
5341 	if (r->rt == PF_DUPTO) {
5342 		if ((m0 = m_dup(*m, M_NOWAIT)) == NULL) {
5343 			if (s)
5344 				PF_STATE_UNLOCK(s);
5345 			return;
5346 		}
5347 	} else {
5348 		if ((r->rt == PF_REPLYTO) == (r->direction == dir)) {
5349 			if (s)
5350 				PF_STATE_UNLOCK(s);
5351 			return;
5352 		}
5353 		m0 = *m;
5354 	}
5355 
5356 	ip6 = mtod(m0, struct ip6_hdr *);
5357 
5358 	bzero(&dst, sizeof(dst));
5359 	dst.sin6_family = AF_INET6;
5360 	dst.sin6_len = sizeof(dst);
5361 	dst.sin6_addr = ip6->ip6_dst;
5362 
5363 	/* Cheat. XXX why only in the v6 case??? */
5364 	if (r->rt == PF_FASTROUTE) {
5365 		if (s)
5366 			PF_STATE_UNLOCK(s);
5367 		m0->m_flags |= M_SKIP_FIREWALL;
5368 		ip6_output(m0, NULL, NULL, 0, NULL, NULL, NULL);
5369 		return;
5370 	}
5371 
5372 	if (TAILQ_EMPTY(&r->rpool.list)) {
5373 		DPFPRINTF(PF_DEBUG_URGENT,
5374 		    ("%s: TAILQ_EMPTY(&r->rpool.list)\n", __func__));
5375 		goto bad_locked;
5376 	}
5377 	if (s == NULL) {
5378 		pf_map_addr(AF_INET6, r, (struct pf_addr *)&ip6->ip6_src,
5379 		    &naddr, NULL, &sn);
5380 		if (!PF_AZERO(&naddr, AF_INET6))
5381 			PF_ACPY((struct pf_addr *)&dst.sin6_addr,
5382 			    &naddr, AF_INET6);
5383 		ifp = r->rpool.cur->kif ? r->rpool.cur->kif->pfik_ifp : NULL;
5384 	} else {
5385 		if (!PF_AZERO(&s->rt_addr, AF_INET6))
5386 			PF_ACPY((struct pf_addr *)&dst.sin6_addr,
5387 			    &s->rt_addr, AF_INET6);
5388 		ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
5389 	}
5390 
5391 	if (s)
5392 		PF_STATE_UNLOCK(s);
5393 
5394 	if (ifp == NULL)
5395 		goto bad;
5396 
5397 	if (oifp != ifp) {
5398 		if (pf_test6(PF_OUT, ifp, &m0, NULL) != PF_PASS)
5399 			goto bad;
5400 		else if (m0 == NULL)
5401 			goto done;
5402 		if (m0->m_len < sizeof(struct ip6_hdr)) {
5403 			DPFPRINTF(PF_DEBUG_URGENT,
5404 			    ("%s: m0->m_len < sizeof(struct ip6_hdr)\n",
5405 			    __func__));
5406 			goto bad;
5407 		}
5408 		ip6 = mtod(m0, struct ip6_hdr *);
5409 	}
5410 
5411 	if (ifp->if_flags & IFF_LOOPBACK)
5412 		m0->m_flags |= M_SKIP_FIREWALL;
5413 
5414 	/*
5415 	 * If the packet is too large for the outgoing interface,
5416 	 * send back an icmp6 error.
5417 	 */
5418 	if (IN6_IS_SCOPE_EMBED(&dst.sin6_addr))
5419 		dst.sin6_addr.s6_addr16[1] = htons(ifp->if_index);
5420 	if ((u_long)m0->m_pkthdr.len <= ifp->if_mtu)
5421 		nd6_output(ifp, ifp, m0, &dst, NULL);
5422 	else {
5423 		in6_ifstat_inc(ifp, ifs6_in_toobig);
5424 		if (r->rt != PF_DUPTO)
5425 			icmp6_error(m0, ICMP6_PACKET_TOO_BIG, 0, ifp->if_mtu);
5426 		else
5427 			goto bad;
5428 	}
5429 
5430 done:
5431 	if (r->rt != PF_DUPTO)
5432 		*m = NULL;
5433 	return;
5434 
5435 bad_locked:
5436 	if (s)
5437 		PF_STATE_UNLOCK(s);
5438 bad:
5439 	m_freem(m0);
5440 	goto done;
5441 }
5442 #endif /* INET6 */
5443 
5444 /*
5445  * FreeBSD supports cksum offloads for the following drivers.
5446  *  em(4), fxp(4), ixgb(4), lge(4), ndis(4), nge(4), re(4),
5447  *   ti(4), txp(4), xl(4)
5448  *
5449  * CSUM_DATA_VALID | CSUM_PSEUDO_HDR :
5450  *  network driver performed cksum including pseudo header, need to verify
5451  *   csum_data
5452  * CSUM_DATA_VALID :
5453  *  network driver performed cksum, needs additional pseudo header
5454  *  cksum computation with partial csum_data (i.e. lack of H/W support
5455  *  for the pseudo header, for instance hme(4), sk(4) and possibly gem(4))
5456  *
5457  * After validating the cksum of the packet, set both CSUM_DATA_VALID and
5458  * CSUM_PSEUDO_HDR in order to avoid recomputation of the cksum in the upper
5459  * TCP/UDP layer.
5460  * Also, set csum_data to 0xffff to force cksum validation.
5461  */
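/*
 * Returns 0 when the checksum is (or is now known to be) good, 1 on
 * any failure; the matching protocol's bad-checksum counter is bumped
 * before a failing packet is reported.
 */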
5462 static int
5463 pf_check_proto_cksum(struct mbuf *m, int off, int len, u_int8_t p, sa_family_t af)
5464 {
5465 	u_int16_t sum = 0;
5466 	int hw_assist = 0;
5467 	struct ip *ip;
5468 
5469 	if (off < sizeof(struct ip) || len < sizeof(struct udphdr))
5470 		return (1);
5471 	if (m->m_pkthdr.len < off + len)
5472 		return (1);
5473 
5474 	switch (p) {
5475 	case IPPROTO_TCP:
5476 		if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
5477 			if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
5478 				sum = m->m_pkthdr.csum_data;
5479 			} else {
5480 				ip = mtod(m, struct ip *);
5481 				sum = in_pseudo(ip->ip_src.s_addr,
5482 				ip->ip_dst.s_addr, htonl((u_short)len +
5483 				m->m_pkthdr.csum_data + IPPROTO_TCP));
5484 			}
5485 			sum ^= 0xffff;
5486 			++hw_assist;
5487 		}
5488 		break;
5489 	case IPPROTO_UDP:
5490 		if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
5491 			if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
5492 				sum = m->m_pkthdr.csum_data;
5493 			} else {
5494 				ip = mtod(m, struct ip *);
5495 				sum = in_pseudo(ip->ip_src.s_addr,
5496 				ip->ip_dst.s_addr, htonl((u_short)len +
5497 				m->m_pkthdr.csum_data + IPPROTO_UDP));
5498 			}
5499 			sum ^= 0xffff;
5500 			++hw_assist;
5501 		}
5502 		break;
5503 	case IPPROTO_ICMP:
5504 #ifdef INET6
5505 	case IPPROTO_ICMPV6:
5506 #endif /* INET6 */
5507 		break;
5508 	default:
5509 		return (1);
5510 	}
5511 
5512 	if (!hw_assist) {
5513 		switch (af) {
5514 		case AF_INET:
5515 			if (p == IPPROTO_ICMP) {
5516 				if (m->m_len < off)
5517 					return (1);
5518 				m->m_data += off;
5519 				m->m_len -= off;
5520 				sum = in_cksum(m, len);
5521 				m->m_data -= off;
5522 				m->m_len += off;
5523 			} else {
5524 				if (m->m_len < sizeof(struct ip))
5525 					return (1);
5526 				sum = in4_cksum(m, p, off, len);
5527 			}
5528 			break;
5529 #ifdef INET6
5530 		case AF_INET6:
5531 			if (m->m_len < sizeof(struct ip6_hdr))
5532 				return (1);
5533 			sum = in6_cksum(m, p, off, len);
5534 			break;
5535 #endif /* INET6 */
5536 		default:
5537 			return (1);
5538 		}
5539 	}
5540 	if (sum) {
5541 		switch (p) {
5542 		case IPPROTO_TCP:
5543 		    {
5544 			KMOD_TCPSTAT_INC(tcps_rcvbadsum);
5545 			break;
5546 		    }
5547 		case IPPROTO_UDP:
5548 		    {
5549 			KMOD_UDPSTAT_INC(udps_badsum);
5550 			break;
5551 		    }
5552 #ifdef INET
5553 		case IPPROTO_ICMP:
5554 		    {
5555 			KMOD_ICMPSTAT_INC(icps_checksum);
5556 			break;
5557 		    }
5558 #endif
5559 #ifdef INET6
5560 		case IPPROTO_ICMPV6:
5561 		    {
5562 			KMOD_ICMP6STAT_INC(icp6s_checksum);
5563 			break;
5564 		    }
5565 #endif /* INET6 */
5566 		}
5567 		return (1);
5568 	} else {
5569 		if (p == IPPROTO_TCP || p == IPPROTO_UDP) {
5570 			m->m_pkthdr.csum_flags |=
5571 			    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
5572 			m->m_pkthdr.csum_data = 0xffff;
5573 		}
5574 	}
5575 	return (0);
5576 }
5577 
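/*
 * Editorial sketch, not part of the original source: how a caller might
 * use pf_check_proto_cksum() to validate a TCP checksum before looking
 * at the segment.  The variables m, off, pd and reason stand in for a
 * typical pf_test() context and are assumptions for illustration only.
 */
#if 0
	if (pf_check_proto_cksum(m, off, pd.tot_len - off, IPPROTO_TCP,
	    AF_INET)) {
		/* Bad checksum; the protocol stat counter was bumped. */
		REASON_SET(&reason, PFRES_PROTCKSUM);
		return (PF_DROP);
	}
#endif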
5578 
5579 #ifdef INET
5580 int
5581 pf_test(int dir, struct ifnet *ifp, struct mbuf **m0, struct inpcb *inp)
5582 {
5583 	struct pfi_kif		*kif;
5584 	u_short			 action, reason = 0, log = 0;
5585 	struct mbuf		*m = *m0;
5586 	struct ip		*h = NULL;
5587 	struct m_tag		*ipfwtag;
5588 	struct pf_rule		*a = NULL, *r = &V_pf_default_rule, *tr, *nr;
5589 	struct pf_state		*s = NULL;
5590 	struct pf_ruleset	*ruleset = NULL;
5591 	struct pf_pdesc		 pd;
5592 	int			 off, dirndx, pqid = 0;
5593 
5594 	M_ASSERTPKTHDR(m);
5595 
5596 	if (!V_pf_status.running)
5597 		return (PF_PASS);
5598 
5599 	memset(&pd, 0, sizeof(pd));
5600 
5601 	kif = (struct pfi_kif *)ifp->if_pf_kif;
5602 
5603 	if (kif == NULL) {
5604 		DPFPRINTF(PF_DEBUG_URGENT,
5605 		    ("pf_test: kif == NULL, if_xname %s\n", ifp->if_xname));
5606 		return (PF_DROP);
5607 	}
5608 	if (kif->pfik_flags & PFI_IFLAG_SKIP)
5609 		return (PF_PASS);
5610 
5611 	if (m->m_flags & M_SKIP_FIREWALL)
5612 		return (PF_PASS);
5613 
5614 	pd.pf_mtag = pf_find_mtag(m);
5615 
5616 	PF_RULES_RLOCK();
5617 
5618 	if (ip_divert_ptr != NULL &&
5619 	    ((ipfwtag = m_tag_locate(m, MTAG_IPFW_RULE, 0, NULL)) != NULL)) {
5620 		struct ipfw_rule_ref *rr = (struct ipfw_rule_ref *)(ipfwtag+1);
5621 		if (rr->info & IPFW_IS_DIVERT && rr->rulenum == 0) {
5622 			if (pd.pf_mtag == NULL &&
5623 			    ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
5624 				action = PF_DROP;
5625 				goto done;
5626 			}
5627 			pd.pf_mtag->flags |= PF_PACKET_LOOPED;
5628 			m_tag_delete(m, ipfwtag);
5629 		}
5630 		if (pd.pf_mtag && pd.pf_mtag->flags & PF_FASTFWD_OURS_PRESENT) {
5631 			m->m_flags |= M_FASTFWD_OURS;
5632 			pd.pf_mtag->flags &= ~PF_FASTFWD_OURS_PRESENT;
5633 		}
5634 	} else if (pf_normalize_ip(m0, dir, kif, &reason, &pd) != PF_PASS) {
5635 		/* We do IP header normalization and packet reassembly here */
5636 		action = PF_DROP;
5637 		goto done;
5638 	}
5639 	m = *m0;	/* pf_normalize messes with m0 */
5640 	h = mtod(m, struct ip *);
5641 
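	/* ip_hl is the header length in 32-bit words; convert to bytes. */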
5642 	off = h->ip_hl << 2;
5643 	if (off < (int)sizeof(struct ip)) {
5644 		action = PF_DROP;
5645 		REASON_SET(&reason, PFRES_SHORT);
5646 		log = 1;
5647 		goto done;
5648 	}
5649 
5650 	pd.src = (struct pf_addr *)&h->ip_src;
5651 	pd.dst = (struct pf_addr *)&h->ip_dst;
5652 	pd.sport = pd.dport = NULL;
5653 	pd.ip_sum = &h->ip_sum;
5654 	pd.proto_sum = NULL;
5655 	pd.proto = h->ip_p;
5656 	pd.dir = dir;
5657 	pd.sidx = (dir == PF_IN) ? 0 : 1;
5658 	pd.didx = (dir == PF_IN) ? 1 : 0;
5659 	pd.af = AF_INET;
5660 	pd.tos = h->ip_tos;
5661 	pd.tot_len = ntohs(h->ip_len);
5662 
5663 	/* handle fragments that didn't get reassembled by normalization */
5664 	if (h->ip_off & htons(IP_MF | IP_OFFMASK)) {
5665 		action = pf_test_fragment(&r, dir, kif, m, h,
5666 		    &pd, &a, &ruleset);
5667 		goto done;
5668 	}
5669 
5670 	switch (h->ip_p) {
5671 
5672 	case IPPROTO_TCP: {
5673 		struct tcphdr	th;
5674 
5675 		pd.hdr.tcp = &th;
5676 		if (!pf_pull_hdr(m, off, &th, sizeof(th),
5677 		    &action, &reason, AF_INET)) {
5678 			log = action != PF_PASS;
5679 			goto done;
5680 		}
5681 		pd.p_len = pd.tot_len - off - (th.th_off << 2);
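		/* An empty ACK segment is assigned the priority queue. */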
5682 		if ((th.th_flags & TH_ACK) && pd.p_len == 0)
5683 			pqid = 1;
5684 		action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
5685 		if (action == PF_DROP)
5686 			goto done;
5687 		action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
5688 		    &reason);
5689 		if (action == PF_PASS) {
5690 			if (pfsync_update_state_ptr != NULL)
5691 				pfsync_update_state_ptr(s);
5692 			r = s->rule.ptr;
5693 			a = s->anchor.ptr;
5694 			log = s->log;
5695 		} else if (s == NULL)
5696 			action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
5697 			    &a, &ruleset, inp);
5698 		break;
5699 	}
5700 
5701 	case IPPROTO_UDP: {
5702 		struct udphdr	uh;
5703 
5704 		pd.hdr.udp = &uh;
5705 		if (!pf_pull_hdr(m, off, &uh, sizeof(uh),
5706 		    &action, &reason, AF_INET)) {
5707 			log = action != PF_PASS;
5708 			goto done;
5709 		}
5710 		if (uh.uh_dport == 0 ||
5711 		    ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
5712 		    ntohs(uh.uh_ulen) < sizeof(struct udphdr)) {
5713 			action = PF_DROP;
5714 			REASON_SET(&reason, PFRES_SHORT);
5715 			goto done;
5716 		}
5717 		action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
5718 		if (action == PF_PASS) {
5719 			if (pfsync_update_state_ptr != NULL)
5720 				pfsync_update_state_ptr(s);
5721 			r = s->rule.ptr;
5722 			a = s->anchor.ptr;
5723 			log = s->log;
5724 		} else if (s == NULL)
5725 			action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
5726 			    &a, &ruleset, inp);
5727 		break;
5728 	}
5729 
5730 	case IPPROTO_ICMP: {
5731 		struct icmp	ih;
5732 
5733 		pd.hdr.icmp = &ih;
5734 		if (!pf_pull_hdr(m, off, &ih, ICMP_MINLEN,
5735 		    &action, &reason, AF_INET)) {
5736 			log = action != PF_PASS;
5737 			goto done;
5738 		}
5739 		action = pf_test_state_icmp(&s, dir, kif, m, off, h, &pd,
5740 		    &reason);
5741 		if (action == PF_PASS) {
5742 			if (pfsync_update_state_ptr != NULL)
5743 				pfsync_update_state_ptr(s);
5744 			r = s->rule.ptr;
5745 			a = s->anchor.ptr;
5746 			log = s->log;
5747 		} else if (s == NULL)
5748 			action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
5749 			    &a, &ruleset, inp);
5750 		break;
5751 	}
5752 
5753 #ifdef INET6
5754 	case IPPROTO_ICMPV6: {
5755 		action = PF_DROP;
5756 		DPFPRINTF(PF_DEBUG_MISC,
5757 		    ("pf: dropping IPv4 packet with ICMPv6 payload\n"));
5758 		goto done;
5759 	}
5760 #endif
5761 
5762 	default:
5763 		action = pf_test_state_other(&s, dir, kif, m, &pd);
5764 		if (action == PF_PASS) {
5765 			if (pfsync_update_state_ptr != NULL)
5766 				pfsync_update_state_ptr(s);
5767 			r = s->rule.ptr;
5768 			a = s->anchor.ptr;
5769 			log = s->log;
5770 		} else if (s == NULL)
5771 			action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
5772 			    &a, &ruleset, inp);
5773 		break;
5774 	}
5775 
5776 done:
5777 	PF_RULES_RUNLOCK();
5778 	if (action == PF_PASS && h->ip_hl > 5 &&
5779 	    !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) {
5780 		action = PF_DROP;
5781 		REASON_SET(&reason, PFRES_IPOPTIONS);
5782 		log = 1;
5783 		DPFPRINTF(PF_DEBUG_MISC,
5784 		    ("pf: dropping packet with ip options\n"));
5785 	}
5786 
5787 	if (s && s->tag > 0 && pf_tag_packet(m, &pd, s->tag)) {
5788 		action = PF_DROP;
5789 		REASON_SET(&reason, PFRES_MEMORY);
5790 	}
5791 	if (r->rtableid >= 0)
5792 		M_SETFIB(m, r->rtableid);
5793 
5794 #ifdef ALTQ
5795 	if (action == PF_PASS && r->qid) {
5796 		if (pd.pf_mtag == NULL &&
5797 		    ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
5798 			action = PF_DROP;
5799 			REASON_SET(&reason, PFRES_MEMORY);
5800 		} else {
5801 			if (pqid || (pd.tos & IPTOS_LOWDELAY))
5802 				pd.pf_mtag->qid = r->pqid;
5803 			else
5804 				pd.pf_mtag->qid = r->qid;
5805 			/* add hints for ecn */
5806 			pd.pf_mtag->hdr = h;
5807 		}
5808 	}
5809 #endif /* ALTQ */
5810 
5811 	/*
5812 	 * connections redirected to loopback should not match sockets
5813 	 * bound specifically to loopback due to security implications,
5814 	 * see tcp_input() and in_pcblookup_listen().
5815 	 */
5816 	if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
5817 	    pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
5818 	    (s->nat_rule.ptr->action == PF_RDR ||
5819 	    s->nat_rule.ptr->action == PF_BINAT) &&
5820 	    (ntohl(pd.dst->v4.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET)
5821 		m->m_flags |= M_SKIP_FIREWALL;
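	/*
	 * (The shift keeps only the leading octet of the destination;
	 * IN_LOOPBACKNET is 127, so the test above matches any address
	 * in 127.0.0.0/8.)
	 */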
5822 
5823 	if (action == PF_PASS && r->divert.port && ip_divert_ptr != NULL &&
5824 	    !PACKET_LOOPED(&pd)) {
5825 
5826 		ipfwtag = m_tag_alloc(MTAG_IPFW_RULE, 0,
5827 		    sizeof(struct ipfw_rule_ref), M_NOWAIT | M_ZERO);
5828 		if (ipfwtag != NULL) {
5829 			((struct ipfw_rule_ref *)(ipfwtag+1))->info =
5830 			    ntohs(r->divert.port);
5831 			((struct ipfw_rule_ref *)(ipfwtag+1))->rulenum = dir;
5832 
5833 			if (s)
5834 				PF_STATE_UNLOCK(s);
5835 
5836 			m_tag_prepend(m, ipfwtag);
5837 			if (m->m_flags & M_FASTFWD_OURS) {
5838 				if (pd.pf_mtag == NULL &&
5839 				    ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
5840 					action = PF_DROP;
5841 					REASON_SET(&reason, PFRES_MEMORY);
5842 					log = 1;
5843 					DPFPRINTF(PF_DEBUG_MISC,
5844 					    ("pf: failed to allocate tag\n"));
5845 				} else {
5846 					pd.pf_mtag->flags |=
					    PF_FASTFWD_OURS_PRESENT;
5847 					m->m_flags &= ~M_FASTFWD_OURS;
5848 				}
			}
5849 			ip_divert_ptr(*m0, dir == PF_IN ? DIR_IN : DIR_OUT);
5850 			*m0 = NULL;
5851 
5852 			return (action);
5853 		} else {
5854 			/* XXX: ipfw has the same behaviour! */
5855 			action = PF_DROP;
5856 			REASON_SET(&reason, PFRES_MEMORY);
5857 			log = 1;
5858 			DPFPRINTF(PF_DEBUG_MISC,
5859 			    ("pf: failed to allocate divert tag\n"));
5860 		}
5861 	}
5862 
5863 	if (log) {
5864 		struct pf_rule *lr;
5865 
5866 		if (s != NULL && s->nat_rule.ptr != NULL &&
5867 		    s->nat_rule.ptr->log & PF_LOG_ALL)
5868 			lr = s->nat_rule.ptr;
5869 		else
5870 			lr = r;
5871 		PFLOG_PACKET(kif, m, AF_INET, dir, reason, lr, a, ruleset, &pd,
5872 		    (s == NULL));
5873 	}
5874 
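	/*
	 * Interface counters are indexed by address family (0 = IPv4,
	 * 1 = IPv6), direction (dir == PF_OUT) and verdict (action !=
	 * PF_PASS).
	 */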
5875 	kif->pfik_bytes[0][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
5876 	kif->pfik_packets[0][dir == PF_OUT][action != PF_PASS]++;
5877 
5878 	if (action == PF_PASS || r->action == PF_DROP) {
5879 		dirndx = (dir == PF_OUT);
5880 		r->packets[dirndx]++;
5881 		r->bytes[dirndx] += pd.tot_len;
5882 		if (a != NULL) {
5883 			a->packets[dirndx]++;
5884 			a->bytes[dirndx] += pd.tot_len;
5885 		}
5886 		if (s != NULL) {
5887 			if (s->nat_rule.ptr != NULL) {
5888 				s->nat_rule.ptr->packets[dirndx]++;
5889 				s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
5890 			}
5891 			if (s->src_node != NULL) {
5892 				s->src_node->packets[dirndx]++;
5893 				s->src_node->bytes[dirndx] += pd.tot_len;
5894 			}
5895 			if (s->nat_src_node != NULL) {
5896 				s->nat_src_node->packets[dirndx]++;
5897 				s->nat_src_node->bytes[dirndx] += pd.tot_len;
5898 			}
5899 			dirndx = (dir == s->direction) ? 0 : 1;
5900 			s->packets[dirndx]++;
5901 			s->bytes[dirndx] += pd.tot_len;
5902 		}
5903 		tr = r;
5904 		nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
5905 		if (nr != NULL && r == &V_pf_default_rule)
5906 			tr = nr;
5907 		if (tr->src.addr.type == PF_ADDR_TABLE)
5908 			pfr_update_stats(tr->src.addr.p.tbl,
5909 			    (s == NULL) ? pd.src :
5910 			    &s->key[(s->direction == PF_IN)]->
5911 				addr[(s->direction == PF_OUT)],
5912 			    pd.af, pd.tot_len, dir == PF_OUT,
5913 			    r->action == PF_PASS, tr->src.neg);
5914 		if (tr->dst.addr.type == PF_ADDR_TABLE)
5915 			pfr_update_stats(tr->dst.addr.p.tbl,
5916 			    (s == NULL) ? pd.dst :
5917 			    &s->key[(s->direction == PF_IN)]->
5918 				addr[(s->direction == PF_IN)],
5919 			    pd.af, pd.tot_len, dir == PF_OUT,
5920 			    r->action == PF_PASS, tr->dst.neg);
5921 	}
5922 
5923 	switch (action) {
5924 	case PF_SYNPROXY_DROP:
5925 		m_freem(*m0);
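		/* FALLTHROUGH */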
5926 	case PF_DEFER:
5927 		*m0 = NULL;
5928 		action = PF_PASS;
5929 		break;
5930 	default:
5931 		/* pf_route() returns unlocked. */
5932 		if (r->rt) {
5933 			pf_route(m0, r, dir, kif->pfik_ifp, s, &pd);
5934 			return (action);
5935 		}
5936 		break;
5937 	}
5938 	if (s)
5939 		PF_STATE_UNLOCK(s);
5940 
5941 	return (action);
5942 }
5943 #endif /* INET */
5944 
5945 #ifdef INET6
5946 int
5947 pf_test6(int dir, struct ifnet *ifp, struct mbuf **m0, struct inpcb *inp)
5948 {
5949 	struct pfi_kif		*kif;
5950 	u_short			 action, reason = 0, log = 0;
5951 	struct mbuf		*m = *m0, *n = NULL;
5952 	struct ip6_hdr		*h = NULL;
5953 	struct pf_rule		*a = NULL, *r = &V_pf_default_rule, *tr, *nr;
5954 	struct pf_state		*s = NULL;
5955 	struct pf_ruleset	*ruleset = NULL;
5956 	struct pf_pdesc		 pd;
5957 	int			 off, terminal = 0, dirndx, rh_cnt = 0;
5958 
5959 	M_ASSERTPKTHDR(m);
5960 
5961 	if (!V_pf_status.running)
5962 		return (PF_PASS);
5963 
5964 	memset(&pd, 0, sizeof(pd));
5965 	pd.pf_mtag = pf_find_mtag(m);
5966 
5967 	if (pd.pf_mtag && pd.pf_mtag->flags & PF_TAG_GENERATED)
5968 		return (PF_PASS);
5969 
5970 	kif = (struct pfi_kif *)ifp->if_pf_kif;
5971 	if (kif == NULL) {
5972 		DPFPRINTF(PF_DEBUG_URGENT,
5973 		    ("pf_test6: kif == NULL, if_xname %s\n", ifp->if_xname));
5974 		return (PF_DROP);
5975 	}
5976 	if (kif->pfik_flags & PFI_IFLAG_SKIP)
5977 		return (PF_PASS);
5978 
5979 	PF_RULES_RLOCK();
5980 
5981 	/* We do IP header normalization and packet reassembly here */
5982 	if (pf_normalize_ip6(m0, dir, kif, &reason, &pd) != PF_PASS) {
5983 		action = PF_DROP;
5984 		goto done;
5985 	}
5986 	m = *m0;	/* pf_normalize messes with m0 */
5987 	h = mtod(m, struct ip6_hdr *);
5988 
5989 #if 1
5990 	/*
5991 	 * We do not support jumbograms yet.  A zero ip6_plen would break
5992 	 * the length calculations below, so drop the packet for now.
5993 	 */
5994 	if (htons(h->ip6_plen) == 0) {
5995 		action = PF_DROP;
5996 		REASON_SET(&reason, PFRES_NORM);	/*XXX*/
5997 		goto done;
5998 	}
5999 #endif
6000 
6001 	pd.src = (struct pf_addr *)&h->ip6_src;
6002 	pd.dst = (struct pf_addr *)&h->ip6_dst;
6003 	pd.sport = pd.dport = NULL;
6004 	pd.ip_sum = NULL;
6005 	pd.proto_sum = NULL;
6006 	pd.dir = dir;
6007 	pd.sidx = (dir == PF_IN) ? 0 : 1;
6008 	pd.didx = (dir == PF_IN) ? 1 : 0;
6009 	pd.af = AF_INET6;
6010 	pd.tos = 0;
6011 	pd.tot_len = ntohs(h->ip6_plen) + sizeof(struct ip6_hdr);
6012 
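	/* Walk the extension header chain to find the transport header. */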
6013 	off = ((caddr_t)h - m->m_data) + sizeof(struct ip6_hdr);
6014 	pd.proto = h->ip6_nxt;
6015 	do {
6016 		switch (pd.proto) {
6017 		case IPPROTO_FRAGMENT:
6018 			action = pf_test_fragment(&r, dir, kif, m, h,
6019 			    &pd, &a, &ruleset);
6020 			if (action == PF_DROP)
6021 				REASON_SET(&reason, PFRES_FRAG);
6022 			goto done;
6023 		case IPPROTO_ROUTING: {
6024 			struct ip6_rthdr rthdr;
6025 
6026 			if (rh_cnt++) {
6027 				DPFPRINTF(PF_DEBUG_MISC,
6028 				    ("pf: IPv6 more than one rthdr\n"));
6029 				action = PF_DROP;
6030 				REASON_SET(&reason, PFRES_IPOPTIONS);
6031 				log = 1;
6032 				goto done;
6033 			}
6034 			if (!pf_pull_hdr(m, off, &rthdr, sizeof(rthdr), NULL,
6035 			    &reason, pd.af)) {
6036 				DPFPRINTF(PF_DEBUG_MISC,
6037 				    ("pf: IPv6 short rthdr\n"));
6038 				action = PF_DROP;
6039 				REASON_SET(&reason, PFRES_SHORT);
6040 				log = 1;
6041 				goto done;
6042 			}
6043 			if (rthdr.ip6r_type == IPV6_RTHDR_TYPE_0) {
6044 				DPFPRINTF(PF_DEBUG_MISC,
6045 				    ("pf: IPv6 rthdr0\n"));
6046 				action = PF_DROP;
6047 				REASON_SET(&reason, PFRES_IPOPTIONS);
6048 				log = 1;
6049 				goto done;
6050 			}
6051 			/* FALLTHROUGH */
6052 		}
6053 		case IPPROTO_AH:
6054 		case IPPROTO_HOPOPTS:
6055 		case IPPROTO_DSTOPTS: {
6056 			/* get next header and header length */
6057 			struct ip6_ext	opt6;
6058 
6059 			if (!pf_pull_hdr(m, off, &opt6, sizeof(opt6),
6060 			    NULL, &reason, pd.af)) {
6061 				DPFPRINTF(PF_DEBUG_MISC,
6062 				    ("pf: IPv6 short opt\n"));
6063 				action = PF_DROP;
				REASON_SET(&reason, PFRES_SHORT);
6064 				log = 1;
6065 				goto done;
6066 			}
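			/*
			 * For AH, ip6e_len counts 4-octet units minus
			 * two; for the other extension headers it counts
			 * 8-octet units not including the first.
			 */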
6067 			if (pd.proto == IPPROTO_AH)
6068 				off += (opt6.ip6e_len + 2) * 4;
6069 			else
6070 				off += (opt6.ip6e_len + 1) * 8;
6071 			pd.proto = opt6.ip6e_nxt;
6072 			/* go to the next header */
6073 			break;
6074 		}
6075 		default:
6076 			terminal++;
6077 			break;
6078 		}
6079 	} while (!terminal);
6080 
6081 	/* if there's no routing header, use unmodified mbuf for checksumming */
6082 	if (!n)
6083 		n = m;
6084 
6085 	switch (pd.proto) {
6086 
6087 	case IPPROTO_TCP: {
6088 		struct tcphdr	th;
6089 
6090 		pd.hdr.tcp = &th;
6091 		if (!pf_pull_hdr(m, off, &th, sizeof(th),
6092 		    &action, &reason, AF_INET6)) {
6093 			log = action != PF_PASS;
6094 			goto done;
6095 		}
6096 		pd.p_len = pd.tot_len - off - (th.th_off << 2);
6097 		action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
6098 		if (action == PF_DROP)
6099 			goto done;
6100 		action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
6101 		    &reason);
6102 		if (action == PF_PASS) {
6103 			if (pfsync_update_state_ptr != NULL)
6104 				pfsync_update_state_ptr(s);
6105 			r = s->rule.ptr;
6106 			a = s->anchor.ptr;
6107 			log = s->log;
6108 		} else if (s == NULL)
6109 			action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6110 			    &a, &ruleset, inp);
6111 		break;
6112 	}
6113 
6114 	case IPPROTO_UDP: {
6115 		struct udphdr	uh;
6116 
6117 		pd.hdr.udp = &uh;
6118 		if (!pf_pull_hdr(m, off, &uh, sizeof(uh),
6119 		    &action, &reason, AF_INET6)) {
6120 			log = action != PF_PASS;
6121 			goto done;
6122 		}
6123 		if (uh.uh_dport == 0 ||
6124 		    ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
6125 		    ntohs(uh.uh_ulen) < sizeof(struct udphdr)) {
6126 			action = PF_DROP;
6127 			REASON_SET(&reason, PFRES_SHORT);
6128 			goto done;
6129 		}
6130 		action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
6131 		if (action == PF_PASS) {
6132 			if (pfsync_update_state_ptr != NULL)
6133 				pfsync_update_state_ptr(s);
6134 			r = s->rule.ptr;
6135 			a = s->anchor.ptr;
6136 			log = s->log;
6137 		} else if (s == NULL)
6138 			action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6139 			    &a, &ruleset, inp);
6140 		break;
6141 	}
6142 
6143 	case IPPROTO_ICMP: {
6144 		action = PF_DROP;
6145 		DPFPRINTF(PF_DEBUG_MISC,
6146 		    ("pf: dropping IPv6 packet with ICMPv4 payload\n"));
6147 		goto done;
6148 	}
6149 
6150 	case IPPROTO_ICMPV6: {
6151 		struct icmp6_hdr	ih;
6152 
6153 		pd.hdr.icmp6 = &ih;
6154 		if (!pf_pull_hdr(m, off, &ih, sizeof(ih),
6155 		    &action, &reason, AF_INET6)) {
6156 			log = action != PF_PASS;
6157 			goto done;
6158 		}
6159 		action = pf_test_state_icmp(&s, dir, kif,
6160 		    m, off, h, &pd, &reason);
6161 		if (action == PF_PASS) {
6162 			if (pfsync_update_state_ptr != NULL)
6163 				pfsync_update_state_ptr(s);
6164 			r = s->rule.ptr;
6165 			a = s->anchor.ptr;
6166 			log = s->log;
6167 		} else if (s == NULL)
6168 			action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6169 			    &a, &ruleset, inp);
6170 		break;
6171 	}
6172 
6173 	default:
6174 		action = pf_test_state_other(&s, dir, kif, m, &pd);
6175 		if (action == PF_PASS) {
6176 			if (pfsync_update_state_ptr != NULL)
6177 				pfsync_update_state_ptr(s);
6178 			r = s->rule.ptr;
6179 			a = s->anchor.ptr;
6180 			log = s->log;
6181 		} else if (s == NULL)
6182 			action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
6183 			    &a, &ruleset, inp);
6184 		break;
6185 	}
6186 
6187 done:
6188 	PF_RULES_RUNLOCK();
6189 	if (n != m) {
6190 		m_freem(n);
6191 		n = NULL;
6192 	}
6193 
6194 	/* handle dangerous IPv6 extension headers. */
6195 	if (action == PF_PASS && rh_cnt &&
6196 	    !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) {
6197 		action = PF_DROP;
6198 		REASON_SET(&reason, PFRES_IPOPTIONS);
6199 		log = 1;
6200 		DPFPRINTF(PF_DEBUG_MISC,
6201 		    ("pf: dropping packet with dangerous v6 headers\n"));
6202 	}
6203 
6204 	if (s && s->tag > 0 && pf_tag_packet(m, &pd, s->tag)) {
6205 		action = PF_DROP;
6206 		REASON_SET(&reason, PFRES_MEMORY);
6207 	}
6208 	if (r->rtableid >= 0)
6209 		M_SETFIB(m, r->rtableid);
6210 
6211 #ifdef ALTQ
6212 	if (action == PF_PASS && r->qid) {
6213 		if (pd.pf_mtag == NULL &&
6214 		    ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
6215 			action = PF_DROP;
6216 			REASON_SET(&reason, PFRES_MEMORY);
6217 		} else {
6218 			if (pd.tos & IPTOS_LOWDELAY)
6219 				pd.pf_mtag->qid = r->pqid;
6220 			else
6221 				pd.pf_mtag->qid = r->qid;
6222 			/* add hints for ecn */
6223 			pd.pf_mtag->hdr = h;
6224 		}
	}
6225 #endif /* ALTQ */
6226 
6227 	if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
6228 	    pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
6229 	    (s->nat_rule.ptr->action == PF_RDR ||
6230 	    s->nat_rule.ptr->action == PF_BINAT) &&
6231 	    IN6_IS_ADDR_LOOPBACK(&pd.dst->v6))
6232 		m->m_flags |= M_SKIP_FIREWALL;
6233 
6234 	/* XXX: Anybody working on it?! */
6235 	if (r->divert.port)
6236 		printf("pf: divert(9) is not supported for IPv6\n");
6237 
6238 	if (log) {
6239 		struct pf_rule *lr;
6240 
6241 		if (s != NULL && s->nat_rule.ptr != NULL &&
6242 		    s->nat_rule.ptr->log & PF_LOG_ALL)
6243 			lr = s->nat_rule.ptr;
6244 		else
6245 			lr = r;
6246 		PFLOG_PACKET(kif, m, AF_INET6, dir, reason, lr, a, ruleset,
6247 		    &pd, (s == NULL));
6248 	}
6249 
6250 	kif->pfik_bytes[1][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
6251 	kif->pfik_packets[1][dir == PF_OUT][action != PF_PASS]++;
6252 
6253 	if (action == PF_PASS || r->action == PF_DROP) {
6254 		dirndx = (dir == PF_OUT);
6255 		r->packets[dirndx]++;
6256 		r->bytes[dirndx] += pd.tot_len;
6257 		if (a != NULL) {
6258 			a->packets[dirndx]++;
6259 			a->bytes[dirndx] += pd.tot_len;
6260 		}
6261 		if (s != NULL) {
6262 			if (s->nat_rule.ptr != NULL) {
6263 				s->nat_rule.ptr->packets[dirndx]++;
6264 				s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
6265 			}
6266 			if (s->src_node != NULL) {
6267 				s->src_node->packets[dirndx]++;
6268 				s->src_node->bytes[dirndx] += pd.tot_len;
6269 			}
6270 			if (s->nat_src_node != NULL) {
6271 				s->nat_src_node->packets[dirndx]++;
6272 				s->nat_src_node->bytes[dirndx] += pd.tot_len;
6273 			}
6274 			dirndx = (dir == s->direction) ? 0 : 1;
6275 			s->packets[dirndx]++;
6276 			s->bytes[dirndx] += pd.tot_len;
6277 		}
6278 		tr = r;
6279 		nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
6280 		if (nr != NULL && r == &V_pf_default_rule)
6281 			tr = nr;
6282 		if (tr->src.addr.type == PF_ADDR_TABLE)
6283 			pfr_update_stats(tr->src.addr.p.tbl,
6284 			    (s == NULL) ? pd.src :
6285 			    &s->key[(s->direction == PF_IN)]->addr[0],
6286 			    pd.af, pd.tot_len, dir == PF_OUT,
6287 			    r->action == PF_PASS, tr->src.neg);
6288 		if (tr->dst.addr.type == PF_ADDR_TABLE)
6289 			pfr_update_stats(tr->dst.addr.p.tbl,
6290 			    (s == NULL) ? pd.dst :
6291 			    &s->key[(s->direction == PF_IN)]->addr[1],
6292 			    pd.af, pd.tot_len, dir == PF_OUT,
6293 			    r->action == PF_PASS, tr->dst.neg);
6294 	}
6295 
6296 	switch (action) {
6297 	case PF_SYNPROXY_DROP:
6298 		m_freem(*m0);
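		/* FALLTHROUGH */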
6299 	case PF_DEFER:
6300 		*m0 = NULL;
6301 		action = PF_PASS;
6302 		break;
6303 	default:
6304 		/* pf_route6() returns unlocked. */
6305 		if (r->rt) {
6306 			pf_route6(m0, r, dir, kif->pfik_ifp, s, &pd);
6307 			return (action);
6308 		}
6309 		break;
6310 	}
6311 
6312 	if (s)
6313 		PF_STATE_UNLOCK(s);
6314 
6315 	return (action);
6316 }
6317 #endif /* INET6 */
6318