xref: /freebsd/sys/netpfil/pf/pf.c (revision 58571f3ea37c3e5019d39591aa09c1723c00face)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2001 Daniel Hartmeier
5  * Copyright (c) 2002 - 2008 Henning Brauer
6  * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  *
13  *    - Redistributions of source code must retain the above copyright
14  *      notice, this list of conditions and the following disclaimer.
15  *    - Redistributions in binary form must reproduce the above
16  *      copyright notice, this list of conditions and the following
17  *      disclaimer in the documentation and/or other materials provided
18  *      with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
30  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  *
33  * Effort sponsored in part by the Defense Advanced Research Projects
34  * Agency (DARPA) and Air Force Research Laboratory, Air Force
35  * Materiel Command, USAF, under agreement number F30602-01-2-0537.
36  *
37  *	$OpenBSD: pf.c,v 1.634 2009/02/27 12:37:45 henning Exp $
38  */
39 
40 #include <sys/cdefs.h>
41 #include "opt_bpf.h"
42 #include "opt_inet.h"
43 #include "opt_inet6.h"
44 #include "opt_pf.h"
45 #include "opt_sctp.h"
46 
47 #include <sys/param.h>
48 #include <sys/bus.h>
49 #include <sys/endian.h>
50 #include <sys/gsb_crc32.h>
51 #include <sys/hash.h>
52 #include <sys/interrupt.h>
53 #include <sys/kernel.h>
54 #include <sys/kthread.h>
55 #include <sys/limits.h>
56 #include <sys/mbuf.h>
57 #include <sys/random.h>
58 #include <sys/refcount.h>
59 #include <sys/sdt.h>
60 #include <sys/socket.h>
61 #include <sys/sysctl.h>
62 #include <sys/taskqueue.h>
63 #include <sys/ucred.h>
64 
65 #include <crypto/sha2/sha512.h>
66 
67 #include <net/if.h>
68 #include <net/if_var.h>
69 #include <net/if_private.h>
70 #include <net/if_types.h>
71 #include <net/if_vlan_var.h>
72 #include <net/route.h>
73 #include <net/route/nhop.h>
74 #include <net/vnet.h>
75 
76 #include <net/pfil.h>
77 #include <net/pfvar.h>
78 #include <net/if_pflog.h>
79 #include <net/if_pfsync.h>
80 
81 #include <netinet/in_pcb.h>
82 #include <netinet/in_var.h>
83 #include <netinet/in_fib.h>
84 #include <netinet/ip.h>
85 #include <netinet/ip_fw.h>
86 #include <netinet/ip_icmp.h>
87 #include <netinet/icmp_var.h>
88 #include <netinet/ip_var.h>
89 #include <netinet/tcp.h>
90 #include <netinet/tcp_fsm.h>
91 #include <netinet/tcp_seq.h>
92 #include <netinet/tcp_timer.h>
93 #include <netinet/tcp_var.h>
94 #include <netinet/udp.h>
95 #include <netinet/udp_var.h>
96 
97 /* dummynet */
98 #include <netinet/ip_dummynet.h>
99 #include <netinet/ip_fw.h>
100 #include <netpfil/ipfw/dn_heap.h>
101 #include <netpfil/ipfw/ip_fw_private.h>
102 #include <netpfil/ipfw/ip_dn_private.h>
103 
104 #ifdef INET6
105 #include <netinet/ip6.h>
106 #include <netinet/icmp6.h>
107 #include <netinet6/nd6.h>
108 #include <netinet6/ip6_var.h>
109 #include <netinet6/in6_pcb.h>
110 #include <netinet6/in6_fib.h>
111 #include <netinet6/scope6_var.h>
112 #endif /* INET6 */
113 
114 #include <netinet/sctp_header.h>
115 #include <netinet/sctp_crc32.h>
116 
117 #include <netipsec/ah.h>
118 
119 #include <machine/in_cksum.h>
120 #include <security/mac/mac_framework.h>
121 
122 #define	DPFPRINTF(n, x)	if (V_pf_status.debug >= (n)) printf x
123 
124 SDT_PROVIDER_DEFINE(pf);
125 SDT_PROBE_DEFINE2(pf, , test, reason_set, "int", "int");
126 SDT_PROBE_DEFINE4(pf, ip, test, done, "int", "int", "struct pf_krule *",
127     "struct pf_kstate *");
128 SDT_PROBE_DEFINE5(pf, ip, state, lookup, "struct pfi_kkif *",
129     "struct pf_state_key_cmp *", "int", "struct pf_pdesc *",
130     "struct pf_kstate *");
131 SDT_PROBE_DEFINE2(pf, ip, , bound_iface, "struct pf_kstate *",
132     "struct pfi_kkif *");
133 SDT_PROBE_DEFINE4(pf, ip, route_to, entry, "struct mbuf *",
134     "struct pf_pdesc *", "struct pf_kstate *", "struct ifnet *");
135 SDT_PROBE_DEFINE1(pf, ip, route_to, drop, "int");
136 SDT_PROBE_DEFINE2(pf, ip, route_to, output, "struct ifnet *", "int");
137 SDT_PROBE_DEFINE4(pf, ip6, route_to, entry, "struct mbuf *",
138     "struct pf_pdesc *", "struct pf_kstate *", "struct ifnet *");
139 SDT_PROBE_DEFINE1(pf, ip6, route_to, drop, "int");
140 SDT_PROBE_DEFINE2(pf, ip6, route_to, output, "struct ifnet *", "int");
141 SDT_PROBE_DEFINE4(pf, sctp, multihome, test, "struct pfi_kkif *",
142     "struct pf_krule *", "struct mbuf *", "int");
143 SDT_PROBE_DEFINE2(pf, sctp, multihome, add, "uint32_t",
144     "struct pf_sctp_source *");
145 SDT_PROBE_DEFINE3(pf, sctp, multihome, remove, "uint32_t",
146     "struct pf_kstate *", "struct pf_sctp_source *");
147 SDT_PROBE_DEFINE4(pf, sctp, multihome_scan, entry, "int",
148     "int", "struct pf_pdesc *", "int");
149 SDT_PROBE_DEFINE2(pf, sctp, multihome_scan, param, "uint16_t", "uint16_t");
150 SDT_PROBE_DEFINE2(pf, sctp, multihome_scan, ipv4, "struct in_addr *",
151     "int");
152 SDT_PROBE_DEFINE2(pf, sctp, multihome_scan, ipv6, "struct in_addr6 *",
153     "int");
154 
155 SDT_PROBE_DEFINE3(pf, eth, test_rule, entry, "int", "struct ifnet *",
156     "struct mbuf *");
157 SDT_PROBE_DEFINE2(pf, eth, test_rule, test, "int", "struct pf_keth_rule *");
158 SDT_PROBE_DEFINE3(pf, eth, test_rule, mismatch,
159     "int", "struct pf_keth_rule *", "char *");
160 SDT_PROBE_DEFINE2(pf, eth, test_rule, match, "int", "struct pf_keth_rule *");
161 SDT_PROBE_DEFINE2(pf, eth, test_rule, final_match,
162     "int", "struct pf_keth_rule *");
163 SDT_PROBE_DEFINE2(pf, purge, state, rowcount, "int", "size_t");
164 
165 /*
166  * Global variables
167  */
168 
169 /* state tables */
170 VNET_DEFINE(struct pf_altqqueue,	 pf_altqs[4]);
171 VNET_DEFINE(struct pf_kpalist,		 pf_pabuf[3]);
172 VNET_DEFINE(struct pf_altqqueue *,	 pf_altqs_active);
173 VNET_DEFINE(struct pf_altqqueue *,	 pf_altq_ifs_active);
174 VNET_DEFINE(struct pf_altqqueue *,	 pf_altqs_inactive);
175 VNET_DEFINE(struct pf_altqqueue *,	 pf_altq_ifs_inactive);
176 VNET_DEFINE(struct pf_kstatus,		 pf_status);
177 
178 VNET_DEFINE(u_int32_t,			 ticket_altqs_active);
179 VNET_DEFINE(u_int32_t,			 ticket_altqs_inactive);
180 VNET_DEFINE(int,			 altqs_inactive_open);
181 VNET_DEFINE(u_int32_t,			 ticket_pabuf);
182 
183 static const int			 PF_HDR_LIMIT = 20;	/* arbitrary limit */
184 
185 VNET_DEFINE(SHA512_CTX,			 pf_tcp_secret_ctx);
186 #define	V_pf_tcp_secret_ctx		 VNET(pf_tcp_secret_ctx)
187 VNET_DEFINE(u_char,			 pf_tcp_secret[16]);
188 #define	V_pf_tcp_secret			 VNET(pf_tcp_secret)
189 VNET_DEFINE(int,			 pf_tcp_secret_init);
190 #define	V_pf_tcp_secret_init		 VNET(pf_tcp_secret_init)
191 VNET_DEFINE(int,			 pf_tcp_iss_off);
192 #define	V_pf_tcp_iss_off		 VNET(pf_tcp_iss_off)
193 VNET_DECLARE(int,			 pf_vnet_active);
194 #define	V_pf_vnet_active		 VNET(pf_vnet_active)
195 
196 VNET_DEFINE_STATIC(uint32_t, pf_purge_idx);
197 #define V_pf_purge_idx	VNET(pf_purge_idx)
198 
199 #ifdef PF_WANT_32_TO_64_COUNTER
200 VNET_DEFINE_STATIC(uint32_t, pf_counter_periodic_iter);
201 #define	V_pf_counter_periodic_iter	VNET(pf_counter_periodic_iter)
202 
203 VNET_DEFINE(struct allrulelist_head, pf_allrulelist);
204 VNET_DEFINE(size_t, pf_allrulecount);
205 VNET_DEFINE(struct pf_krule *, pf_rulemarker);
206 #endif
207 
208 #define PF_SCTP_MAX_ENDPOINTS		8
209 
210 struct pf_sctp_endpoint;
211 RB_HEAD(pf_sctp_endpoints, pf_sctp_endpoint);
212 struct pf_sctp_source {
213 	sa_family_t			af;
214 	struct pf_addr			addr;
215 	TAILQ_ENTRY(pf_sctp_source)	entry;
216 };
217 TAILQ_HEAD(pf_sctp_sources, pf_sctp_source);
218 struct pf_sctp_endpoint
219 {
220 	uint32_t		 v_tag;
221 	struct pf_sctp_sources	 sources;
222 	RB_ENTRY(pf_sctp_endpoint)	entry;
223 };
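/*
 * Order SCTP endpoints by their verification tag; this is the comparison
 * routine for the red-black tree of tracked endpoints defined below.
 */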
224 static int
225 pf_sctp_endpoint_compare(struct pf_sctp_endpoint *a, struct pf_sctp_endpoint *b)
226 {
227 	return (a->v_tag - b->v_tag);
228 }
229 RB_PROTOTYPE(pf_sctp_endpoints, pf_sctp_endpoint, entry, pf_sctp_endpoint_compare);
230 RB_GENERATE(pf_sctp_endpoints, pf_sctp_endpoint, entry, pf_sctp_endpoint_compare);
231 VNET_DEFINE_STATIC(struct pf_sctp_endpoints, pf_sctp_endpoints);
232 #define V_pf_sctp_endpoints	VNET(pf_sctp_endpoints)
233 static struct mtx_padalign pf_sctp_endpoints_mtx;
234 MTX_SYSINIT(pf_sctp_endpoints_mtx, &pf_sctp_endpoints_mtx, "SCTP endpoints", MTX_DEF);
235 #define	PF_SCTP_ENDPOINTS_LOCK()	mtx_lock(&pf_sctp_endpoints_mtx)
236 #define	PF_SCTP_ENDPOINTS_UNLOCK()	mtx_unlock(&pf_sctp_endpoints_mtx)
237 
238 /*
239  * Queue for pf_intr() sends.
240  */
241 static MALLOC_DEFINE(M_PFTEMP, "pf_temp", "pf(4) temporary allocations");
242 struct pf_send_entry {
243 	STAILQ_ENTRY(pf_send_entry)	pfse_next;
244 	struct mbuf			*pfse_m;
245 	enum {
246 		PFSE_IP,
247 		PFSE_IP6,
248 		PFSE_ICMP,
249 		PFSE_ICMP6,
250 	}				pfse_type;
251 	struct {
252 		int		type;
253 		int		code;
254 		int		mtu;
255 	} icmpopts;
256 };
257 
258 STAILQ_HEAD(pf_send_head, pf_send_entry);
259 VNET_DEFINE_STATIC(struct pf_send_head, pf_sendqueue);
260 #define	V_pf_sendqueue	VNET(pf_sendqueue)
261 
262 static struct mtx_padalign pf_sendqueue_mtx;
263 MTX_SYSINIT(pf_sendqueue_mtx, &pf_sendqueue_mtx, "pf send queue", MTX_DEF);
264 #define	PF_SENDQ_LOCK()		mtx_lock(&pf_sendqueue_mtx)
265 #define	PF_SENDQ_UNLOCK()	mtx_unlock(&pf_sendqueue_mtx)
266 
267 /*
268  * Queue for pf_overload_task() tasks.
269  */
270 struct pf_overload_entry {
271 	SLIST_ENTRY(pf_overload_entry)	next;
272 	struct pf_addr  		addr;
273 	sa_family_t			af;
274 	uint8_t				dir;
275 	struct pf_krule  		*rule;
276 };
277 
278 SLIST_HEAD(pf_overload_head, pf_overload_entry);
279 VNET_DEFINE_STATIC(struct pf_overload_head, pf_overloadqueue);
280 #define V_pf_overloadqueue	VNET(pf_overloadqueue)
281 VNET_DEFINE_STATIC(struct task, pf_overloadtask);
282 #define	V_pf_overloadtask	VNET(pf_overloadtask)
283 
284 static struct mtx_padalign pf_overloadqueue_mtx;
285 MTX_SYSINIT(pf_overloadqueue_mtx, &pf_overloadqueue_mtx,
286     "pf overload/flush queue", MTX_DEF);
287 #define	PF_OVERLOADQ_LOCK()	mtx_lock(&pf_overloadqueue_mtx)
288 #define	PF_OVERLOADQ_UNLOCK()	mtx_unlock(&pf_overloadqueue_mtx)
289 
290 VNET_DEFINE(struct pf_krulequeue, pf_unlinked_rules);
291 struct mtx_padalign pf_unlnkdrules_mtx;
292 MTX_SYSINIT(pf_unlnkdrules_mtx, &pf_unlnkdrules_mtx, "pf unlinked rules",
293     MTX_DEF);
294 
295 struct sx pf_config_lock;
296 SX_SYSINIT(pf_config_lock, &pf_config_lock, "pf config");
297 
298 struct mtx_padalign pf_table_stats_lock;
299 MTX_SYSINIT(pf_table_stats_lock, &pf_table_stats_lock, "pf table stats",
300     MTX_DEF);
301 
302 VNET_DEFINE_STATIC(uma_zone_t,	pf_sources_z);
303 #define	V_pf_sources_z	VNET(pf_sources_z)
304 uma_zone_t		pf_mtag_z;
305 VNET_DEFINE(uma_zone_t,	 pf_state_z);
306 VNET_DEFINE(uma_zone_t,	 pf_state_key_z);
307 VNET_DEFINE(uma_zone_t,	 pf_udp_mapping_z);
308 
309 VNET_DEFINE(struct unrhdr64, pf_stateid);
310 
311 static void		 pf_src_tree_remove_state(struct pf_kstate *);
312 static int		 pf_check_threshold(struct pf_kthreshold *);
313 
314 static void		 pf_change_ap(struct pf_pdesc *, struct pf_addr *, u_int16_t *,
315 			    struct pf_addr *, u_int16_t);
316 static int		 pf_modulate_sack(struct pf_pdesc *,
317 			    struct tcphdr *, struct pf_state_peer *);
318 int			 pf_icmp_mapping(struct pf_pdesc *, u_int8_t, int *,
319 			    u_int16_t *, u_int16_t *);
320 static void		 pf_change_icmp(struct pf_addr *, u_int16_t *,
321 			    struct pf_addr *, struct pf_addr *, u_int16_t,
322 			    u_int16_t *, u_int16_t *, u_int16_t *,
323 			    u_int16_t *, u_int8_t, sa_family_t);
324 int			 pf_change_icmp_af(struct mbuf *, int,
325 			    struct pf_pdesc *, struct pf_pdesc *,
326 			    struct pf_addr *, struct pf_addr *, sa_family_t,
327 			    sa_family_t);
328 int			 pf_translate_icmp_af(int, void *);
329 static void		 pf_send_icmp(struct mbuf *, u_int8_t, u_int8_t,
330 			    int, sa_family_t, struct pf_krule *, int);
331 static void		 pf_detach_state(struct pf_kstate *);
332 static int		 pf_state_key_attach(struct pf_state_key *,
333 			    struct pf_state_key *, struct pf_kstate *);
334 static void		 pf_state_key_detach(struct pf_kstate *, int);
335 static int		 pf_state_key_ctor(void *, int, void *, int);
336 static u_int32_t	 pf_tcp_iss(struct pf_pdesc *);
337 static __inline void	 pf_dummynet_flag_remove(struct mbuf *m,
338 			    struct pf_mtag *pf_mtag);
339 static int		 pf_dummynet(struct pf_pdesc *, struct pf_kstate *,
340 			    struct pf_krule *, struct mbuf **);
341 static int		 pf_dummynet_route(struct pf_pdesc *,
342 			    struct pf_kstate *, struct pf_krule *,
343 			    struct ifnet *, const struct sockaddr *, struct mbuf **);
344 static int		 pf_test_eth_rule(int, struct pfi_kkif *,
345 			    struct mbuf **);
346 static int		 pf_test_rule(struct pf_krule **, struct pf_kstate **,
347 			    struct pf_pdesc *, struct pf_krule **,
348 			    struct pf_kruleset **, u_short *, struct inpcb *);
349 static int		 pf_create_state(struct pf_krule *,
350 			    struct pf_test_ctx *,
351 			    struct pf_kstate **, u_int16_t, u_int16_t);
352 static int		 pf_state_key_addr_setup(struct pf_pdesc *,
353 			    struct pf_state_key_cmp *, int);
354 static int		 pf_tcp_track_full(struct pf_kstate *,
355 			    struct pf_pdesc *, u_short *, int *,
356 			    struct pf_state_peer *, struct pf_state_peer *,
357 			    u_int8_t, u_int8_t);
358 static int		 pf_tcp_track_sloppy(struct pf_kstate *,
359 			    struct pf_pdesc *, u_short *,
360 			    struct pf_state_peer *, struct pf_state_peer *,
361 			    u_int8_t, u_int8_t);
362 static int		 pf_test_state(struct pf_kstate **, struct pf_pdesc *,
363 			    u_short *);
364 int			 pf_icmp_state_lookup(struct pf_state_key_cmp *,
365 			    struct pf_pdesc *, struct pf_kstate **,
366 			    u_int16_t, u_int16_t, int, int *, int, int);
367 static int		 pf_test_state_icmp(struct pf_kstate **,
368 			    struct pf_pdesc *, u_short *);
369 static int		 pf_sctp_track(struct pf_kstate *, struct pf_pdesc *,
370 			    u_short *);
371 static void		 pf_sctp_multihome_detach_addr(const struct pf_kstate *);
372 static void		 pf_sctp_multihome_delayed(struct pf_pdesc *,
373 			    struct pfi_kkif *, struct pf_kstate *, int);
374 static u_int16_t	 pf_calc_mss(struct pf_addr *, sa_family_t,
375 				int, u_int16_t);
376 static int		 pf_check_proto_cksum(struct mbuf *, int, int,
377 			    u_int8_t, sa_family_t);
378 static int		 pf_walk_header(struct pf_pdesc *, struct ip *, u_short *);
379 #ifdef INET6
380 static int		 pf_walk_option6(struct pf_pdesc *, struct ip6_hdr *,
381 			    int, int, u_short *);
382 static int		 pf_walk_header6(struct pf_pdesc *, struct ip6_hdr *,
383 			    u_short *);
384 #endif
385 static void		 pf_print_state_parts(struct pf_kstate *,
386 			    struct pf_state_key *, struct pf_state_key *);
387 static int		 pf_patch_8(struct pf_pdesc *, u_int8_t *, u_int8_t,
388 			    bool);
389 static int		 pf_find_state(struct pf_pdesc *,
390 			    const struct pf_state_key_cmp *, struct pf_kstate **);
391 static bool		 pf_src_connlimit(struct pf_kstate *);
392 static int		 pf_match_rcvif(struct mbuf *, struct pf_krule *);
393 static void		 pf_counters_inc(int, struct pf_pdesc *,
394 			    struct pf_kstate *, struct pf_krule *,
395 			    struct pf_krule *);
396 static void		 pf_log_matches(struct pf_pdesc *, struct pf_krule *,
397 			    struct pf_krule *, struct pf_kruleset *,
398 			    struct pf_krule_slist *);
399 static void		 pf_overload_task(void *v, int pending);
400 static u_short		 pf_insert_src_node(struct pf_ksrc_node *[PF_SN_MAX],
401 			    struct pf_srchash *[PF_SN_MAX], struct pf_krule *,
402 			    struct pf_addr *, sa_family_t, struct pf_addr *,
403 			    struct pfi_kkif *, pf_sn_types_t);
404 static u_int		 pf_purge_expired_states(u_int, int);
405 static void		 pf_purge_unlinked_rules(void);
406 static int		 pf_mtag_uminit(void *, int, int);
407 static void		 pf_mtag_free(struct m_tag *);
408 static void		 pf_packet_rework_nat(struct pf_pdesc *, int,
409 			    struct pf_state_key *);
410 #ifdef INET
411 static void		 pf_route(struct pf_krule *,
412 			    struct ifnet *, struct pf_kstate *,
413 			    struct pf_pdesc *, struct inpcb *);
414 #endif /* INET */
415 #ifdef INET6
416 static void		 pf_change_a6(struct pf_addr *, u_int16_t *,
417 			    struct pf_addr *, u_int8_t);
418 static void		 pf_route6(struct pf_krule *,
419 			    struct ifnet *, struct pf_kstate *,
420 			    struct pf_pdesc *, struct inpcb *);
421 #endif /* INET6 */
422 static __inline void pf_set_protostate(struct pf_kstate *, int, u_int8_t);
423 
424 int in4_cksum(struct mbuf *m, u_int8_t nxt, int off, int len);
425 
426 extern int pf_end_threads;
427 extern struct proc *pf_purge_proc;
428 
429 VNET_DEFINE(struct pf_limit, pf_limits[PF_LIMIT_MAX]);
430 
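/*
 * Undo pf's NAT rewrites on a packet: select the stack-side state key for
 * outbound packets (or the wire-side key for inbound ones) and restore the
 * addresses and ports recorded there.
 */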
431 #define	PACKET_UNDO_NAT(_pd, _off, _s)					\
432 	do {								\
433 		struct pf_state_key *nk;				\
434 		if ((pd->dir) == PF_OUT)				\
435 			nk = (_s)->key[PF_SK_STACK];			\
436 		else							\
437 			nk = (_s)->key[PF_SK_WIRE];			\
438 		pf_packet_rework_nat(_pd, _off, nk);		\
439 	} while (0)
440 
441 #define	PACKET_LOOPED(pd)	((pd)->pf_mtag &&			\
442 				 (pd)->pf_mtag->flags & PF_MTAG_FLAG_PACKET_LOOPED)
443 
444 static struct pfi_kkif *
445 BOUND_IFACE(struct pf_kstate *st, struct pf_pdesc *pd)
446 {
447 	struct pfi_kkif *k = pd->kif;
448 
449 	SDT_PROBE2(pf, ip, , bound_iface, st, k);
450 
451 	/* Floating unless otherwise specified. */
452 	if (! (st->rule->rule_flag & PFRULE_IFBOUND))
453 		return (V_pfi_all);
454 
455 	/*
456 	 * Initially set to all, because we don't know what interface we'll be
457 	 * sending this out on when we create the state.
458 	 */
459 	if (st->rule->rt == PF_REPLYTO || (pd->af != pd->naf && st->direction == PF_IN))
460 		return (V_pfi_all);
461 
462 	/*
463 	 * If this state is created based on another state (e.g. SCTP
464 	 * multihome) always set it floating initially. We can't know for sure
465 	 * what interface the actual traffic for this state will come in on.
466 	 */
467 	if (pd->related_rule)
468 		return (V_pfi_all);
469 
470 	/* Don't overrule the interface for states created on incoming packets. */
471 	if (st->direction == PF_IN)
472 		return (k);
473 
474 	/* No route-to, so don't overrule. */
475 	if (st->act.rt != PF_ROUTETO)
476 		return (k);
477 
478 	/* Bind to the route-to interface. */
479 	return (st->act.rt_kif);
480 }
481 
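/*
 * Bump the current and total state counters on the rule that created the
 * state, its anchor, its NAT rule and every matched rule; STATE_DEC_COUNTERS
 * reverses only the "current" counts.
 */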
482 #define	STATE_INC_COUNTERS(s)						\
483 	do {								\
484 		struct pf_krule_item *mrm;				\
485 		counter_u64_add(s->rule->states_cur, 1);		\
486 		counter_u64_add(s->rule->states_tot, 1);		\
487 		if (s->anchor != NULL) {				\
488 			counter_u64_add(s->anchor->states_cur, 1);	\
489 			counter_u64_add(s->anchor->states_tot, 1);	\
490 		}							\
491 		if (s->nat_rule != NULL) {				\
492 			counter_u64_add(s->nat_rule->states_cur, 1);\
493 			counter_u64_add(s->nat_rule->states_tot, 1);\
494 		}							\
495 		SLIST_FOREACH(mrm, &s->match_rules, entry) {		\
496 			counter_u64_add(mrm->r->states_cur, 1);		\
497 			counter_u64_add(mrm->r->states_tot, 1);		\
498 		}							\
499 	} while (0)
500 
501 #define	STATE_DEC_COUNTERS(s)						\
502 	do {								\
503 		struct pf_krule_item *mrm;				\
504 		if (s->nat_rule != NULL)				\
505 			counter_u64_add(s->nat_rule->states_cur, -1);\
506 		if (s->anchor != NULL)				\
507 			counter_u64_add(s->anchor->states_cur, -1);	\
508 		counter_u64_add(s->rule->states_cur, -1);		\
509 		SLIST_FOREACH(mrm, &s->match_rules, entry)		\
510 			counter_u64_add(mrm->r->states_cur, -1);	\
511 	} while (0)
512 
513 MALLOC_DEFINE(M_PFHASH, "pf_hash", "pf(4) hash header structures");
514 MALLOC_DEFINE(M_PF_RULE_ITEM, "pf_krule_item", "pf(4) rule items");
515 VNET_DEFINE(struct pf_keyhash *, pf_keyhash);
516 VNET_DEFINE(struct pf_idhash *, pf_idhash);
517 VNET_DEFINE(struct pf_srchash *, pf_srchash);
518 VNET_DEFINE(struct pf_udpendpointhash *, pf_udpendpointhash);
519 VNET_DEFINE(struct pf_udpendpointmapping *, pf_udpendpointmapping);
520 
521 SYSCTL_NODE(_net, OID_AUTO, pf, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
522     "pf(4)");
523 
524 VNET_DEFINE(u_long, pf_hashmask);
525 VNET_DEFINE(u_long, pf_srchashmask);
526 VNET_DEFINE(u_long, pf_udpendpointhashmask);
527 VNET_DEFINE_STATIC(u_long, pf_hashsize);
528 #define V_pf_hashsize	VNET(pf_hashsize)
529 VNET_DEFINE_STATIC(u_long, pf_srchashsize);
530 #define V_pf_srchashsize	VNET(pf_srchashsize)
531 VNET_DEFINE_STATIC(u_long, pf_udpendpointhashsize);
532 #define V_pf_udpendpointhashsize	VNET(pf_udpendpointhashsize)
533 u_long	pf_ioctl_maxcount = 65535;
534 
535 SYSCTL_ULONG(_net_pf, OID_AUTO, states_hashsize, CTLFLAG_VNET | CTLFLAG_RDTUN,
536     &VNET_NAME(pf_hashsize), 0, "Size of pf(4) states hashtable");
537 SYSCTL_ULONG(_net_pf, OID_AUTO, source_nodes_hashsize, CTLFLAG_VNET | CTLFLAG_RDTUN,
538     &VNET_NAME(pf_srchashsize), 0, "Size of pf(4) source nodes hashtable");
539 SYSCTL_ULONG(_net_pf, OID_AUTO, udpendpoint_hashsize, CTLFLAG_VNET | CTLFLAG_RDTUN,
540     &VNET_NAME(pf_udpendpointhashsize), 0, "Size of pf(4) endpoint hashtable");
541 SYSCTL_ULONG(_net_pf, OID_AUTO, request_maxcount, CTLFLAG_RWTUN,
542     &pf_ioctl_maxcount, 0, "Maximum number of tables, addresses, ... in a single ioctl() call");
543 
544 VNET_DEFINE(void *, pf_swi_cookie);
545 VNET_DEFINE(struct intr_event *, pf_swi_ie);
546 
547 VNET_DEFINE(uint32_t, pf_hashseed);
548 #define	V_pf_hashseed	VNET(pf_hashseed)
549 
550 static void
551 pf_sctp_checksum(struct mbuf *m, int off)
552 {
553 	uint32_t sum = 0;
554 
555 	/* Zero out the checksum, to enable recalculation. */
556 	m_copyback(m, off + offsetof(struct sctphdr, checksum),
557 	    sizeof(sum), (caddr_t)&sum);
558 
559 	sum = sctp_calculate_cksum(m, off);
560 
561 	m_copyback(m, off + offsetof(struct sctphdr, checksum),
562 	    sizeof(sum), (caddr_t)&sum);
563 }
564 
565 int
566 pf_addr_cmp(struct pf_addr *a, struct pf_addr *b, sa_family_t af)
567 {
568 
569 	switch (af) {
570 #ifdef INET
571 	case AF_INET:
572 		if (a->addr32[0] > b->addr32[0])
573 			return (1);
574 		if (a->addr32[0] < b->addr32[0])
575 			return (-1);
576 		break;
577 #endif /* INET */
578 #ifdef INET6
579 	case AF_INET6:
580 		if (a->addr32[3] > b->addr32[3])
581 			return (1);
582 		if (a->addr32[3] < b->addr32[3])
583 			return (-1);
584 		if (a->addr32[2] > b->addr32[2])
585 			return (1);
586 		if (a->addr32[2] < b->addr32[2])
587 			return (-1);
588 		if (a->addr32[1] > b->addr32[1])
589 			return (1);
590 		if (a->addr32[1] < b->addr32[1])
591 			return (-1);
592 		if (a->addr32[0] > b->addr32[0])
593 			return (1);
594 		if (a->addr32[0] < b->addr32[0])
595 			return (-1);
596 		break;
597 #endif /* INET6 */
598 	default:
599 		unhandled_af(af);
600 	}
601 	return (0);
602 }
603 
604 static bool
605 pf_is_loopback(sa_family_t af, struct pf_addr *addr)
606 {
607 	switch (af) {
608 #ifdef INET
609 	case AF_INET:
610 		return IN_LOOPBACK(ntohl(addr->v4.s_addr));
611 #endif /* INET */
612 	case AF_INET6:
613 		return IN6_IS_ADDR_LOOPBACK(&addr->v6);
614 	default:
615 		unhandled_af(af);
616 	}
617 }
618 
619 static void
620 pf_packet_rework_nat(struct pf_pdesc *pd, int off, struct pf_state_key *nk)
621 {
622 
623 	switch (pd->proto) {
624 	case IPPROTO_TCP: {
625 		struct tcphdr *th = &pd->hdr.tcp;
626 
627 		if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af))
628 			pf_change_ap(pd, pd->src, &th->th_sport,
629 			    &nk->addr[pd->sidx], nk->port[pd->sidx]);
630 		if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af))
631 			pf_change_ap(pd, pd->dst, &th->th_dport,
632 			    &nk->addr[pd->didx], nk->port[pd->didx]);
633 		m_copyback(pd->m, off, sizeof(*th), (caddr_t)th);
634 		break;
635 	}
636 	case IPPROTO_UDP: {
637 		struct udphdr *uh = &pd->hdr.udp;
638 
639 		if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af))
640 			pf_change_ap(pd, pd->src, &uh->uh_sport,
641 			    &nk->addr[pd->sidx], nk->port[pd->sidx]);
642 		if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af))
643 			pf_change_ap(pd, pd->dst, &uh->uh_dport,
644 			    &nk->addr[pd->didx], nk->port[pd->didx]);
645 		m_copyback(pd->m, off, sizeof(*uh), (caddr_t)uh);
646 		break;
647 	}
648 	case IPPROTO_SCTP: {
649 		struct sctphdr *sh = &pd->hdr.sctp;
650 
651 		if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af)) {
652 			pf_change_ap(pd, pd->src, &sh->src_port,
653 			    &nk->addr[pd->sidx], nk->port[pd->sidx]);
654 		}
655 		if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af)) {
656 			pf_change_ap(pd, pd->dst, &sh->dest_port,
657 			    &nk->addr[pd->didx], nk->port[pd->didx]);
658 		}
659 
660 		break;
661 	}
662 	case IPPROTO_ICMP: {
663 		struct icmp *ih = &pd->hdr.icmp;
664 
665 		if (nk->port[pd->sidx] != ih->icmp_id) {
666 			pd->hdr.icmp.icmp_cksum = pf_cksum_fixup(
667 			    ih->icmp_cksum, ih->icmp_id,
668 			    nk->port[pd->sidx], 0);
669 			ih->icmp_id = nk->port[pd->sidx];
670 			pd->sport = &ih->icmp_id;
671 
672 			m_copyback(pd->m, off, ICMP_MINLEN, (caddr_t)ih);
673 		}
674 		/* FALLTHROUGH */
675 	}
676 	default:
677 		if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af)) {
678 			switch (pd->af) {
679 			case AF_INET:
680 				pf_change_a(&pd->src->v4.s_addr,
681 				    pd->ip_sum, nk->addr[pd->sidx].v4.s_addr,
682 				    0);
683 				break;
684 			case AF_INET6:
685 				pf_addrcpy(pd->src, &nk->addr[pd->sidx],
686 				    pd->af);
687 				break;
688 			default:
689 				unhandled_af(pd->af);
690 			}
691 		}
692 		if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af)) {
693 			switch (pd->af) {
694 			case AF_INET:
695 				pf_change_a(&pd->dst->v4.s_addr,
696 				    pd->ip_sum, nk->addr[pd->didx].v4.s_addr,
697 				    0);
698 				break;
699 			case AF_INET6:
700 				pf_addrcpy(pd->dst, &nk->addr[pd->didx],
701 				    pd->af);
702 				break;
703 			default:
704 				unhandled_af(pd->af);
705 			}
706 		}
707 		break;
708 	}
709 }
710 
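/*
 * Hash the comparable prefix of a state key (struct pf_state_key_cmp) with
 * murmur3 and mask the result down to an index into the state-key hash table.
 */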
711 static __inline uint32_t
712 pf_hashkey(const struct pf_state_key *sk)
713 {
714 	uint32_t h;
715 
716 	h = murmur3_32_hash32((const uint32_t *)sk,
717 	    sizeof(struct pf_state_key_cmp)/sizeof(uint32_t),
718 	    V_pf_hashseed);
719 
720 	return (h & V_pf_hashmask);
721 }
722 
723 __inline uint32_t
724 pf_hashsrc(struct pf_addr *addr, sa_family_t af)
725 {
726 	uint32_t h;
727 
728 	switch (af) {
729 	case AF_INET:
730 		h = murmur3_32_hash32((uint32_t *)&addr->v4,
731 		    sizeof(addr->v4)/sizeof(uint32_t), V_pf_hashseed);
732 		break;
733 	case AF_INET6:
734 		h = murmur3_32_hash32((uint32_t *)&addr->v6,
735 		    sizeof(addr->v6)/sizeof(uint32_t), V_pf_hashseed);
736 		break;
737 	default:
738 		unhandled_af(af);
739 	}
740 
741 	return (h & V_pf_srchashmask);
742 }
743 
744 static inline uint32_t
745 pf_hashudpendpoint(struct pf_udp_endpoint *endpoint)
746 {
747 	uint32_t h;
748 
749 	h = murmur3_32_hash32((uint32_t *)endpoint,
750 	    sizeof(struct pf_udp_endpoint_cmp)/sizeof(uint32_t),
751 	    V_pf_hashseed);
752 	return (h & V_pf_udpendpointhashmask);
753 }
754 
755 #ifdef ALTQ
756 static int
757 pf_state_hash(struct pf_kstate *s)
758 {
759 	u_int32_t hv = (intptr_t)s / sizeof(*s);
760 
761 	hv ^= crc32(&s->src, sizeof(s->src));
762 	hv ^= crc32(&s->dst, sizeof(s->dst));
763 	if (hv == 0)
764 		hv = 1;
765 	return (hv);
766 }
767 #endif /* ALTQ */
768 
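/*
 * Update a peer's protocol state.  For TCP states created by this host the
 * half-open counter is decremented once the source peer leaves the half-open
 * range, i.e. becomes established or closed.
 */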
769 static __inline void
770 pf_set_protostate(struct pf_kstate *s, int which, u_int8_t newstate)
771 {
772 	if (which == PF_PEER_DST || which == PF_PEER_BOTH)
773 		s->dst.state = newstate;
774 	if (which == PF_PEER_DST)
775 		return;
776 	if (s->src.state == newstate)
777 		return;
778 	if (s->creatorid == V_pf_status.hostid &&
779 	    s->key[PF_SK_STACK] != NULL &&
780 	    s->key[PF_SK_STACK]->proto == IPPROTO_TCP &&
781 	    !(TCPS_HAVEESTABLISHED(s->src.state) ||
782 	    s->src.state == TCPS_CLOSED) &&
783 	    (TCPS_HAVEESTABLISHED(newstate) || newstate == TCPS_CLOSED))
784 		atomic_add_32(&V_pf_status.states_halfopen, -1);
785 
786 	s->src.state = newstate;
787 }
788 
789 bool
790 pf_init_threshold(struct pf_kthreshold *threshold,
791     u_int32_t limit, u_int32_t seconds)
792 {
793 	threshold->limit = limit;
794 	threshold->seconds = seconds;
795 	threshold->cr = counter_rate_alloc(M_NOWAIT, seconds);
796 
797 	return (threshold->cr != NULL);
798 }
799 
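/*
 * Report whether the rate tracked by the threshold has exceeded its
 * configured limit within the current interval.
 */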
800 static int
801 pf_check_threshold(struct pf_kthreshold *threshold)
802 {
803 	return (counter_ratecheck(threshold->cr, threshold->limit) < 0);
804 }
805 
806 static bool
807 pf_src_connlimit(struct pf_kstate *state)
808 {
809 	struct pf_overload_entry	*pfoe;
810 	struct pf_ksrc_node		*src_node = state->sns[PF_SN_LIMIT];
811 	bool				 limited = false;
812 
813 	PF_STATE_LOCK_ASSERT(state);
814 	PF_SRC_NODE_LOCK(src_node);
815 
816 	src_node->conn++;
817 	state->src.tcp_est = 1;
818 
819 	if (state->rule->max_src_conn &&
820 	    state->rule->max_src_conn <
821 	    src_node->conn) {
822 		counter_u64_add(V_pf_status.lcounters[LCNT_SRCCONN], 1);
823 		limited = true;
824 	}
825 
826 	if (state->rule->max_src_conn_rate.limit &&
827 	    pf_check_threshold(&src_node->conn_rate)) {
828 		counter_u64_add(V_pf_status.lcounters[LCNT_SRCCONNRATE], 1);
829 		limited = true;
830 	}
831 
832 	if (!limited)
833 		goto done;
834 
835 	/* Kill this state. */
836 	state->timeout = PFTM_PURGE;
837 	pf_set_protostate(state, PF_PEER_BOTH, TCPS_CLOSED);
838 
839 	if (state->rule->overload_tbl == NULL)
840 		goto done;
841 
842 	/* Schedule overloading and flushing task. */
843 	pfoe = malloc(sizeof(*pfoe), M_PFTEMP, M_NOWAIT);
844 	if (pfoe == NULL)
845 		goto done;  /* too bad :( */
846 
847 	bcopy(&src_node->addr, &pfoe->addr, sizeof(pfoe->addr));
848 	pfoe->af = state->key[PF_SK_WIRE]->af;
849 	pfoe->rule = state->rule;
850 	pfoe->dir = state->direction;
851 	PF_OVERLOADQ_LOCK();
852 	SLIST_INSERT_HEAD(&V_pf_overloadqueue, pfoe, next);
853 	PF_OVERLOADQ_UNLOCK();
854 	taskqueue_enqueue(taskqueue_swi, &V_pf_overloadtask);
855 
856 done:
857 	PF_SRC_NODE_UNLOCK(src_node);
858 	return (limited);
859 }
860 
861 static void
862 pf_overload_task(void *v, int pending)
863 {
864 	struct pf_overload_head queue;
865 	struct pfr_addr p;
866 	struct pf_overload_entry *pfoe, *pfoe1;
867 	uint32_t killed = 0;
868 
869 	CURVNET_SET((struct vnet *)v);
870 
871 	PF_OVERLOADQ_LOCK();
872 	queue = V_pf_overloadqueue;
873 	SLIST_INIT(&V_pf_overloadqueue);
874 	PF_OVERLOADQ_UNLOCK();
875 
876 	bzero(&p, sizeof(p));
877 	SLIST_FOREACH(pfoe, &queue, next) {
878 		counter_u64_add(V_pf_status.lcounters[LCNT_OVERLOAD_TABLE], 1);
879 		if (V_pf_status.debug >= PF_DEBUG_MISC) {
880 			printf("%s: blocking address ", __func__);
881 			pf_print_host(&pfoe->addr, 0, pfoe->af);
882 			printf("\n");
883 		}
884 
885 		p.pfra_af = pfoe->af;
886 		switch (pfoe->af) {
887 #ifdef INET
888 		case AF_INET:
889 			p.pfra_net = 32;
890 			p.pfra_ip4addr = pfoe->addr.v4;
891 			break;
892 #endif /* INET */
893 #ifdef INET6
894 		case AF_INET6:
895 			p.pfra_net = 128;
896 			p.pfra_ip6addr = pfoe->addr.v6;
897 			break;
898 #endif /* INET6 */
899 		default:
900 			unhandled_af(pfoe->af);
901 		}
902 
903 		PF_RULES_WLOCK();
904 		pfr_insert_kentry(pfoe->rule->overload_tbl, &p, time_second);
905 		PF_RULES_WUNLOCK();
906 	}
907 
908 	/*
909 	 * Remove those entries that don't need flushing.
910 	 */
911 	SLIST_FOREACH_SAFE(pfoe, &queue, next, pfoe1)
912 		if (pfoe->rule->flush == 0) {
913 			SLIST_REMOVE(&queue, pfoe, pf_overload_entry, next);
914 			free(pfoe, M_PFTEMP);
915 		} else
916 			counter_u64_add(
917 			    V_pf_status.lcounters[LCNT_OVERLOAD_FLUSH], 1);
918 
919 	/* If nothing to flush, return. */
920 	if (SLIST_EMPTY(&queue)) {
921 		CURVNET_RESTORE();
922 		return;
923 	}
924 
925 	for (int i = 0; i <= V_pf_hashmask; i++) {
926 		struct pf_idhash *ih = &V_pf_idhash[i];
927 		struct pf_state_key *sk;
928 		struct pf_kstate *s;
929 
930 		PF_HASHROW_LOCK(ih);
931 		LIST_FOREACH(s, &ih->states, entry) {
932 		    sk = s->key[PF_SK_WIRE];
933 		    SLIST_FOREACH(pfoe, &queue, next)
934 			if (sk->af == pfoe->af &&
935 			    ((pfoe->rule->flush & PF_FLUSH_GLOBAL) ||
936 			    pfoe->rule == s->rule) &&
937 			    ((pfoe->dir == PF_OUT &&
938 			    PF_AEQ(&pfoe->addr, &sk->addr[1], sk->af)) ||
939 			    (pfoe->dir == PF_IN &&
940 			    PF_AEQ(&pfoe->addr, &sk->addr[0], sk->af)))) {
941 				s->timeout = PFTM_PURGE;
942 				pf_set_protostate(s, PF_PEER_BOTH, TCPS_CLOSED);
943 				killed++;
944 			}
945 		}
946 		PF_HASHROW_UNLOCK(ih);
947 	}
948 	SLIST_FOREACH_SAFE(pfoe, &queue, next, pfoe1)
949 		free(pfoe, M_PFTEMP);
950 	if (V_pf_status.debug >= PF_DEBUG_MISC)
951 		printf("%s: %u states killed\n", __func__, killed);
952 
953 	CURVNET_RESTORE();
954 }
955 
956 /*
957  * On node found, always returns locked. On not found, 'returnlocked' decides.
958  */
959 struct pf_ksrc_node *
960 pf_find_src_node(struct pf_addr *src, struct pf_krule *rule, sa_family_t af,
961     struct pf_srchash **sh, pf_sn_types_t sn_type, bool returnlocked)
962 {
963 	struct pf_ksrc_node *n;
964 
965 	counter_u64_add(V_pf_status.scounters[SCNT_SRC_NODE_SEARCH], 1);
966 
967 	*sh = &V_pf_srchash[pf_hashsrc(src, af)];
968 	PF_HASHROW_LOCK(*sh);
969 	LIST_FOREACH(n, &(*sh)->nodes, entry)
970 		if (n->rule == rule && n->af == af && n->type == sn_type &&
971 		    ((af == AF_INET && n->addr.v4.s_addr == src->v4.s_addr) ||
972 		    (af == AF_INET6 && bcmp(&n->addr, src, sizeof(*src)) == 0)))
973 			break;
974 
975 	if (n == NULL && !returnlocked)
976 		PF_HASHROW_UNLOCK(*sh);
977 
978 	return (n);
979 }
980 
981 bool
982 pf_src_node_exists(struct pf_ksrc_node **sn, struct pf_srchash *sh)
983 {
984 	struct pf_ksrc_node	*cur;
985 
986 	if ((*sn) == NULL)
987 		return (false);
988 
989 	KASSERT(sh != NULL, ("%s: sh is NULL", __func__));
990 
991 	counter_u64_add(V_pf_status.scounters[SCNT_SRC_NODE_SEARCH], 1);
992 	PF_HASHROW_LOCK(sh);
993 	LIST_FOREACH(cur, &(sh->nodes), entry) {
994 		if (cur == (*sn) &&
995 		    cur->expire != 1) /* Ignore nodes being killed */
996 			return (true);
997 	}
998 	PF_HASHROW_UNLOCK(sh);
999 	(*sn) = NULL;
1000 	return (false);
1001 }
1002 
1003 static void
1004 pf_free_src_node(struct pf_ksrc_node *sn)
1005 {
1006 
1007 	for (int i = 0; i < 2; i++) {
1008 		counter_u64_free(sn->bytes[i]);
1009 		counter_u64_free(sn->packets[i]);
1010 	}
1011 	counter_rate_free(sn->conn_rate.cr);
1012 	uma_zfree(V_pf_sources_z, sn);
1013 }
1014 
1015 static u_short
1016 pf_insert_src_node(struct pf_ksrc_node *sns[PF_SN_MAX],
1017     struct pf_srchash *snhs[PF_SN_MAX], struct pf_krule *rule,
1018     struct pf_addr *src, sa_family_t af, struct pf_addr *raddr,
1019     struct pfi_kkif *rkif, pf_sn_types_t sn_type)
1020 {
1021 	u_short			 reason = 0;
1022 	struct pf_krule		*r_track = rule;
1023 	struct pf_ksrc_node	**sn = &(sns[sn_type]);
1024 	struct pf_srchash	**sh = &(snhs[sn_type]);
1025 
1026 	KASSERT(sn_type != PF_SN_LIMIT || (raddr == NULL && rkif == NULL),
1027 	    ("%s: raddr and rkif must be NULL for PF_SN_LIMIT", __func__));
1028 
1029 	KASSERT(sn_type != PF_SN_LIMIT || (rule->rule_flag & PFRULE_SRCTRACK),
1030 	    ("%s: PF_SN_LIMIT only valid for rules with PFRULE_SRCTRACK", __func__));
1031 
1032 	/*
1033 	 * XXX: There could be a KASSERT for
1034 	 * sn_type == PF_SN_LIMIT || (pool->opts & PF_POOL_STICKYADDR)
1035 	 * but we'd need to pass pool *only* for this KASSERT.
1036 	 */
1037 
1038 	if ((rule->rule_flag & PFRULE_SRCTRACK) &&
1039 	    !(rule->rule_flag & PFRULE_RULESRCTRACK))
1040 		r_track = &V_pf_default_rule;
1041 
1042 	/*
1043 	 * Request the sh to always be locked, as we might insert a new sn.
1044 	 */
1045 	if (*sn == NULL)
1046 		*sn = pf_find_src_node(src, r_track, af, sh, sn_type, true);
1047 
1048 	if (*sn == NULL) {
1049 		PF_HASHROW_ASSERT(*sh);
1050 
1051 		if (sn_type == PF_SN_LIMIT && rule->max_src_nodes &&
1052 		    counter_u64_fetch(r_track->src_nodes[sn_type]) >= rule->max_src_nodes) {
1053 			counter_u64_add(V_pf_status.lcounters[LCNT_SRCNODES], 1);
1054 			reason = PFRES_SRCLIMIT;
1055 			goto done;
1056 		}
1057 
1058 		(*sn) = uma_zalloc(V_pf_sources_z, M_NOWAIT | M_ZERO);
1059 		if ((*sn) == NULL) {
1060 			reason = PFRES_MEMORY;
1061 			goto done;
1062 		}
1063 
1064 		for (int i = 0; i < 2; i++) {
1065 			(*sn)->bytes[i] = counter_u64_alloc(M_NOWAIT);
1066 			(*sn)->packets[i] = counter_u64_alloc(M_NOWAIT);
1067 
1068 			if ((*sn)->bytes[i] == NULL || (*sn)->packets[i] == NULL) {
1069 				pf_free_src_node(*sn);
1070 				reason = PFRES_MEMORY;
1071 				goto done;
1072 			}
1073 		}
1074 
1075 		if (sn_type == PF_SN_LIMIT)
1076 			if (! pf_init_threshold(&(*sn)->conn_rate,
1077 			    rule->max_src_conn_rate.limit,
1078 			    rule->max_src_conn_rate.seconds)) {
1079 				pf_free_src_node(*sn);
1080 				reason = PFRES_MEMORY;
1081 				goto done;
1082 			}
1083 
1084 		MPASS((*sn)->lock == NULL);
1085 		(*sn)->lock = &(*sh)->lock;
1086 
1087 		(*sn)->af = af;
1088 		(*sn)->rule = r_track;
1089 		pf_addrcpy(&(*sn)->addr, src, af);
1090 		if (raddr != NULL)
1091 			pf_addrcpy(&(*sn)->raddr, raddr, af);
1092 		(*sn)->rkif = rkif;
1093 		LIST_INSERT_HEAD(&(*sh)->nodes, *sn, entry);
1094 		(*sn)->creation = time_uptime;
1095 		(*sn)->ruletype = rule->action;
1096 		(*sn)->type = sn_type;
1097 		counter_u64_add(r_track->src_nodes[sn_type], 1);
1098 		counter_u64_add(V_pf_status.scounters[SCNT_SRC_NODE_INSERT], 1);
1099 	} else {
1100 		if (sn_type == PF_SN_LIMIT && rule->max_src_states &&
1101 		    (*sn)->states >= rule->max_src_states) {
1102 			counter_u64_add(V_pf_status.lcounters[LCNT_SRCSTATES],
1103 			    1);
1104 			reason = PFRES_SRCLIMIT;
1105 			goto done;
1106 		}
1107 	}
1108 done:
1109 	if (reason == 0)
1110 		(*sn)->states++;
1111 	else
1112 		(*sn) = NULL;
1113 
1114 	PF_HASHROW_UNLOCK(*sh);
1115 	return (reason);
1116 }
1117 
1118 void
1119 pf_unlink_src_node(struct pf_ksrc_node *src)
1120 {
1121 	PF_SRC_NODE_LOCK_ASSERT(src);
1122 
1123 	LIST_REMOVE(src, entry);
1124 	if (src->rule)
1125 		counter_u64_add(src->rule->src_nodes[src->type], -1);
1126 }
1127 
1128 u_int
1129 pf_free_src_nodes(struct pf_ksrc_node_list *head)
1130 {
1131 	struct pf_ksrc_node *sn, *tmp;
1132 	u_int count = 0;
1133 
1134 	LIST_FOREACH_SAFE(sn, head, entry, tmp) {
1135 		pf_free_src_node(sn);
1136 		count++;
1137 	}
1138 
1139 	counter_u64_add(V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS], count);
1140 
1141 	return (count);
1142 }
1143 
1144 void
1145 pf_mtag_initialize(void)
1146 {
1147 
1148 	pf_mtag_z = uma_zcreate("pf mtags", sizeof(struct m_tag) +
1149 	    sizeof(struct pf_mtag), NULL, NULL, pf_mtag_uminit, NULL,
1150 	    UMA_ALIGN_PTR, 0);
1151 }
1152 
1153 /* Per-vnet data storage structures initialization. */
1154 void
1155 pf_initialize(void)
1156 {
1157 	struct pf_keyhash	*kh;
1158 	struct pf_idhash	*ih;
1159 	struct pf_srchash	*sh;
1160 	struct pf_udpendpointhash	*uh;
1161 	u_int i;
1162 
1163 	if (V_pf_hashsize == 0 || !powerof2(V_pf_hashsize))
1164 		V_pf_hashsize = PF_HASHSIZ;
1165 	if (V_pf_srchashsize == 0 || !powerof2(V_pf_srchashsize))
1166 		V_pf_srchashsize = PF_SRCHASHSIZ;
1167 	if (V_pf_udpendpointhashsize == 0 || !powerof2(V_pf_udpendpointhashsize))
1168 		V_pf_udpendpointhashsize = PF_UDPENDHASHSIZ;
1169 
1170 	V_pf_hashseed = arc4random();
1171 
1172 	/* States and state keys storage. */
1173 	V_pf_state_z = uma_zcreate("pf states", sizeof(struct pf_kstate),
1174 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
1175 	V_pf_limits[PF_LIMIT_STATES].zone = V_pf_state_z;
1176 	uma_zone_set_max(V_pf_state_z, PFSTATE_HIWAT);
1177 	uma_zone_set_warning(V_pf_state_z, "PF states limit reached");
1178 
1179 	V_pf_state_key_z = uma_zcreate("pf state keys",
1180 	    sizeof(struct pf_state_key), pf_state_key_ctor, NULL, NULL, NULL,
1181 	    UMA_ALIGN_PTR, 0);
1182 
1183 	V_pf_keyhash = mallocarray(V_pf_hashsize, sizeof(struct pf_keyhash),
1184 	    M_PFHASH, M_NOWAIT | M_ZERO);
1185 	V_pf_idhash = mallocarray(V_pf_hashsize, sizeof(struct pf_idhash),
1186 	    M_PFHASH, M_NOWAIT | M_ZERO);
1187 	if (V_pf_keyhash == NULL || V_pf_idhash == NULL) {
1188 		printf("pf: Unable to allocate memory for "
1189 		    "state_hashsize %lu.\n", V_pf_hashsize);
1190 
1191 		free(V_pf_keyhash, M_PFHASH);
1192 		free(V_pf_idhash, M_PFHASH);
1193 
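		/* Fall back to the default size; the retry may sleep for memory. */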
1194 		V_pf_hashsize = PF_HASHSIZ;
1195 		V_pf_keyhash = mallocarray(V_pf_hashsize,
1196 		    sizeof(struct pf_keyhash), M_PFHASH, M_WAITOK | M_ZERO);
1197 		V_pf_idhash = mallocarray(V_pf_hashsize,
1198 		    sizeof(struct pf_idhash), M_PFHASH, M_WAITOK | M_ZERO);
1199 	}
1200 
1201 	V_pf_hashmask = V_pf_hashsize - 1;
1202 	for (i = 0, kh = V_pf_keyhash, ih = V_pf_idhash; i <= V_pf_hashmask;
1203 	    i++, kh++, ih++) {
1204 		mtx_init(&kh->lock, "pf_keyhash", NULL, MTX_DEF | MTX_DUPOK);
1205 		mtx_init(&ih->lock, "pf_idhash", NULL, MTX_DEF);
1206 	}
1207 
1208 	/* Source nodes. */
1209 	V_pf_sources_z = uma_zcreate("pf source nodes",
1210 	    sizeof(struct pf_ksrc_node), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
1211 	    0);
1212 	V_pf_limits[PF_LIMIT_SRC_NODES].zone = V_pf_sources_z;
1213 	uma_zone_set_max(V_pf_sources_z, PFSNODE_HIWAT);
1214 	uma_zone_set_warning(V_pf_sources_z, "PF source nodes limit reached");
1215 
1216 	V_pf_srchash = mallocarray(V_pf_srchashsize,
1217 	    sizeof(struct pf_srchash), M_PFHASH, M_NOWAIT | M_ZERO);
1218 	if (V_pf_srchash == NULL) {
1219 		printf("pf: Unable to allocate memory for "
1220 		    "source_hashsize %lu.\n", V_pf_srchashsize);
1221 
1222 		V_pf_srchashsize = PF_SRCHASHSIZ;
1223 		V_pf_srchash = mallocarray(V_pf_srchashsize,
1224 		    sizeof(struct pf_srchash), M_PFHASH, M_WAITOK | M_ZERO);
1225 	}
1226 
1227 	V_pf_srchashmask = V_pf_srchashsize - 1;
1228 	for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask; i++, sh++)
1229 		mtx_init(&sh->lock, "pf_srchash", NULL, MTX_DEF);
1230 
1231 
1232 	/* UDP endpoint mappings. */
1233 	V_pf_udp_mapping_z = uma_zcreate("pf UDP mappings",
1234 	    sizeof(struct pf_udp_mapping), NULL, NULL, NULL, NULL,
1235 	    UMA_ALIGN_PTR, 0);
1236 	V_pf_udpendpointhash = mallocarray(V_pf_udpendpointhashsize,
1237 	    sizeof(struct pf_udpendpointhash), M_PFHASH, M_NOWAIT | M_ZERO);
1238 	if (V_pf_udpendpointhash == NULL) {
1239 		printf("pf: Unable to allocate memory for "
1240 		    "udpendpoint_hashsize %lu.\n", V_pf_udpendpointhashsize);
1241 
1242 		V_pf_udpendpointhashsize = PF_UDPENDHASHSIZ;
1243 		V_pf_udpendpointhash = mallocarray(V_pf_udpendpointhashsize,
1244 		    sizeof(struct pf_udpendpointhash), M_PFHASH, M_WAITOK | M_ZERO);
1245 	}
1246 
1247 	V_pf_udpendpointhashmask = V_pf_udpendpointhashsize - 1;
1248 	for (i = 0, uh = V_pf_udpendpointhash;
1249 	    i <= V_pf_udpendpointhashmask;
1250 	    i++, uh++) {
1251 		mtx_init(&uh->lock, "pf_udpendpointhash", NULL,
1252 		    MTX_DEF | MTX_DUPOK);
1253 	}
1254 
1255 	/* ALTQ */
1256 	TAILQ_INIT(&V_pf_altqs[0]);
1257 	TAILQ_INIT(&V_pf_altqs[1]);
1258 	TAILQ_INIT(&V_pf_altqs[2]);
1259 	TAILQ_INIT(&V_pf_altqs[3]);
1260 	TAILQ_INIT(&V_pf_pabuf[0]);
1261 	TAILQ_INIT(&V_pf_pabuf[1]);
1262 	TAILQ_INIT(&V_pf_pabuf[2]);
1263 	V_pf_altqs_active = &V_pf_altqs[0];
1264 	V_pf_altq_ifs_active = &V_pf_altqs[1];
1265 	V_pf_altqs_inactive = &V_pf_altqs[2];
1266 	V_pf_altq_ifs_inactive = &V_pf_altqs[3];
1267 
1268 	/* Send & overload+flush queues. */
1269 	STAILQ_INIT(&V_pf_sendqueue);
1270 	SLIST_INIT(&V_pf_overloadqueue);
1271 	TASK_INIT(&V_pf_overloadtask, 0, pf_overload_task, curvnet);
1272 
1273 	/* Unlinked, but may be referenced rules. */
1274 	TAILQ_INIT(&V_pf_unlinked_rules);
1275 }
1276 
1277 void
1278 pf_mtag_cleanup(void)
1279 {
1280 
1281 	uma_zdestroy(pf_mtag_z);
1282 }
1283 
1284 void
1285 pf_cleanup(void)
1286 {
1287 	struct pf_keyhash	*kh;
1288 	struct pf_idhash	*ih;
1289 	struct pf_srchash	*sh;
1290 	struct pf_udpendpointhash	*uh;
1291 	struct pf_send_entry	*pfse, *next;
1292 	u_int i;
1293 
1294 	for (i = 0, kh = V_pf_keyhash, ih = V_pf_idhash;
1295 	    i <= V_pf_hashmask;
1296 	    i++, kh++, ih++) {
1297 		KASSERT(LIST_EMPTY(&kh->keys), ("%s: key hash not empty",
1298 		    __func__));
1299 		KASSERT(LIST_EMPTY(&ih->states), ("%s: id hash not empty",
1300 		    __func__));
1301 		mtx_destroy(&kh->lock);
1302 		mtx_destroy(&ih->lock);
1303 	}
1304 	free(V_pf_keyhash, M_PFHASH);
1305 	free(V_pf_idhash, M_PFHASH);
1306 
1307 	for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask; i++, sh++) {
1308 		KASSERT(LIST_EMPTY(&sh->nodes),
1309 		    ("%s: source node hash not empty", __func__));
1310 		mtx_destroy(&sh->lock);
1311 	}
1312 	free(V_pf_srchash, M_PFHASH);
1313 
1314 	for (i = 0, uh = V_pf_udpendpointhash;
1315 	    i <= V_pf_udpendpointhashmask;
1316 	    i++, uh++) {
1317 		KASSERT(LIST_EMPTY(&uh->endpoints),
1318 		    ("%s: udp endpoint hash not empty", __func__));
1319 		mtx_destroy(&uh->lock);
1320 	}
1321 	free(V_pf_udpendpointhash, M_PFHASH);
1322 
1323 	STAILQ_FOREACH_SAFE(pfse, &V_pf_sendqueue, pfse_next, next) {
1324 		m_freem(pfse->pfse_m);
1325 		free(pfse, M_PFTEMP);
1326 	}
1327 	MPASS(RB_EMPTY(&V_pf_sctp_endpoints));
1328 
1329 	uma_zdestroy(V_pf_sources_z);
1330 	uma_zdestroy(V_pf_state_z);
1331 	uma_zdestroy(V_pf_state_key_z);
1332 	uma_zdestroy(V_pf_udp_mapping_z);
1333 }
1334 
1335 static int
1336 pf_mtag_uminit(void *mem, int size, int how)
1337 {
1338 	struct m_tag *t;
1339 
1340 	t = (struct m_tag *)mem;
1341 	t->m_tag_cookie = MTAG_ABI_COMPAT;
1342 	t->m_tag_id = PACKET_TAG_PF;
1343 	t->m_tag_len = sizeof(struct pf_mtag);
1344 	t->m_tag_free = pf_mtag_free;
1345 
1346 	return (0);
1347 }
1348 
1349 static void
1350 pf_mtag_free(struct m_tag *t)
1351 {
1352 
1353 	uma_zfree(pf_mtag_z, t);
1354 }
1355 
1356 struct pf_mtag *
1357 pf_get_mtag(struct mbuf *m)
1358 {
1359 	struct m_tag *mtag;
1360 
1361 	if ((mtag = m_tag_find(m, PACKET_TAG_PF, NULL)) != NULL)
1362 		return ((struct pf_mtag *)(mtag + 1));
1363 
1364 	mtag = uma_zalloc(pf_mtag_z, M_NOWAIT);
1365 	if (mtag == NULL)
1366 		return (NULL);
1367 	bzero(mtag + 1, sizeof(struct pf_mtag));
1368 	m_tag_prepend(m, mtag);
1369 
1370 	return ((struct pf_mtag *)(mtag + 1));
1371 }
1372 
1373 static int
1374 pf_state_key_attach(struct pf_state_key *skw, struct pf_state_key *sks,
1375     struct pf_kstate *s)
1376 {
1377 	struct pf_keyhash	*khs, *khw, *kh;
1378 	struct pf_state_key	*sk, *cur;
1379 	struct pf_kstate	*si, *olds = NULL;
1380 	int idx;
1381 
1382 	NET_EPOCH_ASSERT();
1383 	KASSERT(s->refs == 0, ("%s: state not pristine", __func__));
1384 	KASSERT(s->key[PF_SK_WIRE] == NULL, ("%s: state has key", __func__));
1385 	KASSERT(s->key[PF_SK_STACK] == NULL, ("%s: state has key", __func__));
1386 
1387 	/*
1388 	 * We need to lock hash slots of both keys. To avoid deadlock
1389 	 * we always lock the slot with lower address first. Unlock order
1390 	 * isn't important.
1391 	 *
1392 	 * We also need to lock ID hash slot before dropping key
1393 	 * locks. On success we return with ID hash slot locked.
1394 	 */
1395 
1396 	if (skw == sks) {
1397 		khs = khw = &V_pf_keyhash[pf_hashkey(skw)];
1398 		PF_HASHROW_LOCK(khs);
1399 	} else {
1400 		khs = &V_pf_keyhash[pf_hashkey(sks)];
1401 		khw = &V_pf_keyhash[pf_hashkey(skw)];
1402 		if (khs == khw) {
1403 			PF_HASHROW_LOCK(khs);
1404 		} else if (khs < khw) {
1405 			PF_HASHROW_LOCK(khs);
1406 			PF_HASHROW_LOCK(khw);
1407 		} else {
1408 			PF_HASHROW_LOCK(khw);
1409 			PF_HASHROW_LOCK(khs);
1410 		}
1411 	}
1412 
1413 #define	KEYS_UNLOCK()	do {			\
1414 	if (khs != khw) {			\
1415 		PF_HASHROW_UNLOCK(khs);		\
1416 		PF_HASHROW_UNLOCK(khw);		\
1417 	} else					\
1418 		PF_HASHROW_UNLOCK(khs);		\
1419 } while (0)
1420 
1421 	/*
1422 	 * First run: start with wire key.
1423 	 */
1424 	sk = skw;
1425 	kh = khw;
1426 	idx = PF_SK_WIRE;
1427 
1428 	MPASS(s->lock == NULL);
1429 	s->lock = &V_pf_idhash[PF_IDHASH(s)].lock;
1430 
1431 keyattach:
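	/* Look for an identical key already present in this hash slot. */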
1432 	LIST_FOREACH(cur, &kh->keys, entry)
1433 		if (bcmp(cur, sk, sizeof(struct pf_state_key_cmp)) == 0)
1434 			break;
1435 
1436 	if (cur != NULL) {
1437 		/* Key exists. Check for same kif; if none conflicts, add to key. */
1438 		TAILQ_FOREACH(si, &cur->states[idx], key_list[idx]) {
1439 			struct pf_idhash *ih = &V_pf_idhash[PF_IDHASH(si)];
1440 
1441 			PF_HASHROW_LOCK(ih);
1442 			if (si->kif == s->kif &&
1443 			    ((si->key[PF_SK_WIRE]->af == sk->af &&
1444 			    si->direction == s->direction) ||
1445 			    (si->key[PF_SK_WIRE]->af !=
1446 			    si->key[PF_SK_STACK]->af &&
1447 			    sk->af == si->key[PF_SK_STACK]->af &&
1448 			    si->direction != s->direction))) {
1449 				bool reuse = false;
1450 
1451 				if (sk->proto == IPPROTO_TCP &&
1452 				    si->src.state >= TCPS_FIN_WAIT_2 &&
1453 				    si->dst.state >= TCPS_FIN_WAIT_2)
1454 					reuse = true;
1455 
1456 				if (V_pf_status.debug >= PF_DEBUG_MISC) {
1457 					printf("pf: %s key attach "
1458 					    "%s on %s: ",
1459 					    (idx == PF_SK_WIRE) ?
1460 					    "wire" : "stack",
1461 					    reuse ? "reuse" : "failed",
1462 					    s->kif->pfik_name);
1463 					pf_print_state_parts(s,
1464 					    (idx == PF_SK_WIRE) ?
1465 					    sk : NULL,
1466 					    (idx == PF_SK_STACK) ?
1467 					    sk : NULL);
1468 					printf(", existing: ");
1469 					pf_print_state_parts(si,
1470 					    (idx == PF_SK_WIRE) ?
1471 					    sk : NULL,
1472 					    (idx == PF_SK_STACK) ?
1473 					    sk : NULL);
1474 					printf("\n");
1475 				}
1476 
1477 				if (reuse) {
1478 					/*
1479 					 * New state matches an old >FIN_WAIT_2
1480 					 * state. We can't drop key hash locks,
1481 					 * thus we can't unlink it properly.
1482 					 *
1483 					 * As a workaround we drop it into
1484 					 * TCPS_CLOSED state, schedule purge
1485 					 * ASAP and push it into the very end
1486 					 * of the slot TAILQ, so that it won't
1487 					 * conflict with our new state.
1488 					 */
1489 					pf_set_protostate(si, PF_PEER_BOTH,
1490 					    TCPS_CLOSED);
1491 					si->timeout = PFTM_PURGE;
1492 					olds = si;
1493 				} else {
1494 					s->timeout = PFTM_UNLINKED;
1495 					if (idx == PF_SK_STACK)
1496 						/*
1497 						 * Remove the wire key from
1498 						 * the hash. Other threads
1499 						 * can't be referencing it
1500 						 * because we still hold the
1501 						 * hash lock.
1502 						 */
1503 						pf_state_key_detach(s,
1504 						    PF_SK_WIRE);
1505 					PF_HASHROW_UNLOCK(ih);
1506 					KEYS_UNLOCK();
1507 					if (idx == PF_SK_WIRE)
1508 						/*
1509 						 * We've not inserted either key.
1510 						 * Free both.
1511 						 */
1512 						uma_zfree(V_pf_state_key_z, skw);
1513 					if (skw != sks)
1514 						uma_zfree(
1515 						    V_pf_state_key_z,
1516 						    sks);
1517 					return (EEXIST); /* collision! */
1518 				}
1519 			}
1520 			PF_HASHROW_UNLOCK(ih);
1521 		}
1522 		uma_zfree(V_pf_state_key_z, sk);
1523 		s->key[idx] = cur;
1524 	} else {
1525 		LIST_INSERT_HEAD(&kh->keys, sk, entry);
1526 		s->key[idx] = sk;
1527 	}
1528 
1529 stateattach:
1530 	/* List is sorted, if-bound states before floating. */
1531 	if (s->kif == V_pfi_all)
1532 		TAILQ_INSERT_TAIL(&s->key[idx]->states[idx], s, key_list[idx]);
1533 	else
1534 		TAILQ_INSERT_HEAD(&s->key[idx]->states[idx], s, key_list[idx]);
1535 
1536 	if (olds) {
1537 		TAILQ_REMOVE(&s->key[idx]->states[idx], olds, key_list[idx]);
1538 		TAILQ_INSERT_TAIL(&s->key[idx]->states[idx], olds,
1539 		    key_list[idx]);
1540 		olds = NULL;
1541 	}
1542 
1543 	/*
1544 	 * Attach done. Decide whether (and how) we should
1545 	 * attach a second key.
1546 	 */
1547 	if (sks == skw) {
1548 		s->key[PF_SK_STACK] = s->key[PF_SK_WIRE];
1549 		idx = PF_SK_STACK;
1550 		sks = NULL;
1551 		goto stateattach;
1552 	} else if (sks != NULL) {
1553 		/*
1554 		 * Continue attaching with stack key.
1555 		 */
1556 		sk = sks;
1557 		kh = khs;
1558 		idx = PF_SK_STACK;
1559 		sks = NULL;
1560 		goto keyattach;
1561 	}
1562 
1563 	PF_STATE_LOCK(s);
1564 	KEYS_UNLOCK();
1565 
1566 	KASSERT(s->key[PF_SK_WIRE] != NULL && s->key[PF_SK_STACK] != NULL,
1567 	    ("%s failure", __func__));
1568 
1569 	return (0);
1570 #undef	KEYS_UNLOCK
1571 }
1572 
1573 static void
1574 pf_detach_state(struct pf_kstate *s)
1575 {
1576 	struct pf_state_key *sks = s->key[PF_SK_STACK];
1577 	struct pf_keyhash *kh;
1578 
1579 	NET_EPOCH_ASSERT();
1580 	MPASS(s->timeout >= PFTM_MAX);
1581 
1582 	pf_sctp_multihome_detach_addr(s);
1583 
1584 	if ((s->state_flags & PFSTATE_PFLOW) && V_pflow_export_state_ptr)
1585 		V_pflow_export_state_ptr(s);
1586 
1587 	if (sks != NULL) {
1588 		kh = &V_pf_keyhash[pf_hashkey(sks)];
1589 		PF_HASHROW_LOCK(kh);
1590 		if (s->key[PF_SK_STACK] != NULL)
1591 			pf_state_key_detach(s, PF_SK_STACK);
1592 		/*
1593 		 * If both point to same key, then we are done.
1594 		 */
1595 		if (sks == s->key[PF_SK_WIRE]) {
1596 			pf_state_key_detach(s, PF_SK_WIRE);
1597 			PF_HASHROW_UNLOCK(kh);
1598 			return;
1599 		}
1600 		PF_HASHROW_UNLOCK(kh);
1601 	}
1602 
1603 	if (s->key[PF_SK_WIRE] != NULL) {
1604 		kh = &V_pf_keyhash[pf_hashkey(s->key[PF_SK_WIRE])];
1605 		PF_HASHROW_LOCK(kh);
1606 		if (s->key[PF_SK_WIRE] != NULL)
1607 			pf_state_key_detach(s, PF_SK_WIRE);
1608 		PF_HASHROW_UNLOCK(kh);
1609 	}
1610 }
1611 
1612 static void
1613 pf_state_key_detach(struct pf_kstate *s, int idx)
1614 {
1615 	struct pf_state_key *sk = s->key[idx];
1616 #ifdef INVARIANTS
1617 	struct pf_keyhash *kh = &V_pf_keyhash[pf_hashkey(sk)];
1618 
1619 	PF_HASHROW_ASSERT(kh);
1620 #endif /* INVARIANTS */
1621 	TAILQ_REMOVE(&sk->states[idx], s, key_list[idx]);
1622 	s->key[idx] = NULL;
1623 
1624 	if (TAILQ_EMPTY(&sk->states[0]) && TAILQ_EMPTY(&sk->states[1])) {
1625 		LIST_REMOVE(sk, entry);
1626 		uma_zfree(V_pf_state_key_z, sk);
1627 	}
1628 }
1629 
1630 static int
1631 pf_state_key_ctor(void *mem, int size, void *arg, int flags)
1632 {
1633 	struct pf_state_key *sk = mem;
1634 
1635 	bzero(sk, sizeof(struct pf_state_key_cmp));
1636 	TAILQ_INIT(&sk->states[PF_SK_WIRE]);
1637 	TAILQ_INIT(&sk->states[PF_SK_STACK]);
1638 
1639 	return (0);
1640 }
1641 
1642 static int
1643 pf_state_key_addr_setup(struct pf_pdesc *pd,
1644     struct pf_state_key_cmp *key, int multi)
1645 {
1646 	struct pf_addr *saddr = pd->src;
1647 	struct pf_addr *daddr = pd->dst;
1648 #ifdef INET6
1649 	struct nd_neighbor_solicit nd;
1650 	struct pf_addr *target;
1651 	u_short action, reason;
1652 
1653 	if (pd->af == AF_INET || pd->proto != IPPROTO_ICMPV6)
1654 		goto copy;
1655 
1656 	switch (pd->hdr.icmp6.icmp6_type) {
1657 	case ND_NEIGHBOR_SOLICIT:
1658 		if (multi)
1659 			return (-1);
1660 		if (!pf_pull_hdr(pd->m, pd->off, &nd, sizeof(nd), &action, &reason, pd->af))
1661 			return (-1);
1662 		target = (struct pf_addr *)&nd.nd_ns_target;
1663 		daddr = target;
1664 		break;
1665 	case ND_NEIGHBOR_ADVERT:
1666 		if (multi)
1667 			return (-1);
1668 		if (!pf_pull_hdr(pd->m, pd->off, &nd, sizeof(nd), &action, &reason, pd->af))
1669 			return (-1);
1670 		target = (struct pf_addr *)&nd.nd_ns_target;
1671 		saddr = target;
1672 		if (IN6_IS_ADDR_MULTICAST(&pd->dst->v6)) {
1673 			key->addr[pd->didx].addr32[0] = 0;
1674 			key->addr[pd->didx].addr32[1] = 0;
1675 			key->addr[pd->didx].addr32[2] = 0;
1676 			key->addr[pd->didx].addr32[3] = 0;
1677 			daddr = NULL; /* overwritten */
1678 		}
1679 		break;
1680 	default:
1681 		if (multi) {
1682 			key->addr[pd->sidx].addr32[0] = IPV6_ADDR_INT32_MLL;
1683 			key->addr[pd->sidx].addr32[1] = 0;
1684 			key->addr[pd->sidx].addr32[2] = 0;
1685 			key->addr[pd->sidx].addr32[3] = IPV6_ADDR_INT32_ONE;
1686 			saddr = NULL; /* overwritten */
1687 		}
1688 	}
1689 copy:
1690 #endif /* INET6 */
1691 	if (saddr)
1692 		pf_addrcpy(&key->addr[pd->sidx], saddr, pd->af);
1693 	if (daddr)
1694 		pf_addrcpy(&key->addr[pd->didx], daddr, pd->af);
1695 
1696 	return (0);
1697 }
1698 
1699 int
1700 pf_state_key_setup(struct pf_pdesc *pd, u_int16_t sport, u_int16_t dport,
1701     struct pf_state_key **sk, struct pf_state_key **nk)
1702 {
1703 	*sk = uma_zalloc(V_pf_state_key_z, M_NOWAIT);
1704 	if (*sk == NULL)
1705 		return (ENOMEM);
1706 
1707 	if (pf_state_key_addr_setup(pd, (struct pf_state_key_cmp *)*sk,
1708 	    0)) {
1709 		uma_zfree(V_pf_state_key_z, *sk);
1710 		*sk = NULL;
1711 		return (ENOMEM);
1712 	}
1713 
1714 	(*sk)->port[pd->sidx] = sport;
1715 	(*sk)->port[pd->didx] = dport;
1716 	(*sk)->proto = pd->proto;
1717 	(*sk)->af = pd->af;
1718 
1719 	*nk = pf_state_key_clone(*sk);
1720 	if (*nk == NULL) {
1721 		uma_zfree(V_pf_state_key_z, *sk);
1722 		*sk = NULL;
1723 		return (ENOMEM);
1724 	}
1725 
1726 	if (pd->af != pd->naf) {
1727 		(*sk)->port[pd->sidx] = pd->osport;
1728 		(*sk)->port[pd->didx] = pd->odport;
1729 
1730 		(*nk)->af = pd->naf;
1731 
1732 		/*
1733 		 * We're overwriting an address here, so potentially there are bits of an IPv6
1734 		 * address left in here. Clear that out first.
1735 		 */
1736 		bzero(&(*nk)->addr[0], sizeof((*nk)->addr[0]));
1737 		bzero(&(*nk)->addr[1], sizeof((*nk)->addr[1]));
1738 		if (pd->dir == PF_IN) {
1739 			pf_addrcpy(&(*nk)->addr[pd->didx], &pd->nsaddr,
1740 			    pd->naf);
1741 			pf_addrcpy(&(*nk)->addr[pd->sidx], &pd->ndaddr,
1742 			    pd->naf);
1743 			(*nk)->port[pd->didx] = pd->nsport;
1744 			(*nk)->port[pd->sidx] = pd->ndport;
1745 		} else {
1746 			pf_addrcpy(&(*nk)->addr[pd->sidx], &pd->nsaddr,
1747 			    pd->naf);
1748 			pf_addrcpy(&(*nk)->addr[pd->didx], &pd->ndaddr,
1749 			    pd->naf);
1750 			(*nk)->port[pd->sidx] = pd->nsport;
1751 			(*nk)->port[pd->didx] = pd->ndport;
1752 		}
1753 
1754 		switch (pd->proto) {
1755 		case IPPROTO_ICMP:
1756 			(*nk)->proto = IPPROTO_ICMPV6;
1757 			break;
1758 		case IPPROTO_ICMPV6:
1759 			(*nk)->proto = IPPROTO_ICMP;
1760 			break;
1761 		default:
1762 			(*nk)->proto = pd->proto;
1763 		}
1764 	}
1765 
1766 	return (0);
1767 }
1768 
1769 struct pf_state_key *
1770 pf_state_key_clone(const struct pf_state_key *orig)
1771 {
1772 	struct pf_state_key *sk;
1773 
1774 	sk = uma_zalloc(V_pf_state_key_z, M_NOWAIT);
1775 	if (sk == NULL)
1776 		return (NULL);
1777 
1778 	bcopy(orig, sk, sizeof(struct pf_state_key_cmp));
1779 
1780 	return (sk);
1781 }
1782 
1783 int
1784 pf_state_insert(struct pfi_kkif *kif, struct pfi_kkif *orig_kif,
1785     struct pf_state_key *skw, struct pf_state_key *sks, struct pf_kstate *s)
1786 {
1787 	struct pf_idhash *ih;
1788 	struct pf_kstate *cur;
1789 	int error;
1790 
1791 	NET_EPOCH_ASSERT();
1792 
1793 	KASSERT(TAILQ_EMPTY(&sks->states[0]) && TAILQ_EMPTY(&sks->states[1]),
1794 	    ("%s: sks not pristine", __func__));
1795 	KASSERT(TAILQ_EMPTY(&skw->states[0]) && TAILQ_EMPTY(&skw->states[1]),
1796 	    ("%s: skw not pristine", __func__));
1797 	KASSERT(s->refs == 0, ("%s: state not pristine", __func__));
1798 
1799 	s->kif = kif;
1800 	s->orig_kif = orig_kif;
1801 
1802 	if (s->id == 0 && s->creatorid == 0) {
1803 		s->id = alloc_unr64(&V_pf_stateid);
1804 		s->id = htobe64(s->id);
1805 		s->creatorid = V_pf_status.hostid;
1806 	}
1807 
1808 	/* Returns with ID locked on success. */
1809 	if ((error = pf_state_key_attach(skw, sks, s)) != 0)
1810 		return (error);
1811 	skw = sks = NULL;
1812 
1813 	ih = &V_pf_idhash[PF_IDHASH(s)];
1814 	PF_HASHROW_ASSERT(ih);
1815 	LIST_FOREACH(cur, &ih->states, entry)
1816 		if (cur->id == s->id && cur->creatorid == s->creatorid)
1817 			break;
1818 
1819 	if (cur != NULL) {
1820 		s->timeout = PFTM_UNLINKED;
1821 		PF_HASHROW_UNLOCK(ih);
1822 		if (V_pf_status.debug >= PF_DEBUG_MISC) {
1823 			printf("pf: state ID collision: "
1824 			    "id: %016llx creatorid: %08x\n",
1825 			    (unsigned long long)be64toh(s->id),
1826 			    ntohl(s->creatorid));
1827 		}
1828 		pf_detach_state(s);
1829 		return (EEXIST);
1830 	}
1831 	LIST_INSERT_HEAD(&ih->states, s, entry);
1832 	/* One for keys, one for ID hash. */
1833 	refcount_init(&s->refs, 2);
1834 
1835 	pf_counter_u64_add(&V_pf_status.fcounters[FCNT_STATE_INSERT], 1);
1836 	if (V_pfsync_insert_state_ptr != NULL)
1837 		V_pfsync_insert_state_ptr(s);
1838 
1839 	/* Returns locked. */
1840 	return (0);
1841 }
1842 
1843 /*
1844  * Find state by ID: returns with locked row on success.
1845  */
1846 struct pf_kstate *
1847 pf_find_state_byid(uint64_t id, uint32_t creatorid)
1848 {
1849 	struct pf_idhash *ih;
1850 	struct pf_kstate *s;
1851 
1852 	pf_counter_u64_add(&V_pf_status.fcounters[FCNT_STATE_SEARCH], 1);
1853 
1854 	ih = &V_pf_idhash[PF_IDHASHID(id)];
1855 
1856 	PF_HASHROW_LOCK(ih);
1857 	LIST_FOREACH(s, &ih->states, entry)
1858 		if (s->id == id && s->creatorid == creatorid)
1859 			break;
1860 
1861 	if (s == NULL)
1862 		PF_HASHROW_UNLOCK(ih);
1863 
1864 	return (s);
1865 }
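
/*
 * Editorial note (not part of the original source): a minimal usage
 * sketch, assuming 'id' and 'creatorid' arrive in a pfsync message.
 * On success the ID hash row is returned locked, so the caller must
 * unlock the state when it is done with it.
 *
 *	struct pf_kstate *st;
 *
 *	st = pf_find_state_byid(id, creatorid);
 *	if (st != NULL) {
 *		... inspect or update 'st' under the row lock ...
 *		PF_STATE_UNLOCK(st);
 *	}
 */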
1866 
1867 /*
1868  * Find state by key.
1869  * Returns with ID hash slot locked on success.
1870  */
1871 static int
1872 pf_find_state(struct pf_pdesc *pd, const struct pf_state_key_cmp *key,
1873     struct pf_kstate **state)
1874 {
1875 	struct pf_keyhash	*kh;
1876 	struct pf_state_key	*sk;
1877 	struct pf_kstate	*s;
1878 	int idx;
1879 
1880 	*state = NULL;
1881 
1882 	pf_counter_u64_add(&V_pf_status.fcounters[FCNT_STATE_SEARCH], 1);
1883 
1884 	kh = &V_pf_keyhash[pf_hashkey((const struct pf_state_key *)key)];
1885 
1886 	PF_HASHROW_LOCK(kh);
1887 	LIST_FOREACH(sk, &kh->keys, entry)
1888 		if (bcmp(sk, key, sizeof(struct pf_state_key_cmp)) == 0)
1889 			break;
1890 	if (sk == NULL) {
1891 		PF_HASHROW_UNLOCK(kh);
1892 		return (PF_DROP);
1893 	}
1894 
1895 	idx = (pd->dir == PF_IN ? PF_SK_WIRE : PF_SK_STACK);
1896 
1897 	/* List is sorted, if-bound states before floating ones. */
1898 	TAILQ_FOREACH(s, &sk->states[idx], key_list[idx])
1899 		if (s->kif == V_pfi_all || s->kif == pd->kif ||
1900 		    s->orig_kif == pd->kif) {
1901 			PF_STATE_LOCK(s);
1902 			PF_HASHROW_UNLOCK(kh);
1903 			if (__predict_false(s->timeout >= PFTM_MAX)) {
1904 				/*
1905 				 * State is either being processed by
1906 				 * pf_remove_state() in another thread, or
1907 				 * is scheduled for immediate expiry.
1908 				 */
1909 				PF_STATE_UNLOCK(s);
1910 				SDT_PROBE5(pf, ip, state, lookup, pd->kif,
1911 				    key, (pd->dir), pd, *state);
1912 				return (PF_DROP);
1913 			}
1914 			goto out;
1915 		}
1916 
1917 	/* Look through the other list, in case of AF-TO */
1918 	idx = idx == PF_SK_WIRE ? PF_SK_STACK : PF_SK_WIRE;
1919 	TAILQ_FOREACH(s, &sk->states[idx], key_list[idx]) {
1920 		if (s->key[PF_SK_WIRE]->af == s->key[PF_SK_STACK]->af)
1921 			continue;
1922 		if (s->kif == V_pfi_all || s->kif == pd->kif ||
1923 		    s->orig_kif == pd->kif) {
1924 			PF_STATE_LOCK(s);
1925 			PF_HASHROW_UNLOCK(kh);
1926 			if (__predict_false(s->timeout >= PFTM_MAX)) {
1927 				/*
1928 				 * State is either being processed by
1929 				 * pf_remove_state() in another thread, or
1930 				 * is scheduled for immediate expiry.
1931 				 */
1932 				PF_STATE_UNLOCK(s);
1933 				SDT_PROBE5(pf, ip, state, lookup, pd->kif,
1934 				    key, (pd->dir), pd, NULL);
1935 				return (PF_DROP);
1936 			}
1937 			goto out;
1938 		}
1939 	}
1940 
1941 	PF_HASHROW_UNLOCK(kh);
1942 
1943 out:
1944 	SDT_PROBE5(pf, ip, state, lookup, pd->kif, key, (pd->dir), pd, *state);
1945 
1946 	if (s == NULL || s->timeout == PFTM_PURGE) {
1947 		if (s)
1948 			PF_STATE_UNLOCK(s);
1949 		return (PF_DROP);
1950 	}
1951 
1952 	if ((s)->rule->pktrate.limit && pd->dir == (s)->direction) {
1953 		if (pf_check_threshold(&(s)->rule->pktrate)) {
1954 			PF_STATE_UNLOCK(s);
1955 			return (PF_DROP);
1956 		}
1957 	}
1958 	if (PACKET_LOOPED(pd)) {
1959 		PF_STATE_UNLOCK(s);
1960 		return (PF_PASS);
1961 	}
1962 
1963 	*state = s;
1964 
1965 	return (PF_MATCH);
1966 }
1967 
1968 /*
1969  * Returns with ID hash slot locked on success.
1970  */
1971 struct pf_kstate *
1972 pf_find_state_all(const struct pf_state_key_cmp *key, u_int dir, int *more)
1973 {
1974 	struct pf_keyhash	*kh;
1975 	struct pf_state_key	*sk;
1976 	struct pf_kstate	*s, *ret = NULL;
1977 	int			 idx, inout = 0;
1978 
1979 	if (more != NULL)
1980 		*more = 0;
1981 
1982 	pf_counter_u64_add(&V_pf_status.fcounters[FCNT_STATE_SEARCH], 1);
1983 
1984 	kh = &V_pf_keyhash[pf_hashkey((const struct pf_state_key *)key)];
1985 
1986 	PF_HASHROW_LOCK(kh);
1987 	LIST_FOREACH(sk, &kh->keys, entry)
1988 		if (bcmp(sk, key, sizeof(struct pf_state_key_cmp)) == 0)
1989 			break;
1990 	if (sk == NULL) {
1991 		PF_HASHROW_UNLOCK(kh);
1992 		return (NULL);
1993 	}
1994 	switch (dir) {
1995 	case PF_IN:
1996 		idx = PF_SK_WIRE;
1997 		break;
1998 	case PF_OUT:
1999 		idx = PF_SK_STACK;
2000 		break;
2001 	case PF_INOUT:
2002 		idx = PF_SK_WIRE;
2003 		inout = 1;
2004 		break;
2005 	default:
2006 		panic("%s: dir %u", __func__, dir);
2007 	}
2008 second_run:
2009 	TAILQ_FOREACH(s, &sk->states[idx], key_list[idx]) {
2010 		if (more == NULL) {
2011 			PF_STATE_LOCK(s);
2012 			PF_HASHROW_UNLOCK(kh);
2013 			return (s);
2014 		}
2015 
2016 		if (ret)
2017 			(*more)++;
2018 		else {
2019 			ret = s;
2020 			PF_STATE_LOCK(s);
2021 		}
2022 	}
2023 	if (inout == 1) {
2024 		inout = 0;
2025 		idx = PF_SK_STACK;
2026 		goto second_run;
2027 	}
2028 	PF_HASHROW_UNLOCK(kh);
2029 
2030 	return (ret);
2031 }
2032 
2033 /*
2034  * FIXME
2035  * This routine is inefficient -- locks the state only to unlock immediately on
2036  * return.
2037  * It is racy -- after the state is unlocked nothing stops other threads from
2038  * removing it.
2039  */
2040 bool
2041 pf_find_state_all_exists(const struct pf_state_key_cmp *key, u_int dir)
2042 {
2043 	struct pf_kstate *s;
2044 
2045 	s = pf_find_state_all(key, dir, NULL);
2046 	if (s != NULL) {
2047 		PF_STATE_UNLOCK(s);
2048 		return (true);
2049 	}
2050 	return (false);
2051 }
2052 
2053 struct pf_udp_mapping *
2054 pf_udp_mapping_create(sa_family_t af, struct pf_addr *src_addr, uint16_t src_port,
2055     struct pf_addr *nat_addr, uint16_t nat_port)
2056 {
2057 	struct pf_udp_mapping *mapping;
2058 
2059 	mapping = uma_zalloc(V_pf_udp_mapping_z, M_NOWAIT | M_ZERO);
2060 	if (mapping == NULL)
2061 		return (NULL);
2062 	pf_addrcpy(&mapping->endpoints[0].addr, src_addr, af);
2063 	mapping->endpoints[0].port = src_port;
2064 	mapping->endpoints[0].af = af;
2065 	mapping->endpoints[0].mapping = mapping;
2066 	pf_addrcpy(&mapping->endpoints[1].addr, nat_addr, af);
2067 	mapping->endpoints[1].port = nat_port;
2068 	mapping->endpoints[1].af = af;
2069 	mapping->endpoints[1].mapping = mapping;
2070 	refcount_init(&mapping->refs, 1);
2071 	return (mapping);
2072 }
2073 
2074 int
2075 pf_udp_mapping_insert(struct pf_udp_mapping *mapping)
2076 {
2077 	struct pf_udpendpointhash *h0, *h1;
2078 	struct pf_udp_endpoint *endpoint;
2079 	int ret = EEXIST;
2080 
2081 	h0 = &V_pf_udpendpointhash[pf_hashudpendpoint(&mapping->endpoints[0])];
2082 	h1 = &V_pf_udpendpointhash[pf_hashudpendpoint(&mapping->endpoints[1])];
2083 	if (h0 == h1) {
2084 		PF_HASHROW_LOCK(h0);
2085 	} else if (h0 < h1) {
2086 		PF_HASHROW_LOCK(h0);
2087 		PF_HASHROW_LOCK(h1);
2088 	} else {
2089 		PF_HASHROW_LOCK(h1);
2090 		PF_HASHROW_LOCK(h0);
2091 	}
2092 
2093 	LIST_FOREACH(endpoint, &h0->endpoints, entry) {
2094 		if (bcmp(endpoint, &mapping->endpoints[0],
2095 		    sizeof(struct pf_udp_endpoint_cmp)) == 0)
2096 			break;
2097 	}
2098 	if (endpoint != NULL)
2099 		goto cleanup;
2100 	LIST_FOREACH(endpoint, &h1->endpoints, entry) {
2101 		if (bcmp(endpoint, &mapping->endpoints[1],
2102 		    sizeof(struct pf_udp_endpoint_cmp)) == 0)
2103 			break;
2104 	}
2105 	if (endpoint != NULL)
2106 		goto cleanup;
2107 	LIST_INSERT_HEAD(&h0->endpoints, &mapping->endpoints[0], entry);
2108 	LIST_INSERT_HEAD(&h1->endpoints, &mapping->endpoints[1], entry);
2109 	ret = 0;
2110 
2111 cleanup:
2112 	if (h0 != h1) {
2113 		PF_HASHROW_UNLOCK(h0);
2114 		PF_HASHROW_UNLOCK(h1);
2115 	} else {
2116 		PF_HASHROW_UNLOCK(h0);
2117 	}
2118 	return (ret);
2119 }
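
/*
 * Editorial note (not part of the original source): the two endpoint
 * rows above are always locked in ascending address order (h0 first
 * when h0 < h1, otherwise h1 first).  Acquiring them in a consistent
 * order avoids a deadlock between two threads whose mappings hash to
 * the same pair of rows with the endpoints in opposite roles.
 */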
2120 
2121 void
2122 pf_udp_mapping_release(struct pf_udp_mapping *mapping)
2123 {
2124 	/* refcount is synchronized on the source endpoint's row lock */
2125 	struct pf_udpendpointhash *h0, *h1;
2126 
2127 	if (mapping == NULL)
2128 		return;
2129 
2130 	h0 = &V_pf_udpendpointhash[pf_hashudpendpoint(&mapping->endpoints[0])];
2131 	PF_HASHROW_LOCK(h0);
2132 	if (refcount_release(&mapping->refs)) {
2133 		LIST_REMOVE(&mapping->endpoints[0], entry);
2134 		PF_HASHROW_UNLOCK(h0);
2135 		h1 = &V_pf_udpendpointhash[pf_hashudpendpoint(&mapping->endpoints[1])];
2136 		PF_HASHROW_LOCK(h1);
2137 		LIST_REMOVE(&mapping->endpoints[1], entry);
2138 		PF_HASHROW_UNLOCK(h1);
2139 
2140 		uma_zfree(V_pf_udp_mapping_z, mapping);
2141 	} else {
2142 		PF_HASHROW_UNLOCK(h0);
2143 	}
2144 }
2145 
2146 
2147 struct pf_udp_mapping *
2148 pf_udp_mapping_find(struct pf_udp_endpoint_cmp *key)
2149 {
2150 	struct pf_udpendpointhash *uh;
2151 	struct pf_udp_endpoint *endpoint;
2152 
2153 	uh = &V_pf_udpendpointhash[pf_hashudpendpoint((struct pf_udp_endpoint*)key)];
2154 
2155 	PF_HASHROW_LOCK(uh);
2156 	LIST_FOREACH(endpoint, &uh->endpoints, entry) {
2157 		if (bcmp(endpoint, key, sizeof(struct pf_udp_endpoint_cmp)) == 0 &&
2158 			bcmp(endpoint, &endpoint->mapping->endpoints[0],
2159 			    sizeof(struct pf_udp_endpoint_cmp)) == 0)
2160 			break;
2161 	}
2162 	if (endpoint == NULL) {
2163 		PF_HASHROW_UNLOCK(uh);
2164 		return (NULL);
2165 	}
2166 	refcount_acquire(&endpoint->mapping->refs);
2167 	PF_HASHROW_UNLOCK(uh);
2168 	return (endpoint->mapping);
2169 }
2170 /* END state table stuff */
2171 
2172 static void
2173 pf_send(struct pf_send_entry *pfse)
2174 {
2175 
2176 	PF_SENDQ_LOCK();
2177 	STAILQ_INSERT_TAIL(&V_pf_sendqueue, pfse, pfse_next);
2178 	PF_SENDQ_UNLOCK();
2179 	swi_sched(V_pf_swi_cookie, 0);
2180 }
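
/*
 * Editorial note (not part of the original source): pf_send() only
 * queues the entry; the packet is actually transmitted later from
 * software-interrupt context by pf_intr() below, which keeps packet
 * (re)injection out of the rule-evaluation path.
 */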
2181 
2182 static bool
2183 pf_isforlocal(struct mbuf *m, int af)
2184 {
2185 	switch (af) {
2186 #ifdef INET
2187 	case AF_INET: {
2188 		struct ip *ip = mtod(m, struct ip *);
2189 
2190 		return (in_localip(ip->ip_dst));
2191 	}
2192 #endif /* INET */
2193 #ifdef INET6
2194 	case AF_INET6: {
2195 		struct ip6_hdr *ip6;
2196 		struct in6_ifaddr *ia;
2197 		ip6 = mtod(m, struct ip6_hdr *);
2198 		ia = in6ifa_ifwithaddr(&ip6->ip6_dst, 0 /* XXX */, false);
2199 		if (ia == NULL)
2200 			return (false);
2201 		return (! (ia->ia6_flags & IN6_IFF_NOTREADY));
2202 	}
2203 #endif /* INET6 */
2204 	default:
2205 		unhandled_af(af);
2206 	}
2207 
2208 	return (false);
2209 }
2210 
2211 int
2212 pf_icmp_mapping(struct pf_pdesc *pd, u_int8_t type,
2213     int *icmp_dir, u_int16_t *virtual_id, u_int16_t *virtual_type)
2214 {
2215 	/*
2216 	 * ICMP types marked with PF_OUT are typically responses to
2217 	 * PF_IN, and will match states in the opposite direction.
2218 	 * PF_IN ICMP types need to match a state with that type.
2219 	 */
2220 	*icmp_dir = PF_OUT;
2221 
2222 	/* Queries (and responses) */
2223 	switch (pd->af) {
2224 #ifdef INET
2225 	case AF_INET:
2226 		switch (type) {
2227 		case ICMP_ECHO:
2228 			*icmp_dir = PF_IN;
2229 			/* FALLTHROUGH */
2230 		case ICMP_ECHOREPLY:
2231 			*virtual_type = ICMP_ECHO;
2232 			*virtual_id = pd->hdr.icmp.icmp_id;
2233 			break;
2234 
2235 		case ICMP_TSTAMP:
2236 			*icmp_dir = PF_IN;
2237 			/* FALLTHROUGH */
2238 		case ICMP_TSTAMPREPLY:
2239 			*virtual_type = ICMP_TSTAMP;
2240 			*virtual_id = pd->hdr.icmp.icmp_id;
2241 			break;
2242 
2243 		case ICMP_IREQ:
2244 			*icmp_dir = PF_IN;
2245 			/* FALLTHROUGH */
2246 		case ICMP_IREQREPLY:
2247 			*virtual_type = ICMP_IREQ;
2248 			*virtual_id = pd->hdr.icmp.icmp_id;
2249 			break;
2250 
2251 		case ICMP_MASKREQ:
2252 			*icmp_dir = PF_IN;
2253 			/* FALLTHROUGH */
2254 		case ICMP_MASKREPLY:
2255 			*virtual_type = ICMP_MASKREQ;
2256 			*virtual_id = pd->hdr.icmp.icmp_id;
2257 			break;
2258 
2259 		case ICMP_IPV6_WHEREAREYOU:
2260 			*icmp_dir = PF_IN;
2261 			/* FALLTHROUGH */
2262 		case ICMP_IPV6_IAMHERE:
2263 			*virtual_type = ICMP_IPV6_WHEREAREYOU;
2264 			*virtual_id = 0; /* Nothing sane to match on! */
2265 			break;
2266 
2267 		case ICMP_MOBILE_REGREQUEST:
2268 			*icmp_dir = PF_IN;
2269 			/* FALLTHROUGH */
2270 		case ICMP_MOBILE_REGREPLY:
2271 			*virtual_type = ICMP_MOBILE_REGREQUEST;
2272 			*virtual_id = 0; /* Nothing sane to match on! */
2273 			break;
2274 
2275 		case ICMP_ROUTERSOLICIT:
2276 			*icmp_dir = PF_IN;
2277 			/* FALLTHROUGH */
2278 		case ICMP_ROUTERADVERT:
2279 			*virtual_type = ICMP_ROUTERSOLICIT;
2280 			*virtual_id = 0; /* Nothing sane to match on! */
2281 			break;
2282 
2283 		/* These ICMP types map to other connections */
2284 		case ICMP_UNREACH:
2285 		case ICMP_SOURCEQUENCH:
2286 		case ICMP_REDIRECT:
2287 		case ICMP_TIMXCEED:
2288 		case ICMP_PARAMPROB:
2289 			/* These will not be used, but set them anyway */
2290 			*icmp_dir = PF_IN;
2291 			*virtual_type = type;
2292 			*virtual_id = 0;
2293 			*virtual_type = htons(*virtual_type);
2294 			return (1);  /* These types match to another state */
2295 
2296 		/*
2297 		 * All remaining ICMP types get their own states,
2298 		 * and will only match in one direction.
2299 		 */
2300 		default:
2301 			*icmp_dir = PF_IN;
2302 			*virtual_type = type;
2303 			*virtual_id = 0;
2304 			break;
2305 		}
2306 		break;
2307 #endif /* INET */
2308 #ifdef INET6
2309 	case AF_INET6:
2310 		switch (type) {
2311 		case ICMP6_ECHO_REQUEST:
2312 			*icmp_dir = PF_IN;
2313 			/* FALLTHROUGH */
2314 		case ICMP6_ECHO_REPLY:
2315 			*virtual_type = ICMP6_ECHO_REQUEST;
2316 			*virtual_id = pd->hdr.icmp6.icmp6_id;
2317 			break;
2318 
2319 		case MLD_LISTENER_QUERY:
2320 		case MLD_LISTENER_REPORT: {
2321 			/*
2322 			 * Listener Report can be sent by clients
2323 			 * without an associated Listener Query.
2324 			 * In addition to that, when Report is sent as a
2325 			 * reply to a Query, its source and destination
2326 			 * addresses are different.
2327 			 */
2328 			*icmp_dir = PF_IN;
2329 			*virtual_type = MLD_LISTENER_QUERY;
2330 			*virtual_id = 0;
2331 			break;
2332 		}
2333 		case MLD_MTRACE:
2334 			*icmp_dir = PF_IN;
2335 			/* FALLTHROUGH */
2336 		case MLD_MTRACE_RESP:
2337 			*virtual_type = MLD_MTRACE;
2338 			*virtual_id = 0; /* Nothing sane to match on! */
2339 			break;
2340 
2341 		case ND_NEIGHBOR_SOLICIT:
2342 			*icmp_dir = PF_IN;
2343 			/* FALLTHROUGH */
2344 		case ND_NEIGHBOR_ADVERT: {
2345 			*virtual_type = ND_NEIGHBOR_SOLICIT;
2346 			*virtual_id = 0;
2347 			break;
2348 		}
2349 
2350 		/*
2351 		 * These ICMP types map to other connections.
2352 		 * ND_REDIRECT can't be in this list because the triggering
2353 		 * packet header is optional.
2354 		 */
2355 		case ICMP6_DST_UNREACH:
2356 		case ICMP6_PACKET_TOO_BIG:
2357 		case ICMP6_TIME_EXCEEDED:
2358 		case ICMP6_PARAM_PROB:
2359 			/* These will not be used, but set them anyway */
2360 			*icmp_dir = PF_IN;
2361 			*virtual_type = type;
2362 			*virtual_id = 0;
2363 			*virtual_type = htons(*virtual_type);
2364 			return (1);  /* These types match to another state */
2365 		/*
2366 		 * All remaining ICMP6 types get their own states,
2367 		 * and will only match in one direction.
2368 		 */
2369 		default:
2370 			*icmp_dir = PF_IN;
2371 			*virtual_type = type;
2372 			*virtual_id = 0;
2373 			break;
2374 		}
2375 		break;
2376 #endif /* INET6 */
2377 	default:
2378 		unhandled_af(pd->af);
2379 	}
2380 	*virtual_type = htons(*virtual_type);
2381 	return (0);  /* These types match to their own state */
2382 }
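
/*
 * Editorial note (not part of the original source): a minimal sketch of
 * the intended use, assuming 'pd' describes an IPv4 ICMP packet.  For a
 * query/reply pair the returned virtual type and id let a reply match
 * the state created by the corresponding request, even though the
 * on-wire types differ.
 *
 *	int icmp_dir;
 *	u_int16_t virtual_id, virtual_type;
 *
 *	if (pf_icmp_mapping(pd, pd->hdr.icmp.icmp_type, &icmp_dir,
 *	    &virtual_id, &virtual_type) == 0) {
 *		... build the state key from virtual_type/virtual_id
 *		    rather than from the raw ICMP header fields ...
 *	}
 */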
2383 
2384 void
2385 pf_intr(void *v)
2386 {
2387 	struct epoch_tracker et;
2388 	struct pf_send_head queue;
2389 	struct pf_send_entry *pfse, *next;
2390 
2391 	CURVNET_SET((struct vnet *)v);
2392 
2393 	PF_SENDQ_LOCK();
2394 	queue = V_pf_sendqueue;
2395 	STAILQ_INIT(&V_pf_sendqueue);
2396 	PF_SENDQ_UNLOCK();
2397 
2398 	NET_EPOCH_ENTER(et);
2399 
2400 	STAILQ_FOREACH_SAFE(pfse, &queue, pfse_next, next) {
2401 		switch (pfse->pfse_type) {
2402 #ifdef INET
2403 		case PFSE_IP: {
2404 			if (pf_isforlocal(pfse->pfse_m, AF_INET)) {
2405 				KASSERT(pfse->pfse_m->m_pkthdr.rcvif == V_loif,
2406 				    ("%s: rcvif != loif", __func__));
2407 
2408 				pfse->pfse_m->m_flags |= M_SKIP_FIREWALL;
2409 				pfse->pfse_m->m_pkthdr.csum_flags |=
2410 				    CSUM_IP_VALID | CSUM_IP_CHECKED |
2411 				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2412 				pfse->pfse_m->m_pkthdr.csum_data = 0xffff;
2413 				ip_input(pfse->pfse_m);
2414 			} else {
2415 				ip_output(pfse->pfse_m, NULL, NULL, 0, NULL,
2416 				    NULL);
2417 			}
2418 			break;
2419 		}
2420 		case PFSE_ICMP:
2421 			icmp_error(pfse->pfse_m, pfse->icmpopts.type,
2422 			    pfse->icmpopts.code, 0, pfse->icmpopts.mtu);
2423 			break;
2424 #endif /* INET */
2425 #ifdef INET6
2426 		case PFSE_IP6:
2427 			if (pf_isforlocal(pfse->pfse_m, AF_INET6)) {
2428 				KASSERT(pfse->pfse_m->m_pkthdr.rcvif == V_loif,
2429 				    ("%s: rcvif != loif", __func__));
2430 
2431 				pfse->pfse_m->m_flags |= M_SKIP_FIREWALL |
2432 				    M_LOOP;
2433 				pfse->pfse_m->m_pkthdr.csum_flags |=
2434 				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2435 				pfse->pfse_m->m_pkthdr.csum_data = 0xffff;
2436 				ip6_input(pfse->pfse_m);
2437 			} else {
2438 				ip6_output(pfse->pfse_m, NULL, NULL, 0, NULL,
2439 				    NULL, NULL);
2440 			}
2441 			break;
2442 		case PFSE_ICMP6:
2443 			icmp6_error(pfse->pfse_m, pfse->icmpopts.type,
2444 			    pfse->icmpopts.code, pfse->icmpopts.mtu);
2445 			break;
2446 #endif /* INET6 */
2447 		default:
2448 			panic("%s: unknown type", __func__);
2449 		}
2450 		free(pfse, M_PFTEMP);
2451 	}
2452 	NET_EPOCH_EXIT(et);
2453 	CURVNET_RESTORE();
2454 }
2455 
2456 #define	pf_purge_thread_period	(hz / 10)
2457 
2458 #ifdef PF_WANT_32_TO_64_COUNTER
2459 static void
2460 pf_status_counter_u64_periodic(void)
2461 {
2462 
2463 	PF_RULES_RASSERT();
2464 
2465 	if ((V_pf_counter_periodic_iter % (pf_purge_thread_period * 10 * 60)) != 0) {
2466 		return;
2467 	}
2468 
2469 	for (int i = 0; i < FCNT_MAX; i++) {
2470 		pf_counter_u64_periodic(&V_pf_status.fcounters[i]);
2471 	}
2472 }
2473 
2474 static void
2475 pf_kif_counter_u64_periodic(void)
2476 {
2477 	struct pfi_kkif *kif;
2478 	size_t r, run;
2479 
2480 	PF_RULES_RASSERT();
2481 
2482 	if (__predict_false(V_pf_allkifcount == 0)) {
2483 		return;
2484 	}
2485 
2486 	if ((V_pf_counter_periodic_iter % (pf_purge_thread_period * 10 * 300)) != 0) {
2487 		return;
2488 	}
2489 
2490 	run = V_pf_allkifcount / 10;
2491 	if (run < 5)
2492 		run = 5;
2493 
2494 	for (r = 0; r < run; r++) {
2495 		kif = LIST_NEXT(V_pf_kifmarker, pfik_allkiflist);
2496 		if (kif == NULL) {
2497 			LIST_REMOVE(V_pf_kifmarker, pfik_allkiflist);
2498 			LIST_INSERT_HEAD(&V_pf_allkiflist, V_pf_kifmarker, pfik_allkiflist);
2499 			break;
2500 		}
2501 
2502 		LIST_REMOVE(V_pf_kifmarker, pfik_allkiflist);
2503 		LIST_INSERT_AFTER(kif, V_pf_kifmarker, pfik_allkiflist);
2504 
2505 		for (int i = 0; i < 2; i++) {
2506 			for (int j = 0; j < 2; j++) {
2507 				for (int k = 0; k < 2; k++) {
2508 					pf_counter_u64_periodic(&kif->pfik_packets[i][j][k]);
2509 					pf_counter_u64_periodic(&kif->pfik_bytes[i][j][k]);
2510 				}
2511 			}
2512 		}
2513 	}
2514 }
2515 
2516 static void
2517 pf_rule_counter_u64_periodic(void)
2518 {
2519 	struct pf_krule *rule;
2520 	size_t r, run;
2521 
2522 	PF_RULES_RASSERT();
2523 
2524 	if (__predict_false(V_pf_allrulecount == 0)) {
2525 		return;
2526 	}
2527 
2528 	if ((V_pf_counter_periodic_iter % (pf_purge_thread_period * 10 * 300)) != 0) {
2529 		return;
2530 	}
2531 
2532 	run = V_pf_allrulecount / 10;
2533 	if (run < 5)
2534 		run = 5;
2535 
2536 	for (r = 0; r < run; r++) {
2537 		rule = LIST_NEXT(V_pf_rulemarker, allrulelist);
2538 		if (rule == NULL) {
2539 			LIST_REMOVE(V_pf_rulemarker, allrulelist);
2540 			LIST_INSERT_HEAD(&V_pf_allrulelist, V_pf_rulemarker, allrulelist);
2541 			break;
2542 		}
2543 
2544 		LIST_REMOVE(V_pf_rulemarker, allrulelist);
2545 		LIST_INSERT_AFTER(rule, V_pf_rulemarker, allrulelist);
2546 
2547 		pf_counter_u64_periodic(&rule->evaluations);
2548 		for (int i = 0; i < 2; i++) {
2549 			pf_counter_u64_periodic(&rule->packets[i]);
2550 			pf_counter_u64_periodic(&rule->bytes[i]);
2551 		}
2552 	}
2553 }
2554 
2555 static void
2556 pf_counter_u64_periodic_main(void)
2557 {
2558 	PF_RULES_RLOCK_TRACKER;
2559 
2560 	V_pf_counter_periodic_iter++;
2561 
2562 	PF_RULES_RLOCK();
2563 	pf_counter_u64_critical_enter();
2564 	pf_status_counter_u64_periodic();
2565 	pf_kif_counter_u64_periodic();
2566 	pf_rule_counter_u64_periodic();
2567 	pf_counter_u64_critical_exit();
2568 	PF_RULES_RUNLOCK();
2569 }
2570 #else
2571 #define	pf_counter_u64_periodic_main()	do { } while (0)
2572 #endif
2573 
2574 void
2575 pf_purge_thread(void *unused __unused)
2576 {
2577 	struct epoch_tracker	 et;
2578 
2579 	VNET_ITERATOR_DECL(vnet_iter);
2580 
2581 	sx_xlock(&pf_end_lock);
2582 	while (pf_end_threads == 0) {
2583 		sx_sleep(pf_purge_thread, &pf_end_lock, 0, "pftm", pf_purge_thread_period);
2584 
2585 		VNET_LIST_RLOCK();
2586 		NET_EPOCH_ENTER(et);
2587 		VNET_FOREACH(vnet_iter) {
2588 			CURVNET_SET(vnet_iter);
2589 
2590 			/* Wait until V_pf_default_rule is initialized. */
2591 			if (V_pf_vnet_active == 0) {
2592 				CURVNET_RESTORE();
2593 				continue;
2594 			}
2595 
2596 			pf_counter_u64_periodic_main();
2597 
2598 			/*
2599 			 *  Process 1/interval fraction of the state
2600 			 * table every run.
2601 			 */
2602 			V_pf_purge_idx =
2603 			    pf_purge_expired_states(V_pf_purge_idx, V_pf_hashmask /
2604 			    (V_pf_default_rule.timeout[PFTM_INTERVAL] * 10));
2605 
2606 			/*
2607 			 * Purge other expired types every
2608 			 * PFTM_INTERVAL seconds.
2609 			 */
2610 			if (V_pf_purge_idx == 0) {
2611 				/*
2612 				 * Order is important:
2613 				 * - states and src nodes reference rules
2614 				 * - states and rules reference kifs
2615 				 */
2616 				pf_purge_expired_fragments();
2617 				pf_purge_expired_src_nodes();
2618 				pf_purge_unlinked_rules();
2619 				pfi_kkif_purge();
2620 			}
2621 			CURVNET_RESTORE();
2622 		}
2623 		NET_EPOCH_EXIT(et);
2624 		VNET_LIST_RUNLOCK();
2625 	}
2626 
2627 	pf_end_threads++;
2628 	sx_xunlock(&pf_end_lock);
2629 	kproc_exit(0);
2630 }
2631 
2632 void
2633 pf_unload_vnet_purge(void)
2634 {
2635 
2636 	/*
2637 	 * To clean up all kifs and rules we need
2638 	 * two runs: the first one clears reference flags,
2639 	 * after which pf_purge_expired_states() doesn't
2640 	 * raise them again, and the second run frees.
2641 	 */
2642 	pf_purge_unlinked_rules();
2643 	pfi_kkif_purge();
2644 
2645 	/*
2646 	 * Now purge everything.
2647 	 */
2648 	pf_purge_expired_states(0, V_pf_hashmask);
2649 	pf_purge_fragments(UINT_MAX);
2650 	pf_purge_expired_src_nodes();
2651 
2652 	/*
2653 	 * Now all kifs & rules should be unreferenced,
2654 	 * thus should be successfully freed.
2655 	 */
2656 	pf_purge_unlinked_rules();
2657 	pfi_kkif_purge();
2658 }
2659 
2660 u_int32_t
2661 pf_state_expires(const struct pf_kstate *state)
2662 {
2663 	u_int32_t	timeout;
2664 	u_int32_t	start;
2665 	u_int32_t	end;
2666 	u_int32_t	states;
2667 
2668 	/* handle all PFTM_* > PFTM_MAX here */
2669 	if (state->timeout == PFTM_PURGE)
2670 		return (time_uptime);
2671 	KASSERT(state->timeout != PFTM_UNLINKED,
2672 	    ("pf_state_expires: timeout == PFTM_UNLINKED"));
2673 	KASSERT((state->timeout < PFTM_MAX),
2674 	    ("pf_state_expires: timeout > PFTM_MAX"));
2675 	timeout = state->rule->timeout[state->timeout];
2676 	if (!timeout)
2677 		timeout = V_pf_default_rule.timeout[state->timeout];
2678 	start = state->rule->timeout[PFTM_ADAPTIVE_START];
2679 	if (start && state->rule != &V_pf_default_rule) {
2680 		end = state->rule->timeout[PFTM_ADAPTIVE_END];
2681 		states = counter_u64_fetch(state->rule->states_cur);
2682 	} else {
2683 		start = V_pf_default_rule.timeout[PFTM_ADAPTIVE_START];
2684 		end = V_pf_default_rule.timeout[PFTM_ADAPTIVE_END];
2685 		states = V_pf_status.states;
2686 	}
2687 	if (end && states > start && start < end) {
2688 		if (states < end) {
2689 			timeout = (u_int64_t)timeout * (end - states) /
2690 			    (end - start);
2691 			return ((state->expire / 1000) + timeout);
2692 		}
2693 		else
2694 			return (time_uptime);
2695 	}
2696 	return ((state->expire / 1000) + timeout);
2697 }
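
/*
 * Editorial note (not part of the original source): a worked example of
 * the adaptive scaling above, with assumed values.  With
 * adaptive.start = 6000, adaptive.end = 12000, a base timeout of 86400
 * seconds and 9000 states currently inserted, the effective timeout is
 *
 *	86400 * (12000 - 9000) / (12000 - 6000) = 43200 seconds,
 *
 * i.e. it shrinks linearly towards zero as the state count approaches
 * adaptive.end, and states expire immediately once that limit is
 * reached.
 */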
2698 
2699 void
2700 pf_purge_expired_src_nodes(void)
2701 {
2702 	struct pf_ksrc_node_list	 freelist;
2703 	struct pf_srchash	*sh;
2704 	struct pf_ksrc_node	*cur, *next;
2705 	int i;
2706 
2707 	LIST_INIT(&freelist);
2708 	for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask; i++, sh++) {
2709 	    PF_HASHROW_LOCK(sh);
2710 	    LIST_FOREACH_SAFE(cur, &sh->nodes, entry, next)
2711 		if (cur->states == 0 && cur->expire <= time_uptime) {
2712 			pf_unlink_src_node(cur);
2713 			LIST_INSERT_HEAD(&freelist, cur, entry);
2714 		} else if (cur->rule != NULL)
2715 			cur->rule->rule_ref |= PFRULE_REFS;
2716 	    PF_HASHROW_UNLOCK(sh);
2717 	}
2718 
2719 	pf_free_src_nodes(&freelist);
2720 
2721 	V_pf_status.src_nodes = uma_zone_get_cur(V_pf_sources_z);
2722 }
2723 
2724 static void
2725 pf_src_tree_remove_state(struct pf_kstate *s)
2726 {
2727 	uint32_t timeout;
2728 
2729 	timeout = s->rule->timeout[PFTM_SRC_NODE] ?
2730 	    s->rule->timeout[PFTM_SRC_NODE] :
2731 	    V_pf_default_rule.timeout[PFTM_SRC_NODE];
2732 
2733 	for (pf_sn_types_t sn_type=0; sn_type<PF_SN_MAX; sn_type++) {
2734 		if (s->sns[sn_type] == NULL)
2735 			continue;
2736 		PF_SRC_NODE_LOCK(s->sns[sn_type]);
2737 		if (sn_type == PF_SN_LIMIT && s->src.tcp_est)
2738 			--(s->sns[sn_type]->conn);
2739 		if (--(s->sns[sn_type]->states) == 0)
2740 			s->sns[sn_type]->expire = time_uptime + timeout;
2741 		PF_SRC_NODE_UNLOCK(s->sns[sn_type]);
2742 		s->sns[sn_type] = NULL;
2743 	}
2744 
2745 }
2746 
2747 /*
2748  * Unlink and potentially free a state. Function may be
2749  * called with ID hash row locked, but always returns
2750  * unlocked, since it needs to go through key hash locking.
2751  */
2752 int
2753 pf_remove_state(struct pf_kstate *s)
2754 {
2755 	struct pf_idhash *ih = &V_pf_idhash[PF_IDHASH(s)];
2756 
2757 	NET_EPOCH_ASSERT();
2758 	PF_HASHROW_ASSERT(ih);
2759 
2760 	if (s->timeout == PFTM_UNLINKED) {
2761 		/*
2762 		 * State is being processed
2763 		 * by pf_remove_state() in
2764 		 * another thread.
2765 		 */
2766 		PF_HASHROW_UNLOCK(ih);
2767 		return (0);	/* XXXGL: undefined actually */
2768 	}
2769 
2770 	if (s->src.state == PF_TCPS_PROXY_DST) {
2771 		/* XXX wire key the right one? */
2772 		pf_send_tcp(s->rule, s->key[PF_SK_WIRE]->af,
2773 		    &s->key[PF_SK_WIRE]->addr[1],
2774 		    &s->key[PF_SK_WIRE]->addr[0],
2775 		    s->key[PF_SK_WIRE]->port[1],
2776 		    s->key[PF_SK_WIRE]->port[0],
2777 		    s->src.seqhi, s->src.seqlo + 1,
2778 		    TH_RST|TH_ACK, 0, 0, 0, M_SKIP_FIREWALL, s->tag, 0,
2779 		    s->act.rtableid);
2780 	}
2781 
2782 	LIST_REMOVE(s, entry);
2783 	pf_src_tree_remove_state(s);
2784 
2785 	if (V_pfsync_delete_state_ptr != NULL)
2786 		V_pfsync_delete_state_ptr(s);
2787 
2788 	STATE_DEC_COUNTERS(s);
2789 
2790 	s->timeout = PFTM_UNLINKED;
2791 
2792 	/* Ensure we remove it from the list of halfopen states, if needed. */
2793 	if (s->key[PF_SK_STACK] != NULL &&
2794 	    s->key[PF_SK_STACK]->proto == IPPROTO_TCP)
2795 		pf_set_protostate(s, PF_PEER_BOTH, TCPS_CLOSED);
2796 
2797 	PF_HASHROW_UNLOCK(ih);
2798 
2799 	pf_detach_state(s);
2800 
2801 	pf_udp_mapping_release(s->udp_mapping);
2802 
2803 	/* pf_state_insert() initialises refs to 2 */
2804 	return (pf_release_staten(s, 2));
2805 }
2806 
2807 struct pf_kstate *
2808 pf_alloc_state(int flags)
2809 {
2810 
2811 	return (uma_zalloc(V_pf_state_z, flags | M_ZERO));
2812 }
2813 
2814 void
2815 pf_free_state(struct pf_kstate *cur)
2816 {
2817 	struct pf_krule_item *ri;
2818 
2819 	KASSERT(cur->refs == 0, ("%s: %p has refs", __func__, cur));
2820 	KASSERT(cur->timeout == PFTM_UNLINKED, ("%s: timeout %u", __func__,
2821 	    cur->timeout));
2822 
2823 	while ((ri = SLIST_FIRST(&cur->match_rules))) {
2824 		SLIST_REMOVE_HEAD(&cur->match_rules, entry);
2825 		free(ri, M_PF_RULE_ITEM);
2826 	}
2827 
2828 	pf_normalize_tcp_cleanup(cur);
2829 	uma_zfree(V_pf_state_z, cur);
2830 	pf_counter_u64_add(&V_pf_status.fcounters[FCNT_STATE_REMOVALS], 1);
2831 }
2832 
2833 /*
2834  * Called only from pf_purge_thread(), thus serialized.
2835  */
2836 static u_int
2837 pf_purge_expired_states(u_int i, int maxcheck)
2838 {
2839 	struct pf_idhash *ih;
2840 	struct pf_kstate *s;
2841 	struct pf_krule_item *mrm;
2842 	size_t count __unused;
2843 
2844 	V_pf_status.states = uma_zone_get_cur(V_pf_state_z);
2845 
2846 	/*
2847 	 * Go through hash and unlink states that expire now.
2848 	 */
2849 	while (maxcheck > 0) {
2850 		count = 0;
2851 		ih = &V_pf_idhash[i];
2852 
2853 		/* only take the lock if we expect to do work */
2854 		if (!LIST_EMPTY(&ih->states)) {
2855 relock:
2856 			PF_HASHROW_LOCK(ih);
2857 			LIST_FOREACH(s, &ih->states, entry) {
2858 				if (pf_state_expires(s) <= time_uptime) {
2859 					V_pf_status.states -=
2860 					    pf_remove_state(s);
2861 					goto relock;
2862 				}
2863 				s->rule->rule_ref |= PFRULE_REFS;
2864 				if (s->nat_rule != NULL)
2865 					s->nat_rule->rule_ref |= PFRULE_REFS;
2866 				if (s->anchor != NULL)
2867 					s->anchor->rule_ref |= PFRULE_REFS;
2868 				s->kif->pfik_flags |= PFI_IFLAG_REFS;
2869 				SLIST_FOREACH(mrm, &s->match_rules, entry)
2870 					mrm->r->rule_ref |= PFRULE_REFS;
2871 				if (s->act.rt_kif)
2872 					s->act.rt_kif->pfik_flags |= PFI_IFLAG_REFS;
2873 				count++;
2874 			}
2875 			PF_HASHROW_UNLOCK(ih);
2876 		}
2877 
2878 		SDT_PROBE2(pf, purge, state, rowcount, i, count);
2879 
2880 		/* Return when we hit end of hash. */
2881 		if (++i > V_pf_hashmask) {
2882 			V_pf_status.states = uma_zone_get_cur(V_pf_state_z);
2883 			return (0);
2884 		}
2885 
2886 		maxcheck--;
2887 	}
2888 
2889 	V_pf_status.states = uma_zone_get_cur(V_pf_state_z);
2890 
2891 	return (i);
2892 }
2893 
2894 static void
2895 pf_purge_unlinked_rules(void)
2896 {
2897 	struct pf_krulequeue tmpq;
2898 	struct pf_krule *r, *r1;
2899 
2900 	/*
2901 	 * If we have an overloading task pending, then we'd
2902 	 * better skip purging this time. There is a tiny
2903 	 * probability that the overloading task references
2904 	 * an already unlinked rule.
2905 	 */
2906 	PF_OVERLOADQ_LOCK();
2907 	if (!SLIST_EMPTY(&V_pf_overloadqueue)) {
2908 		PF_OVERLOADQ_UNLOCK();
2909 		return;
2910 	}
2911 	PF_OVERLOADQ_UNLOCK();
2912 
2913 	/*
2914 	 * Do naive mark-and-sweep garbage collecting of old rules.
2915 	 * Reference flag is raised by pf_purge_expired_states()
2916 	 * and pf_purge_expired_src_nodes().
2917 	 *
2918 	 * To avoid LOR between PF_UNLNKDRULES_LOCK/PF_RULES_WLOCK,
2919 	 * use a temporary queue.
2920 	 */
2921 	TAILQ_INIT(&tmpq);
2922 	PF_UNLNKDRULES_LOCK();
2923 	TAILQ_FOREACH_SAFE(r, &V_pf_unlinked_rules, entries, r1) {
2924 		if (!(r->rule_ref & PFRULE_REFS)) {
2925 			TAILQ_REMOVE(&V_pf_unlinked_rules, r, entries);
2926 			TAILQ_INSERT_TAIL(&tmpq, r, entries);
2927 		} else
2928 			r->rule_ref &= ~PFRULE_REFS;
2929 	}
2930 	PF_UNLNKDRULES_UNLOCK();
2931 
2932 	if (!TAILQ_EMPTY(&tmpq)) {
2933 		PF_CONFIG_LOCK();
2934 		PF_RULES_WLOCK();
2935 		TAILQ_FOREACH_SAFE(r, &tmpq, entries, r1) {
2936 			TAILQ_REMOVE(&tmpq, r, entries);
2937 			pf_free_rule(r);
2938 		}
2939 		PF_RULES_WUNLOCK();
2940 		PF_CONFIG_UNLOCK();
2941 	}
2942 }
2943 
2944 void
2945 pf_print_host(struct pf_addr *addr, u_int16_t p, sa_family_t af)
2946 {
2947 	switch (af) {
2948 #ifdef INET
2949 	case AF_INET: {
2950 		u_int32_t a = ntohl(addr->addr32[0]);
2951 		printf("%u.%u.%u.%u", (a>>24)&255, (a>>16)&255,
2952 		    (a>>8)&255, a&255);
2953 		if (p) {
2954 			p = ntohs(p);
2955 			printf(":%u", p);
2956 		}
2957 		break;
2958 	}
2959 #endif /* INET */
2960 #ifdef INET6
2961 	case AF_INET6: {
2962 		u_int16_t b;
2963 		u_int8_t i, curstart, curend, maxstart, maxend;
2964 		curstart = curend = maxstart = maxend = 255;
2965 		for (i = 0; i < 8; i++) {
2966 			if (!addr->addr16[i]) {
2967 				if (curstart == 255)
2968 					curstart = i;
2969 				curend = i;
2970 			} else {
2971 				if ((curend - curstart) >
2972 				    (maxend - maxstart)) {
2973 					maxstart = curstart;
2974 					maxend = curend;
2975 				}
2976 				curstart = curend = 255;
2977 			}
2978 		}
2979 		if ((curend - curstart) >
2980 		    (maxend - maxstart)) {
2981 			maxstart = curstart;
2982 			maxend = curend;
2983 		}
2984 		for (i = 0; i < 8; i++) {
2985 			if (i >= maxstart && i <= maxend) {
2986 				if (i == 0)
2987 					printf(":");
2988 				if (i == maxend)
2989 					printf(":");
2990 			} else {
2991 				b = ntohs(addr->addr16[i]);
2992 				printf("%x", b);
2993 				if (i < 7)
2994 					printf(":");
2995 			}
2996 		}
2997 		if (p) {
2998 			p = ntohs(p);
2999 			printf("[%u]", p);
3000 		}
3001 		break;
3002 	}
3003 #endif /* INET6 */
3004 	default:
3005 		unhandled_af(af);
3006 	}
3007 }
3008 
3009 void
3010 pf_print_state(struct pf_kstate *s)
3011 {
3012 	pf_print_state_parts(s, NULL, NULL);
3013 }
3014 
3015 static void
3016 pf_print_state_parts(struct pf_kstate *s,
3017     struct pf_state_key *skwp, struct pf_state_key *sksp)
3018 {
3019 	struct pf_state_key *skw, *sks;
3020 	u_int8_t proto, dir;
3021 
3022 	/* Do our best to fill these, but they're skipped if NULL */
3023 	skw = skwp ? skwp : (s ? s->key[PF_SK_WIRE] : NULL);
3024 	sks = sksp ? sksp : (s ? s->key[PF_SK_STACK] : NULL);
3025 	proto = skw ? skw->proto : (sks ? sks->proto : 0);
3026 	dir = s ? s->direction : 0;
3027 
3028 	switch (proto) {
3029 	case IPPROTO_IPV4:
3030 		printf("IPv4");
3031 		break;
3032 	case IPPROTO_IPV6:
3033 		printf("IPv6");
3034 		break;
3035 	case IPPROTO_TCP:
3036 		printf("TCP");
3037 		break;
3038 	case IPPROTO_UDP:
3039 		printf("UDP");
3040 		break;
3041 	case IPPROTO_ICMP:
3042 		printf("ICMP");
3043 		break;
3044 	case IPPROTO_ICMPV6:
3045 		printf("ICMPv6");
3046 		break;
3047 	default:
3048 		printf("%u", proto);
3049 		break;
3050 	}
3051 	switch (dir) {
3052 	case PF_IN:
3053 		printf(" in");
3054 		break;
3055 	case PF_OUT:
3056 		printf(" out");
3057 		break;
3058 	}
3059 	if (skw) {
3060 		printf(" wire: ");
3061 		pf_print_host(&skw->addr[0], skw->port[0], skw->af);
3062 		printf(" ");
3063 		pf_print_host(&skw->addr[1], skw->port[1], skw->af);
3064 	}
3065 	if (sks) {
3066 		printf(" stack: ");
3067 		if (sks != skw) {
3068 			pf_print_host(&sks->addr[0], sks->port[0], sks->af);
3069 			printf(" ");
3070 			pf_print_host(&sks->addr[1], sks->port[1], sks->af);
3071 		} else
3072 			printf("-");
3073 	}
3074 	if (s) {
3075 		if (proto == IPPROTO_TCP) {
3076 			printf(" [lo=%u high=%u win=%u modulator=%u",
3077 			    s->src.seqlo, s->src.seqhi,
3078 			    s->src.max_win, s->src.seqdiff);
3079 			if (s->src.wscale && s->dst.wscale)
3080 				printf(" wscale=%u",
3081 				    s->src.wscale & PF_WSCALE_MASK);
3082 			printf("]");
3083 			printf(" [lo=%u high=%u win=%u modulator=%u",
3084 			    s->dst.seqlo, s->dst.seqhi,
3085 			    s->dst.max_win, s->dst.seqdiff);
3086 			if (s->src.wscale && s->dst.wscale)
3087 				printf(" wscale=%u",
3088 				s->dst.wscale & PF_WSCALE_MASK);
3089 			printf("]");
3090 		}
3091 		printf(" %u:%u", s->src.state, s->dst.state);
3092 		if (s->rule)
3093 			printf(" @%d", s->rule->nr);
3094 	}
3095 }
3096 
3097 void
3098 pf_print_flags(uint16_t f)
3099 {
3100 	if (f)
3101 		printf(" ");
3102 	if (f & TH_FIN)
3103 		printf("F");
3104 	if (f & TH_SYN)
3105 		printf("S");
3106 	if (f & TH_RST)
3107 		printf("R");
3108 	if (f & TH_PUSH)
3109 		printf("P");
3110 	if (f & TH_ACK)
3111 		printf("A");
3112 	if (f & TH_URG)
3113 		printf("U");
3114 	if (f & TH_ECE)
3115 		printf("E");
3116 	if (f & TH_CWR)
3117 		printf("W");
3118 	if (f & TH_AE)
3119 		printf("e");
3120 }
3121 
3122 #define	PF_SET_SKIP_STEPS(i)					\
3123 	do {							\
3124 		while (head[i] != cur) {			\
3125 			head[i]->skip[i] = cur;			\
3126 			head[i] = TAILQ_NEXT(head[i], entries);	\
3127 		}						\
3128 	} while (0)
3129 
3130 void
3131 pf_calc_skip_steps(struct pf_krulequeue *rules)
3132 {
3133 	struct pf_krule *cur, *prev, *head[PF_SKIP_COUNT];
3134 	int i;
3135 
3136 	cur = TAILQ_FIRST(rules);
3137 	prev = cur;
3138 	for (i = 0; i < PF_SKIP_COUNT; ++i)
3139 		head[i] = cur;
3140 	while (cur != NULL) {
3141 		if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
3142 			PF_SET_SKIP_STEPS(PF_SKIP_IFP);
3143 		if (cur->direction != prev->direction)
3144 			PF_SET_SKIP_STEPS(PF_SKIP_DIR);
3145 		if (cur->af != prev->af)
3146 			PF_SET_SKIP_STEPS(PF_SKIP_AF);
3147 		if (cur->proto != prev->proto)
3148 			PF_SET_SKIP_STEPS(PF_SKIP_PROTO);
3149 		if (cur->src.neg != prev->src.neg ||
3150 		    pf_addr_wrap_neq(&cur->src.addr, &prev->src.addr))
3151 			PF_SET_SKIP_STEPS(PF_SKIP_SRC_ADDR);
3152 		if (cur->dst.neg != prev->dst.neg ||
3153 		    pf_addr_wrap_neq(&cur->dst.addr, &prev->dst.addr))
3154 			PF_SET_SKIP_STEPS(PF_SKIP_DST_ADDR);
3155 		if (cur->src.port[0] != prev->src.port[0] ||
3156 		    cur->src.port[1] != prev->src.port[1] ||
3157 		    cur->src.port_op != prev->src.port_op)
3158 			PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT);
3159 		if (cur->dst.port[0] != prev->dst.port[0] ||
3160 		    cur->dst.port[1] != prev->dst.port[1] ||
3161 		    cur->dst.port_op != prev->dst.port_op)
3162 			PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT);
3163 
3164 		prev = cur;
3165 		cur = TAILQ_NEXT(cur, entries);
3166 	}
3167 	for (i = 0; i < PF_SKIP_COUNT; ++i)
3168 		PF_SET_SKIP_STEPS(i);
3169 }
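
/*
 * Editorial note (not part of the original source): an illustrative
 * example of the skip steps computed above.  If rules 1-3 all apply to
 * interface "em0" and rule 4 is the first one on "em1", then rules 1-3
 * get skip[PF_SKIP_IFP] pointing at rule 4.  When a packet arrives on
 * some other interface, the evaluator can jump from rule 1 straight to
 * rule 4 instead of testing the interface of every rule in between.
 */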
3170 
3171 int
3172 pf_addr_wrap_neq(struct pf_addr_wrap *aw1, struct pf_addr_wrap *aw2)
3173 {
3174 	if (aw1->type != aw2->type)
3175 		return (1);
3176 	switch (aw1->type) {
3177 	case PF_ADDR_ADDRMASK:
3178 	case PF_ADDR_RANGE:
3179 		if (PF_ANEQ(&aw1->v.a.addr, &aw2->v.a.addr, AF_INET6))
3180 			return (1);
3181 		if (PF_ANEQ(&aw1->v.a.mask, &aw2->v.a.mask, AF_INET6))
3182 			return (1);
3183 		return (0);
3184 	case PF_ADDR_DYNIFTL:
3185 		return (aw1->p.dyn->pfid_kt != aw2->p.dyn->pfid_kt);
3186 	case PF_ADDR_NONE:
3187 	case PF_ADDR_NOROUTE:
3188 	case PF_ADDR_URPFFAILED:
3189 		return (0);
3190 	case PF_ADDR_TABLE:
3191 		return (aw1->p.tbl != aw2->p.tbl);
3192 	default:
3193 		printf("invalid address type: %d\n", aw1->type);
3194 		return (1);
3195 	}
3196 }
3197 
3198 /**
3199  * Checksum updates are a little complicated because the checksum in the TCP/UDP
3200  * header isn't always a full checksum. In some cases (i.e. output) it's a
3201  * pseudo-header checksum, which is a partial checksum over src/dst IP
3202  * addresses, protocol number and length.
3203  *
3204  * That means we have the following cases:
3205  *  * Input or forwarding: we don't have TSO, the checksum fields are full
3206  *  	checksums, we need to update the checksum whenever we change anything.
3207  *  * Output (i.e. the checksum is a pseudo-header checksum):
3208  *  	x The field being updated is src/dst address or affects the length of
3209  *  	the packet. We need to update the pseudo-header checksum (note that this
3210  *  	checksum is not ones' complement).
3211  *  	x Some other field is being modified (e.g. src/dst port numbers): We
3212  *  	don't have to update anything.
3213  **/
3214 u_int16_t
3215 pf_cksum_fixup(u_int16_t cksum, u_int16_t old, u_int16_t new, u_int8_t udp)
3216 {
3217 	u_int32_t x;
3218 
3219 	x = cksum + old - new;
3220 	x = (x + (x >> 16)) & 0xffff;
3221 
3222 	/* optimise: eliminate a branch when not udp */
3223 	if (udp && cksum == 0x0000)
3224 		return cksum;
3225 	if (udp && x == 0x0000)
3226 		x = 0xffff;
3227 
3228 	return (u_int16_t)(x);
3229 }
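
/*
 * Editorial note (not part of the original source): a minimal usage
 * sketch, assuming a TCP header 'th' whose destination port is being
 * rewritten in place.  Each 16-bit word that changes is folded into the
 * existing checksum; the last argument stays 0 for TCP so that a zero
 * result is not remapped to 0xffff as it would be for UDP.
 *
 *	u_int16_t old_port = th->th_dport;
 *
 *	th->th_dport = new_port;
 *	th->th_sum = pf_cksum_fixup(th->th_sum, old_port, new_port, 0);
 */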
3230 
3231 static int
3232 pf_patch_8(struct pf_pdesc *pd, u_int8_t *f, u_int8_t v, bool hi)
3233 {
3234 	int	 rewrite = 0;
3235 
3236 	if (*f != v) {
3237 		uint16_t old = htons(hi ? (*f << 8) : *f);
3238 		uint16_t new = htons(hi ? ( v << 8) :  v);
3239 
3240 		*f = v;
3241 
3242 		if (! (pd->m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA |
3243 		    CSUM_DELAY_DATA_IPV6)))
3244 			*pd->pcksum = pf_cksum_fixup(*pd->pcksum, old, new,
3245 			    pd->proto == IPPROTO_UDP);
3246 
3247 		rewrite = 1;
3248 	}
3249 
3250 	return (rewrite);
3251 }
3252 
3253 int
3254 pf_patch_16(struct pf_pdesc *pd, void *f, u_int16_t v, bool hi)
3255 {
3256 	int rewrite = 0;
3257 	u_int8_t *fb = (u_int8_t *)f;
3258 	u_int8_t *vb = (u_int8_t *)&v;
3259 
3260 	rewrite += pf_patch_8(pd, fb++, *vb++, hi);
3261 	rewrite += pf_patch_8(pd, fb++, *vb++, !hi);
3262 
3263 	return (rewrite);
3264 }
3265 
3266 int
3267 pf_patch_32(struct pf_pdesc *pd, void *f, u_int32_t v, bool hi)
3268 {
3269 	int rewrite = 0;
3270 	u_int8_t *fb = (u_int8_t *)f;
3271 	u_int8_t *vb = (u_int8_t *)&v;
3272 
3273 	rewrite += pf_patch_8(pd, fb++, *vb++, hi);
3274 	rewrite += pf_patch_8(pd, fb++, *vb++, !hi);
3275 	rewrite += pf_patch_8(pd, fb++, *vb++, hi);
3276 	rewrite += pf_patch_8(pd, fb++, *vb++, !hi);
3277 
3278 	return (rewrite);
3279 }
3280 
3281 u_int16_t
3282 pf_proto_cksum_fixup(struct mbuf *m, u_int16_t cksum, u_int16_t old,
3283         u_int16_t new, u_int8_t udp)
3284 {
3285 	if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6))
3286 		return (cksum);
3287 
3288 	return (pf_cksum_fixup(cksum, old, new, udp));
3289 }
3290 
3291 static void
3292 pf_change_ap(struct pf_pdesc *pd, struct pf_addr *a, u_int16_t *p,
3293         struct pf_addr *an, u_int16_t pn)
3294 {
3295 	struct pf_addr	ao;
3296 	u_int16_t	po;
3297 	uint8_t		u = pd->virtual_proto == IPPROTO_UDP;
3298 
3299 	MPASS(pd->pcksum);
3300 	if (pd->af == AF_INET) {
3301 		MPASS(pd->ip_sum);
3302 	}
3303 
3304 	pf_addrcpy(&ao, a, pd->af);
3305 	if (pd->af == pd->naf)
3306 		pf_addrcpy(a, an, pd->af);
3307 
3308 	if (pd->m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6))
3309 		*pd->pcksum = ~*pd->pcksum;
3310 
3311 	if (p == NULL)  /* no port -> done. no cksum to worry about. */
3312 		return;
3313 	po = *p;
3314 	*p = pn;
3315 
3316 	switch (pd->af) {
3317 #ifdef INET
3318 	case AF_INET:
3319 		switch (pd->naf) {
3320 		case AF_INET:
3321 			*pd->ip_sum = pf_cksum_fixup(pf_cksum_fixup(*pd->ip_sum,
3322 			    ao.addr16[0], an->addr16[0], 0),
3323 			    ao.addr16[1], an->addr16[1], 0);
3324 			*p = pn;
3325 
3326 			*pd->pcksum = pf_cksum_fixup(pf_cksum_fixup(*pd->pcksum,
3327 			    ao.addr16[0], an->addr16[0], u),
3328 			    ao.addr16[1], an->addr16[1], u);
3329 
3330 			*pd->pcksum = pf_proto_cksum_fixup(pd->m, *pd->pcksum, po, pn, u);
3331 			break;
3332 #ifdef INET6
3333 		case AF_INET6:
3334 			*pd->pcksum = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
3335 			   pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
3336 			    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pd->pcksum,
3337 			    ao.addr16[0], an->addr16[0], u),
3338 			    ao.addr16[1], an->addr16[1], u),
3339 			    0,            an->addr16[2], u),
3340 			    0,            an->addr16[3], u),
3341 			    0,            an->addr16[4], u),
3342 			    0,            an->addr16[5], u),
3343 			    0,            an->addr16[6], u),
3344 			    0,            an->addr16[7], u),
3345 			    po, pn, u);
3346 			break;
3347 #endif /* INET6 */
3348 		default:
3349 			unhandled_af(pd->naf);
3350 		}
3351 		break;
3352 #endif /* INET */
3353 #ifdef INET6
3354 	case AF_INET6:
3355 		switch (pd->naf) {
3356 #ifdef INET
3357 		case AF_INET:
3358 			*pd->pcksum = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
3359 			    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
3360 			    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pd->pcksum,
3361 			    ao.addr16[0], an->addr16[0], u),
3362 			    ao.addr16[1], an->addr16[1], u),
3363 			    ao.addr16[2], 0,             u),
3364 			    ao.addr16[3], 0,             u),
3365 			    ao.addr16[4], 0,             u),
3366 			    ao.addr16[5], 0,             u),
3367 			    ao.addr16[6], 0,             u),
3368 			    ao.addr16[7], 0,             u),
3369 			    po, pn, u);
3370 			break;
3371 #endif /* INET */
3372 		case AF_INET6:
3373 			*pd->pcksum  = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
3374 			    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
3375 			    pf_cksum_fixup(pf_cksum_fixup(*pd->pcksum,
3376 			    ao.addr16[0], an->addr16[0], u),
3377 			    ao.addr16[1], an->addr16[1], u),
3378 			    ao.addr16[2], an->addr16[2], u),
3379 			    ao.addr16[3], an->addr16[3], u),
3380 			    ao.addr16[4], an->addr16[4], u),
3381 			    ao.addr16[5], an->addr16[5], u),
3382 			    ao.addr16[6], an->addr16[6], u),
3383 			    ao.addr16[7], an->addr16[7], u);
3384 
3385 			*pd->pcksum = pf_proto_cksum_fixup(pd->m, *pd->pcksum, po, pn, u);
3386 			break;
3387 		default:
3388 			unhandled_af(pd->naf);
3389 		}
3390 		break;
3391 #endif /* INET6 */
3392 	default:
3393 		unhandled_af(pd->af);
3394 	}
3395 
3396 	if (pd->m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA |
3397 	    CSUM_DELAY_DATA_IPV6)) {
3398 		*pd->pcksum = ~*pd->pcksum;
3399 		if (! *pd->pcksum)
3400 			*pd->pcksum = 0xffff;
3401 	}
3402 }
3403 
3404 /* Changes a u_int32_t.  Uses a void * so there are no alignment restrictions. */
3405 void
3406 pf_change_a(void *a, u_int16_t *c, u_int32_t an, u_int8_t u)
3407 {
3408 	u_int32_t	ao;
3409 
3410 	memcpy(&ao, a, sizeof(ao));
3411 	memcpy(a, &an, sizeof(u_int32_t));
3412 	*c = pf_cksum_fixup(pf_cksum_fixup(*c, ao / 65536, an / 65536, u),
3413 	    ao % 65536, an % 65536, u);
3414 }
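
/*
 * Editorial note (not part of the original source): a minimal usage
 * sketch, assuming an IPv4 header 'h' whose source address is rewritten
 * to 'new_addr' (already in network byte order).  The 32-bit change is
 * folded into the IP header checksum as two 16-bit updates.
 *
 *	pf_change_a(&h->ip_src.s_addr, &h->ip_sum, new_addr, 0);
 */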
3415 
3416 void
3417 pf_change_proto_a(struct mbuf *m, void *a, u_int16_t *c, u_int32_t an, u_int8_t udp)
3418 {
3419 	u_int32_t	ao;
3420 
3421 	memcpy(&ao, a, sizeof(ao));
3422 	memcpy(a, &an, sizeof(u_int32_t));
3423 
3424 	*c = pf_proto_cksum_fixup(m,
3425 	    pf_proto_cksum_fixup(m, *c, ao / 65536, an / 65536, udp),
3426 	    ao % 65536, an % 65536, udp);
3427 }
3428 
3429 #ifdef INET6
3430 static void
3431 pf_change_a6(struct pf_addr *a, u_int16_t *c, struct pf_addr *an, u_int8_t u)
3432 {
3433 	struct pf_addr	ao;
3434 
3435 	pf_addrcpy(&ao, a, AF_INET6);
3436 	pf_addrcpy(a, an, AF_INET6);
3437 
3438 	*c = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
3439 	    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
3440 	    pf_cksum_fixup(pf_cksum_fixup(*c,
3441 	    ao.addr16[0], an->addr16[0], u),
3442 	    ao.addr16[1], an->addr16[1], u),
3443 	    ao.addr16[2], an->addr16[2], u),
3444 	    ao.addr16[3], an->addr16[3], u),
3445 	    ao.addr16[4], an->addr16[4], u),
3446 	    ao.addr16[5], an->addr16[5], u),
3447 	    ao.addr16[6], an->addr16[6], u),
3448 	    ao.addr16[7], an->addr16[7], u);
3449 }
3450 #endif /* INET6 */
3451 
3452 static void
3453 pf_change_icmp(struct pf_addr *ia, u_int16_t *ip, struct pf_addr *oa,
3454     struct pf_addr *na, u_int16_t np, u_int16_t *pc, u_int16_t *h2c,
3455     u_int16_t *ic, u_int16_t *hc, u_int8_t u, sa_family_t af)
3456 {
3457 	struct pf_addr	oia, ooa;
3458 
3459 	pf_addrcpy(&oia, ia, af);
3460 	if (oa)
3461 		pf_addrcpy(&ooa, oa, af);
3462 
3463 	/* Change inner protocol port, fix inner protocol checksum. */
3464 	if (ip != NULL) {
3465 		u_int16_t	oip = *ip;
3466 		u_int32_t	opc;
3467 
3468 		if (pc != NULL)
3469 			opc = *pc;
3470 		*ip = np;
3471 		if (pc != NULL)
3472 			*pc = pf_cksum_fixup(*pc, oip, *ip, u);
3473 		*ic = pf_cksum_fixup(*ic, oip, *ip, 0);
3474 		if (pc != NULL)
3475 			*ic = pf_cksum_fixup(*ic, opc, *pc, 0);
3476 	}
3477 	/* Change inner ip address, fix inner ip and icmp checksums. */
3478 	pf_addrcpy(ia, na, af);
3479 	switch (af) {
3480 #ifdef INET
3481 	case AF_INET: {
3482 		u_int32_t	 oh2c = *h2c;
3483 
3484 		*h2c = pf_cksum_fixup(pf_cksum_fixup(*h2c,
3485 		    oia.addr16[0], ia->addr16[0], 0),
3486 		    oia.addr16[1], ia->addr16[1], 0);
3487 		*ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
3488 		    oia.addr16[0], ia->addr16[0], 0),
3489 		    oia.addr16[1], ia->addr16[1], 0);
3490 		*ic = pf_cksum_fixup(*ic, oh2c, *h2c, 0);
3491 		break;
3492 	}
3493 #endif /* INET */
3494 #ifdef INET6
3495 	case AF_INET6:
3496 		*ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
3497 		    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
3498 		    pf_cksum_fixup(pf_cksum_fixup(*ic,
3499 		    oia.addr16[0], ia->addr16[0], u),
3500 		    oia.addr16[1], ia->addr16[1], u),
3501 		    oia.addr16[2], ia->addr16[2], u),
3502 		    oia.addr16[3], ia->addr16[3], u),
3503 		    oia.addr16[4], ia->addr16[4], u),
3504 		    oia.addr16[5], ia->addr16[5], u),
3505 		    oia.addr16[6], ia->addr16[6], u),
3506 		    oia.addr16[7], ia->addr16[7], u);
3507 		break;
3508 #endif /* INET6 */
3509 	}
3510 	/* Outer ip address, fix outer ip or icmpv6 checksum, if necessary. */
3511 	if (oa) {
3512 		pf_addrcpy(oa, na, af);
3513 		switch (af) {
3514 #ifdef INET
3515 		case AF_INET:
3516 			*hc = pf_cksum_fixup(pf_cksum_fixup(*hc,
3517 			    ooa.addr16[0], oa->addr16[0], 0),
3518 			    ooa.addr16[1], oa->addr16[1], 0);
3519 			break;
3520 #endif /* INET */
3521 #ifdef INET6
3522 		case AF_INET6:
3523 			*ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
3524 			    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
3525 			    pf_cksum_fixup(pf_cksum_fixup(*ic,
3526 			    ooa.addr16[0], oa->addr16[0], u),
3527 			    ooa.addr16[1], oa->addr16[1], u),
3528 			    ooa.addr16[2], oa->addr16[2], u),
3529 			    ooa.addr16[3], oa->addr16[3], u),
3530 			    ooa.addr16[4], oa->addr16[4], u),
3531 			    ooa.addr16[5], oa->addr16[5], u),
3532 			    ooa.addr16[6], oa->addr16[6], u),
3533 			    ooa.addr16[7], oa->addr16[7], u);
3534 			break;
3535 #endif /* INET6 */
3536 		}
3537 	}
3538 }
3539 
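/*
 * pf_translate_af() rewrites the outer IP header of the packet in pd to
 * the new address family pd->naf: the old header is trimmed off, a fresh
 * IPv4 or IPv6 header is prepended, and for ICMP/ICMPv6 packets the
 * checksum is recomputed from scratch, since it cannot be fixed up
 * incrementally across an address family change.
 */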
3540 int
3541 pf_translate_af(struct pf_pdesc *pd)
3542 {
3543 #if defined(INET) && defined(INET6)
3544 	struct mbuf		*mp;
3545 	struct ip		*ip4;
3546 	struct ip6_hdr		*ip6;
3547 	struct icmp6_hdr	*icmp;
3548 	struct m_tag		*mtag;
3549 	struct pf_fragment_tag	*ftag;
3550 	int			 hlen;
3551 
3552 	hlen = pd->naf == AF_INET ? sizeof(*ip4) : sizeof(*ip6);
3553 
3554 	/* trim the old header */
3555 	m_adj(pd->m, pd->off);
3556 
3557 	/* prepend a new one */
3558 	M_PREPEND(pd->m, hlen, M_NOWAIT);
3559 	if (pd->m == NULL)
3560 		return (-1);
3561 
3562 	switch (pd->naf) {
3563 	case AF_INET:
3564 		ip4 = mtod(pd->m, struct ip *);
3565 		bzero(ip4, hlen);
3566 		ip4->ip_v = IPVERSION;
3567 		ip4->ip_hl = hlen >> 2;
3568 		ip4->ip_tos = pd->tos;
3569 		ip4->ip_len = htons(hlen + (pd->tot_len - pd->off));
3570 		ip_fillid(ip4, V_ip_random_id);
3571 		ip4->ip_ttl = pd->ttl;
3572 		ip4->ip_p = pd->proto;
3573 		ip4->ip_src = pd->nsaddr.v4;
3574 		ip4->ip_dst = pd->ndaddr.v4;
3575 		pd->src = (struct pf_addr *)&ip4->ip_src;
3576 		pd->dst = (struct pf_addr *)&ip4->ip_dst;
3577 		pd->off = sizeof(struct ip);
3578 		break;
3579 	case AF_INET6:
3580 		ip6 = mtod(pd->m, struct ip6_hdr *);
3581 		bzero(ip6, hlen);
3582 		ip6->ip6_vfc = IPV6_VERSION;
3583 		ip6->ip6_flow |= htonl((u_int32_t)pd->tos << 20);
3584 		ip6->ip6_plen = htons(pd->tot_len - pd->off);
3585 		ip6->ip6_nxt = pd->proto;
3586 		if (!pd->ttl || pd->ttl > IPV6_DEFHLIM)
3587 			ip6->ip6_hlim = IPV6_DEFHLIM;
3588 		else
3589 			ip6->ip6_hlim = pd->ttl;
3590 		ip6->ip6_src = pd->nsaddr.v6;
3591 		ip6->ip6_dst = pd->ndaddr.v6;
3592 		pd->src = (struct pf_addr *)&ip6->ip6_src;
3593 		pd->dst = (struct pf_addr *)&ip6->ip6_dst;
3594 		pd->off = sizeof(struct ip6_hdr);
3595 
3596 		/*
3597 		 * If we're dealing with a reassembled packet we need to adjust
3598 		 * the header length from the IPv4 header size to IPv6 header
3599 		 * size.
3600 		 */
3601 		mtag = m_tag_find(pd->m, PACKET_TAG_PF_REASSEMBLED, NULL);
3602 		if (mtag) {
3603 			ftag = (struct pf_fragment_tag *)(mtag + 1);
3604 			ftag->ft_hdrlen = sizeof(*ip6);
3605 			ftag->ft_maxlen -= sizeof(struct ip6_hdr) -
3606 			    sizeof(struct ip) + sizeof(struct ip6_frag);
3607 		}
3608 		break;
3609 	default:
3610 		return (-1);
3611 	}
3612 
3613 	/* recalculate icmp/icmp6 checksums */
3614 	if (pd->proto == IPPROTO_ICMP || pd->proto == IPPROTO_ICMPV6) {
3615 		int off;
3616 		if ((mp = m_pulldown(pd->m, hlen, sizeof(*icmp), &off)) ==
3617 		    NULL) {
3618 			pd->m = NULL;
3619 			return (-1);
3620 		}
3621 		icmp = (struct icmp6_hdr *)(mp->m_data + off);
3622 		icmp->icmp6_cksum = 0;
3623 		icmp->icmp6_cksum = pd->naf == AF_INET ?
3624 		    in4_cksum(pd->m, 0, hlen, ntohs(ip4->ip_len) - hlen) :
3625 		    in6_cksum(pd->m, IPPROTO_ICMPV6, hlen,
3626 			ntohs(ip6->ip6_plen));
3627 	}
3628 #endif /* INET && INET6 */
3629 
3630 	return (0);
3631 }
3632 
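/*
 * pf_change_icmp_af() translates the IP header quoted inside an ICMP error
 * payload between IPv4 and IPv6.  The mbuf chain is split at the inner
 * header, the old inner header is trimmed, a new one for the target family
 * is prepended, and the pieces are concatenated again; pd2->off and
 * pd->tot_len are adjusted for the size difference.
 */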
3633 int
3634 pf_change_icmp_af(struct mbuf *m, int off, struct pf_pdesc *pd,
3635     struct pf_pdesc *pd2, struct pf_addr *src, struct pf_addr *dst,
3636     sa_family_t af, sa_family_t naf)
3637 {
3638 #if defined(INET) && defined(INET6)
3639 	struct mbuf	*n = NULL;
3640 	struct ip	*ip4;
3641 	struct ip6_hdr	*ip6;
3642 	int		 hlen, olen, mlen;
3643 
3644 	if (af == naf || (af != AF_INET && af != AF_INET6) ||
3645 	    (naf != AF_INET && naf != AF_INET6))
3646 		return (-1);
3647 
3648 	/* split the mbuf chain on the inner ip/ip6 header boundary */
3649 	if ((n = m_split(m, off, M_NOWAIT)) == NULL)
3650 		return (-1);
3651 
3652 	/* old header */
3653 	olen = pd2->off - off;
3654 	/* new header */
3655 	hlen = naf == AF_INET ? sizeof(*ip4) : sizeof(*ip6);
3656 
3657 	/* trim old header */
3658 	m_adj(n, olen);
3659 
3660 	/* prepend a new one */
3661 	M_PREPEND(n, hlen, M_NOWAIT);
3662 	if (n == NULL)
3663 		return (-1);
3664 
3665 	/* translate inner ip/ip6 header */
3666 	switch (naf) {
3667 	case AF_INET:
3668 		ip4 = mtod(n, struct ip *);
3669 		bzero(ip4, sizeof(*ip4));
3670 		ip4->ip_v = IPVERSION;
3671 		ip4->ip_hl = sizeof(*ip4) >> 2;
3672 		ip4->ip_len = htons(sizeof(*ip4) + pd2->tot_len - olen);
3673 		ip_fillid(ip4, V_ip_random_id);
3674 		ip4->ip_off = htons(IP_DF);
3675 		ip4->ip_ttl = pd2->ttl;
3676 		if (pd2->proto == IPPROTO_ICMPV6)
3677 			ip4->ip_p = IPPROTO_ICMP;
3678 		else
3679 			ip4->ip_p = pd2->proto;
3680 		ip4->ip_src = src->v4;
3681 		ip4->ip_dst = dst->v4;
3682 		ip4->ip_sum = in_cksum(n, ip4->ip_hl << 2);
3683 		break;
3684 	case AF_INET6:
3685 		ip6 = mtod(n, struct ip6_hdr *);
3686 		bzero(ip6, sizeof(*ip6));
3687 		ip6->ip6_vfc = IPV6_VERSION;
3688 		ip6->ip6_plen = htons(pd2->tot_len - olen);
3689 		if (pd2->proto == IPPROTO_ICMP)
3690 			ip6->ip6_nxt = IPPROTO_ICMPV6;
3691 		else
3692 			ip6->ip6_nxt = pd2->proto;
3693 		if (!pd2->ttl || pd2->ttl > IPV6_DEFHLIM)
3694 			ip6->ip6_hlim = IPV6_DEFHLIM;
3695 		else
3696 			ip6->ip6_hlim = pd2->ttl;
3697 		ip6->ip6_src = src->v6;
3698 		ip6->ip6_dst = dst->v6;
3699 		break;
3700 	default:
3701 		unhandled_af(naf);
3702 	}
3703 
3704 	/* adjust payload offset and total packet length */
3705 	pd2->off += hlen - olen;
3706 	pd->tot_len += hlen - olen;
3707 
3708 	/* merge modified inner packet with the original header */
3709 	mlen = n->m_pkthdr.len;
3710 	m_cat(m, n);
3711 	m->m_pkthdr.len += mlen;
3712 #endif /* INET && INET6 */
3713 
3714 	return (0);
3715 }
3716 
3717 #define PTR_IP(field)	(offsetof(struct ip, field))
3718 #define PTR_IP6(field)	(offsetof(struct ip6_hdr, field))
3719 
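/*
 * pf_translate_icmp_af() maps ICMP types/codes to their ICMPv6 counterparts
 * and vice versa, adjusts the reported MTU for the 20-byte difference
 * between the IPv4 and IPv6 headers, and remaps the parameter problem
 * pointer between the two header layouts using the PTR_IP()/PTR_IP6()
 * offsets above.  All changes are folded into the existing checksum with
 * pf_cksum_fixup().
 */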
3720 int
3721 pf_translate_icmp_af(int af, void *arg)
3722 {
3723 #if defined(INET) && defined(INET6)
3724 	struct icmp		*icmp4;
3725 	struct icmp6_hdr	*icmp6;
3726 	u_int32_t		 mtu;
3727 	int32_t			 ptr = -1;
3728 	u_int8_t		 type;
3729 	u_int8_t		 code;
3730 
3731 	switch (af) {
3732 	case AF_INET:
3733 		icmp6 = arg;
3734 		type = icmp6->icmp6_type;
3735 		code = icmp6->icmp6_code;
3736 		mtu = ntohl(icmp6->icmp6_mtu);
3737 
3738 		switch (type) {
3739 		case ICMP6_ECHO_REQUEST:
3740 			type = ICMP_ECHO;
3741 			break;
3742 		case ICMP6_ECHO_REPLY:
3743 			type = ICMP_ECHOREPLY;
3744 			break;
3745 		case ICMP6_DST_UNREACH:
3746 			type = ICMP_UNREACH;
3747 			switch (code) {
3748 			case ICMP6_DST_UNREACH_NOROUTE:
3749 			case ICMP6_DST_UNREACH_BEYONDSCOPE:
3750 			case ICMP6_DST_UNREACH_ADDR:
3751 				code = ICMP_UNREACH_HOST;
3752 				break;
3753 			case ICMP6_DST_UNREACH_ADMIN:
3754 				code = ICMP_UNREACH_HOST_PROHIB;
3755 				break;
3756 			case ICMP6_DST_UNREACH_NOPORT:
3757 				code = ICMP_UNREACH_PORT;
3758 				break;
3759 			default:
3760 				return (-1);
3761 			}
3762 			break;
3763 		case ICMP6_PACKET_TOO_BIG:
3764 			type = ICMP_UNREACH;
3765 			code = ICMP_UNREACH_NEEDFRAG;
3766 			mtu -= 20;
3767 			break;
3768 		case ICMP6_TIME_EXCEEDED:
3769 			type = ICMP_TIMXCEED;
3770 			break;
3771 		case ICMP6_PARAM_PROB:
3772 			switch (code) {
3773 			case ICMP6_PARAMPROB_HEADER:
3774 				type = ICMP_PARAMPROB;
3775 				code = ICMP_PARAMPROB_ERRATPTR;
3776 				ptr = ntohl(icmp6->icmp6_pptr);
3777 
3778 				if (ptr == PTR_IP6(ip6_vfc))
3779 					; /* preserve */
3780 				else if (ptr == PTR_IP6(ip6_vfc) + 1)
3781 					ptr = PTR_IP(ip_tos);
3782 				else if (ptr == PTR_IP6(ip6_plen) ||
3783 				    ptr == PTR_IP6(ip6_plen) + 1)
3784 					ptr = PTR_IP(ip_len);
3785 				else if (ptr == PTR_IP6(ip6_nxt))
3786 					ptr = PTR_IP(ip_p);
3787 				else if (ptr == PTR_IP6(ip6_hlim))
3788 					ptr = PTR_IP(ip_ttl);
3789 				else if (ptr >= PTR_IP6(ip6_src) &&
3790 				    ptr < PTR_IP6(ip6_dst))
3791 					ptr = PTR_IP(ip_src);
3792 				else if (ptr >= PTR_IP6(ip6_dst) &&
3793 				    ptr < sizeof(struct ip6_hdr))
3794 					ptr = PTR_IP(ip_dst);
3795 				else {
3796 					return (-1);
3797 				}
3798 				break;
3799 			case ICMP6_PARAMPROB_NEXTHEADER:
3800 				type = ICMP_UNREACH;
3801 				code = ICMP_UNREACH_PROTOCOL;
3802 				break;
3803 			default:
3804 				return (-1);
3805 			}
3806 			break;
3807 		default:
3808 			return (-1);
3809 		}
3810 		if (icmp6->icmp6_type != type) {
3811 			icmp6->icmp6_cksum = pf_cksum_fixup(icmp6->icmp6_cksum,
3812 			    icmp6->icmp6_type, type, 0);
3813 			icmp6->icmp6_type = type;
3814 		}
3815 		if (icmp6->icmp6_code != code) {
3816 			icmp6->icmp6_cksum = pf_cksum_fixup(icmp6->icmp6_cksum,
3817 			    icmp6->icmp6_code, code, 0);
3818 			icmp6->icmp6_code = code;
3819 		}
3820 		if (icmp6->icmp6_mtu != htonl(mtu)) {
3821 			icmp6->icmp6_cksum = pf_cksum_fixup(icmp6->icmp6_cksum,
3822 			    htons(ntohl(icmp6->icmp6_mtu)), htons(mtu), 0);
3823 			/* aligns well with an icmpv4 nextmtu */
3824 			icmp6->icmp6_mtu = htonl(mtu);
3825 		}
3826 		if (ptr >= 0 && icmp6->icmp6_pptr != htonl(ptr)) {
3827 			icmp6->icmp6_cksum = pf_cksum_fixup(icmp6->icmp6_cksum,
3828 			    htons(ntohl(icmp6->icmp6_pptr)), htons(ptr), 0);
3829 			/* icmpv4 pptr uses only the most significant byte */
3830 			icmp6->icmp6_pptr = htonl(ptr << 24);
3831 		}
3832 		break;
3833 	case AF_INET6:
3834 		icmp4 = arg;
3835 		type = icmp4->icmp_type;
3836 		code = icmp4->icmp_code;
3837 		mtu = ntohs(icmp4->icmp_nextmtu);
3838 
3839 		switch (type) {
3840 		case ICMP_ECHO:
3841 			type = ICMP6_ECHO_REQUEST;
3842 			break;
3843 		case ICMP_ECHOREPLY:
3844 			type = ICMP6_ECHO_REPLY;
3845 			break;
3846 		case ICMP_UNREACH:
3847 			type = ICMP6_DST_UNREACH;
3848 			switch (code) {
3849 			case ICMP_UNREACH_NET:
3850 			case ICMP_UNREACH_HOST:
3851 			case ICMP_UNREACH_NET_UNKNOWN:
3852 			case ICMP_UNREACH_HOST_UNKNOWN:
3853 			case ICMP_UNREACH_ISOLATED:
3854 			case ICMP_UNREACH_TOSNET:
3855 			case ICMP_UNREACH_TOSHOST:
3856 				code = ICMP6_DST_UNREACH_NOROUTE;
3857 				break;
3858 			case ICMP_UNREACH_PORT:
3859 				code = ICMP6_DST_UNREACH_NOPORT;
3860 				break;
3861 			case ICMP_UNREACH_NET_PROHIB:
3862 			case ICMP_UNREACH_HOST_PROHIB:
3863 			case ICMP_UNREACH_FILTER_PROHIB:
3864 			case ICMP_UNREACH_PRECEDENCE_CUTOFF:
3865 				code = ICMP6_DST_UNREACH_ADMIN;
3866 				break;
3867 			case ICMP_UNREACH_PROTOCOL:
3868 				type = ICMP6_PARAM_PROB;
3869 				code = ICMP6_PARAMPROB_NEXTHEADER;
3870 				ptr = offsetof(struct ip6_hdr, ip6_nxt);
3871 				break;
3872 			case ICMP_UNREACH_NEEDFRAG:
3873 				type = ICMP6_PACKET_TOO_BIG;
3874 				code = 0;
3875 				mtu += 20;
3876 				break;
3877 			default:
3878 				return (-1);
3879 			}
3880 			break;
3881 		case ICMP_TIMXCEED:
3882 			type = ICMP6_TIME_EXCEEDED;
3883 			break;
3884 		case ICMP_PARAMPROB:
3885 			type = ICMP6_PARAM_PROB;
3886 			switch (code) {
3887 			case ICMP_PARAMPROB_ERRATPTR:
3888 				code = ICMP6_PARAMPROB_HEADER;
3889 				break;
3890 			case ICMP_PARAMPROB_LENGTH:
3891 				code = ICMP6_PARAMPROB_HEADER;
3892 				break;
3893 			default:
3894 				return (-1);
3895 			}
3896 
3897 			ptr = icmp4->icmp_pptr;
3898 			if (ptr == 0 || ptr == PTR_IP(ip_tos))
3899 				; /* preserve */
3900 			else if (ptr == PTR_IP(ip_len) ||
3901 			    ptr == PTR_IP(ip_len) + 1)
3902 				ptr = PTR_IP6(ip6_plen);
3903 			else if (ptr == PTR_IP(ip_ttl))
3904 				ptr = PTR_IP6(ip6_hlim);
3905 			else if (ptr == PTR_IP(ip_p))
3906 				ptr = PTR_IP6(ip6_nxt);
3907 			else if (ptr >= PTR_IP(ip_src) && ptr < PTR_IP(ip_dst))
3908 				ptr = PTR_IP6(ip6_src);
3909 			else if (ptr >= PTR_IP(ip_dst) &&
3910 			    ptr < sizeof(struct ip))
3911 				ptr = PTR_IP6(ip6_dst);
3912 			else {
3913 				return (-1);
3914 			}
3915 			break;
3916 		default:
3917 			return (-1);
3918 		}
3919 		if (icmp4->icmp_type != type) {
3920 			icmp4->icmp_cksum = pf_cksum_fixup(icmp4->icmp_cksum,
3921 			    icmp4->icmp_type, type, 0);
3922 			icmp4->icmp_type = type;
3923 		}
3924 		if (icmp4->icmp_code != code) {
3925 			icmp4->icmp_cksum = pf_cksum_fixup(icmp4->icmp_cksum,
3926 			    icmp4->icmp_code, code, 0);
3927 			icmp4->icmp_code = code;
3928 		}
3929 		if (icmp4->icmp_nextmtu != htons(mtu)) {
3930 			icmp4->icmp_cksum = pf_cksum_fixup(icmp4->icmp_cksum,
3931 			    icmp4->icmp_nextmtu, htons(mtu), 0);
3932 			icmp4->icmp_nextmtu = htons(mtu);
3933 		}
3934 		if (ptr >= 0 && icmp4->icmp_void != ptr) {
3935 			icmp4->icmp_cksum = pf_cksum_fixup(icmp4->icmp_cksum,
3936 			    htons(icmp4->icmp_pptr), htons(ptr), 0);
3937 			icmp4->icmp_void = htonl(ptr);
3938 		}
3939 		break;
3940 	default:
3941 		unhandled_af(af);
3942 	}
3943 #endif /* INET && INET6 */
3944 
3945 	return (0);
3946 }
3947 
3948 /*
3949  * Need to modulate the sequence numbers in the TCP SACK option
3950  * (credits to Krzysztof Pfaff for report and patch)
3951  */
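/*
 * For reference, a SACK option (kind 5) is laid out roughly as:
 *
 *	+--------+--------+----------------+-----------------+
 *	| kind=5 | length |  left edge #1  |  right edge #1  | ...
 *	+--------+--------+----------------+-----------------+
 *	  1 byte   1 byte      4 bytes           4 bytes
 *
 * Each 32-bit edge is shifted below by dst->seqdiff, matching the
 * modulation already applied to th_seq/th_ack elsewhere.
 */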
3952 static int
3953 pf_modulate_sack(struct pf_pdesc *pd, struct tcphdr *th,
3954     struct pf_state_peer *dst)
3955 {
3956 	struct sackblk	 sack;
3957 	int		 copyback = 0, i;
3958 	int		 olen, optsoff;
3959 	uint8_t		 opts[MAX_TCPOPTLEN], *opt, *eoh;
3960 
3961 	olen = (pd->hdr.tcp.th_off << 2) - sizeof(struct tcphdr);
3962 	optsoff = pd->off + sizeof(struct tcphdr);
3963 #define	TCPOLEN_MINSACK	(TCPOLEN_SACK + 2)
3964 	if (olen < TCPOLEN_MINSACK ||
3965 	    !pf_pull_hdr(pd->m, optsoff, opts, olen, NULL, NULL, pd->af))
3966 		return (0);
3967 
3968 	eoh = opts + olen;
3969 	opt = opts;
3970 	while ((opt = pf_find_tcpopt(opt, opts, olen,
3971 	    TCPOPT_SACK, TCPOLEN_MINSACK)) != NULL)
3972 	{
3973 		size_t safelen = MIN(opt[1], (eoh - opt));
3974 		for (i = 2; i + TCPOLEN_SACK <= safelen; i += TCPOLEN_SACK) {
3975 			size_t startoff = (opt + i) - opts;
3976 			memcpy(&sack, &opt[i], sizeof(sack));
3977 			pf_patch_32(pd, &sack.start,
3978 			    htonl(ntohl(sack.start) - dst->seqdiff),
3979 			    PF_ALGNMNT(startoff));
3980 			pf_patch_32(pd, &sack.end,
3981 			    htonl(ntohl(sack.end) - dst->seqdiff),
3982 			    PF_ALGNMNT(startoff + sizeof(sack.start)));
3983 			memcpy(&opt[i], &sack, sizeof(sack));
3984 		}
3985 		copyback = 1;
3986 		opt += opt[1];
3987 	}
3988 
3989 	if (copyback)
3990 		m_copyback(pd->m, optsoff, olen, (caddr_t)opts);
3991 
3992 	return (copyback);
3993 }
3994 
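/*
 * pf_build_tcp() assembles a minimal, locally originated TCP segment
 * (e.g. an RST or a synproxy reply): an IPv4 or IPv6 header, the TCP
 * header and optionally an MSS and a SACK-permitted option.  The TCP
 * checksum itself is delegated to the stack via CSUM_TCP/CSUM_TCP_IPV6;
 * only the pseudo-header sum is seeded here.
 */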
3995 struct mbuf *
3996 pf_build_tcp(const struct pf_krule *r, sa_family_t af,
3997     const struct pf_addr *saddr, const struct pf_addr *daddr,
3998     u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack,
3999     u_int8_t tcp_flags, u_int16_t win, u_int16_t mss, u_int8_t ttl,
4000     int mbuf_flags, u_int16_t mtag_tag, u_int16_t mtag_flags, u_int sack,
4001     int rtableid)
4002 {
4003 	struct mbuf	*m;
4004 	int		 len, tlen;
4005 #ifdef INET
4006 	struct ip	*h = NULL;
4007 #endif /* INET */
4008 #ifdef INET6
4009 	struct ip6_hdr	*h6 = NULL;
4010 #endif /* INET6 */
4011 	struct tcphdr	*th;
4012 	char		*opt;
4013 	struct pf_mtag  *pf_mtag;
4014 
4015 	len = 0;
4016 	th = NULL;
4017 
4018 	/* maximum segment size and SACK-permitted tcp options */
4019 	tlen = sizeof(struct tcphdr);
4020 	if (mss)
4021 		tlen += 4;
4022 	if (sack)
4023 		tlen += 2;
4024 
4025 	switch (af) {
4026 #ifdef INET
4027 	case AF_INET:
4028 		len = sizeof(struct ip) + tlen;
4029 		break;
4030 #endif /* INET */
4031 #ifdef INET6
4032 	case AF_INET6:
4033 		len = sizeof(struct ip6_hdr) + tlen;
4034 		break;
4035 #endif /* INET6 */
4036 	default:
4037 		unhandled_af(af);
4038 	}
4039 
4040 	m = m_gethdr(M_NOWAIT, MT_DATA);
4041 	if (m == NULL)
4042 		return (NULL);
4043 
4044 #ifdef MAC
4045 	mac_netinet_firewall_send(m);
4046 #endif
4047 	if ((pf_mtag = pf_get_mtag(m)) == NULL) {
4048 		m_freem(m);
4049 		return (NULL);
4050 	}
4051 	m->m_flags |= mbuf_flags;
4052 	pf_mtag->tag = mtag_tag;
4053 	pf_mtag->flags = mtag_flags;
4054 
4055 	if (rtableid >= 0)
4056 		M_SETFIB(m, rtableid);
4057 
4058 #ifdef ALTQ
4059 	if (r != NULL && r->qid) {
4060 		pf_mtag->qid = r->qid;
4061 
4062 		/* add hints for ecn */
4063 		pf_mtag->hdr = mtod(m, struct ip *);
4064 	}
4065 #endif /* ALTQ */
4066 	m->m_data += max_linkhdr;
4067 	m->m_pkthdr.len = m->m_len = len;
4068 	/* The rest of the stack assumes a rcvif, so provide one.
4069 	 * This is a locally generated packet, so .. close enough. */
4070 	m->m_pkthdr.rcvif = V_loif;
4071 	bzero(m->m_data, len);
4072 	switch (af) {
4073 #ifdef INET
4074 	case AF_INET:
4075 		m->m_pkthdr.csum_flags |= CSUM_TCP;
4076 		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
4077 
4078 		h = mtod(m, struct ip *);
4079 
4080 		h->ip_p = IPPROTO_TCP;
4081 		h->ip_len = htons(tlen);
4082 		h->ip_v = 4;
4083 		h->ip_hl = sizeof(*h) >> 2;
4084 		h->ip_tos = IPTOS_LOWDELAY;
4085 		h->ip_len = htons(len);
4086 		h->ip_off = htons(V_path_mtu_discovery ? IP_DF : 0);
4087 		h->ip_ttl = ttl ? ttl : V_ip_defttl;
4088 		h->ip_sum = 0;
4089 		h->ip_src.s_addr = saddr->v4.s_addr;
4090 		h->ip_dst.s_addr = daddr->v4.s_addr;
4091 
4092 		th = (struct tcphdr *)((caddr_t)h + sizeof(struct ip));
4093 		th->th_sum = in_pseudo(h->ip_src.s_addr, h->ip_dst.s_addr,
4094 		    htons(len - sizeof(struct ip) + IPPROTO_TCP));
4095 		break;
4096 #endif /* INET */
4097 #ifdef INET6
4098 	case AF_INET6:
4099 		m->m_pkthdr.csum_flags |= CSUM_TCP_IPV6;
4100 		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
4101 
4102 		h6 = mtod(m, struct ip6_hdr *);
4103 
4104 		/* IP header fields included in the TCP checksum */
4105 		h6->ip6_nxt = IPPROTO_TCP;
4106 		h6->ip6_plen = htons(tlen);
4107 		h6->ip6_vfc |= IPV6_VERSION;
4108 		h6->ip6_hlim = V_ip6_defhlim;
4109 		memcpy(&h6->ip6_src, &saddr->v6, sizeof(struct in6_addr));
4110 		memcpy(&h6->ip6_dst, &daddr->v6, sizeof(struct in6_addr));
4111 
4112 		th = (struct tcphdr *)((caddr_t)h6 + sizeof(struct ip6_hdr));
4113 		th->th_sum = in6_cksum_pseudo(h6, len - sizeof(struct ip6_hdr),
4114 		    IPPROTO_TCP, 0);
4115 		break;
4116 #endif /* INET6 */
4117 	}
4118 
4119 	/* TCP header */
4120 	th->th_sport = sport;
4121 	th->th_dport = dport;
4122 	th->th_seq = htonl(seq);
4123 	th->th_ack = htonl(ack);
4124 	th->th_off = tlen >> 2;
4125 	tcp_set_flags(th, tcp_flags);
4126 	th->th_win = htons(win);
4127 
4128 	opt = (char *)(th + 1);
4129 	if (mss) {
4130 		opt = (char *)(th + 1);
4131 		opt[0] = TCPOPT_MAXSEG;
4132 		opt[1] = 4;
4133 		mss = htons(mss);
4134 		memcpy((opt + 2), &mss, 2);
4135 		opt += 4;
4136 	}
4137 	if (sack) {
4138 		opt[0] = TCPOPT_SACK_PERMITTED;
4139 		opt[1] = 2;
4140 		opt += 2;
4141 	}
4142 
4143 	return (m);
4144 }
4145 
4146 static void
4147 pf_send_sctp_abort(sa_family_t af, struct pf_pdesc *pd,
4148     uint8_t ttl, int rtableid)
4149 {
4150 	struct mbuf		*m;
4151 #ifdef INET
4152 	struct ip		*h = NULL;
4153 #endif /* INET */
4154 #ifdef INET6
4155 	struct ip6_hdr		*h6 = NULL;
4156 #endif /* INET6 */
4157 	struct sctphdr		*hdr;
4158 	struct sctp_chunkhdr	*chunk;
4159 	struct pf_send_entry	*pfse;
4160 	int			 off = 0;
4161 
4162 	MPASS(af == pd->af);
4163 
4164 	m = m_gethdr(M_NOWAIT, MT_DATA);
4165 	if (m == NULL)
4166 		return;
4167 
4168 	m->m_data += max_linkhdr;
4169 	m->m_flags |= M_SKIP_FIREWALL;
4170 	/* The rest of the stack assumes a rcvif, so provide one.
4171 	 * This is a locally generated packet, so .. close enough. */
4172 	m->m_pkthdr.rcvif = V_loif;
4173 
4174 	/* IPv4|6 header */
4175 	switch (af) {
4176 #ifdef INET
4177 	case AF_INET:
4178 		bzero(m->m_data, sizeof(struct ip) + sizeof(*hdr) + sizeof(*chunk));
4179 
4180 		h = mtod(m, struct ip *);
4181 
4182 		/* IPv4 header fields for the SCTP abort */
4183 
4184 		h->ip_p = IPPROTO_SCTP;
4185 		h->ip_len = htons(sizeof(*h) + sizeof(*hdr) + sizeof(*chunk));
4186 		h->ip_ttl = ttl ? ttl : V_ip_defttl;
4187 		h->ip_src = pd->dst->v4;
4188 		h->ip_dst = pd->src->v4;
4189 
4190 		off += sizeof(struct ip);
4191 		break;
4192 #endif /* INET */
4193 #ifdef INET6
4194 	case AF_INET6:
4195 		bzero(m->m_data, sizeof(struct ip6_hdr) + sizeof(*hdr) + sizeof(*chunk));
4196 
4197 		h6 = mtod(m, struct ip6_hdr *);
4198 
4199 		/* IPv6 header fields for the SCTP abort */
4200 		h6->ip6_vfc |= IPV6_VERSION;
4201 		h6->ip6_nxt = IPPROTO_SCTP;
4202 		h6->ip6_plen = htons(sizeof(*h6) + sizeof(*hdr) + sizeof(*chunk));
4203 		h6->ip6_hlim = ttl ? ttl : V_ip6_defhlim;
4204 		memcpy(&h6->ip6_src, &pd->dst->v6, sizeof(struct in6_addr));
4205 		memcpy(&h6->ip6_dst, &pd->src->v6, sizeof(struct in6_addr));
4206 
4207 		off += sizeof(struct ip6_hdr);
4208 		break;
4209 #endif /* INET6 */
4210 	default:
4211 		unhandled_af(af);
4212 	}
4213 
4214 	/* SCTP header */
4215 	hdr = mtodo(m, off);
4216 
4217 	hdr->src_port = pd->hdr.sctp.dest_port;
4218 	hdr->dest_port = pd->hdr.sctp.src_port;
4219 	hdr->v_tag = pd->sctp_initiate_tag;
4220 	hdr->checksum = 0;
4221 
4222 	/* Abort chunk. */
4223 	off += sizeof(struct sctphdr);
4224 	chunk = mtodo(m, off);
4225 
4226 	chunk->chunk_type = SCTP_ABORT_ASSOCIATION;
4227 	chunk->chunk_length = htons(sizeof(*chunk));
4228 
4229 	/* SCTP checksum */
4230 	off += sizeof(*chunk);
4231 	m->m_pkthdr.len = m->m_len = off;
4232 
4233 	pf_sctp_checksum(m, off - sizeof(*hdr) - sizeof(*chunk));
4234 
4235 	if (rtableid >= 0)
4236 		M_SETFIB(m, rtableid);
4237 
4238 	/* Allocate outgoing queue entry, mbuf and mbuf tag. */
4239 	pfse = malloc(sizeof(*pfse), M_PFTEMP, M_NOWAIT);
4240 	if (pfse == NULL) {
4241 		m_freem(m);
4242 		return;
4243 	}
4244 
4245 	switch (af) {
4246 #ifdef INET
4247 	case AF_INET:
4248 		pfse->pfse_type = PFSE_IP;
4249 		break;
4250 #endif /* INET */
4251 #ifdef INET6
4252 	case AF_INET6:
4253 		pfse->pfse_type = PFSE_IP6;
4254 		break;
4255 #endif /* INET6 */
4256 	}
4257 
4258 	pfse->pfse_m = m;
4259 	pf_send(pfse);
4260 }
4261 
4262 void
4263 pf_send_tcp(const struct pf_krule *r, sa_family_t af,
4264     const struct pf_addr *saddr, const struct pf_addr *daddr,
4265     u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack,
4266     u_int8_t tcp_flags, u_int16_t win, u_int16_t mss, u_int8_t ttl,
4267     int mbuf_flags, u_int16_t mtag_tag, u_int16_t mtag_flags, int rtableid)
4268 {
4269 	struct pf_send_entry *pfse;
4270 	struct mbuf	*m;
4271 
4272 	m = pf_build_tcp(r, af, saddr, daddr, sport, dport, seq, ack, tcp_flags,
4273 	    win, mss, ttl, mbuf_flags, mtag_tag, mtag_flags, 0, rtableid);
4274 	if (m == NULL)
4275 		return;
4276 
4277 	/* Allocate outgoing queue entry, mbuf and mbuf tag. */
4278 	pfse = malloc(sizeof(*pfse), M_PFTEMP, M_NOWAIT);
4279 	if (pfse == NULL) {
4280 		m_freem(m);
4281 		return;
4282 	}
4283 
4284 	switch (af) {
4285 #ifdef INET
4286 	case AF_INET:
4287 		pfse->pfse_type = PFSE_IP;
4288 		break;
4289 #endif /* INET */
4290 #ifdef INET6
4291 	case AF_INET6:
4292 		pfse->pfse_type = PFSE_IP6;
4293 		break;
4294 #endif /* INET6 */
4295 	default:
4296 		unhandled_af(af);
4297 	}
4298 
4299 	pfse->pfse_m = m;
4300 	pf_send(pfse);
4301 }
4302 
4303 static void
4304 pf_undo_nat(struct pf_krule *nr, struct pf_pdesc *pd, uint16_t bip_sum)
4305 {
4306 	/* undo NAT changes, if they have taken place */
4307 	if (nr != NULL) {
4308 		pf_addrcpy(pd->src, &pd->osrc, pd->af);
4309 		pf_addrcpy(pd->dst, &pd->odst, pd->af);
4310 		if (pd->sport)
4311 			*pd->sport = pd->osport;
4312 		if (pd->dport)
4313 			*pd->dport = pd->odport;
4314 		if (pd->ip_sum)
4315 			*pd->ip_sum = bip_sum;
4316 		m_copyback(pd->m, pd->off, pd->hdrlen, pd->hdr.any);
4317 	}
4318 }
4319 
4320 static void
4321 pf_return(struct pf_krule *r, struct pf_krule *nr, struct pf_pdesc *pd,
4322     struct tcphdr *th, u_int16_t bproto_sum, u_int16_t bip_sum,
4323     u_short *reason, int rtableid)
4324 {
4325 	pf_undo_nat(nr, pd, bip_sum);
4326 
4327 	if (pd->proto == IPPROTO_TCP &&
4328 	    ((r->rule_flag & PFRULE_RETURNRST) ||
4329 	    (r->rule_flag & PFRULE_RETURN)) &&
4330 	    !(tcp_get_flags(th) & TH_RST)) {
4331 		u_int32_t	 ack = ntohl(th->th_seq) + pd->p_len;
4332 
4333 		if (pf_check_proto_cksum(pd->m, pd->off, pd->tot_len - pd->off,
4334 		    IPPROTO_TCP, pd->af))
4335 			REASON_SET(reason, PFRES_PROTCKSUM);
4336 		else {
4337 			if (tcp_get_flags(th) & TH_SYN)
4338 				ack++;
4339 			if (tcp_get_flags(th) & TH_FIN)
4340 				ack++;
4341 			pf_send_tcp(r, pd->af, pd->dst,
4342 				pd->src, th->th_dport, th->th_sport,
4343 				ntohl(th->th_ack), ack, TH_RST|TH_ACK, 0, 0,
4344 				r->return_ttl, M_SKIP_FIREWALL, 0, 0, rtableid);
4345 		}
4346 	} else if (pd->proto == IPPROTO_SCTP &&
4347 	    (r->rule_flag & PFRULE_RETURN)) {
4348 		pf_send_sctp_abort(pd->af, pd, r->return_ttl, rtableid);
4349 	} else if (pd->proto != IPPROTO_ICMP && pd->af == AF_INET &&
4350 		r->return_icmp)
4351 		pf_send_icmp(pd->m, r->return_icmp >> 8,
4352 			r->return_icmp & 255, 0, pd->af, r, rtableid);
4353 	else if (pd->proto != IPPROTO_ICMPV6 && pd->af == AF_INET6 &&
4354 		r->return_icmp6)
4355 		pf_send_icmp(pd->m, r->return_icmp6 >> 8,
4356 			r->return_icmp6 & 255, 0, pd->af, r, rtableid);
4357 }
4358 
4359 static int
4360 pf_match_ieee8021q_pcp(u_int8_t prio, struct mbuf *m)
4361 {
4362 	struct m_tag *mtag;
4363 	u_int8_t mpcp;
4364 
4365 	mtag = m_tag_locate(m, MTAG_8021Q, MTAG_8021Q_PCP_IN, NULL);
4366 	if (mtag == NULL)
4367 		return (0);
4368 
4369 	if (prio == PF_PRIO_ZERO)
4370 		prio = 0;
4371 
4372 	mpcp = *(uint8_t *)(mtag + 1);
4373 
4374 	return (mpcp == prio);
4375 }
4376 
4377 static int
4378 pf_icmp_to_bandlim(uint8_t type)
4379 {
4380 	switch (type) {
4381 		case ICMP_ECHO:
4382 		case ICMP_ECHOREPLY:
4383 			return (BANDLIM_ICMP_ECHO);
4384 		case ICMP_TSTAMP:
4385 		case ICMP_TSTAMPREPLY:
4386 			return (BANDLIM_ICMP_TSTAMP);
4387 		case ICMP_UNREACH:
4388 		default:
4389 			return (BANDLIM_ICMP_UNREACH);
4390 	}
4391 }
4392 
4393 static void
4394 pf_send_challenge_ack(struct pf_pdesc *pd, struct pf_kstate *s,
4395     struct pf_state_peer *src, struct pf_state_peer *dst)
4396 {
4397 	/*
4398 	 * We are sending a challenge ACK in response to a SYN packet that
4399 	 * matches an existing state (modulo the TCP window check). The packet
4400 	 * must therefore be sent on behalf of the destination.
4401 	 *
4402 	 * We expect the sender either to remain silent or to send an RST, so
4403 	 * that both the firewall and the remote peer can purge the dead state
4404 	 * from memory.
4405 	 */
4406 	pf_send_tcp(s->rule, pd->af, pd->dst, pd->src,
4407 	    pd->hdr.tcp.th_dport, pd->hdr.tcp.th_sport, dst->seqlo,
4408 	    src->seqlo, TH_ACK, 0, 0, s->rule->return_ttl, 0, 0, 0,
4409 	    s->rule->rtableid);
4410 }
4411 
4412 static void
4413 pf_send_icmp(struct mbuf *m, u_int8_t type, u_int8_t code, int mtu,
4414     sa_family_t af, struct pf_krule *r, int rtableid)
4415 {
4416 	struct pf_send_entry *pfse;
4417 	struct mbuf *m0;
4418 	struct pf_mtag *pf_mtag;
4419 
4420 	/* ICMP packet rate limitation. */
4421 	switch (af) {
4422 #ifdef INET6
4423 	case AF_INET6:
4424 		if (icmp6_ratelimit(NULL, type, code))
4425 			return;
4426 		break;
4427 #endif /* INET6 */
4428 #ifdef INET
4429 	case AF_INET:
4430 		if (badport_bandlim(pf_icmp_to_bandlim(type)) != 0)
4431 			return;
4432 		break;
4433 #endif /* INET */
4434 	}
4435 
4436 	/* Allocate outgoing queue entry, mbuf and mbuf tag. */
4437 	pfse = malloc(sizeof(*pfse), M_PFTEMP, M_NOWAIT);
4438 	if (pfse == NULL)
4439 		return;
4440 
4441 	if ((m0 = m_copypacket(m, M_NOWAIT)) == NULL) {
4442 		free(pfse, M_PFTEMP);
4443 		return;
4444 	}
4445 
4446 	if ((pf_mtag = pf_get_mtag(m0)) == NULL) {
4447 		free(pfse, M_PFTEMP);
4448 		return;
4449 	}
4450 	/* XXX: revisit */
4451 	m0->m_flags |= M_SKIP_FIREWALL;
4452 
4453 	if (rtableid >= 0)
4454 		M_SETFIB(m0, rtableid);
4455 
4456 #ifdef ALTQ
4457 	if (r->qid) {
4458 		pf_mtag->qid = r->qid;
4459 		/* add hints for ecn */
4460 		pf_mtag->hdr = mtod(m0, struct ip *);
4461 	}
4462 #endif /* ALTQ */
4463 
4464 	switch (af) {
4465 #ifdef INET
4466 	case AF_INET:
4467 		pfse->pfse_type = PFSE_ICMP;
4468 		break;
4469 #endif /* INET */
4470 #ifdef INET6
4471 	case AF_INET6:
4472 		pfse->pfse_type = PFSE_ICMP6;
4473 		break;
4474 #endif /* INET6 */
4475 	}
4476 	pfse->pfse_m = m0;
4477 	pfse->icmpopts.type = type;
4478 	pfse->icmpopts.code = code;
4479 	pfse->icmpopts.mtu = mtu;
4480 	pf_send(pfse);
4481 }
4482 
4483 /*
4484  * Return ((n == 0) == (a == b [with mask m]))
4485  * Note: n != 0 => returns (a != b [with mask m])
4486  */
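/*
 * For example, pf_match_addr(0, &net, &mask, &addr, af) is 1 when addr
 * lies within net/mask, while pf_match_addr(1, ...) implements the
 * negated ("! address") match.
 */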
4487 int
4488 pf_match_addr(u_int8_t n, const struct pf_addr *a, const struct pf_addr *m,
4489     const struct pf_addr *b, sa_family_t af)
4490 {
4491 	switch (af) {
4492 #ifdef INET
4493 	case AF_INET:
4494 		if (IN_ARE_MASKED_ADDR_EQUAL(a->v4, b->v4, m->v4))
4495 			return (n == 0);
4496 		break;
4497 #endif /* INET */
4498 #ifdef INET6
4499 	case AF_INET6:
4500 		if (IN6_ARE_MASKED_ADDR_EQUAL(&a->v6, &b->v6, &m->v6))
4501 			return (n == 0);
4502 		break;
4503 #endif /* INET6 */
4504 	}
4505 
4506 	return (n != 0);
4507 }
4508 
4509 /*
4510  * Return 1 if b <= a <= e, otherwise return 0.
4511  */
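/*
 * Comparison is done in host byte order; for IPv6 the four 32-bit words
 * are compared most significant first, so the range check behaves like a
 * 128-bit unsigned comparison.
 */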
4512 int
4513 pf_match_addr_range(const struct pf_addr *b, const struct pf_addr *e,
4514     const struct pf_addr *a, sa_family_t af)
4515 {
4516 	switch (af) {
4517 #ifdef INET
4518 	case AF_INET:
4519 		if ((ntohl(a->addr32[0]) < ntohl(b->addr32[0])) ||
4520 		    (ntohl(a->addr32[0]) > ntohl(e->addr32[0])))
4521 			return (0);
4522 		break;
4523 #endif /* INET */
4524 #ifdef INET6
4525 	case AF_INET6: {
4526 		int	i;
4527 
4528 		/* check a >= b */
4529 		for (i = 0; i < 4; ++i)
4530 			if (ntohl(a->addr32[i]) > ntohl(b->addr32[i]))
4531 				break;
4532 			else if (ntohl(a->addr32[i]) < ntohl(b->addr32[i]))
4533 				return (0);
4534 		/* check a <= e */
4535 		for (i = 0; i < 4; ++i)
4536 			if (ntohl(a->addr32[i]) < ntohl(e->addr32[i]))
4537 				break;
4538 			else if (ntohl(a->addr32[i]) > ntohl(e->addr32[i]))
4539 				return (0);
4540 		break;
4541 	}
4542 #endif /* INET6 */
4543 	}
4544 	return (1);
4545 }
4546 
4547 static int
4548 pf_match(u_int8_t op, u_int32_t a1, u_int32_t a2, u_int32_t p)
4549 {
4550 	switch (op) {
4551 	case PF_OP_IRG:
4552 		return ((p > a1) && (p < a2));
4553 	case PF_OP_XRG:
4554 		return ((p < a1) || (p > a2));
4555 	case PF_OP_RRG:
4556 		return ((p >= a1) && (p <= a2));
4557 	case PF_OP_EQ:
4558 		return (p == a1);
4559 	case PF_OP_NE:
4560 		return (p != a1);
4561 	case PF_OP_LT:
4562 		return (p < a1);
4563 	case PF_OP_LE:
4564 		return (p <= a1);
4565 	case PF_OP_GT:
4566 		return (p > a1);
4567 	case PF_OP_GE:
4568 		return (p >= a1);
4569 	}
4570 	return (0); /* never reached */
4571 }
4572 
4573 int
4574 pf_match_port(u_int8_t op, u_int16_t a1, u_int16_t a2, u_int16_t p)
4575 {
4576 	return (pf_match(op, ntohs(a1), ntohs(a2), ntohs(p)));
4577 }
4578 
4579 static int
4580 pf_match_uid(u_int8_t op, uid_t a1, uid_t a2, uid_t u)
4581 {
4582 	if (u == UID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
4583 		return (0);
4584 	return (pf_match(op, a1, a2, u));
4585 }
4586 
4587 static int
4588 pf_match_gid(u_int8_t op, gid_t a1, gid_t a2, gid_t g)
4589 {
4590 	if (g == GID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
4591 		return (0);
4592 	return (pf_match(op, a1, a2, g));
4593 }
4594 
4595 int
4596 pf_match_tag(struct mbuf *m, struct pf_krule *r, int *tag, int mtag)
4597 {
4598 	if (*tag == -1)
4599 		*tag = mtag;
4600 
4601 	return ((!r->match_tag_not && r->match_tag == *tag) ||
4602 	    (r->match_tag_not && r->match_tag != *tag));
4603 }
4604 
4605 static int
4606 pf_match_rcvif(struct mbuf *m, struct pf_krule *r)
4607 {
4608 	struct ifnet *ifp = m->m_pkthdr.rcvif;
4609 	struct pfi_kkif *kif;
4610 
4611 	if (ifp == NULL)
4612 		return (0);
4613 
4614 	kif = (struct pfi_kkif *)ifp->if_pf_kif;
4615 
4616 	if (kif == NULL) {
4617 		DPFPRINTF(PF_DEBUG_URGENT,
4618 		    ("%s: kif == NULL, @%d via %s\n", __func__, r->nr,
4619 			r->rcv_ifname));
4620 		return (0);
4621 	}
4622 
4623 	return (pfi_kkif_match(r->rcv_kif, kif));
4624 }
4625 
4626 int
4627 pf_tag_packet(struct pf_pdesc *pd, int tag)
4628 {
4629 
4630 	KASSERT(tag > 0, ("%s: tag %d", __func__, tag));
4631 
4632 	if (pd->pf_mtag == NULL && ((pd->pf_mtag = pf_get_mtag(pd->m)) == NULL))
4633 		return (ENOMEM);
4634 
4635 	pd->pf_mtag->tag = tag;
4636 
4637 	return (0);
4638 }
4639 
4640 /*
4641  * XXX: We rely on malloc(9) returning pointer aligned addresses.
4642  */
4643 #define	PF_ANCHORSTACK_MATCH	0x00000001
4644 #define	PF_ANCHORSTACK_MASK	(PF_ANCHORSTACK_MATCH)
4645 
4646 #define	PF_ANCHOR_MATCH(f)	((uintptr_t)(f)->r & PF_ANCHORSTACK_MATCH)
4647 #define	PF_ANCHOR_RULE(f)	(struct pf_krule *)			\
4648 				((uintptr_t)(f)->r & ~PF_ANCHORSTACK_MASK)
4649 #define	PF_ANCHOR_SET_MATCH(f)	do { (f)->r = (void *) 			\
4650 				((uintptr_t)(f)->r | PF_ANCHORSTACK_MATCH);  \
4651 } while (0)
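/*
 * The low bit of the stored rule pointer doubles as a "something matched
 * inside this anchor" flag; PF_ANCHOR_RULE() masks it off again before the
 * pointer is dereferenced.  This only works because the pointers are at
 * least 2-byte aligned, per the XXX note above.
 */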
4652 
4653 enum pf_test_status
4654 pf_step_into_anchor(struct pf_test_ctx *ctx, struct pf_krule *r)
4655 {
4656 	enum pf_test_status	rv;
4657 
4658 	PF_RULES_RASSERT();
4659 
4660 	if (ctx->depth >= PF_ANCHOR_STACK_MAX) {
4661 		printf("%s: anchor stack overflow on %s\n",
4662 		    __func__, r->anchor->name);
4663 		return (PF_TEST_FAIL);
4664 	}
4665 
4666 	ctx->depth++;
4667 
4668 	if (r->anchor_wildcard) {
4669 		struct pf_kanchor *child;
4670 		rv = PF_TEST_OK;
4671 		RB_FOREACH(child, pf_kanchor_node, &r->anchor->children) {
4672 			rv = pf_match_rule(ctx, &child->ruleset);
4673 			if ((rv == PF_TEST_QUICK) || (rv == PF_TEST_FAIL)) {
4674 				/*
4675 				 * we either hit a rule with quick action
4676 				 * (more likely), or hit some runtime
4677 				 * error (e.g. pool_get() failure).
4678 				 */
4679 				break;
4680 			}
4681 		}
4682 	} else {
4683 		rv = pf_match_rule(ctx, &r->anchor->ruleset);
4684 		/*
4685 		 * Unless errors occurred, stop iff any rule matched
4686 		 * within quick anchors.
4687 		 */
4688 		if (rv != PF_TEST_FAIL && r->quick == PF_TEST_QUICK &&
4689 		    *ctx->am == r)
4690 			rv = PF_TEST_QUICK;
4691 	}
4692 
4693 	ctx->depth--;
4694 
4695 	return (rv);
4696 }
4697 
4698 struct pf_keth_anchor_stackframe {
4699 	struct pf_keth_ruleset	*rs;
4700 	struct pf_keth_rule	*r;	/* XXX: + match bit */
4701 	struct pf_keth_anchor	*child;
4702 };
4703 
4704 #define	PF_ETH_ANCHOR_MATCH(f)	((uintptr_t)(f)->r & PF_ANCHORSTACK_MATCH)
4705 #define	PF_ETH_ANCHOR_RULE(f)	(struct pf_keth_rule *)			\
4706 				((uintptr_t)(f)->r & ~PF_ANCHORSTACK_MASK)
4707 #define	PF_ETH_ANCHOR_SET_MATCH(f)	do { (f)->r = (void *) 		\
4708 				((uintptr_t)(f)->r | PF_ANCHORSTACK_MATCH);  \
4709 } while (0)
4710 
4711 void
4712 pf_step_into_keth_anchor(struct pf_keth_anchor_stackframe *stack, int *depth,
4713     struct pf_keth_ruleset **rs, struct pf_keth_rule **r,
4714     struct pf_keth_rule **a, int *match)
4715 {
4716 	struct pf_keth_anchor_stackframe	*f;
4717 
4718 	NET_EPOCH_ASSERT();
4719 
4720 	if (match)
4721 		*match = 0;
4722 	if (*depth >= PF_ANCHOR_STACK_MAX) {
4723 		printf("%s: anchor stack overflow on %s\n",
4724 		    __func__, (*r)->anchor->name);
4725 		*r = TAILQ_NEXT(*r, entries);
4726 		return;
4727 	} else if (*depth == 0 && a != NULL)
4728 		*a = *r;
4729 	f = stack + (*depth)++;
4730 	f->rs = *rs;
4731 	f->r = *r;
4732 	if ((*r)->anchor_wildcard) {
4733 		struct pf_keth_anchor_node *parent = &(*r)->anchor->children;
4734 
4735 		if ((f->child = RB_MIN(pf_keth_anchor_node, parent)) == NULL) {
4736 			*r = NULL;
4737 			return;
4738 		}
4739 		*rs = &f->child->ruleset;
4740 	} else {
4741 		f->child = NULL;
4742 		*rs = &(*r)->anchor->ruleset;
4743 	}
4744 	*r = TAILQ_FIRST((*rs)->active.rules);
4745 }
4746 
4747 int
4748 pf_step_out_of_keth_anchor(struct pf_keth_anchor_stackframe *stack, int *depth,
4749     struct pf_keth_ruleset **rs, struct pf_keth_rule **r,
4750     struct pf_keth_rule **a, int *match)
4751 {
4752 	struct pf_keth_anchor_stackframe	*f;
4753 	struct pf_keth_rule *fr;
4754 	int quick = 0;
4755 
4756 	NET_EPOCH_ASSERT();
4757 
4758 	do {
4759 		if (*depth <= 0)
4760 			break;
4761 		f = stack + *depth - 1;
4762 		fr = PF_ETH_ANCHOR_RULE(f);
4763 		if (f->child != NULL) {
4764 			/*
4765 			 * This block traverses the children of
4766 			 * a wildcard anchor.
4767 			 */
4768 			if (match != NULL && *match) {
4769 				/*
4770 				 * If any of "*" matched, then
4771 				 * "foo/ *" matched, mark frame
4772 				 * appropriately.
4773 				 */
4774 				PF_ETH_ANCHOR_SET_MATCH(f);
4775 				*match = 0;
4776 			}
4777 			f->child = RB_NEXT(pf_keth_anchor_node,
4778 			    &fr->anchor->children, f->child);
4779 			if (f->child != NULL) {
4780 				*rs = &f->child->ruleset;
4781 				*r = TAILQ_FIRST((*rs)->active.rules);
4782 				if (*r == NULL)
4783 					continue;
4784 				else
4785 					break;
4786 			}
4787 		}
4788 		(*depth)--;
4789 		if (*depth == 0 && a != NULL)
4790 			*a = NULL;
4791 		*rs = f->rs;
4792 		if (PF_ETH_ANCHOR_MATCH(f) || (match != NULL && *match))
4793 			quick = fr->quick;
4794 		*r = TAILQ_NEXT(fr, entries);
4795 	} while (*r == NULL);
4796 
4797 	return (quick);
4798 }
4799 
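/*
 * pf_poolmask() merges a pool address with the packet's address under the
 * pool mask: bits covered by rmask are taken from raddr, the remaining
 * bits from saddr.  For example, with a /24 mask the network part comes
 * from the pool and the host part is preserved from the original address.
 */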
4800 void
4801 pf_poolmask(struct pf_addr *naddr, struct pf_addr *raddr,
4802     struct pf_addr *rmask, struct pf_addr *saddr, sa_family_t af)
4803 {
4804 	switch (af) {
4805 #ifdef INET
4806 	case AF_INET:
4807 		naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
4808 		((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]);
4809 		break;
4810 #endif /* INET */
4811 #ifdef INET6
4812 	case AF_INET6:
4813 		naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
4814 		((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]);
4815 		naddr->addr32[1] = (raddr->addr32[1] & rmask->addr32[1]) |
4816 		((rmask->addr32[1] ^ 0xffffffff ) & saddr->addr32[1]);
4817 		naddr->addr32[2] = (raddr->addr32[2] & rmask->addr32[2]) |
4818 		((rmask->addr32[2] ^ 0xffffffff ) & saddr->addr32[2]);
4819 		naddr->addr32[3] = (raddr->addr32[3] & rmask->addr32[3]) |
4820 		((rmask->addr32[3] ^ 0xffffffff ) & saddr->addr32[3]);
4821 		break;
4822 #endif /* INET6 */
4823 	}
4824 }
4825 
4826 void
4827 pf_addr_inc(struct pf_addr *addr, sa_family_t af)
4828 {
4829 	switch (af) {
4830 #ifdef INET
4831 	case AF_INET:
4832 		addr->addr32[0] = htonl(ntohl(addr->addr32[0]) + 1);
4833 		break;
4834 #endif /* INET */
4835 #ifdef INET6
4836 	case AF_INET6:
4837 		if (addr->addr32[3] == 0xffffffff) {
4838 			addr->addr32[3] = 0;
4839 			if (addr->addr32[2] == 0xffffffff) {
4840 				addr->addr32[2] = 0;
4841 				if (addr->addr32[1] == 0xffffffff) {
4842 					addr->addr32[1] = 0;
4843 					addr->addr32[0] =
4844 					    htonl(ntohl(addr->addr32[0]) + 1);
4845 				} else
4846 					addr->addr32[1] =
4847 					    htonl(ntohl(addr->addr32[1]) + 1);
4848 			} else
4849 				addr->addr32[2] =
4850 				    htonl(ntohl(addr->addr32[2]) + 1);
4851 		} else
4852 			addr->addr32[3] =
4853 			    htonl(ntohl(addr->addr32[3]) + 1);
4854 		break;
4855 #endif /* INET6 */
4856 	}
4857 }
4858 
4859 void
4860 pf_rule_to_actions(struct pf_krule *r, struct pf_rule_actions *a)
4861 {
4862 	/*
4863 	 * Modern rules use the same flags in rules as they do in states.
4864 	 */
4865 	a->flags |= (r->scrub_flags & (PFSTATE_NODF|PFSTATE_RANDOMID|
4866 	    PFSTATE_SCRUB_TCP|PFSTATE_SETPRIO));
4867 
4868 	/*
4869 	 * Old-style scrub rules have different flags which need to be translated.
4870 	 */
4871 	if (r->rule_flag & PFRULE_RANDOMID)
4872 		a->flags |= PFSTATE_RANDOMID;
4873 	if (r->scrub_flags & PFSTATE_SETTOS || r->rule_flag & PFRULE_SET_TOS ) {
4874 		a->flags |= PFSTATE_SETTOS;
4875 		a->set_tos = r->set_tos;
4876 	}
4877 
4878 	if (r->qid)
4879 		a->qid = r->qid;
4880 	if (r->pqid)
4881 		a->pqid = r->pqid;
4882 	if (r->rtableid >= 0)
4883 		a->rtableid = r->rtableid;
4884 	a->log |= r->log;
4885 	if (r->min_ttl)
4886 		a->min_ttl = r->min_ttl;
4887 	if (r->max_mss)
4888 		a->max_mss = r->max_mss;
4889 	if (r->dnpipe)
4890 		a->dnpipe = r->dnpipe;
4891 	if (r->dnrpipe)
4892 		a->dnrpipe = r->dnrpipe;
4893 	if (r->dnpipe || r->dnrpipe) {
4894 		if (r->free_flags & PFRULE_DN_IS_PIPE)
4895 			a->flags |= PFSTATE_DN_IS_PIPE;
4896 		else
4897 			a->flags &= ~PFSTATE_DN_IS_PIPE;
4898 	}
4899 	if (r->scrub_flags & PFSTATE_SETPRIO) {
4900 		a->set_prio[0] = r->set_prio[0];
4901 		a->set_prio[1] = r->set_prio[1];
4902 	}
4903 	if (r->allow_opts)
4904 		a->allow_opts = r->allow_opts;
4905 	if (r->max_pkt_size)
4906 		a->max_pkt_size = r->max_pkt_size;
4907 }
4908 
4909 int
4910 pf_socket_lookup(struct pf_pdesc *pd)
4911 {
4912 	struct pf_addr		*saddr, *daddr;
4913 	u_int16_t		 sport, dport;
4914 	struct inpcbinfo	*pi;
4915 	struct inpcb		*inp;
4916 
4917 	pd->lookup.uid = UID_MAX;
4918 	pd->lookup.gid = GID_MAX;
4919 
4920 	switch (pd->proto) {
4921 	case IPPROTO_TCP:
4922 		sport = pd->hdr.tcp.th_sport;
4923 		dport = pd->hdr.tcp.th_dport;
4924 		pi = &V_tcbinfo;
4925 		break;
4926 	case IPPROTO_UDP:
4927 		sport = pd->hdr.udp.uh_sport;
4928 		dport = pd->hdr.udp.uh_dport;
4929 		pi = &V_udbinfo;
4930 		break;
4931 	default:
4932 		return (-1);
4933 	}
4934 	if (pd->dir == PF_IN) {
4935 		saddr = pd->src;
4936 		daddr = pd->dst;
4937 	} else {
4938 		u_int16_t	p;
4939 
4940 		p = sport;
4941 		sport = dport;
4942 		dport = p;
4943 		saddr = pd->dst;
4944 		daddr = pd->src;
4945 	}
4946 	switch (pd->af) {
4947 #ifdef INET
4948 	case AF_INET:
4949 		inp = in_pcblookup_mbuf(pi, saddr->v4, sport, daddr->v4,
4950 		    dport, INPLOOKUP_RLOCKPCB, NULL, pd->m);
4951 		if (inp == NULL) {
4952 			inp = in_pcblookup_mbuf(pi, saddr->v4, sport,
4953 			   daddr->v4, dport, INPLOOKUP_WILDCARD |
4954 			   INPLOOKUP_RLOCKPCB, NULL, pd->m);
4955 			if (inp == NULL)
4956 				return (-1);
4957 		}
4958 		break;
4959 #endif /* INET */
4960 #ifdef INET6
4961 	case AF_INET6:
4962 		inp = in6_pcblookup_mbuf(pi, &saddr->v6, sport, &daddr->v6,
4963 		    dport, INPLOOKUP_RLOCKPCB, NULL, pd->m);
4964 		if (inp == NULL) {
4965 			inp = in6_pcblookup_mbuf(pi, &saddr->v6, sport,
4966 			    &daddr->v6, dport, INPLOOKUP_WILDCARD |
4967 			    INPLOOKUP_RLOCKPCB, NULL, pd->m);
4968 			if (inp == NULL)
4969 				return (-1);
4970 		}
4971 		break;
4972 #endif /* INET6 */
4973 	default:
4974 		unhandled_af(pd->af);
4975 	}
4976 	INP_RLOCK_ASSERT(inp);
4977 	pd->lookup.uid = inp->inp_cred->cr_uid;
4978 	pd->lookup.gid = inp->inp_cred->cr_groups[0];
4979 	INP_RUNLOCK(inp);
4980 
4981 	return (1);
4982 }
4983 
4984 /* post: r  => (r[0] == type /\ r[1] >= min_typelen >= 2  "validity"
4985  *                      /\ (eoh - r) >= min_typelen >= 2  "safety"  )
4986  *
4987  * warning: r + r[1] may exceed opts bounds for r[1] > min_typelen
4988  */
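/*
 * Callers typically iterate over all options of a given type with a loop
 * of the form (see pf_get_wscale() and pf_get_mss() below):
 *
 *	opt = opts;
 *	while ((opt = pf_find_tcpopt(opt, opts, olen, type, len)) != NULL) {
 *		... inspect opt[2 .. opt[1] - 1] ...
 *		opt += opt[1];
 *	}
 */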
4989 uint8_t*
4990 pf_find_tcpopt(u_int8_t *opt, u_int8_t *opts, size_t hlen, u_int8_t type,
4991     u_int8_t min_typelen)
4992 {
4993 	uint8_t	*eoh = opts + hlen;
4994 
4995 	if (min_typelen < 2)
4996 		return (NULL);
4997 
4998 	while ((eoh - opt) >= min_typelen) {
4999 		switch (*opt) {
5000 		case TCPOPT_EOL:
5001 			/* FALLTHROUGH - Workaround the failure of some
5002 			/* FALLTHROUGH - Work around the failure of some
5003 			 * systems to NOP-pad their bzero'd option buffers,
5004 			 * producing spurious EOLs. */
5005 			opt++;
5006 			continue;
5007 		default:
5008 			if (opt[0] == type &&
5009 			    opt[1] >= min_typelen)
5010 				return (opt);
5011 		}
5012 
5013 		opt += MAX(opt[1], 2); /* avoid infinite loops */
5014 	}
5015 
5016 	return (NULL);
5017 }
5018 
5019 u_int8_t
5020 pf_get_wscale(struct pf_pdesc *pd)
5021 {
5022 	int	 olen;
5023 	uint8_t	 opts[MAX_TCPOPTLEN], *opt;
5024 	uint8_t	 wscale = 0;
5025 
5026 	olen = (pd->hdr.tcp.th_off << 2) - sizeof(struct tcphdr);
5027 	if (olen < TCPOLEN_WINDOW || !pf_pull_hdr(pd->m,
5028 	    pd->off + sizeof(struct tcphdr), opts, olen, NULL, NULL, pd->af))
5029 		return (0);
5030 
5031 	opt = opts;
5032 	while ((opt = pf_find_tcpopt(opt, opts, olen,
5033 		    TCPOPT_WINDOW, TCPOLEN_WINDOW)) != NULL) {
5034 		wscale = opt[2];
5035 		wscale = MIN(wscale, TCP_MAX_WINSHIFT);
5036 		wscale |= PF_WSCALE_FLAG;
5037 
5038 		opt += opt[1];
5039 	}
5040 
5041 	return (wscale);
5042 }
5043 
5044 u_int16_t
5045 pf_get_mss(struct pf_pdesc *pd)
5046 {
5047 	int		 olen;
5048 	uint8_t		 opts[MAX_TCPOPTLEN], *opt;
5049 	u_int16_t	 mss = V_tcp_mssdflt;
5050 
5051 	olen = (pd->hdr.tcp.th_off << 2) - sizeof(struct tcphdr);
5052 	if (olen < TCPOLEN_MAXSEG || !pf_pull_hdr(pd->m,
5053 	    pd->off + sizeof(struct tcphdr), opts, olen, NULL, NULL, pd->af))
5054 		return (0);
5055 
5056 	opt = opts;
5057 	while ((opt = pf_find_tcpopt(opt, opts, olen,
5058 	    TCPOPT_MAXSEG, TCPOLEN_MAXSEG)) != NULL) {
5059 		memcpy(&mss, (opt + 2), 2);
5060 		mss = ntohs(mss);
5061 		opt += opt[1];
5062 	}
5063 
5064 	return (mss);
5065 }
5066 
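/*
 * pf_calc_mss() clamps the peer's offered MSS to what the route towards
 * addr can carry: the route MTU minus the fixed IP and TCP header sizes.
 * E.g. for a 1500-byte IPv4 path this yields 1500 - 20 - 20 = 1460.
 */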
5067 static u_int16_t
5068 pf_calc_mss(struct pf_addr *addr, sa_family_t af, int rtableid, u_int16_t offer)
5069 {
5070 	struct nhop_object *nh;
5071 #ifdef INET6
5072 	struct in6_addr		dst6;
5073 	uint32_t		scopeid;
5074 #endif /* INET6 */
5075 	int			 hlen = 0;
5076 	uint16_t		 mss = 0;
5077 
5078 	NET_EPOCH_ASSERT();
5079 
5080 	switch (af) {
5081 #ifdef INET
5082 	case AF_INET:
5083 		hlen = sizeof(struct ip);
5084 		nh = fib4_lookup(rtableid, addr->v4, 0, 0, 0);
5085 		if (nh != NULL)
5086 			mss = nh->nh_mtu - hlen - sizeof(struct tcphdr);
5087 		break;
5088 #endif /* INET */
5089 #ifdef INET6
5090 	case AF_INET6:
5091 		hlen = sizeof(struct ip6_hdr);
5092 		in6_splitscope(&addr->v6, &dst6, &scopeid);
5093 		nh = fib6_lookup(rtableid, &dst6, scopeid, 0, 0);
5094 		if (nh != NULL)
5095 			mss = nh->nh_mtu - hlen - sizeof(struct tcphdr);
5096 		break;
5097 #endif /* INET6 */
5098 	}
5099 
5100 	mss = max(V_tcp_mssdflt, mss);
5101 	mss = min(mss, offer);
5102 	mss = max(mss, 64);		/* sanity - at least max opt space */
5103 	return (mss);
5104 }
5105 
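/*
 * pf_tcp_iss() derives the initial sequence number used by the synproxy
 * from a SHA-512 over a lazily initialized per-vnet secret and the
 * connection's ports and addresses, plus a monotonically advancing offset
 * and a small random increment, so ISNs are unpredictable but still
 * increase over time (comparable to the RFC 6528 approach).
 */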
5106 static u_int32_t
5107 pf_tcp_iss(struct pf_pdesc *pd)
5108 {
5109 	SHA512_CTX ctx;
5110 	union {
5111 		uint8_t bytes[SHA512_DIGEST_LENGTH];
5112 		uint32_t words[1];
5113 	} digest;
5114 
5115 	if (V_pf_tcp_secret_init == 0) {
5116 		arc4random_buf(&V_pf_tcp_secret, sizeof(V_pf_tcp_secret));
5117 		SHA512_Init(&V_pf_tcp_secret_ctx);
5118 		SHA512_Update(&V_pf_tcp_secret_ctx, V_pf_tcp_secret,
5119 		    sizeof(V_pf_tcp_secret));
5120 		V_pf_tcp_secret_init = 1;
5121 	}
5122 
5123 	ctx = V_pf_tcp_secret_ctx;
5124 
5125 	SHA512_Update(&ctx, &pd->hdr.tcp.th_sport, sizeof(u_short));
5126 	SHA512_Update(&ctx, &pd->hdr.tcp.th_dport, sizeof(u_short));
5127 	switch (pd->af) {
5128 	case AF_INET6:
5129 		SHA512_Update(&ctx, &pd->src->v6, sizeof(struct in6_addr));
5130 		SHA512_Update(&ctx, &pd->dst->v6, sizeof(struct in6_addr));
5131 		break;
5132 	case AF_INET:
5133 		SHA512_Update(&ctx, &pd->src->v4, sizeof(struct in_addr));
5134 		SHA512_Update(&ctx, &pd->dst->v4, sizeof(struct in_addr));
5135 		break;
5136 	}
5137 	SHA512_Final(digest.bytes, &ctx);
5138 	V_pf_tcp_iss_off += 4096;
5139 #define	ISN_RANDOM_INCREMENT (4096 - 1)
5140 	return (digest.words[0] + (arc4random() & ISN_RANDOM_INCREMENT) +
5141 	    V_pf_tcp_iss_off);
5142 #undef	ISN_RANDOM_INCREMENT
5143 }
5144 
5145 static bool
5146 pf_match_eth_addr(const uint8_t *a, const struct pf_keth_rule_addr *r)
5147 {
5148 	bool match = true;
5149 
5150 	/* Always matches if not set */
5151 	if (! r->isset)
5152 		return (!r->neg);
5153 
5154 	for (int i = 0; i < ETHER_ADDR_LEN; i++) {
5155 		if ((a[i] & r->mask[i]) != (r->addr[i] & r->mask[i])) {
5156 			match = false;
5157 			break;
5158 		}
5159 	}
5160 
5161 	return (match ^ r->neg);
5162 }
5163 
5164 static int
5165 pf_match_eth_tag(struct mbuf *m, struct pf_keth_rule *r, int *tag, int mtag)
5166 {
5167 	if (*tag == -1)
5168 		*tag = mtag;
5169 
5170 	return ((!r->match_tag_not && r->match_tag == *tag) ||
5171 	    (r->match_tag_not && r->match_tag != *tag));
5172 }
5173 
5174 static void
5175 pf_bridge_to(struct ifnet *ifp, struct mbuf *m)
5176 {
5177 	/* If we don't have the interface drop the packet. */
5178 	if (ifp == NULL) {
5179 		m_freem(m);
5180 		return;
5181 	}
5182 
5183 	switch (ifp->if_type) {
5184 	case IFT_ETHER:
5185 	case IFT_XETHER:
5186 	case IFT_L2VLAN:
5187 	case IFT_BRIDGE:
5188 	case IFT_IEEE8023ADLAG:
5189 		break;
5190 	default:
5191 		m_freem(m);
5192 		return;
5193 	}
5194 
5195 	ifp->if_transmit(ifp, m);
5196 }
5197 
5198 static int
5199 pf_test_eth_rule(int dir, struct pfi_kkif *kif, struct mbuf **m0)
5200 {
5201 #ifdef INET
5202 	struct ip ip;
5203 #endif /* INET */
5204 #ifdef INET6
5205 	struct ip6_hdr ip6;
5206 #endif /* INET6 */
5207 	struct mbuf *m = *m0;
5208 	struct ether_header *e;
5209 	struct pf_keth_rule *r, *rm, *a = NULL;
5210 	struct pf_keth_ruleset *ruleset = NULL;
5211 	struct pf_mtag *mtag;
5212 	struct pf_keth_ruleq *rules;
5213 	struct pf_addr *src = NULL, *dst = NULL;
5214 	struct pfi_kkif *bridge_to;
5215 	sa_family_t af = 0;
5216 	uint16_t proto;
5217 	int asd = 0, match = 0;
5218 	int tag = -1;
5219 	uint8_t action;
5220 	struct pf_keth_anchor_stackframe	anchor_stack[PF_ANCHOR_STACK_MAX];
5221 
5222 	MPASS(kif->pfik_ifp->if_vnet == curvnet);
5223 	NET_EPOCH_ASSERT();
5224 
5225 	PF_RULES_RLOCK_TRACKER;
5226 
5227 	SDT_PROBE3(pf, eth, test_rule, entry, dir, kif->pfik_ifp, m);
5228 
5229 	mtag = pf_find_mtag(m);
5230 	if (mtag != NULL && mtag->flags & PF_MTAG_FLAG_DUMMYNET) {
5231 		/* Dummynet re-injects packets after they've
5232 		 * completed their delay. We've already
5233 		 * processed them, so pass unconditionally. */
5234 
5235 		/* But only once. We may see the packet multiple times (e.g.
5236 		 * PFIL_IN/PFIL_OUT). */
5237 		pf_dummynet_flag_remove(m, mtag);
5238 
5239 		return (PF_PASS);
5240 	}
5241 
5242 	if (__predict_false(m->m_len < sizeof(struct ether_header)) &&
5243 	    (m = *m0 = m_pullup(*m0, sizeof(struct ether_header))) == NULL) {
5244 		DPFPRINTF(PF_DEBUG_URGENT,
5245 		    ("%s: m_len < sizeof(struct ether_header)"
5246 		     ", pullup failed\n", __func__));
5247 		return (PF_DROP);
5248 	}
5249 	e = mtod(m, struct ether_header *);
5250 	proto = ntohs(e->ether_type);
5251 
5252 	switch (proto) {
5253 #ifdef INET
5254 	case ETHERTYPE_IP: {
5255 		if (m_length(m, NULL) < (sizeof(struct ether_header) +
5256 		    sizeof(ip)))
5257 			return (PF_DROP);
5258 
5259 		af = AF_INET;
5260 		m_copydata(m, sizeof(struct ether_header), sizeof(ip),
5261 		    (caddr_t)&ip);
5262 		src = (struct pf_addr *)&ip.ip_src;
5263 		dst = (struct pf_addr *)&ip.ip_dst;
5264 		break;
5265 	}
5266 #endif /* INET */
5267 #ifdef INET6
5268 	case ETHERTYPE_IPV6: {
5269 		if (m_length(m, NULL) < (sizeof(struct ether_header) +
5270 		    sizeof(ip6)))
5271 			return (PF_DROP);
5272 
5273 		af = AF_INET6;
5274 		m_copydata(m, sizeof(struct ether_header), sizeof(ip6),
5275 		    (caddr_t)&ip6);
5276 		src = (struct pf_addr *)&ip6.ip6_src;
5277 		dst = (struct pf_addr *)&ip6.ip6_dst;
5278 		break;
5279 	}
5280 #endif /* INET6 */
5281 	}
5282 
5283 	PF_RULES_RLOCK();
5284 
5285 	ruleset = V_pf_keth;
5286 	rules = atomic_load_ptr(&ruleset->active.rules);
5287 	for (r = TAILQ_FIRST(rules), rm = NULL; r != NULL;) {
5288 		counter_u64_add(r->evaluations, 1);
5289 		SDT_PROBE2(pf, eth, test_rule, test, r->nr, r);
5290 
5291 		if (pfi_kkif_match(r->kif, kif) == r->ifnot) {
5292 			SDT_PROBE3(pf, eth, test_rule, mismatch, r->nr, r,
5293 			    "kif");
5294 			r = r->skip[PFE_SKIP_IFP].ptr;
5295 		}
5296 		else if (r->direction && r->direction != dir) {
5297 			SDT_PROBE3(pf, eth, test_rule, mismatch, r->nr, r,
5298 			    "dir");
5299 			r = r->skip[PFE_SKIP_DIR].ptr;
5300 		}
5301 		else if (r->proto && r->proto != proto) {
5302 			SDT_PROBE3(pf, eth, test_rule, mismatch, r->nr, r,
5303 			    "proto");
5304 			r = r->skip[PFE_SKIP_PROTO].ptr;
5305 		}
5306 		else if (! pf_match_eth_addr(e->ether_shost, &r->src)) {
5307 			SDT_PROBE3(pf, eth, test_rule, mismatch, r->nr, r,
5308 			    "src");
5309 			r = r->skip[PFE_SKIP_SRC_ADDR].ptr;
5310 		}
5311 		else if (! pf_match_eth_addr(e->ether_dhost, &r->dst)) {
5312 			SDT_PROBE3(pf, eth, test_rule, mismatch, r->nr, r,
5313 			    "dst");
5314 			r = r->skip[PFE_SKIP_DST_ADDR].ptr;
5315 		}
5316 		else if (src != NULL && PF_MISMATCHAW(&r->ipsrc.addr, src, af,
5317 		    r->ipsrc.neg, kif, M_GETFIB(m))) {
5318 			SDT_PROBE3(pf, eth, test_rule, mismatch, r->nr, r,
5319 			    "ip_src");
5320 			r = r->skip[PFE_SKIP_SRC_IP_ADDR].ptr;
5321 		}
5322 		else if (dst != NULL && PF_MISMATCHAW(&r->ipdst.addr, dst, af,
5323 		    r->ipdst.neg, kif, M_GETFIB(m))) {
5324 			SDT_PROBE3(pf, eth, test_rule, mismatch, r->nr, r,
5325 			    "ip_dst");
5326 			r = r->skip[PFE_SKIP_DST_IP_ADDR].ptr;
5327 		}
5328 		else if (r->match_tag && !pf_match_eth_tag(m, r, &tag,
5329 		    mtag ? mtag->tag : 0)) {
5330 			SDT_PROBE3(pf, eth, test_rule, mismatch, r->nr, r,
5331 			    "match_tag");
5332 			r = TAILQ_NEXT(r, entries);
5333 		}
5334 		else {
5335 			if (r->tag)
5336 				tag = r->tag;
5337 			if (r->anchor == NULL) {
5338 				/* Rule matches */
5339 				rm = r;
5340 
5341 				SDT_PROBE2(pf, eth, test_rule, match, r->nr, r);
5342 
5343 				if (r->quick)
5344 					break;
5345 
5346 				r = TAILQ_NEXT(r, entries);
5347 			} else {
5348 				pf_step_into_keth_anchor(anchor_stack, &asd,
5349 				    &ruleset, &r, &a, &match);
5350 			}
5351 		}
5352 		if (r == NULL && pf_step_out_of_keth_anchor(anchor_stack, &asd,
5353 		    &ruleset, &r, &a, &match))
5354 			break;
5355 	}
5356 
5357 	r = rm;
5358 
5359 	SDT_PROBE2(pf, eth, test_rule, final_match, (r != NULL ? r->nr : -1), r);
5360 
5361 	/* Default to pass. */
5362 	if (r == NULL) {
5363 		PF_RULES_RUNLOCK();
5364 		return (PF_PASS);
5365 	}
5366 
5367 	/* Execute action. */
5368 	counter_u64_add(r->packets[dir == PF_OUT], 1);
5369 	counter_u64_add(r->bytes[dir == PF_OUT], m_length(m, NULL));
5370 	pf_update_timestamp(r);
5371 
5372 	/* Shortcut. Don't tag if we're just going to drop anyway. */
5373 	if (r->action == PF_DROP) {
5374 		PF_RULES_RUNLOCK();
5375 		return (PF_DROP);
5376 	}
5377 
5378 	if (tag > 0) {
5379 		if (mtag == NULL)
5380 			mtag = pf_get_mtag(m);
5381 		if (mtag == NULL) {
5382 			PF_RULES_RUNLOCK();
5383 			counter_u64_add(V_pf_status.counters[PFRES_MEMORY], 1);
5384 			return (PF_DROP);
5385 		}
5386 		mtag->tag = tag;
5387 	}
5388 
5389 	if (r->qid != 0) {
5390 		if (mtag == NULL)
5391 			mtag = pf_get_mtag(m);
5392 		if (mtag == NULL) {
5393 			PF_RULES_RUNLOCK();
5394 			counter_u64_add(V_pf_status.counters[PFRES_MEMORY], 1);
5395 			return (PF_DROP);
5396 		}
5397 		mtag->qid = r->qid;
5398 	}
5399 
5400 	action = r->action;
5401 	bridge_to = r->bridge_to;
5402 
5403 	/* Dummynet */
5404 	if (r->dnpipe) {
5405 		struct ip_fw_args dnflow;
5406 
5407 		/* Drop packet if dummynet is not loaded. */
5408 		if (ip_dn_io_ptr == NULL) {
5409 			PF_RULES_RUNLOCK();
5410 			m_freem(m);
5411 			counter_u64_add(V_pf_status.counters[PFRES_MEMORY], 1);
5412 			return (PF_DROP);
5413 		}
5414 		if (mtag == NULL)
5415 			mtag = pf_get_mtag(m);
5416 		if (mtag == NULL) {
5417 			PF_RULES_RUNLOCK();
5418 			counter_u64_add(V_pf_status.counters[PFRES_MEMORY], 1);
5419 			return (PF_DROP);
5420 		}
5421 
5422 		bzero(&dnflow, sizeof(dnflow));
5423 
5424 		/* We don't have port numbers here, so we set 0.  That means
5425 		 * that we'll be somewhat limited in distinguishing flows (i.e.
5426 		 * only based on IP addresses, not based on port numbers), but
5427 		 * it's better than nothing. */
5428 		dnflow.f_id.dst_port = 0;
5429 		dnflow.f_id.src_port = 0;
5430 		dnflow.f_id.proto = 0;
5431 
5432 		dnflow.rule.info = r->dnpipe;
5433 		dnflow.rule.info |= IPFW_IS_DUMMYNET;
5434 		if (r->dnflags & PFRULE_DN_IS_PIPE)
5435 			dnflow.rule.info |= IPFW_IS_PIPE;
5436 
5437 		dnflow.f_id.extra = dnflow.rule.info;
5438 
5439 		dnflow.flags = dir == PF_IN ? IPFW_ARGS_IN : IPFW_ARGS_OUT;
5440 		dnflow.flags |= IPFW_ARGS_ETHER;
5441 		dnflow.ifp = kif->pfik_ifp;
5442 
5443 		switch (af) {
5444 		case AF_INET:
5445 			dnflow.f_id.addr_type = 4;
5446 			dnflow.f_id.src_ip = src->v4.s_addr;
5447 			dnflow.f_id.dst_ip = dst->v4.s_addr;
5448 			break;
5449 		case AF_INET6:
5450 			dnflow.flags |= IPFW_ARGS_IP6;
5451 			dnflow.f_id.addr_type = 6;
5452 			dnflow.f_id.src_ip6 = src->v6;
5453 			dnflow.f_id.dst_ip6 = dst->v6;
5454 			break;
5455 		}
5456 
5457 		PF_RULES_RUNLOCK();
5458 
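		/*
		 * Hand the packet to dummynet.  The mbuf tag flag marks it so
		 * that pf does not send it through dummynet a second time when
		 * dummynet reinjects it; if dummynet returned the packet
		 * immediately (*m0 != NULL), the flag is removed again.
		 */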
5459 		mtag->flags |= PF_MTAG_FLAG_DUMMYNET;
5460 		ip_dn_io_ptr(m0, &dnflow);
5461 		if (*m0 != NULL)
5462 			pf_dummynet_flag_remove(m, mtag);
5463 	} else {
5464 		PF_RULES_RUNLOCK();
5465 	}
5466 
5467 	if (action == PF_PASS && bridge_to) {
5468 		pf_bridge_to(bridge_to->pfik_ifp, *m0);
5469 		*m0 = NULL; /* We've eaten the packet. */
5470 	}
5471 
5472 	return (action);
5473 }
5474 
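/*
 * PF_TEST_ATTRIB(t, a): if the mismatch test 't' is true, make 'a' (a skip
 * step or the next rule) the current rule and continue with the next loop
 * iteration; otherwise fall through to the following attribute test.
 */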
5475 #define PF_TEST_ATTRIB(t, a)		\
5476 	if (t) {			\
5477 		r = a;			\
5478 		continue;		\
5479 	} else do {			\
5480 	} while (0)
5481 
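/*
 * Apply the translation attached to a match/pass rule: either af-to (address
 * family translation) or a nat-to/rdr-to pool.  Returns PFRES_MATCH when a
 * translation was found and applied, PFRES_MAX when the rule carries no
 * translation, or another PFRES_* code on failure.  Refuses to translate
 * (PFRES_TRANSLATE) if an old style NAT rule (ctx->nr) already matched.
 */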
5482 static __inline u_short
5483 pf_rule_apply_nat(struct pf_test_ctx *ctx, struct pf_krule *r)
5484 {
5485 	struct pf_pdesc	*pd = ctx->pd;
5486 	u_short		 transerror;
5487 	u_int8_t	 nat_action;
5488 
5489 	if (r->rule_flag & PFRULE_AFTO) {
5490 		/* Don't translate if there was an old style NAT rule */
5491 		if (ctx->nr != NULL)
5492 			return (PFRES_TRANSLATE);
5493 
5494 		/* Handle pass af-to rules; af-to is unsupported on match rules. */
5495 		KASSERT(r->action != PF_MATCH, ("%s: af-to on match rule", __func__));
5496 		/* XXX I can imagine scenarios where we have both NAT and RDR source tracking */
5497 		ctx->nat_pool = &(r->nat);
5498 		ctx->nr = r;
5499 		pd->naf = r->naf;
5500 		if (pf_get_transaddr_af(ctx->nr, pd) == -1) {
5501 			return (PFRES_TRANSLATE);
5502 		}
5503 		return (PFRES_MATCH);
5504 	} else if (r->rdr.cur || r->nat.cur) {
5505 		/* Don't translate if there was an old style NAT rule */
5506 		if (ctx->nr != NULL)
5507 			return (PFRES_TRANSLATE);
5508 
5509 		/* match/pass nat-to/rdr-to rules */
5510 		ctx->nr = r;
5511 		if (r->nat.cur) {
5512 			nat_action = PF_NAT;
5513 			ctx->nat_pool = &(r->nat);
5514 		} else {
5515 			nat_action = PF_RDR;
5516 			ctx->nat_pool = &(r->rdr);
5517 		}
5518 
5519 		transerror = pf_get_transaddr(ctx, ctx->nr,
5520 		    nat_action, ctx->nat_pool);
5521 		if (transerror == PFRES_MATCH) {
5522 			ctx->rewrite += pf_translate_compat(ctx);
5523 			return (PFRES_MATCH);
5524 		}
5525 		return (transerror);
5526 	}
5527 
5528 	return (PFRES_MAX);
5529 }
5530 
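/*
 * Evaluate the filter rules of 'ruleset' against the packet described by
 * ctx->pd, using the precomputed skip steps to jump over rules that cannot
 * match.  Translations and counters for 'match' rules are applied as they
 * are encountered; the last matching pass/block rule is recorded in *ctx->rm
 * together with its anchor and ruleset in *ctx->am and *ctx->rsm.  Anchors
 * are evaluated recursively via pf_step_into_anchor().  Returns
 * ctx->test_status (PF_TEST_OK, PF_TEST_QUICK or PF_TEST_FAIL).
 */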
5531 enum pf_test_status
5532 pf_match_rule(struct pf_test_ctx *ctx, struct pf_kruleset *ruleset)
5533 {
5534 	struct pf_krule_item	*ri;
5535 	struct pf_krule		*r;
5536 	struct pf_krule		*save_a;
5537 	struct pf_kruleset	*save_aruleset;
5538 	struct pf_pdesc		*pd = ctx->pd;
5539 	u_short			 transerror;
5540 
5541 	r = TAILQ_FIRST(ruleset->rules[PF_RULESET_FILTER].active.ptr);
5542 	while (r != NULL) {
5543 		if (ctx->pd->related_rule) {
5544 			*ctx->rm = ctx->pd->related_rule;
5545 			break;
5546 		}
5547 		pf_counter_u64_add(&r->evaluations, 1);
5548 		PF_TEST_ATTRIB(pfi_kkif_match(r->kif, pd->kif) == r->ifnot,
5549 			r->skip[PF_SKIP_IFP]);
5550 		PF_TEST_ATTRIB(r->direction && r->direction != pd->dir,
5551 			r->skip[PF_SKIP_DIR]);
5552 		PF_TEST_ATTRIB(r->af && r->af != pd->af,
5553 			r->skip[PF_SKIP_AF]);
5554 		PF_TEST_ATTRIB(r->proto && r->proto != pd->proto,
5555 			r->skip[PF_SKIP_PROTO]);
5556 		PF_TEST_ATTRIB(PF_MISMATCHAW(&r->src.addr, &pd->nsaddr, pd->naf,
5557 		    r->src.neg, pd->kif, M_GETFIB(pd->m)),
5558 			r->skip[PF_SKIP_SRC_ADDR]);
5559 		PF_TEST_ATTRIB(PF_MISMATCHAW(&r->dst.addr, &pd->ndaddr, pd->af,
5560 		    r->dst.neg, NULL, M_GETFIB(pd->m)),
5561 			r->skip[PF_SKIP_DST_ADDR]);
5562 		switch (pd->virtual_proto) {
5563 		case PF_VPROTO_FRAGMENT:
5564 			/* tcp/udp only. port_op always 0 in other cases */
5565 			PF_TEST_ATTRIB((r->src.port_op || r->dst.port_op),
5566 				TAILQ_NEXT(r, entries));
5567 			PF_TEST_ATTRIB((pd->proto == IPPROTO_TCP && r->flagset),
5568 				TAILQ_NEXT(r, entries));
5569 			/* icmp only. type/code always 0 in other cases */
5570 			PF_TEST_ATTRIB((r->type || r->code),
5571 				TAILQ_NEXT(r, entries));
5572 			/* tcp/udp only. {uid|gid}.op always 0 in other cases */
5573 			PF_TEST_ATTRIB((r->gid.op || r->uid.op),
5574 				TAILQ_NEXT(r, entries));
5575 			break;
5576 
5577 		case IPPROTO_TCP:
5578 			PF_TEST_ATTRIB((r->flagset & tcp_get_flags(ctx->th))
5579 			    != r->flags,
5580 				TAILQ_NEXT(r, entries));
5581 			/* FALLTHROUGH */
5582 		case IPPROTO_SCTP:
5583 		case IPPROTO_UDP:
5584 			/* tcp/udp only. port_op always 0 in other cases */
5585 			PF_TEST_ATTRIB(r->src.port_op && !pf_match_port(r->src.port_op,
5586 			    r->src.port[0], r->src.port[1], pd->nsport),
5587 				r->skip[PF_SKIP_SRC_PORT]);
5588 			/* tcp/udp only. port_op always 0 in other cases */
5589 			PF_TEST_ATTRIB(r->dst.port_op && !pf_match_port(r->dst.port_op,
5590 			    r->dst.port[0], r->dst.port[1], pd->ndport),
5591 				r->skip[PF_SKIP_DST_PORT]);
5592 			/* tcp/udp only. uid.op always 0 in other cases */
5593 			PF_TEST_ATTRIB(r->uid.op && (pd->lookup.done || (pd->lookup.done =
5594 			    pf_socket_lookup(pd), 1)) &&
5595 			    !pf_match_uid(r->uid.op, r->uid.uid[0], r->uid.uid[1],
5596 			    pd->lookup.uid),
5597 				TAILQ_NEXT(r, entries));
5598 			/* tcp/udp only. gid.op always 0 in other cases */
5599 			PF_TEST_ATTRIB(r->gid.op && (pd->lookup.done || (pd->lookup.done =
5600 			    pf_socket_lookup(pd), 1)) &&
5601 			    !pf_match_gid(r->gid.op, r->gid.gid[0], r->gid.gid[1],
5602 			    pd->lookup.gid),
5603 				TAILQ_NEXT(r, entries));
5604 			break;
5605 
5606 		case IPPROTO_ICMP:
5607 		case IPPROTO_ICMPV6:
5608 			/* icmp only. type always 0 in other cases */
5609 			PF_TEST_ATTRIB(r->type && r->type != ctx->icmptype + 1,
5610 				TAILQ_NEXT(r, entries));
5611 			/* icmp only. code always 0 in other cases */
5612 			PF_TEST_ATTRIB(r->code && r->code != ctx->icmpcode + 1,
5613 				TAILQ_NEXT(r, entries));
5614 			break;
5615 
5616 		default:
5617 			break;
5618 		}
5619 		PF_TEST_ATTRIB(r->tos && !(r->tos == pd->tos),
5620 			TAILQ_NEXT(r, entries));
5621 		PF_TEST_ATTRIB(r->prio &&
5622 		    !pf_match_ieee8021q_pcp(r->prio, pd->m),
5623 			TAILQ_NEXT(r, entries));
5624 		PF_TEST_ATTRIB(r->prob &&
5625 		    r->prob <= arc4random(),
5626 			TAILQ_NEXT(r, entries));
5627 		PF_TEST_ATTRIB(r->match_tag && !pf_match_tag(pd->m, r,
5628 		    &ctx->tag, pd->pf_mtag ? pd->pf_mtag->tag : 0),
5629 			TAILQ_NEXT(r, entries));
5630 		PF_TEST_ATTRIB((r->rcv_kif && pf_match_rcvif(pd->m, r) ==
5631 		   r->rcvifnot),
5632 			TAILQ_NEXT(r, entries));
5633 		PF_TEST_ATTRIB((r->rule_flag & PFRULE_FRAGMENT &&
5634 		    pd->virtual_proto != PF_VPROTO_FRAGMENT),
5635 			TAILQ_NEXT(r, entries));
5636 		PF_TEST_ATTRIB(r->os_fingerprint != PF_OSFP_ANY &&
5637 		    (pd->virtual_proto != IPPROTO_TCP || !pf_osfp_match(
5638 		    pf_osfp_fingerprint(pd, ctx->th),
5639 		    r->os_fingerprint)),
5640 			TAILQ_NEXT(r, entries));
5641 		/* must be last! */
5642 		if (r->pktrate.limit) {
5643 			PF_TEST_ATTRIB((pf_check_threshold(&r->pktrate)),
5644 			    TAILQ_NEXT(r, entries));
5645 		}
5646 		/* FALLTHROUGH */
5647 		if (r->tag)
5648 			ctx->tag = r->tag;
5649 		if (r->anchor == NULL) {
5650 			if (r->action == PF_MATCH) {
5651 				/*
5652 				 * Apply translations before increasing counters,
5653 				 * in case the translation fails.
5654 				 */
5655 				transerror = pf_rule_apply_nat(ctx, r);
5656 				switch (transerror) {
5657 				case PFRES_MATCH:
5658 					/* Translation action found in rule and applied successfully */
5659 				case PFRES_MAX:
5660 					/* No translation action found in rule */
5661 					break;
5662 				default:
5663 					/* Translation action found in rule but failed to apply */
5664 					REASON_SET(&ctx->reason, transerror);
5665 					return (PF_TEST_FAIL);
5666 				}
5667 				ri = malloc(sizeof(struct pf_krule_item), M_PF_RULE_ITEM, M_NOWAIT | M_ZERO);
5668 				if (ri == NULL) {
5669 					REASON_SET(&ctx->reason, PFRES_MEMORY);
5670 					return (PF_TEST_FAIL);
5671 				}
5672 				ri->r = r;
5673 				SLIST_INSERT_HEAD(&ctx->rules, ri, entry);
5674 				pf_counter_u64_critical_enter();
5675 				pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT], 1);
5676 				pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT], pd->tot_len);
5677 				pf_counter_u64_critical_exit();
5678 				pf_rule_to_actions(r, &pd->act);
5679 				if (r->log)
5680 					PFLOG_PACKET(r->action, PFRES_MATCH, r,
5681 					    ctx->a, ruleset, pd, 1, NULL);
5682 			} else {
5683 				/*
5684 				 * found matching r
5685 				 */
5686 				*ctx->rm = r;
5687 				/*
5688 				 * anchor, with ruleset, where r belongs to
5689 				 */
5690 				*ctx->am = ctx->a;
5691 				/*
5692 				 * ruleset where r belongs to
5693 				 */
5694 				*ctx->rsm = ruleset;
5695 				/*
5696 				 * ruleset, where anchor belongs to.
5697 				 */
5698 				ctx->arsm = ctx->aruleset;
5699 			}
5700 			if (pd->act.log & PF_LOG_MATCHES)
5701 				pf_log_matches(pd, r, ctx->a, ruleset, &ctx->rules);
5702 			if (r->quick) {
5703 				ctx->test_status = PF_TEST_QUICK;
5704 				break;
5705 			}
5706 		} else {
5707 			save_a = ctx->a;
5708 			save_aruleset = ctx->aruleset;
5709 
5710 			ctx->a = r;			/* remember anchor */
5711 			ctx->aruleset = ruleset;	/* and its ruleset */
5712 			if (ctx->a->quick)
5713 				ctx->test_status = PF_TEST_QUICK;
5714 			/*
5715 			 * Note: we don't need to restore if we are not going
5716 			 * to continue with ruleset evaluation.
5717 			 */
5718 			if (pf_step_into_anchor(ctx, r) != PF_TEST_OK) {
5719 				break;
5720 			}
5721 			ctx->a = save_a;
5722 			ctx->aruleset = save_aruleset;
5723 		}
5724 		r = TAILQ_NEXT(r, entries);
5725 	}
5726 
5727 	return (ctx->test_status);
5728 }
5729 
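/*
 * Rule evaluation for a packet that matched no existing state: save the
 * original ports and checksums, look up old style BINAT/NAT/RDR
 * translations, run pf_match_rule() over the main ruleset, apply the last
 * matching rule's actions (translation, route-to, tagging, logging) and,
 * if the rule keeps state, create a state entry via pf_create_state().
 * Returns the verdict (PF_PASS, PF_DROP, PF_DEFER, ...) and sets *reason.
 */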
5730 static int
5731 pf_test_rule(struct pf_krule **rm, struct pf_kstate **sm,
5732     struct pf_pdesc *pd, struct pf_krule **am,
5733     struct pf_kruleset **rsm, u_short *reason, struct inpcb *inp)
5734 {
5735 	struct pf_krule		*r = NULL;
5736 	struct pf_kruleset	*ruleset = NULL;
5737 	struct pf_krule_item	*ri;
5738 	struct pf_test_ctx	 ctx;
5739 	u_short			 transerror;
5740 	int			 action = PF_PASS;
5741 	u_int16_t		 bproto_sum = 0, bip_sum = 0;
5742 	enum pf_test_status	 rv;
5743 
5744 	PF_RULES_RASSERT();
5745 
5746 	bzero(&ctx, sizeof(ctx));
5747 	ctx.tag = -1;
5748 	ctx.pd = pd;
5749 	ctx.rm = rm;
5750 	ctx.am = am;
5751 	ctx.rsm = rsm;
5752 	ctx.th = &pd->hdr.tcp;
5753 	ctx.reason = *reason;
5754 	SLIST_INIT(&ctx.rules);
5755 
5756 	pf_addrcpy(&pd->nsaddr, pd->src, pd->af);
5757 	pf_addrcpy(&pd->ndaddr, pd->dst, pd->af);
5758 
5759 	if (inp != NULL) {
5760 		INP_LOCK_ASSERT(inp);
5761 		pd->lookup.uid = inp->inp_cred->cr_uid;
5762 		pd->lookup.gid = inp->inp_cred->cr_groups[0];
5763 		pd->lookup.done = 1;
5764 	}
5765 
5766 	if (pd->ip_sum)
5767 		bip_sum = *pd->ip_sum;
5768 
5769 	switch (pd->virtual_proto) {
5770 	case IPPROTO_TCP:
5771 		bproto_sum = ctx.th->th_sum;
5772 		pd->nsport = ctx.th->th_sport;
5773 		pd->ndport = ctx.th->th_dport;
5774 		break;
5775 	case IPPROTO_UDP:
5776 		bproto_sum = pd->hdr.udp.uh_sum;
5777 		pd->nsport = pd->hdr.udp.uh_sport;
5778 		pd->ndport = pd->hdr.udp.uh_dport;
5779 		break;
5780 	case IPPROTO_SCTP:
5781 		pd->nsport = pd->hdr.sctp.src_port;
5782 		pd->ndport = pd->hdr.sctp.dest_port;
5783 		break;
5784 #ifdef INET
5785 	case IPPROTO_ICMP:
5786 		MPASS(pd->af == AF_INET);
5787 		ctx.icmptype = pd->hdr.icmp.icmp_type;
5788 		ctx.icmpcode = pd->hdr.icmp.icmp_code;
5789 		ctx.state_icmp = pf_icmp_mapping(pd, ctx.icmptype,
5790 		    &ctx.icmp_dir, &ctx.virtual_id, &ctx.virtual_type);
5791 		if (ctx.icmp_dir == PF_IN) {
5792 			pd->nsport = ctx.virtual_id;
5793 			pd->ndport = ctx.virtual_type;
5794 		} else {
5795 			pd->nsport = ctx.virtual_type;
5796 			pd->ndport = ctx.virtual_id;
5797 		}
5798 		break;
5799 #endif /* INET */
5800 #ifdef INET6
5801 	case IPPROTO_ICMPV6:
5802 		MPASS(pd->af == AF_INET6);
5803 		ctx.icmptype = pd->hdr.icmp6.icmp6_type;
5804 		ctx.icmpcode = pd->hdr.icmp6.icmp6_code;
5805 		ctx.state_icmp = pf_icmp_mapping(pd, ctx.icmptype,
5806 		    &ctx.icmp_dir, &ctx.virtual_id, &ctx.virtual_type);
5807 		if (ctx.icmp_dir == PF_IN) {
5808 			pd->nsport = ctx.virtual_id;
5809 			pd->ndport = ctx.virtual_type;
5810 		} else {
5811 			pd->nsport = ctx.virtual_type;
5812 			pd->ndport = ctx.virtual_id;
5813 		}
5814 
5815 		break;
5816 #endif /* INET6 */
5817 	default:
5818 		pd->nsport = pd->ndport = 0;
5819 		break;
5820 	}
5821 	pd->osport = pd->nsport;
5822 	pd->odport = pd->ndport;
5823 
5824 	/* check packet for BINAT/NAT/RDR */
5825 	transerror = pf_get_translation(&ctx);
5826 	switch (transerror) {
5827 	default:
5828 		/* A translation error occurred. */
5829 		REASON_SET(&ctx.reason, transerror);
5830 		goto cleanup;
5831 	case PFRES_MAX:
5832 		/* No match. */
5833 		break;
5834 	case PFRES_MATCH:
5835 		KASSERT(ctx.sk != NULL, ("%s: null sk", __func__));
5836 		KASSERT(ctx.nk != NULL, ("%s: null nk", __func__));
5837 		if (ctx.nr->log) {
5838 			PFLOG_PACKET(ctx.nr->action, PFRES_MATCH, ctx.nr, ctx.a,
5839 			    ruleset, pd, 1, NULL);
5840 		}
5841 
5842 		ctx.rewrite += pf_translate_compat(&ctx);
5843 		ctx.nat_pool = &(ctx.nr->rdr);
5844 	}
5845 
5846 	ruleset = &pf_main_ruleset;
5847 	rv = pf_match_rule(&ctx, ruleset);
5848 	if (rv == PF_TEST_FAIL) {
5849 		/*
5850 		 * Reason has been set in pf_match_rule() already.
5851 		 */
5852 		goto cleanup;
5853 	}
5854 
5855 	r = *ctx.rm;			/* matching rule */
5856 	ctx.a = *ctx.am;		/* rule that defines an anchor containing 'r' */
5857 	ruleset = *ctx.rsm;		/* ruleset of the anchor defined by the rule 'a' */
5858 	ctx.aruleset = ctx.arsm;	/* ruleset of the 'a' rule itself */
5859 
5860 	REASON_SET(&ctx.reason, PFRES_MATCH);
5861 
5862 	/* apply actions for last matching pass/block rule */
5863 	pf_rule_to_actions(r, &pd->act);
5864 	transerror = pf_rule_apply_nat(&ctx, r);
5865 	switch (transerror) {
5866 	case PFRES_MATCH:
5867 		/* Translation action found in rule and applied successfully */
5868 	case PFRES_MAX:
5869 		/* No translation action found in rule */
5870 		break;
5871 	default:
5872 		/* Translation action found in rule but failed to apply */
5873 		REASON_SET(&ctx.reason, transerror);
5874 		goto cleanup;
5875 	}
5876 
5877 	if (r->log) {
5878 		if (ctx.rewrite)
5879 			m_copyback(pd->m, pd->off, pd->hdrlen, pd->hdr.any);
5880 		PFLOG_PACKET(r->action, ctx.reason, r, ctx.a, ruleset, pd, 1, NULL);
5881 	}
5882 	if (pd->act.log & PF_LOG_MATCHES)
5883 		pf_log_matches(pd, r, ctx.a, ruleset, &ctx.rules);
5884 	if (pd->virtual_proto != PF_VPROTO_FRAGMENT &&
5885 	   (r->action == PF_DROP) &&
5886 	    ((r->rule_flag & PFRULE_RETURNRST) ||
5887 	    (r->rule_flag & PFRULE_RETURNICMP) ||
5888 	    (r->rule_flag & PFRULE_RETURN))) {
5889 		pf_return(r, ctx.nr, pd, ctx.th, bproto_sum,
5890 		    bip_sum, &ctx.reason, r->rtableid);
5891 	}
5892 
5893 	if (r->action == PF_DROP)
5894 		goto cleanup;
5895 
5896 	if (ctx.tag > 0 && pf_tag_packet(pd, ctx.tag)) {
5897 		REASON_SET(&ctx.reason, PFRES_MEMORY);
5898 		goto cleanup;
5899 	}
5900 	if (pd->act.rtableid >= 0)
5901 		M_SETFIB(pd->m, pd->act.rtableid);
5902 
5903 	if (r->rt) {
5904 		struct pf_ksrc_node	*sn = NULL;
5905 		struct pf_srchash	*snh = NULL;
5906 		/*
5907 		 * Set act.rt here instead of in pf_rule_to_actions() because
5908 		 * it is applied only from the last pass rule.
5909 		 */
5910 		pd->act.rt = r->rt;
5911 		/* Don't use REASON_SET, pf_map_addr increases the reason counters */
5912 		ctx.reason = pf_map_addr_sn(pd->af, r, pd->src, &pd->act.rt_addr,
5913 		    &pd->act.rt_kif, NULL, &sn, &snh, &(r->route), PF_SN_ROUTE);
5914 		if (ctx.reason != 0)
5915 			goto cleanup;
5916 	}
5917 
5918 	if (pd->virtual_proto != PF_VPROTO_FRAGMENT &&
5919 	   (!ctx.state_icmp && (r->keep_state || ctx.nr != NULL ||
5920 	    (pd->flags & PFDESC_TCP_NORM)))) {
5921 		bool nat64;
5922 
5923 		action = pf_create_state(r, &ctx, sm, bproto_sum, bip_sum);
5924 		ctx.sk = ctx.nk = NULL;
5925 		if (action != PF_PASS) {
5926 			pf_udp_mapping_release(ctx.udp_mapping);
5927 			if (r->log || (ctx.nr != NULL && ctx.nr->log) ||
5928 			    ctx.reason == PFRES_MEMORY)
5929 				pd->act.log |= PF_LOG_FORCE;
5930 			if (action == PF_DROP &&
5931 			    (r->rule_flag & PFRULE_RETURN))
5932 				pf_return(r, ctx.nr, pd, ctx.th,
5933 				    bproto_sum, bip_sum, &ctx.reason,
5934 				    pd->act.rtableid);
5935 			*reason = ctx.reason;
5936 			return (action);
5937 		}
5938 
5939 		nat64 = pd->af != pd->naf;
5940 		if (nat64) {
5941 			int			 ret;
5942 
5943 			if (ctx.sk == NULL)
5944 				ctx.sk = (*sm)->key[pd->dir == PF_IN ? PF_SK_STACK : PF_SK_WIRE];
5945 			if (ctx.nk == NULL)
5946 				ctx.nk = (*sm)->key[pd->dir == PF_IN ? PF_SK_WIRE : PF_SK_STACK];
5947 
5948 			if (pd->dir == PF_IN) {
5949 				ret = pf_translate(pd, &ctx.sk->addr[pd->didx],
5950 				    ctx.sk->port[pd->didx], &ctx.sk->addr[pd->sidx],
5951 				    ctx.sk->port[pd->sidx], ctx.virtual_type,
5952 				    ctx.icmp_dir);
5953 			} else {
5954 				ret = pf_translate(pd, &ctx.sk->addr[pd->sidx],
5955 				    ctx.sk->port[pd->sidx], &ctx.sk->addr[pd->didx],
5956 				    ctx.sk->port[pd->didx], ctx.virtual_type,
5957 				    ctx.icmp_dir);
5958 			}
5959 
5960 			if (ret < 0)
5961 				goto cleanup;
5962 
5963 			ctx.rewrite += ret;
5964 
5965 			if (ctx.rewrite && ctx.sk->af != ctx.nk->af)
5966 				action = PF_AFRT;
5967 		}
5968 	} else {
5969 		while ((ri = SLIST_FIRST(&ctx.rules))) {
5970 			SLIST_REMOVE_HEAD(&ctx.rules, entry);
5971 			free(ri, M_PF_RULE_ITEM);
5972 		}
5973 
5974 		uma_zfree(V_pf_state_key_z, ctx.sk);
5975 		uma_zfree(V_pf_state_key_z, ctx.nk);
5976 		ctx.sk = ctx.nk = NULL;
5977 		pf_udp_mapping_release(ctx.udp_mapping);
5978 	}
5979 
5980 	/* copy back packet headers if we performed NAT operations */
5981 	if (ctx.rewrite)
5982 		m_copyback(pd->m, pd->off, pd->hdrlen, pd->hdr.any);
5983 
5984 	if (*sm != NULL && !((*sm)->state_flags & PFSTATE_NOSYNC) &&
5985 	    pd->dir == PF_OUT &&
5986 	    V_pfsync_defer_ptr != NULL && V_pfsync_defer_ptr(*sm, pd->m)) {
5987 		/*
5988 		 * We want the state created, but we don't
5989 		 * want to send the packet yet, in case a partner
5990 		 * firewall has to learn about the state first to
5991 		 * allow replies through it.
5992 		 */
5993 		*reason = ctx.reason;
5994 		return (PF_DEFER);
5995 	}
5996 
5997 	*reason = ctx.reason;
5998 	return (action);
5999 
6000 cleanup:
6001 	while ((ri = SLIST_FIRST(&ctx.rules))) {
6002 		SLIST_REMOVE_HEAD(&ctx.rules, entry);
6003 		free(ri, M_PF_RULE_ITEM);
6004 	}
6005 
6006 	uma_zfree(V_pf_state_key_z, ctx.sk);
6007 	uma_zfree(V_pf_state_key_z, ctx.nk);
6008 	pf_udp_mapping_release(ctx.udp_mapping);
6009 	*reason = ctx.reason;
6010 
6011 	return (PF_DROP);
6012 }
6013 
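/*
 * Create and insert a new state for the matching rule: enforce the rule's
 * max-states and source node limits, copy the matched rules and actions
 * into the state, initialize per-protocol peer state, timeouts and TCP
 * sequence tracking, set up the state keys and insert the state into the
 * state table.  For synproxy rules the client's SYN is answered with a
 * SYN|ACK here and PF_SYNPROXY_DROP is returned.
 */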
6014 static int
6015 pf_create_state(struct pf_krule *r, struct pf_test_ctx *ctx,
6016     struct pf_kstate **sm, u_int16_t bproto_sum, u_int16_t bip_sum)
6017 {
6018 	struct pf_pdesc		*pd = ctx->pd;
6019 	struct pf_kstate	*s = NULL;
6020 	struct pf_ksrc_node	*sns[PF_SN_MAX] = { NULL };
6021 	/*
6022 	 * XXXKS: The hash for PF_SN_LIMIT and PF_SN_ROUTE should be the same
6023 	 *        but for PF_SN_NAT it is different. Don't try optimizing it,
6024 	 *        just store all 3 hashes.
6025 	 */
6026 	struct pf_srchash	*snhs[PF_SN_MAX] = { NULL };
6027 	struct tcphdr		*th = &pd->hdr.tcp;
6028 	u_int16_t		 mss = V_tcp_mssdflt;
6029 	u_short			 sn_reason;
6030 	struct pf_krule_item	*ri;
6031 
6032 	/* check maximums */
6033 	if (r->max_states &&
6034 	    (counter_u64_fetch(r->states_cur) >= r->max_states)) {
6035 		counter_u64_add(V_pf_status.lcounters[LCNT_STATES], 1);
6036 		REASON_SET(&ctx->reason, PFRES_MAXSTATES);
6037 		goto csfailed;
6038 	}
6039 	/* src node for limits */
6040 	if ((r->rule_flag & PFRULE_SRCTRACK) &&
6041 	    (sn_reason = pf_insert_src_node(sns, snhs, r, pd->src, pd->af,
6042 	        NULL, NULL, PF_SN_LIMIT)) != 0) {
6043 		REASON_SET(&ctx->reason, sn_reason);
6044 		goto csfailed;
6045 	}
6046 	/* src node for route-to rule */
6047 	if (r->rt) {
6048 		if ((r->route.opts & PF_POOL_STICKYADDR) &&
6049 		    (sn_reason = pf_insert_src_node(sns, snhs, r, pd->src,
6050 		    pd->af, &pd->act.rt_addr, pd->act.rt_kif,
6051 		    PF_SN_ROUTE)) != 0) {
6052 			REASON_SET(&ctx->reason, sn_reason);
6053 			goto csfailed;
6054 		}
6055 	}
6056 	/* src node for translation rule */
6057 	if (ctx->nr != NULL) {
6058 		KASSERT(ctx->nat_pool != NULL, ("%s: nat_pool is NULL", __func__));
6059 		if ((ctx->nat_pool->opts & PF_POOL_STICKYADDR) &&
6060 		    (sn_reason = pf_insert_src_node(sns, snhs, ctx->nr,
6061 		    &ctx->sk->addr[pd->sidx], pd->af, &ctx->nk->addr[1], NULL,
6062 		    PF_SN_NAT)) != 0 ) {
6063 			REASON_SET(&ctx->reason, sn_reason);
6064 			goto csfailed;
6065 		}
6066 	}
6067 	s = pf_alloc_state(M_NOWAIT);
6068 	if (s == NULL) {
6069 		REASON_SET(&ctx->reason, PFRES_MEMORY);
6070 		goto csfailed;
6071 	}
6072 	s->rule = r;
6073 	s->nat_rule = ctx->nr;
6074 	s->anchor = ctx->a;
6075 	memcpy(&s->match_rules, &ctx->rules, sizeof(s->match_rules));
6076 	memcpy(&s->act, &pd->act, sizeof(struct pf_rule_actions));
6077 
6078 	if (pd->act.allow_opts)
6079 		s->state_flags |= PFSTATE_ALLOWOPTS;
6080 	if (r->rule_flag & PFRULE_STATESLOPPY)
6081 		s->state_flags |= PFSTATE_SLOPPY;
6082 	if (pd->flags & PFDESC_TCP_NORM) /* Set by old-style scrub rules */
6083 		s->state_flags |= PFSTATE_SCRUB_TCP;
6084 	if ((r->rule_flag & PFRULE_PFLOW) ||
6085 	    (ctx->nr != NULL && ctx->nr->rule_flag & PFRULE_PFLOW))
6086 		s->state_flags |= PFSTATE_PFLOW;
6087 
6088 	s->act.log = pd->act.log & PF_LOG_ALL;
6089 	s->sync_state = PFSYNC_S_NONE;
6090 	s->state_flags |= pd->act.flags; /* Only needed for pfsync and state export */
6091 
6092 	if (ctx->nr != NULL)
6093 		s->act.log |= ctx->nr->log & PF_LOG_ALL;
6094 	switch (pd->proto) {
6095 	case IPPROTO_TCP:
6096 		s->src.seqlo = ntohl(th->th_seq);
6097 		s->src.seqhi = s->src.seqlo + pd->p_len + 1;
6098 		if ((tcp_get_flags(th) & (TH_SYN|TH_ACK)) == TH_SYN &&
6099 		    r->keep_state == PF_STATE_MODULATE) {
6100 			/* Generate sequence number modulator */
6101 			if ((s->src.seqdiff = pf_tcp_iss(pd) - s->src.seqlo) ==
6102 			    0)
6103 				s->src.seqdiff = 1;
6104 			pf_change_proto_a(pd->m, &th->th_seq, &th->th_sum,
6105 			    htonl(s->src.seqlo + s->src.seqdiff), 0);
6106 			ctx->rewrite = 1;
6107 		} else
6108 			s->src.seqdiff = 0;
6109 		if (tcp_get_flags(th) & TH_SYN) {
6110 			s->src.seqhi++;
6111 			s->src.wscale = pf_get_wscale(pd);
6112 		}
6113 		s->src.max_win = MAX(ntohs(th->th_win), 1);
6114 		if (s->src.wscale & PF_WSCALE_MASK) {
6115 			/* Remove scale factor from initial window */
6116 			int win = s->src.max_win;
6117 			win += 1 << (s->src.wscale & PF_WSCALE_MASK);
6118 			s->src.max_win = (win - 1) >>
6119 			    (s->src.wscale & PF_WSCALE_MASK);
6120 		}
6121 		if (tcp_get_flags(th) & TH_FIN)
6122 			s->src.seqhi++;
6123 		s->dst.seqhi = 1;
6124 		s->dst.max_win = 1;
6125 		pf_set_protostate(s, PF_PEER_SRC, TCPS_SYN_SENT);
6126 		pf_set_protostate(s, PF_PEER_DST, TCPS_CLOSED);
6127 		s->timeout = PFTM_TCP_FIRST_PACKET;
6128 		atomic_add_32(&V_pf_status.states_halfopen, 1);
6129 		break;
6130 	case IPPROTO_UDP:
6131 		pf_set_protostate(s, PF_PEER_SRC, PFUDPS_SINGLE);
6132 		pf_set_protostate(s, PF_PEER_DST, PFUDPS_NO_TRAFFIC);
6133 		s->timeout = PFTM_UDP_FIRST_PACKET;
6134 		break;
6135 	case IPPROTO_SCTP:
6136 		pf_set_protostate(s, PF_PEER_SRC, SCTP_COOKIE_WAIT);
6137 		pf_set_protostate(s, PF_PEER_DST, SCTP_CLOSED);
6138 		s->timeout = PFTM_SCTP_FIRST_PACKET;
6139 		break;
6140 	case IPPROTO_ICMP:
6141 #ifdef INET6
6142 	case IPPROTO_ICMPV6:
6143 #endif /* INET6 */
6144 		s->timeout = PFTM_ICMP_FIRST_PACKET;
6145 		break;
6146 	default:
6147 		pf_set_protostate(s, PF_PEER_SRC, PFOTHERS_SINGLE);
6148 		pf_set_protostate(s, PF_PEER_DST, PFOTHERS_NO_TRAFFIC);
6149 		s->timeout = PFTM_OTHER_FIRST_PACKET;
6150 	}
6151 
6152 	s->creation = s->expire = pf_get_uptime();
6153 
6154 	if (pd->proto == IPPROTO_TCP) {
6155 		if (s->state_flags & PFSTATE_SCRUB_TCP &&
6156 		    pf_normalize_tcp_init(pd, th, &s->src)) {
6157 			REASON_SET(&ctx->reason, PFRES_MEMORY);
6158 			goto csfailed;
6159 		}
6160 		if (s->state_flags & PFSTATE_SCRUB_TCP && s->src.scrub &&
6161 		    pf_normalize_tcp_stateful(pd, &ctx->reason, th, s,
6162 		    &s->src, &s->dst, &ctx->rewrite)) {
6163 			/* This really shouldn't happen!!! */
6164 			DPFPRINTF(PF_DEBUG_URGENT,
6165 			    ("%s: tcp normalize failed on first "
6166 			     "pkt\n", __func__));
6167 			goto csfailed;
6168 		}
6169 	} else if (pd->proto == IPPROTO_SCTP) {
6170 		if (pf_normalize_sctp_init(pd, &s->src, &s->dst))
6171 			goto csfailed;
6172 		if (! (pd->sctp_flags & (PFDESC_SCTP_INIT | PFDESC_SCTP_ADD_IP)))
6173 			goto csfailed;
6174 	}
6175 	s->direction = pd->dir;
6176 
6177 	/*
6178 	 * sk/nk could have already been set up by pf_get_translation().
6179 	 */
6180 	if (ctx->sk == NULL && ctx->nk == NULL) {
6181 		MPASS(pd->sport == NULL || (pd->osport == *pd->sport));
6182 		MPASS(pd->dport == NULL || (pd->odport == *pd->dport));
6183 		if (pf_state_key_setup(pd, pd->nsport, pd->ndport,
6184 		    &ctx->sk, &ctx->nk)) {
6185 			goto csfailed;
6186 		}
6187 	} else
6188 		KASSERT((ctx->sk != NULL && ctx->nk != NULL), ("%s: nr %p sk %p, nk %p",
6189 		    __func__, ctx->nr, ctx->sk, ctx->nk));
6190 
6191 	/* Swap sk/nk for PF_OUT. */
6192 	if (pf_state_insert(BOUND_IFACE(s, pd), pd->kif,
6193 	    (pd->dir == PF_IN) ? ctx->sk : ctx->nk,
6194 	    (pd->dir == PF_IN) ? ctx->nk : ctx->sk, s)) {
6195 		REASON_SET(&ctx->reason, PFRES_STATEINS);
6196 		goto drop;
6197 	} else
6198 		*sm = s;
6199 	ctx->sk = ctx->nk = NULL;
6200 
6201 	STATE_INC_COUNTERS(s);
6202 
6203 	/*
6204 	 * Lock order is important: first state, then source node.
6205 	 */
6206 	for (pf_sn_types_t sn_type=0; sn_type<PF_SN_MAX; sn_type++) {
6207 		if (pf_src_node_exists(&sns[sn_type], snhs[sn_type])) {
6208 			s->sns[sn_type] = sns[sn_type];
6209 			PF_HASHROW_UNLOCK(snhs[sn_type]);
6210 		}
6211 	}
6212 
6213 	if (ctx->tag > 0)
6214 		s->tag = ctx->tag;
6215 	if (pd->proto == IPPROTO_TCP && (tcp_get_flags(th) & (TH_SYN|TH_ACK)) ==
6216 	    TH_SYN && r->keep_state == PF_STATE_SYNPROXY) {
6217 		pf_set_protostate(s, PF_PEER_SRC, PF_TCPS_PROXY_SRC);
6218 		pf_undo_nat(ctx->nr, pd, bip_sum);
6219 		s->src.seqhi = arc4random();
6220 		/* Find mss option */
6221 		int rtid = M_GETFIB(pd->m);
6222 		mss = pf_get_mss(pd);
6223 		mss = pf_calc_mss(pd->src, pd->af, rtid, mss);
6224 		mss = pf_calc_mss(pd->dst, pd->af, rtid, mss);
6225 		s->src.mss = mss;
6226 		pf_send_tcp(r, pd->af, pd->dst, pd->src, th->th_dport,
6227 		    th->th_sport, s->src.seqhi, ntohl(th->th_seq) + 1,
6228 		    TH_SYN|TH_ACK, 0, s->src.mss, 0, M_SKIP_FIREWALL, 0, 0,
6229 		    pd->act.rtableid);
6230 		REASON_SET(&ctx->reason, PFRES_SYNPROXY);
6231 		return (PF_SYNPROXY_DROP);
6232 	}
6233 
6234 	s->udp_mapping = ctx->udp_mapping;
6235 
6236 	return (PF_PASS);
6237 
6238 csfailed:
6239 	while ((ri = SLIST_FIRST(&ctx->rules))) {
6240 		SLIST_REMOVE_HEAD(&ctx->rules, entry);
6241 		free(ri, M_PF_RULE_ITEM);
6242 	}
6243 
6244 	uma_zfree(V_pf_state_key_z, ctx->sk);
6245 	uma_zfree(V_pf_state_key_z, ctx->nk);
6246 
6247 	for (pf_sn_types_t sn_type=0; sn_type<PF_SN_MAX; sn_type++) {
6248 		if (pf_src_node_exists(&sns[sn_type], snhs[sn_type])) {
6249 			if (--sns[sn_type]->states == 0 &&
6250 			    sns[sn_type]->expire == 0) {
6251 				pf_unlink_src_node(sns[sn_type]);
6252 				pf_free_src_node(sns[sn_type]);
6253 				counter_u64_add(
6254 				    V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS], 1);
6255 			}
6256 			PF_HASHROW_UNLOCK(snhs[sn_type]);
6257 		}
6258 	}
6259 
6260 drop:
6261 	if (s != NULL) {
6262 		pf_src_tree_remove_state(s);
6263 		s->timeout = PFTM_UNLINKED;
6264 		pf_free_state(s);
6265 	}
6266 
6267 	return (PF_DROP);
6268 }
6269 
6270 int
6271 pf_translate(struct pf_pdesc *pd, struct pf_addr *saddr, u_int16_t sport,
6272     struct pf_addr *daddr, u_int16_t dport, u_int16_t virtual_type,
6273     int icmp_dir)
6274 {
6275 	/*
6276 	 * pf_translate() implements OpenBSD's "new" NAT approach.
6277 	 * We don't follow it, because it involves a breaking syntax change
6278 	 * (removing nat/rdr rules and moving them into regular pf rules).
6279 	 * It also moves NAT processing to be done after normal rule evaluation,
6280 	 * whereas in FreeBSD that's done before rules processing.
6281 	 *
6282 	 * We adopt the function only for nat64, and keep other NAT processing
6283 	 * before rules processing.
6284 	 */
6285 	int	rewrite = 0;
6286 	int	afto = pd->af != pd->naf;
6287 
6288 	MPASS(afto);
6289 
6290 	switch (pd->proto) {
6291 	case IPPROTO_TCP:
6292 	case IPPROTO_UDP:
6293 	case IPPROTO_SCTP:
6294 		if (afto || *pd->sport != sport) {
6295 			pf_change_ap(pd, pd->src, pd->sport,
6296 			    saddr, sport);
6297 			rewrite = 1;
6298 		}
6299 		if (afto || *pd->dport != dport) {
6300 			pf_change_ap(pd, pd->dst, pd->dport,
6301 			    daddr, dport);
6302 			rewrite = 1;
6303 		}
6304 		break;
6305 
6306 #ifdef INET
6307 	case IPPROTO_ICMP:
6308 		/* pf_translate() is also used when logging invalid packets */
6309 		if (pd->af != AF_INET)
6310 			return (0);
6311 
6312 		if (afto) {
6313 			if (pf_translate_icmp_af(AF_INET6, &pd->hdr.icmp))
6314 				return (-1);
6315 			pd->proto = IPPROTO_ICMPV6;
6316 			rewrite = 1;
6317 		}
6318 		if (virtual_type == htons(ICMP_ECHO)) {
6319 			u_int16_t icmpid = (icmp_dir == PF_IN) ? sport : dport;
6320 
6321 			if (icmpid != pd->hdr.icmp.icmp_id) {
6322 				pd->hdr.icmp.icmp_cksum = pf_cksum_fixup(
6323 				    pd->hdr.icmp.icmp_cksum,
6324 				    pd->hdr.icmp.icmp_id, icmpid, 0);
6325 				pd->hdr.icmp.icmp_id = icmpid;
6326 				/* XXX TODO copyback. */
6327 				rewrite = 1;
6328 			}
6329 		}
6330 		break;
6331 #endif /* INET */
6332 
6333 #ifdef INET6
6334 	case IPPROTO_ICMPV6:
6335 		/* pf_translate() is also used when logging invalid packets */
6336 		if (pd->af != AF_INET6)
6337 			return (0);
6338 
6339 		if (afto) {
6340 			/* ip_sum will be recalculated in pf_translate_af */
6341 			if (pf_translate_icmp_af(AF_INET, &pd->hdr.icmp6))
6342 				return (0);
6343 			pd->proto = IPPROTO_ICMP;
6344 			rewrite = 1;
6345 		}
6346 		break;
6347 #endif /* INET6 */
6348 
6349 	default:
6350 		break;
6351 	}
6352 
6353 	return (rewrite);
6354 }
6355 
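/*
 * Rewrite the packet according to the old style (pre-OpenBSD 4.7)
 * translation state key ctx->nk: change addresses, TCP/UDP/SCTP ports or
 * the ICMP id in place and update pd's translated address/port fields.
 * The return value is added to ctx->rewrite by the callers so that the
 * modified protocol header is copied back into the mbuf.
 */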
6356 int
6357 pf_translate_compat(struct pf_test_ctx *ctx)
6358 {
6359 	struct pf_pdesc		*pd = ctx->pd;
6360 	struct pf_state_key	*nk = ctx->nk;
6361 	struct tcphdr		*th = &pd->hdr.tcp;
6362 	int 			 rewrite = 0;
6363 
6364 	KASSERT(ctx->sk != NULL, ("%s: null sk", __func__));
6365 	KASSERT(ctx->nk != NULL, ("%s: null nk", __func__));
6366 
6367 	switch (pd->proto) {
6368 	case IPPROTO_TCP:
6369 		if (PF_ANEQ(&pd->nsaddr, &nk->addr[pd->sidx], pd->af) ||
6370 		    nk->port[pd->sidx] != pd->nsport) {
6371 			pf_change_ap(pd, pd->src, &th->th_sport,
6372 			    &nk->addr[pd->sidx], nk->port[pd->sidx]);
6373 			pd->sport = &th->th_sport;
6374 			pd->nsport = th->th_sport;
6375 			pf_addrcpy(&pd->nsaddr, pd->src, pd->af);
6376 		}
6377 
6378 		if (PF_ANEQ(&pd->ndaddr, &nk->addr[pd->didx], pd->af) ||
6379 		    nk->port[pd->didx] != pd->ndport) {
6380 			pf_change_ap(pd, pd->dst, &th->th_dport,
6381 			    &nk->addr[pd->didx], nk->port[pd->didx]);
6382 			pd->dport = &th->th_dport;
6383 			pd->ndport = th->th_dport;
6384 			pf_addrcpy(&pd->ndaddr, pd->dst, pd->af);
6385 		}
6386 		rewrite++;
6387 		break;
6388 	case IPPROTO_UDP:
6389 		if (PF_ANEQ(&pd->nsaddr, &nk->addr[pd->sidx], pd->af) ||
6390 		    nk->port[pd->sidx] != pd->nsport) {
6391 			pf_change_ap(pd, pd->src,
6392 			    &pd->hdr.udp.uh_sport,
6393 			    &nk->addr[pd->sidx],
6394 			    nk->port[pd->sidx]);
6395 			pd->sport = &pd->hdr.udp.uh_sport;
6396 			pd->nsport = pd->hdr.udp.uh_sport;
6397 			pf_addrcpy(&pd->nsaddr, pd->src, pd->af);
6398 		}
6399 
6400 		if (PF_ANEQ(&pd->ndaddr, &nk->addr[pd->didx], pd->af) ||
6401 		    nk->port[pd->didx] != pd->ndport) {
6402 			pf_change_ap(pd, pd->dst,
6403 			    &pd->hdr.udp.uh_dport,
6404 			    &nk->addr[pd->didx],
6405 			    nk->port[pd->didx]);
6406 			pd->dport = &pd->hdr.udp.uh_dport;
6407 			pd->ndport = pd->hdr.udp.uh_dport;
6408 			pf_addrcpy(&pd->ndaddr, pd->dst, pd->af);
6409 		}
6410 		rewrite++;
6411 		break;
6412 	case IPPROTO_SCTP: {
6413 		if (PF_ANEQ(&pd->nsaddr, &nk->addr[pd->sidx], pd->af) ||
6414 		    nk->port[pd->sidx] != pd->nsport) {
6415 			pf_change_ap(pd, pd->src,
6416 			    &pd->hdr.sctp.src_port,
6417 			    &nk->addr[pd->sidx],
6418 			    nk->port[pd->sidx]);
6419 			pd->sport = &pd->hdr.sctp.src_port;
6420 			pd->nsport = pd->hdr.sctp.src_port;
6421 			pf_addrcpy(&pd->nsaddr, pd->src, pd->af);
6422 		}
6423 		if (PF_ANEQ(&pd->ndaddr, &nk->addr[pd->didx], pd->af) ||
6424 		    nk->port[pd->didx] != pd->ndport) {
6425 			pf_change_ap(pd, pd->dst,
6426 			    &pd->hdr.sctp.dest_port,
6427 			    &nk->addr[pd->didx],
6428 			    nk->port[pd->didx]);
6429 			pd->dport = &pd->hdr.sctp.dest_port;
6430 			pd->ndport = pd->hdr.sctp.dest_port;
6431 			pf_addrcpy(&pd->ndaddr, pd->dst, pd->af);
6432 		}
6433 		break;
6434 	}
6435 #ifdef INET
6436 	case IPPROTO_ICMP:
6437 		if (PF_ANEQ(&pd->nsaddr, &nk->addr[pd->sidx], AF_INET)) {
6438 			pf_change_a(&pd->src->v4.s_addr, pd->ip_sum,
6439 			    nk->addr[pd->sidx].v4.s_addr, 0);
6440 			pf_addrcpy(&pd->nsaddr, pd->src, pd->af);
6441 		}
6442 
6443 		if (PF_ANEQ(&pd->ndaddr, &nk->addr[pd->didx], AF_INET)) {
6444 			pf_change_a(&pd->dst->v4.s_addr, pd->ip_sum,
6445 			    nk->addr[pd->didx].v4.s_addr, 0);
6446 			pf_addrcpy(&pd->ndaddr, pd->dst, pd->af);
6447 		}
6448 
6449 		if (ctx->virtual_type == htons(ICMP_ECHO) &&
6450 		    nk->port[pd->sidx] != pd->hdr.icmp.icmp_id) {
6451 			pd->hdr.icmp.icmp_cksum = pf_cksum_fixup(
6452 			    pd->hdr.icmp.icmp_cksum, pd->nsport,
6453 			    nk->port[pd->sidx], 0);
6454 			pd->hdr.icmp.icmp_id = nk->port[pd->sidx];
6455 			pd->sport = &pd->hdr.icmp.icmp_id;
6456 		}
6457 		m_copyback(pd->m, pd->off, ICMP_MINLEN, (caddr_t)&pd->hdr.icmp);
6458 		break;
6459 #endif /* INET */
6460 #ifdef INET6
6461 	case IPPROTO_ICMPV6:
6462 		if (PF_ANEQ(&pd->nsaddr, &nk->addr[pd->sidx], AF_INET6)) {
6463 			pf_change_a6(pd->src, &pd->hdr.icmp6.icmp6_cksum,
6464 			    &nk->addr[pd->sidx], 0);
6465 			pf_addrcpy(&pd->nsaddr, pd->src, pd->af);
6466 		}
6467 
6468 		if (PF_ANEQ(&pd->ndaddr, &nk->addr[pd->didx], AF_INET6)) {
6469 			pf_change_a6(pd->dst, &pd->hdr.icmp6.icmp6_cksum,
6470 			    &nk->addr[pd->didx], 0);
6471 			pf_addrcpy(&pd->ndaddr, pd->dst, pd->af);
6472 		}
6473 		rewrite++;
6474 		break;
6475 #endif /* INET6 */
6476 	default:
6477 		switch (pd->af) {
6478 #ifdef INET
6479 		case AF_INET:
6480 			if (PF_ANEQ(&pd->nsaddr,
6481 				&nk->addr[pd->sidx], AF_INET)) {
6482 				pf_change_a(&pd->src->v4.s_addr,
6483 				    pd->ip_sum,
6484 				    nk->addr[pd->sidx].v4.s_addr, 0);
6485 				pf_addrcpy(&pd->nsaddr, pd->src, pd->af);
6486 			}
6487 
6488 			if (PF_ANEQ(&pd->ndaddr,
6489 				&nk->addr[pd->didx], AF_INET)) {
6490 				pf_change_a(&pd->dst->v4.s_addr,
6491 				    pd->ip_sum,
6492 				    nk->addr[pd->didx].v4.s_addr, 0);
6493 				pf_addrcpy(&pd->ndaddr, pd->dst, pd->af);
6494 			}
6495 			break;
6496 #endif /* INET */
6497 #ifdef INET6
6498 		case AF_INET6:
6499 			if (PF_ANEQ(&pd->nsaddr,
6500 				&nk->addr[pd->sidx], AF_INET6)) {
6501 				pf_addrcpy(&pd->nsaddr, &nk->addr[pd->sidx],
6502 				    pd->af);
6503 				pf_addrcpy(pd->src, &nk->addr[pd->sidx], pd->af);
6504 			}
6505 
6506 			if (PF_ANEQ(&pd->ndaddr,
6507 				&nk->addr[pd->didx], AF_INET6)) {
6508 				pf_addrcpy(&pd->ndaddr, &nk->addr[pd->didx],
6509 				    pd->af);
6510 				pf_addrcpy(pd->dst, &nk->addr[pd->didx],
6511 				    pd->af);
6512 			}
6513 			break;
6514 #endif /* INET6 */
6515 		}
6516 		break;
6517 	}
6518 	return (rewrite);
6519 }
6520 
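/*
 * Full TCP state tracking: validate sequence and ack numbers against the
 * per-peer windows (see the van Rooij reference below), apply deferred
 * sequence number modulation and stateful normalization, advance the peer
 * states and the state timeout, and reject packets outside the allowed
 * windows with PFRES_BADSTATE.
 */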
6521 static int
6522 pf_tcp_track_full(struct pf_kstate *state, struct pf_pdesc *pd,
6523     u_short *reason, int *copyback, struct pf_state_peer *src,
6524     struct pf_state_peer *dst, u_int8_t psrc, u_int8_t pdst)
6525 {
6526 	struct tcphdr		*th = &pd->hdr.tcp;
6527 	u_int16_t		 win = ntohs(th->th_win);
6528 	u_int32_t		 ack, end, data_end, seq, orig_seq;
6529 	u_int8_t		 sws, dws;
6530 	int			 ackskew;
6531 
6532 	if (src->wscale && dst->wscale && !(tcp_get_flags(th) & TH_SYN)) {
6533 		sws = src->wscale & PF_WSCALE_MASK;
6534 		dws = dst->wscale & PF_WSCALE_MASK;
6535 	} else
6536 		sws = dws = 0;
6537 
6538 	/*
6539 	 * Sequence tracking algorithm from Guido van Rooij's paper:
6540 	 *   http://www.madison-gurkha.com/publications/tcp_filtering/
6541 	 *	tcp_filtering.ps
6542 	 */
6543 
6544 	orig_seq = seq = ntohl(th->th_seq);
6545 	if (src->seqlo == 0) {
6546 		/* First packet from this end. Set its state */
6547 
6548 		if ((state->state_flags & PFSTATE_SCRUB_TCP || dst->scrub) &&
6549 		    src->scrub == NULL) {
6550 			if (pf_normalize_tcp_init(pd, th, src)) {
6551 				REASON_SET(reason, PFRES_MEMORY);
6552 				return (PF_DROP);
6553 			}
6554 		}
6555 
6556 		/* Deferred generation of sequence number modulator */
6557 		if (dst->seqdiff && !src->seqdiff) {
6558 			/* use random iss for the TCP server */
6559 			while ((src->seqdiff = arc4random() - seq) == 0)
6560 				;
6561 			ack = ntohl(th->th_ack) - dst->seqdiff;
6562 			pf_change_proto_a(pd->m, &th->th_seq, &th->th_sum, htonl(seq +
6563 			    src->seqdiff), 0);
6564 			pf_change_proto_a(pd->m, &th->th_ack, &th->th_sum, htonl(ack), 0);
6565 			*copyback = 1;
6566 		} else {
6567 			ack = ntohl(th->th_ack);
6568 		}
6569 
6570 		end = seq + pd->p_len;
6571 		if (tcp_get_flags(th) & TH_SYN) {
6572 			end++;
6573 			if (dst->wscale & PF_WSCALE_FLAG) {
6574 				src->wscale = pf_get_wscale(pd);
6575 				if (src->wscale & PF_WSCALE_FLAG) {
6576 					/* Remove scale factor from initial
6577 					 * window */
6578 					sws = src->wscale & PF_WSCALE_MASK;
6579 					win = ((u_int32_t)win + (1 << sws) - 1)
6580 					    >> sws;
6581 					dws = dst->wscale & PF_WSCALE_MASK;
6582 				} else {
6583 					/* fixup other window */
6584 					dst->max_win = MIN(TCP_MAXWIN,
6585 					    (u_int32_t)dst->max_win <<
6586 					    (dst->wscale & PF_WSCALE_MASK));
6587 					/* in case of a retrans SYN|ACK */
6588 					dst->wscale = 0;
6589 				}
6590 			}
6591 		}
6592 		data_end = end;
6593 		if (tcp_get_flags(th) & TH_FIN)
6594 			end++;
6595 
6596 		src->seqlo = seq;
6597 		if (src->state < TCPS_SYN_SENT)
6598 			pf_set_protostate(state, psrc, TCPS_SYN_SENT);
6599 
6600 		/*
6601 		 * May need to slide the window (seqhi may have been set by
6602 		 * the crappy stack check or if we picked up the connection
6603 		 * after establishment)
6604 		 */
6605 		if (src->seqhi == 1 ||
6606 		    SEQ_GEQ(end + MAX(1, dst->max_win << dws), src->seqhi))
6607 			src->seqhi = end + MAX(1, dst->max_win << dws);
6608 		if (win > src->max_win)
6609 			src->max_win = win;
6610 
6611 	} else {
6612 		ack = ntohl(th->th_ack) - dst->seqdiff;
6613 		if (src->seqdiff) {
6614 			/* Modulate sequence numbers */
6615 			pf_change_proto_a(pd->m, &th->th_seq, &th->th_sum, htonl(seq +
6616 			    src->seqdiff), 0);
6617 			pf_change_proto_a(pd->m, &th->th_ack, &th->th_sum, htonl(ack), 0);
6618 			*copyback = 1;
6619 		}
6620 		end = seq + pd->p_len;
6621 		if (tcp_get_flags(th) & TH_SYN)
6622 			end++;
6623 		data_end = end;
6624 		if (tcp_get_flags(th) & TH_FIN)
6625 			end++;
6626 	}
6627 
6628 	if ((tcp_get_flags(th) & TH_ACK) == 0) {
6629 		/* Let it pass through the ack skew check */
6630 		ack = dst->seqlo;
6631 	} else if ((ack == 0 &&
6632 	    (tcp_get_flags(th) & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) ||
6633 	    /* broken tcp stacks do not set ack */
6634 	    (dst->state < TCPS_SYN_SENT)) {
6635 		/*
6636 		 * Many stacks (ours included) will set the ACK number in an
6637 		 * FIN|ACK if the SYN times out -- no sequence to ACK.
6638 		 */
6639 		ack = dst->seqlo;
6640 	}
6641 
6642 	if (seq == end) {
6643 		/* Ease sequencing restrictions on no data packets */
6644 		seq = src->seqlo;
6645 		data_end = end = seq;
6646 	}
6647 
6648 	ackskew = dst->seqlo - ack;
6649 
6650 	/*
6651 	 * Need to demodulate the sequence numbers in any TCP SACK options
6652 	 * (Selective ACK). We could optionally validate the SACK values
6653 	 * against the current ACK window, either forwards or backwards, but
6654 	 * I'm not confident that SACK has been implemented properly
6655 	 * everywhere. It wouldn't surprise me if several stacks accidentally
6656 	 * SACK too far backwards of previously ACKed data. There really aren't
6657 	 * any security implications of bad SACKing unless the target stack
6658 	 * doesn't validate the option length correctly. Someone trying to
6659 	 * spoof into a TCP connection won't bother blindly sending SACK
6660 	 * options anyway.
6661 	 */
6662 	if (dst->seqdiff && (th->th_off << 2) > sizeof(struct tcphdr)) {
6663 		if (pf_modulate_sack(pd, th, dst))
6664 			*copyback = 1;
6665 	}
6666 
6667 #define	MAXACKWINDOW (0xffff + 1500)	/* 1500 is an arbitrary fudge factor */
6668 	if (SEQ_GEQ(src->seqhi, data_end) &&
6669 	    /* Last octet inside other's window space */
6670 	    SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) &&
6671 	    /* Retrans: not more than one window back */
6672 	    (ackskew >= -MAXACKWINDOW) &&
6673 	    /* Acking not more than one reassembled fragment backwards */
6674 	    (ackskew <= (MAXACKWINDOW << sws)) &&
6675 	    /* Acking not more than one window forward */
6676 	    ((tcp_get_flags(th) & TH_RST) == 0 || orig_seq == src->seqlo ||
6677 	    (orig_seq == src->seqlo + 1) || (orig_seq + 1 == src->seqlo))) {
6678 	    /* Require an exact/+1 sequence match on resets when possible */
6679 
6680 		if (dst->scrub || src->scrub) {
6681 			if (pf_normalize_tcp_stateful(pd, reason, th,
6682 			    state, src, dst, copyback))
6683 				return (PF_DROP);
6684 		}
6685 
6686 		/* update max window */
6687 		if (src->max_win < win)
6688 			src->max_win = win;
6689 		/* synchronize sequencing */
6690 		if (SEQ_GT(end, src->seqlo))
6691 			src->seqlo = end;
6692 		/* slide the window of what the other end can send */
6693 		if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
6694 			dst->seqhi = ack + MAX((win << sws), 1);
6695 
6696 		/* update states */
6697 		if (tcp_get_flags(th) & TH_SYN)
6698 			if (src->state < TCPS_SYN_SENT)
6699 				pf_set_protostate(state, psrc, TCPS_SYN_SENT);
6700 		if (tcp_get_flags(th) & TH_FIN)
6701 			if (src->state < TCPS_CLOSING)
6702 				pf_set_protostate(state, psrc, TCPS_CLOSING);
6703 		if (tcp_get_flags(th) & TH_ACK) {
6704 			if (dst->state == TCPS_SYN_SENT) {
6705 				pf_set_protostate(state, pdst,
6706 				    TCPS_ESTABLISHED);
6707 				if (src->state == TCPS_ESTABLISHED &&
6708 				    state->sns[PF_SN_LIMIT] != NULL &&
6709 				    pf_src_connlimit(state)) {
6710 					REASON_SET(reason, PFRES_SRCLIMIT);
6711 					return (PF_DROP);
6712 				}
6713 			} else if (dst->state == TCPS_CLOSING)
6714 				pf_set_protostate(state, pdst,
6715 				    TCPS_FIN_WAIT_2);
6716 		}
6717 		if (tcp_get_flags(th) & TH_RST)
6718 			pf_set_protostate(state, PF_PEER_BOTH, TCPS_TIME_WAIT);
6719 
6720 		/* update expire time */
6721 		state->expire = pf_get_uptime();
6722 		if (src->state >= TCPS_FIN_WAIT_2 &&
6723 		    dst->state >= TCPS_FIN_WAIT_2)
6724 			state->timeout = PFTM_TCP_CLOSED;
6725 		else if (src->state >= TCPS_CLOSING &&
6726 		    dst->state >= TCPS_CLOSING)
6727 			state->timeout = PFTM_TCP_FIN_WAIT;
6728 		else if (src->state < TCPS_ESTABLISHED ||
6729 		    dst->state < TCPS_ESTABLISHED)
6730 			state->timeout = PFTM_TCP_OPENING;
6731 		else if (src->state >= TCPS_CLOSING ||
6732 		    dst->state >= TCPS_CLOSING)
6733 			state->timeout = PFTM_TCP_CLOSING;
6734 		else
6735 			state->timeout = PFTM_TCP_ESTABLISHED;
6736 
6737 		/* Fall through to PASS packet */
6738 
6739 	} else if ((dst->state < TCPS_SYN_SENT ||
6740 		dst->state >= TCPS_FIN_WAIT_2 ||
6741 		src->state >= TCPS_FIN_WAIT_2) &&
6742 	    SEQ_GEQ(src->seqhi + MAXACKWINDOW, data_end) &&
6743 	    /* Within a window forward of the originating packet */
6744 	    SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW)) {
6745 	    /* Within a window backward of the originating packet */
6746 
6747 		/*
6748 		 * This currently handles three situations:
6749 		 *  1) Stupid stacks will shotgun SYNs before their peer
6750 		 *     replies.
6751 		 *  2) When PF catches an already established stream (the
6752 		 *     firewall rebooted, the state table was flushed, routes
6753 		 *     changed...)
6754 		 *  3) Packets get funky immediately after the connection
6755 		 *     closes (this should catch Solaris spurious ACK|FINs
6756 		 *     that web servers like to spew after a close)
6757 		 *
6758 		 * This must be a little more careful than the above code
6759 		 * since packet floods will also be caught here. We don't
6760 		 * update the TTL here to mitigate the damage of a packet
6761 		 * flood and so the same code can handle awkward establishment
6762 		 * and a loosened connection close.
6763 		 * In the establishment case, a correct peer response will
6764 		 * validate the connection, go through the normal state code
6765 		 * and keep updating the state TTL.
6766 		 */
6767 
6768 		if (V_pf_status.debug >= PF_DEBUG_MISC) {
6769 			printf("pf: loose state match: ");
6770 			pf_print_state(state);
6771 			pf_print_flags(tcp_get_flags(th));
6772 			printf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
6773 			    "pkts=%llu:%llu dir=%s,%s\n", seq, orig_seq, ack,
6774 			    pd->p_len, ackskew, (unsigned long long)state->packets[0],
6775 			    (unsigned long long)state->packets[1],
6776 			    pd->dir == PF_IN ? "in" : "out",
6777 			    pd->dir == state->direction ? "fwd" : "rev");
6778 		}
6779 
6780 		if (dst->scrub || src->scrub) {
6781 			if (pf_normalize_tcp_stateful(pd, reason, th,
6782 			    state, src, dst, copyback))
6783 				return (PF_DROP);
6784 		}
6785 
6786 		/* update max window */
6787 		if (src->max_win < win)
6788 			src->max_win = win;
6789 		/* synchronize sequencing */
6790 		if (SEQ_GT(end, src->seqlo))
6791 			src->seqlo = end;
6792 		/* slide the window of what the other end can send */
6793 		if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
6794 			dst->seqhi = ack + MAX((win << sws), 1);
6795 
6796 		/*
6797 		 * Cannot set dst->seqhi here since this could be a shotgunned
6798 		 * SYN and not an already established connection.
6799 		 */
6800 
6801 		if (tcp_get_flags(th) & TH_FIN)
6802 			if (src->state < TCPS_CLOSING)
6803 				pf_set_protostate(state, psrc, TCPS_CLOSING);
6804 		if (tcp_get_flags(th) & TH_RST)
6805 			pf_set_protostate(state, PF_PEER_BOTH, TCPS_TIME_WAIT);
6806 
6807 		/* Fall through to PASS packet */
6808 
6809 	} else {
6810 		if (state->dst.state == TCPS_SYN_SENT &&
6811 		    state->src.state == TCPS_SYN_SENT) {
6812 			/* Send RST for state mismatches during handshake */
6813 			if (!(tcp_get_flags(th) & TH_RST))
6814 				pf_send_tcp(state->rule, pd->af,
6815 				    pd->dst, pd->src, th->th_dport,
6816 				    th->th_sport, ntohl(th->th_ack), 0,
6817 				    TH_RST, 0, 0,
6818 				    state->rule->return_ttl, M_SKIP_FIREWALL,
6819 				    0, 0, state->act.rtableid);
6820 			src->seqlo = 0;
6821 			src->seqhi = 1;
6822 			src->max_win = 1;
6823 		} else if (V_pf_status.debug >= PF_DEBUG_MISC) {
6824 			printf("pf: BAD state: ");
6825 			pf_print_state(state);
6826 			pf_print_flags(tcp_get_flags(th));
6827 			printf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
6828 			    "pkts=%llu:%llu dir=%s,%s\n",
6829 			    seq, orig_seq, ack, pd->p_len, ackskew,
6830 			    (unsigned long long)state->packets[0],
6831 			    (unsigned long long)state->packets[1],
6832 			    pd->dir == PF_IN ? "in" : "out",
6833 			    pd->dir == state->direction ? "fwd" : "rev");
6834 			printf("pf: State failure on: %c %c %c %c | %c %c\n",
6835 			    SEQ_GEQ(src->seqhi, data_end) ? ' ' : '1',
6836 			    SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) ?
6837 			    ' ': '2',
6838 			    (ackskew >= -MAXACKWINDOW) ? ' ' : '3',
6839 			    (ackskew <= (MAXACKWINDOW << sws)) ? ' ' : '4',
6840 			    SEQ_GEQ(src->seqhi + MAXACKWINDOW, data_end) ?' ' :'5',
6841 			    SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW) ?' ' :'6');
6842 		}
6843 		REASON_SET(reason, PFRES_BADSTATE);
6844 		return (PF_DROP);
6845 	}
6846 
6847 	return (PF_PASS);
6848 }
6849 
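/*
 * Sloppy TCP state tracking: advance the peer states and the state timeout
 * based on the observed TCP flags only, without sequence number validation.
 * Also handles the cases where only one half of the connection is visible
 * to the firewall.
 */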
6850 static int
6851 pf_tcp_track_sloppy(struct pf_kstate *state, struct pf_pdesc *pd,
6852     u_short *reason, struct pf_state_peer *src, struct pf_state_peer *dst,
6853     u_int8_t psrc, u_int8_t pdst)
6854 {
6855 	struct tcphdr		*th = &pd->hdr.tcp;
6856 
6857 	if (tcp_get_flags(th) & TH_SYN)
6858 		if (src->state < TCPS_SYN_SENT)
6859 			pf_set_protostate(state, psrc, TCPS_SYN_SENT);
6860 	if (tcp_get_flags(th) & TH_FIN)
6861 		if (src->state < TCPS_CLOSING)
6862 			pf_set_protostate(state, psrc, TCPS_CLOSING);
6863 	if (tcp_get_flags(th) & TH_ACK) {
6864 		if (dst->state == TCPS_SYN_SENT) {
6865 			pf_set_protostate(state, pdst, TCPS_ESTABLISHED);
6866 			if (src->state == TCPS_ESTABLISHED &&
6867 			    state->sns[PF_SN_LIMIT] != NULL &&
6868 			    pf_src_connlimit(state)) {
6869 				REASON_SET(reason, PFRES_SRCLIMIT);
6870 				return (PF_DROP);
6871 			}
6872 		} else if (dst->state == TCPS_CLOSING) {
6873 			pf_set_protostate(state, pdst, TCPS_FIN_WAIT_2);
6874 		} else if (src->state == TCPS_SYN_SENT &&
6875 		    dst->state < TCPS_SYN_SENT) {
6876 			/*
6877 			 * Handle a special sloppy case where we only see one
6878 			 * half of the connection. If there is an ACK after
6879 			 * the initial SYN without ever seeing a packet from
6880 			 * the destination, set the connection to established.
6881 			 */
6882 			pf_set_protostate(state, PF_PEER_BOTH,
6883 			    TCPS_ESTABLISHED);
6884 			dst->state = src->state = TCPS_ESTABLISHED;
6885 			if (state->sns[PF_SN_LIMIT] != NULL &&
6886 			    pf_src_connlimit(state)) {
6887 				REASON_SET(reason, PFRES_SRCLIMIT);
6888 				return (PF_DROP);
6889 			}
6890 		} else if (src->state == TCPS_CLOSING &&
6891 		    dst->state == TCPS_ESTABLISHED &&
6892 		    dst->seqlo == 0) {
6893 			/*
6894 			 * Handle the closing of half connections where we
6895 			 * don't see the full bidirectional FIN/ACK+ACK
6896 			 * handshake.
6897 			 */
6898 			pf_set_protostate(state, pdst, TCPS_CLOSING);
6899 		}
6900 	}
6901 	if (tcp_get_flags(th) & TH_RST)
6902 		pf_set_protostate(state, PF_PEER_BOTH, TCPS_TIME_WAIT);
6903 
6904 	/* update expire time */
6905 	state->expire = pf_get_uptime();
6906 	if (src->state >= TCPS_FIN_WAIT_2 &&
6907 	    dst->state >= TCPS_FIN_WAIT_2)
6908 		state->timeout = PFTM_TCP_CLOSED;
6909 	else if (src->state >= TCPS_CLOSING &&
6910 	    dst->state >= TCPS_CLOSING)
6911 		state->timeout = PFTM_TCP_FIN_WAIT;
6912 	else if (src->state < TCPS_ESTABLISHED ||
6913 	    dst->state < TCPS_ESTABLISHED)
6914 		state->timeout = PFTM_TCP_OPENING;
6915 	else if (src->state >= TCPS_CLOSING ||
6916 	    dst->state >= TCPS_CLOSING)
6917 		state->timeout = PFTM_TCP_CLOSING;
6918 	else
6919 		state->timeout = PFTM_TCP_ESTABLISHED;
6920 
6921 	return (PF_PASS);
6922 }
6923 
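/*
 * Synproxy handling: complete the three-way handshake with the client on
 * pf's behalf (PF_TCPS_PROXY_SRC), then open the connection to the server
 * and splice the two handshakes together by installing sequence number
 * offsets (PF_TCPS_PROXY_DST).  Returns PF_SYNPROXY_DROP for packets
 * consumed by the proxy, PF_DROP on protocol violations and PF_PASS once
 * the connection is established.
 */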
6924 static int
6925 pf_synproxy(struct pf_pdesc *pd, struct pf_kstate *state, u_short *reason)
6926 {
6927 	struct pf_state_key	*sk = state->key[pd->didx];
6928 	struct tcphdr		*th = &pd->hdr.tcp;
6929 
6930 	if (state->src.state == PF_TCPS_PROXY_SRC) {
6931 		if (pd->dir != state->direction) {
6932 			REASON_SET(reason, PFRES_SYNPROXY);
6933 			return (PF_SYNPROXY_DROP);
6934 		}
6935 		if (tcp_get_flags(th) & TH_SYN) {
6936 			if (ntohl(th->th_seq) != state->src.seqlo) {
6937 				REASON_SET(reason, PFRES_SYNPROXY);
6938 				return (PF_DROP);
6939 			}
6940 			pf_send_tcp(state->rule, pd->af, pd->dst,
6941 			    pd->src, th->th_dport, th->th_sport,
6942 			    state->src.seqhi, ntohl(th->th_seq) + 1,
6943 			    TH_SYN|TH_ACK, 0, state->src.mss, 0,
6944 			    M_SKIP_FIREWALL, 0, 0, state->act.rtableid);
6945 			REASON_SET(reason, PFRES_SYNPROXY);
6946 			return (PF_SYNPROXY_DROP);
6947 		} else if ((tcp_get_flags(th) & (TH_ACK|TH_RST|TH_FIN)) != TH_ACK ||
6948 		    (ntohl(th->th_ack) != state->src.seqhi + 1) ||
6949 		    (ntohl(th->th_seq) != state->src.seqlo + 1)) {
6950 			REASON_SET(reason, PFRES_SYNPROXY);
6951 			return (PF_DROP);
6952 		} else if (state->sns[PF_SN_LIMIT] != NULL &&
6953 		    pf_src_connlimit(state)) {
6954 			REASON_SET(reason, PFRES_SRCLIMIT);
6955 			return (PF_DROP);
6956 		} else
6957 			pf_set_protostate(state, PF_PEER_SRC,
6958 			    PF_TCPS_PROXY_DST);
6959 	}
6960 	if (state->src.state == PF_TCPS_PROXY_DST) {
6961 		if (pd->dir == state->direction) {
6962 			if (((tcp_get_flags(th) & (TH_SYN|TH_ACK)) != TH_ACK) ||
6963 			    (ntohl(th->th_ack) != state->src.seqhi + 1) ||
6964 			    (ntohl(th->th_seq) != state->src.seqlo + 1)) {
6965 				REASON_SET(reason, PFRES_SYNPROXY);
6966 				return (PF_DROP);
6967 			}
6968 			state->src.max_win = MAX(ntohs(th->th_win), 1);
6969 			if (state->dst.seqhi == 1)
6970 				state->dst.seqhi = arc4random();
6971 			pf_send_tcp(state->rule, pd->af,
6972 			    &sk->addr[pd->sidx], &sk->addr[pd->didx],
6973 			    sk->port[pd->sidx], sk->port[pd->didx],
6974 			    state->dst.seqhi, 0, TH_SYN, 0,
6975 			    state->src.mss, 0,
6976 			    state->orig_kif->pfik_ifp == V_loif ? M_LOOP : 0,
6977 			    state->tag, 0, state->act.rtableid);
6978 			REASON_SET(reason, PFRES_SYNPROXY);
6979 			return (PF_SYNPROXY_DROP);
6980 		} else if (((tcp_get_flags(th) & (TH_SYN|TH_ACK)) !=
6981 		    (TH_SYN|TH_ACK)) ||
6982 		    (ntohl(th->th_ack) != state->dst.seqhi + 1)) {
6983 			REASON_SET(reason, PFRES_SYNPROXY);
6984 			return (PF_DROP);
6985 		} else {
6986 			state->dst.max_win = MAX(ntohs(th->th_win), 1);
6987 			state->dst.seqlo = ntohl(th->th_seq);
6988 			pf_send_tcp(state->rule, pd->af, pd->dst,
6989 			    pd->src, th->th_dport, th->th_sport,
6990 			    ntohl(th->th_ack), ntohl(th->th_seq) + 1,
6991 			    TH_ACK, state->src.max_win, 0, 0, 0,
6992 			    state->tag, 0, state->act.rtableid);
6993 			pf_send_tcp(state->rule, pd->af,
6994 			    &sk->addr[pd->sidx], &sk->addr[pd->didx],
6995 			    sk->port[pd->sidx], sk->port[pd->didx],
6996 			    state->src.seqhi + 1, state->src.seqlo + 1,
6997 			    TH_ACK, state->dst.max_win, 0, 0,
6998 			    M_SKIP_FIREWALL, 0, 0, state->act.rtableid);
6999 			state->src.seqdiff = state->dst.seqhi -
7000 			    state->src.seqlo;
7001 			state->dst.seqdiff = state->src.seqhi -
7002 			    state->dst.seqlo;
7003 			state->src.seqhi = state->src.seqlo +
7004 			    state->dst.max_win;
7005 			state->dst.seqhi = state->dst.seqlo +
7006 			    state->src.max_win;
7007 			state->src.wscale = state->dst.wscale = 0;
7008 			pf_set_protostate(state, PF_PEER_BOTH,
7009 			    TCPS_ESTABLISHED);
7010 			REASON_SET(reason, PFRES_SYNPROXY);
7011 			return (PF_SYNPROXY_DROP);
7012 		}
7013 	}
7014 
7015 	return (PF_PASS);
7016 }
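/*
 * Taken together, the two blocks above implement the synproxy
 * handshake: in PF_TCPS_PROXY_SRC pf answers the initiator's SYN
 * itself and validates the final ACK, and in PF_TCPS_PROXY_DST it
 * sends its own SYN towards the real destination, ACKs both sides
 * once the destination's SYN+ACK arrives, and records the offset
 * between the two sequence number spaces in src/dst seqdiff so that
 * the regular TCP tracking can modulate sequence numbers from then on.
 */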
7017 
7018 static int
7019 pf_test_state(struct pf_kstate **state, struct pf_pdesc *pd, u_short *reason)
7020 {
7021 	struct pf_state_key_cmp	 key;
7022 	int			 copyback = 0;
7023 	struct pf_state_peer	*src, *dst;
7024 	uint8_t			 psrc, pdst;
7025 	int			 action;
7026 
7027 	bzero(&key, sizeof(key));
7028 	key.af = pd->af;
7029 	key.proto = pd->virtual_proto;
7030 	pf_addrcpy(&key.addr[pd->sidx], pd->src, key.af);
7031 	pf_addrcpy(&key.addr[pd->didx], pd->dst, key.af);
7032 	key.port[pd->sidx] = pd->osport;
7033 	key.port[pd->didx] = pd->odport;
7034 
7035 	action = pf_find_state(pd, &key, state);
7036 	if (action != PF_MATCH)
7037 		return (action);
7038 
7039 	action = PF_PASS;
7040 	if (pd->dir == (*state)->direction) {
7041 		if (PF_REVERSED_KEY(*state, pd->af)) {
7042 			src = &(*state)->dst;
7043 			dst = &(*state)->src;
7044 			psrc = PF_PEER_DST;
7045 			pdst = PF_PEER_SRC;
7046 		} else {
7047 			src = &(*state)->src;
7048 			dst = &(*state)->dst;
7049 			psrc = PF_PEER_SRC;
7050 			pdst = PF_PEER_DST;
7051 		}
7052 	} else {
7053 		if (PF_REVERSED_KEY(*state, pd->af)) {
7054 			src = &(*state)->src;
7055 			dst = &(*state)->dst;
7056 			psrc = PF_PEER_SRC;
7057 			pdst = PF_PEER_DST;
7058 		} else {
7059 			src = &(*state)->dst;
7060 			dst = &(*state)->src;
7061 			psrc = PF_PEER_DST;
7062 			pdst = PF_PEER_SRC;
7063 		}
7064 	}
7065 
7066 	switch (pd->virtual_proto) {
7067 	case IPPROTO_TCP: {
7068 		struct tcphdr		*th = &pd->hdr.tcp;
7069 
7070 		if ((action = pf_synproxy(pd, *state, reason)) != PF_PASS)
7071 			return (action);
7072 		if (((tcp_get_flags(th) & (TH_SYN | TH_ACK)) == TH_SYN) ||
7073 		    ((th->th_flags & (TH_SYN | TH_ACK | TH_RST)) == TH_ACK &&
7074 		    pf_syncookie_check(pd) && pd->dir == PF_IN)) {
7075 			if ((*state)->src.state >= TCPS_FIN_WAIT_2 &&
7076 			    (*state)->dst.state >= TCPS_FIN_WAIT_2) {
7077 				if (V_pf_status.debug >= PF_DEBUG_MISC) {
7078 					printf("pf: state reuse ");
7079 					pf_print_state(*state);
7080 					pf_print_flags(tcp_get_flags(th));
7081 					printf("\n");
7082 				}
7083 				/* XXX make sure it's the same direction ?? */
7084 				pf_set_protostate(*state, PF_PEER_BOTH, TCPS_CLOSED);
7085 				pf_remove_state(*state);
7086 				*state = NULL;
7087 				return (PF_DROP);
7088 			} else if ((*state)->src.state >= TCPS_ESTABLISHED &&
7089 			    (*state)->dst.state >= TCPS_ESTABLISHED) {
7090 				/*
7091 				 * SYN matches existing state???
7092 				 * Typically happens when sender boots up after
7093 				 * Typically happens when the sender reboots after
7094 				 * a sudden panic. Certain protocols (NFSv3) always
7095 				 * use the same port numbers. A challenge
7096 				 * to get in sync again.
7097 				 */
7098 				pf_send_challenge_ack(pd, *state, src, dst);
7099 				return (PF_DROP);
7100 			}
7101 		}
7102 		if ((*state)->state_flags & PFSTATE_SLOPPY) {
7103 			if (pf_tcp_track_sloppy(*state, pd, reason, src, dst,
7104 			    psrc, pdst) == PF_DROP)
7105 				return (PF_DROP);
7106 		} else {
7107 			int	 ret;
7108 
7109 			ret = pf_tcp_track_full(*state, pd, reason,
7110 			    &copyback, src, dst, psrc, pdst);
7111 			if (ret == PF_DROP)
7112 				return (PF_DROP);
7113 		}
7114 		break;
7115 	}
7116 	case IPPROTO_UDP:
7117 		/* update states */
7118 		if (src->state < PFUDPS_SINGLE)
7119 			pf_set_protostate(*state, psrc, PFUDPS_SINGLE);
7120 		if (dst->state == PFUDPS_SINGLE)
7121 			pf_set_protostate(*state, pdst, PFUDPS_MULTIPLE);
7122 
7123 		/* update expire time */
7124 		(*state)->expire = pf_get_uptime();
7125 		if (src->state == PFUDPS_MULTIPLE && dst->state == PFUDPS_MULTIPLE)
7126 			(*state)->timeout = PFTM_UDP_MULTIPLE;
7127 		else
7128 			(*state)->timeout = PFTM_UDP_SINGLE;
7129 		break;
7130 	case IPPROTO_SCTP:
7131 		if ((src->state >= SCTP_SHUTDOWN_SENT || src->state == SCTP_CLOSED) &&
7132 		    (dst->state >= SCTP_SHUTDOWN_SENT || dst->state == SCTP_CLOSED) &&
7133 		    pd->sctp_flags & PFDESC_SCTP_INIT) {
7134 			pf_set_protostate(*state, PF_PEER_BOTH, SCTP_CLOSED);
7135 			pf_remove_state(*state);
7136 			*state = NULL;
7137 			return (PF_DROP);
7138 		}
7139 
7140 		if (pf_sctp_track(*state, pd, reason) != PF_PASS)
7141 			return (PF_DROP);
7142 
7143 		/* Track state. */
7144 		if (pd->sctp_flags & PFDESC_SCTP_INIT) {
7145 			if (src->state < SCTP_COOKIE_WAIT) {
7146 				pf_set_protostate(*state, psrc, SCTP_COOKIE_WAIT);
7147 				(*state)->timeout = PFTM_SCTP_OPENING;
7148 			}
7149 		}
7150 		if (pd->sctp_flags & PFDESC_SCTP_INIT_ACK) {
7151 			MPASS(dst->scrub != NULL);
7152 			if (dst->scrub->pfss_v_tag == 0)
7153 				dst->scrub->pfss_v_tag = pd->sctp_initiate_tag;
7154 		}
7155 
7156 		/*
7157 		 * Bind to the correct interface if we're if-bound. For multihomed
7158 		 * extra associations we don't know which interface that will be until
7159 		 * here, so we've inserted the state on V_pfi_all. Fix that now.
7160 		 */
7161 		if ((*state)->kif == V_pfi_all &&
7162 		    (*state)->rule->rule_flag & PFRULE_IFBOUND)
7163 			(*state)->kif = pd->kif;
7164 
7165 		if (pd->sctp_flags & (PFDESC_SCTP_COOKIE | PFDESC_SCTP_HEARTBEAT_ACK)) {
7166 			if (src->state < SCTP_ESTABLISHED) {
7167 				pf_set_protostate(*state, psrc, SCTP_ESTABLISHED);
7168 				(*state)->timeout = PFTM_SCTP_ESTABLISHED;
7169 			}
7170 		}
7171 		if (pd->sctp_flags & (PFDESC_SCTP_SHUTDOWN |
7172 		    PFDESC_SCTP_SHUTDOWN_COMPLETE)) {
7173 			if (src->state < SCTP_SHUTDOWN_PENDING) {
7174 				pf_set_protostate(*state, psrc, SCTP_SHUTDOWN_PENDING);
7175 				(*state)->timeout = PFTM_SCTP_CLOSING;
7176 			}
7177 		}
7178 		if (pd->sctp_flags & (PFDESC_SCTP_SHUTDOWN_COMPLETE | PFDESC_SCTP_ABORT)) {
7179 			pf_set_protostate(*state, psrc, SCTP_CLOSED);
7180 			(*state)->timeout = PFTM_SCTP_CLOSED;
7181 		}
7182 
7183 		(*state)->expire = pf_get_uptime();
7184 		break;
7185 	default:
7186 		/* update states */
7187 		if (src->state < PFOTHERS_SINGLE)
7188 			pf_set_protostate(*state, psrc, PFOTHERS_SINGLE);
7189 		if (dst->state == PFOTHERS_SINGLE)
7190 			pf_set_protostate(*state, pdst, PFOTHERS_MULTIPLE);
7191 
7192 		/* update expire time */
7193 		(*state)->expire = pf_get_uptime();
7194 		if (src->state == PFOTHERS_MULTIPLE && dst->state == PFOTHERS_MULTIPLE)
7195 			(*state)->timeout = PFTM_OTHER_MULTIPLE;
7196 		else
7197 			(*state)->timeout = PFTM_OTHER_SINGLE;
7198 		break;
7199 	}
7200 
7201 	/* translate source/destination address, if necessary */
7202 	if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
7203 		struct pf_state_key	*nk;
7204 		int			 afto, sidx, didx;
7205 
7206 		if (PF_REVERSED_KEY(*state, pd->af))
7207 			nk = (*state)->key[pd->sidx];
7208 		else
7209 			nk = (*state)->key[pd->didx];
7210 
7211 		afto = pd->af != nk->af;
7212 
7213 		if (afto && (*state)->direction == PF_IN) {
7214 			sidx = pd->didx;
7215 			didx = pd->sidx;
7216 		} else {
7217 			sidx = pd->sidx;
7218 			didx = pd->didx;
7219 		}
7220 
7221 		if (afto) {
7222 			pf_addrcpy(&pd->nsaddr, &nk->addr[sidx], nk->af);
7223 			pf_addrcpy(&pd->ndaddr, &nk->addr[didx], nk->af);
7224 			pd->naf = nk->af;
7225 			action = PF_AFRT;
7226 		}
7227 
7228 		if (afto || PF_ANEQ(pd->src, &nk->addr[sidx], pd->af) ||
7229 		    nk->port[sidx] != pd->osport)
7230 			pf_change_ap(pd, pd->src, pd->sport,
7231 			    &nk->addr[sidx], nk->port[sidx]);
7232 
7233 		if (afto || PF_ANEQ(pd->dst, &nk->addr[didx], pd->af) ||
7234 		    nk->port[didx] != pd->odport)
7235 			pf_change_ap(pd, pd->dst, pd->dport,
7236 			    &nk->addr[didx], nk->port[didx]);
7237 
7238 		copyback = 1;
7239 	}
7240 
7241 	if (copyback && pd->hdrlen > 0)
7242 		m_copyback(pd->m, pd->off, pd->hdrlen, pd->hdr.any);
7243 
7244 	return (action);
7245 }
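/*
 * The translation block at the end of pf_test_state() applies NAT on
 * the fly: when the wire and stack state keys differ, the packet's
 * addresses and ports are rewritten with pf_change_ap() and the header
 * is copied back into the mbuf.  If the keys also differ in address
 * family, only pd->nsaddr/pd->ndaddr and pd->naf are set here and
 * PF_AFRT is returned so the caller performs the actual
 * af-translation.
 */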
7246 
7247 static int
7248 pf_sctp_track(struct pf_kstate *state, struct pf_pdesc *pd,
7249     u_short *reason)
7250 {
7251 	struct pf_state_peer	*src;
7252 	if (pd->dir == state->direction) {
7253 		if (PF_REVERSED_KEY(state, pd->af))
7254 			src = &state->dst;
7255 		else
7256 			src = &state->src;
7257 	} else {
7258 		if (PF_REVERSED_KEY(state, pd->af))
7259 			src = &state->src;
7260 		else
7261 			src = &state->dst;
7262 	}
7263 
7264 	if (src->scrub != NULL) {
7265 		if (src->scrub->pfss_v_tag == 0)
7266 			src->scrub->pfss_v_tag = pd->hdr.sctp.v_tag;
7267 		else  if (src->scrub->pfss_v_tag != pd->hdr.sctp.v_tag)
7268 			return (PF_DROP);
7269 	}
7270 
7271 	return (PF_PASS);
7272 }
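/*
 * pf_sctp_track() pins the SCTP verification tag: the first packet seen
 * from a peer records its v_tag in that peer's scrub state, and any
 * later packet from the same peer carrying a different v_tag is
 * dropped.
 */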
7273 
7274 static void
7275 pf_sctp_multihome_detach_addr(const struct pf_kstate *s)
7276 {
7277 	struct pf_sctp_endpoint key;
7278 	struct pf_sctp_endpoint *ep;
7279 	struct pf_state_key *sks = s->key[PF_SK_STACK];
7280 	struct pf_sctp_source *i, *tmp;
7281 
7282 	if (sks == NULL || sks->proto != IPPROTO_SCTP || s->dst.scrub == NULL)
7283 		return;
7284 
7285 	PF_SCTP_ENDPOINTS_LOCK();
7286 
7287 	key.v_tag = s->dst.scrub->pfss_v_tag;
7288 	ep  = RB_FIND(pf_sctp_endpoints, &V_pf_sctp_endpoints, &key);
7289 	if (ep != NULL) {
7290 		TAILQ_FOREACH_SAFE(i, &ep->sources, entry, tmp) {
7291 			if (pf_addr_cmp(&i->addr,
7292 			    &s->key[PF_SK_WIRE]->addr[s->direction == PF_OUT],
7293 			    s->key[PF_SK_WIRE]->af) == 0) {
7294 				SDT_PROBE3(pf, sctp, multihome, remove,
7295 				    key.v_tag, s, i);
7296 				TAILQ_REMOVE(&ep->sources, i, entry);
7297 				free(i, M_PFTEMP);
7298 				break;
7299 			}
7300 		}
7301 
7302 		if (TAILQ_EMPTY(&ep->sources)) {
7303 			RB_REMOVE(pf_sctp_endpoints, &V_pf_sctp_endpoints, ep);
7304 			free(ep, M_PFTEMP);
7305 		}
7306 	}
7307 
7308 	/* Other direction. */
7309 	key.v_tag = s->src.scrub->pfss_v_tag;
7310 	ep = RB_FIND(pf_sctp_endpoints, &V_pf_sctp_endpoints, &key);
7311 	if (ep != NULL) {
7312 		TAILQ_FOREACH_SAFE(i, &ep->sources, entry, tmp) {
7313 			if (pf_addr_cmp(&i->addr,
7314 			    &s->key[PF_SK_WIRE]->addr[s->direction == PF_IN],
7315 			    s->key[PF_SK_WIRE]->af) == 0) {
7316 				SDT_PROBE3(pf, sctp, multihome, remove,
7317 				    key.v_tag, s, i);
7318 				TAILQ_REMOVE(&ep->sources, i, entry);
7319 				free(i, M_PFTEMP);
7320 				break;
7321 			}
7322 		}
7323 
7324 		if (TAILQ_EMPTY(&ep->sources)) {
7325 			RB_REMOVE(pf_sctp_endpoints, &V_pf_sctp_endpoints, ep);
7326 			free(ep, M_PFTEMP);
7327 		}
7328 	}
7329 
7330 	PF_SCTP_ENDPOINTS_UNLOCK();
7331 }
7332 
7333 static void
7334 pf_sctp_multihome_add_addr(struct pf_pdesc *pd, struct pf_addr *a, uint32_t v_tag)
7335 {
7336 	struct pf_sctp_endpoint key = {
7337 		.v_tag = v_tag,
7338 	};
7339 	struct pf_sctp_source *i;
7340 	struct pf_sctp_endpoint *ep;
7341 	int count;
7342 
7343 	PF_SCTP_ENDPOINTS_LOCK();
7344 
7345 	ep = RB_FIND(pf_sctp_endpoints, &V_pf_sctp_endpoints, &key);
7346 	if (ep == NULL) {
7347 		ep = malloc(sizeof(struct pf_sctp_endpoint),
7348 		    M_PFTEMP, M_NOWAIT);
7349 		if (ep == NULL) {
7350 			PF_SCTP_ENDPOINTS_UNLOCK();
7351 			return;
7352 		}
7353 
7354 		ep->v_tag = v_tag;
7355 		TAILQ_INIT(&ep->sources);
7356 		RB_INSERT(pf_sctp_endpoints, &V_pf_sctp_endpoints, ep);
7357 	}
7358 
7359 	/* Avoid inserting duplicates. */
7360 	count = 0;
7361 	TAILQ_FOREACH(i, &ep->sources, entry) {
7362 		count++;
7363 		if (pf_addr_cmp(&i->addr, a, pd->af) == 0) {
7364 			PF_SCTP_ENDPOINTS_UNLOCK();
7365 			return;
7366 		}
7367 	}
7368 
7369 	/* Limit the number of addresses per endpoint. */
7370 	if (count >= PF_SCTP_MAX_ENDPOINTS) {
7371 		PF_SCTP_ENDPOINTS_UNLOCK();
7372 		return;
7373 	}
7374 
7375 	i = malloc(sizeof(*i), M_PFTEMP, M_NOWAIT);
7376 	if (i == NULL) {
7377 		PF_SCTP_ENDPOINTS_UNLOCK();
7378 		return;
7379 	}
7380 
7381 	i->af = pd->af;
7382 	memcpy(&i->addr, a, sizeof(*a));
7383 	TAILQ_INSERT_TAIL(&ep->sources, i, entry);
7384 	SDT_PROBE2(pf, sctp, multihome, add, v_tag, i);
7385 
7386 	PF_SCTP_ENDPOINTS_UNLOCK();
7387 }
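/*
 * The endpoint table maintained by the two functions above is an RB
 * tree keyed by verification tag; each entry carries the list of source
 * addresses announced for that association, capped at
 * PF_SCTP_MAX_ENDPOINTS entries per endpoint.  Addresses are added as
 * INIT/ASCONF parameters are processed and removed again via
 * pf_sctp_multihome_detach_addr() above.
 */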
7388 
7389 static void
7390 pf_sctp_multihome_delayed(struct pf_pdesc *pd, struct pfi_kkif *kif,
7391     struct pf_kstate *s, int action)
7392 {
7393 	struct pf_sctp_multihome_job	*j, *tmp;
7394 	struct pf_sctp_source		*i;
7395 	int			 ret __unused;
7396 	struct pf_kstate	*sm = NULL;
7397 	struct pf_krule		*ra = NULL;
7398 	struct pf_krule		*r = &V_pf_default_rule;
7399 	struct pf_kruleset	*rs = NULL;
7400 	u_short			 reason;
7401 	bool do_extra = true;
7402 
7403 	PF_RULES_RLOCK_TRACKER;
7404 
7405 again:
7406 	TAILQ_FOREACH_SAFE(j, &pd->sctp_multihome_jobs, next, tmp) {
7407 		if (s == NULL || action != PF_PASS)
7408 			goto free;
7409 
7410 		/* Confirm we don't recurse here. */
7411 		MPASS(! (pd->sctp_flags & PFDESC_SCTP_ADD_IP));
7412 
7413 		switch (j->op) {
7414 		case  SCTP_ADD_IP_ADDRESS: {
7415 			uint32_t v_tag = pd->sctp_initiate_tag;
7416 
7417 			if (v_tag == 0) {
7418 				if (s->direction == pd->dir)
7419 					v_tag = s->src.scrub->pfss_v_tag;
7420 				else
7421 					v_tag = s->dst.scrub->pfss_v_tag;
7422 			}
7423 
7424 			/*
7425 			 * Avoid duplicating states. We'll already have
7426 			 * created a state based on the source address of
7427 			 * the packet, but SCTP endpoints may also list this
7428 			 * address again in the INIT(_ACK) parameters.
7429 			 */
7430 			if (pf_addr_cmp(&j->src, pd->src, pd->af) == 0) {
7431 				break;
7432 			}
7433 
7434 			j->pd.sctp_flags |= PFDESC_SCTP_ADD_IP;
7435 			PF_RULES_RLOCK();
7436 			sm = NULL;
7437 			if (s->rule->rule_flag & PFRULE_ALLOW_RELATED) {
7438 				j->pd.related_rule = s->rule;
7439 			}
7440 			ret = pf_test_rule(&r, &sm,
7441 			    &j->pd, &ra, &rs, &reason, NULL);
7442 			PF_RULES_RUNLOCK();
7443 			SDT_PROBE4(pf, sctp, multihome, test, kif, r, j->pd.m, ret);
7444 			if (ret != PF_DROP && sm != NULL) {
7445 				/* Inherit v_tag values. */
7446 				if (sm->direction == s->direction) {
7447 					sm->src.scrub->pfss_v_tag = s->src.scrub->pfss_v_tag;
7448 					sm->dst.scrub->pfss_v_tag = s->dst.scrub->pfss_v_tag;
7449 				} else {
7450 					sm->src.scrub->pfss_v_tag = s->dst.scrub->pfss_v_tag;
7451 					sm->dst.scrub->pfss_v_tag = s->src.scrub->pfss_v_tag;
7452 				}
7453 				PF_STATE_UNLOCK(sm);
7454 			} else {
7455 				/* Dropped, or no state was created (e.g. a duplicate insert). */
7456 				break;
7457 			}
7458 
7459 			/* Only add the address if we've actually allowed the state. */
7460 			pf_sctp_multihome_add_addr(pd, &j->src, v_tag);
7461 
7462 			if (! do_extra) {
7463 				break;
7464 			}
7465 			/*
7466 			 * We need to do this for each of our source addresses.
7467 			 * Find those based on the verification tag.
7468 			 */
7469 			struct pf_sctp_endpoint key = {
7470 				.v_tag = pd->hdr.sctp.v_tag,
7471 			};
7472 			struct pf_sctp_endpoint *ep;
7473 
7474 			PF_SCTP_ENDPOINTS_LOCK();
7475 			ep = RB_FIND(pf_sctp_endpoints, &V_pf_sctp_endpoints, &key);
7476 			if (ep == NULL) {
7477 				PF_SCTP_ENDPOINTS_UNLOCK();
7478 				break;
7479 			}
7480 			MPASS(ep != NULL);
7481 
7482 			TAILQ_FOREACH(i, &ep->sources, entry) {
7483 				struct pf_sctp_multihome_job *nj;
7484 
7485 				/* SCTP can intermingle IPv4 and IPv6. */
7486 				if (i->af != pd->af)
7487 					continue;
7488 
7489 				nj = malloc(sizeof(*nj), M_PFTEMP, M_NOWAIT | M_ZERO);
7490 				if (! nj) {
7491 					continue;
7492 				}
7493 				memcpy(&nj->pd, &j->pd, sizeof(j->pd));
7494 				memcpy(&nj->src, &j->src, sizeof(nj->src));
7495 				nj->pd.src = &nj->src;
7496 				// New destination address!
7497 				memcpy(&nj->dst, &i->addr, sizeof(nj->dst));
7498 				nj->pd.dst = &nj->dst;
7499 				nj->pd.m = j->pd.m;
7500 				nj->op = j->op;
7501 
7502 				TAILQ_INSERT_TAIL(&pd->sctp_multihome_jobs, nj, next);
7503 			}
7504 			PF_SCTP_ENDPOINTS_UNLOCK();
7505 
7506 			break;
7507 		}
7508 		case SCTP_DEL_IP_ADDRESS: {
7509 			struct pf_state_key_cmp key;
7510 			uint8_t psrc;
7511 			int action;
7512 
7513 			bzero(&key, sizeof(key));
7514 			key.af = j->pd.af;
7515 			key.proto = IPPROTO_SCTP;
7516 			if (j->pd.dir == PF_IN)	{	/* wire side, straight */
7517 				pf_addrcpy(&key.addr[0], j->pd.src, key.af);
7518 				pf_addrcpy(&key.addr[1], j->pd.dst, key.af);
7519 				key.port[0] = j->pd.hdr.sctp.src_port;
7520 				key.port[1] = j->pd.hdr.sctp.dest_port;
7521 			} else {			/* stack side, reverse */
7522 				pf_addrcpy(&key.addr[1], j->pd.src, key.af);
7523 				pf_addrcpy(&key.addr[0], j->pd.dst, key.af);
7524 				key.port[1] = j->pd.hdr.sctp.src_port;
7525 				key.port[0] = j->pd.hdr.sctp.dest_port;
7526 			}
7527 
7528 			action = pf_find_state(&j->pd, &key, &sm);
7529 			if (action == PF_MATCH) {
7530 				PF_STATE_LOCK_ASSERT(sm);
7531 				if (j->pd.dir == sm->direction) {
7532 					psrc = PF_PEER_SRC;
7533 				} else {
7534 					psrc = PF_PEER_DST;
7535 				}
7536 				pf_set_protostate(sm, psrc, SCTP_SHUTDOWN_PENDING);
7537 				sm->timeout = PFTM_SCTP_CLOSING;
7538 				PF_STATE_UNLOCK(sm);
7539 			}
7540 			break;
7541 		default:
7542 			panic("Unknown op %#x", j->op);
7543 		}
7544 	}
7545 
7546 	free:
7547 		TAILQ_REMOVE(&pd->sctp_multihome_jobs, j, next);
7548 		free(j, M_PFTEMP);
7549 	}
7550 
7551 	/* We may have inserted extra work while processing the list. */
7552 	if (! TAILQ_EMPTY(&pd->sctp_multihome_jobs)) {
7553 		do_extra = false;
7554 		goto again;
7555 	}
7556 }
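/*
 * pf_sctp_multihome_delayed() runs the jobs that pf_multihome_scan()
 * queued while the state lock was held.  For each ADD_IP job it creates
 * a state for the extra peer address through pf_test_rule(), copies the
 * verification tags over from the original state, and, on the first
 * pass only (do_extra), queues additional jobs pairing the new address
 * with every other source address already known for this verification
 * tag; the second pass then processes those without queueing more work.
 */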
7557 
7558 static int
7559 pf_multihome_scan(int start, int len, struct pf_pdesc *pd, int op)
7560 {
7561 	int			 off = 0;
7562 	struct pf_sctp_multihome_job	*job;
7563 
7564 	SDT_PROBE4(pf, sctp, multihome_scan, entry, start, len, pd, op);
7565 
7566 	while (off < len) {
7567 		struct sctp_paramhdr h;
7568 
7569 		if (!pf_pull_hdr(pd->m, start + off, &h, sizeof(h), NULL, NULL,
7570 		    pd->af))
7571 			return (PF_DROP);
7572 
7573 		/* Parameters are at least 4 bytes. */
7574 		if (ntohs(h.param_length) < 4)
7575 			return (PF_DROP);
7576 
7577 		SDT_PROBE2(pf, sctp, multihome_scan, param, ntohs(h.param_type),
7578 		    ntohs(h.param_length));
7579 
7580 		switch (ntohs(h.param_type)) {
7581 		case  SCTP_IPV4_ADDRESS: {
7582 			struct in_addr t;
7583 
7584 			if (ntohs(h.param_length) !=
7585 			    (sizeof(struct sctp_paramhdr) + sizeof(t)))
7586 				return (PF_DROP);
7587 
7588 			if (!pf_pull_hdr(pd->m, start + off + sizeof(h), &t, sizeof(t),
7589 			    NULL, NULL, pd->af))
7590 				return (PF_DROP);
7591 
7592 			if (in_nullhost(t))
7593 				t.s_addr = pd->src->v4.s_addr;
7594 
7595 			/*
7596 			 * We hold the state lock (idhash) here, which means
7597 			 * that we can't acquire the keyhash, or we'll get a
7598 			 * LOR (and potentially double-lock things too). We also
7599 			 * can't release the state lock here, so instead we'll
7600 			 * enqueue this for async handling.
7601 			 * There's a relatively small race here, in that a
7602 			 * packet using the new addresses could arrive already,
7603 			 * but that's just tough luck for it.
7604 			 */
7605 			job = malloc(sizeof(*job), M_PFTEMP, M_NOWAIT | M_ZERO);
7606 			if (! job)
7607 				return (PF_DROP);
7608 
7609 			SDT_PROBE2(pf, sctp, multihome_scan, ipv4, &t, op);
7610 
7611 			memcpy(&job->pd, pd, sizeof(*pd));
7612 
7613 			// New source address!
7614 			memcpy(&job->src, &t, sizeof(t));
7615 			job->pd.src = &job->src;
7616 			memcpy(&job->dst, pd->dst, sizeof(job->dst));
7617 			job->pd.dst = &job->dst;
7618 			job->pd.m = pd->m;
7619 			job->op = op;
7620 
7621 			TAILQ_INSERT_TAIL(&pd->sctp_multihome_jobs, job, next);
7622 			break;
7623 		}
7624 #ifdef INET6
7625 		case SCTP_IPV6_ADDRESS: {
7626 			struct in6_addr t;
7627 
7628 			if (ntohs(h.param_length) !=
7629 			    (sizeof(struct sctp_paramhdr) + sizeof(t)))
7630 				return (PF_DROP);
7631 
7632 			if (!pf_pull_hdr(pd->m, start + off + sizeof(h), &t, sizeof(t),
7633 			    NULL, NULL, pd->af))
7634 				return (PF_DROP);
7635 			if (memcmp(&t, &pd->src->v6, sizeof(t)) == 0)
7636 				break;
7637 			if (memcmp(&t, &in6addr_any, sizeof(t)) == 0)
7638 				memcpy(&t, &pd->src->v6, sizeof(t));
7639 
7640 			job = malloc(sizeof(*job), M_PFTEMP, M_NOWAIT | M_ZERO);
7641 			if (! job)
7642 				return (PF_DROP);
7643 
7644 			SDT_PROBE2(pf, sctp, multihome_scan, ipv6, &t, op);
7645 
7646 			memcpy(&job->pd, pd, sizeof(*pd));
7647 			memcpy(&job->src, &t, sizeof(t));
7648 			job->pd.src = &job->src;
7649 			memcpy(&job->dst, pd->dst, sizeof(job->dst));
7650 			job->pd.dst = &job->dst;
7651 			job->pd.m = pd->m;
7652 			job->op = op;
7653 
7654 			TAILQ_INSERT_TAIL(&pd->sctp_multihome_jobs, job, next);
7655 			break;
7656 		}
7657 #endif /* INET6 */
7658 		case SCTP_ADD_IP_ADDRESS: {
7659 			int ret;
7660 			struct sctp_asconf_paramhdr ah;
7661 
7662 			if (!pf_pull_hdr(pd->m, start + off, &ah, sizeof(ah),
7663 			    NULL, NULL, pd->af))
7664 				return (PF_DROP);
7665 
7666 			ret = pf_multihome_scan(start + off + sizeof(ah),
7667 			    ntohs(ah.ph.param_length) - sizeof(ah), pd,
7668 			    SCTP_ADD_IP_ADDRESS);
7669 			if (ret != PF_PASS)
7670 				return (ret);
7671 			break;
7672 		}
7673 		case SCTP_DEL_IP_ADDRESS: {
7674 			int ret;
7675 			struct sctp_asconf_paramhdr ah;
7676 
7677 			if (!pf_pull_hdr(pd->m, start + off, &ah, sizeof(ah),
7678 			    NULL, NULL, pd->af))
7679 				return (PF_DROP);
7680 			ret = pf_multihome_scan(start + off + sizeof(ah),
7681 			    ntohs(ah.ph.param_length) - sizeof(ah), pd,
7682 			    SCTP_DEL_IP_ADDRESS);
7683 			if (ret != PF_PASS)
7684 				return (ret);
7685 			break;
7686 		}
7687 		default:
7688 			break;
7689 		}
7690 
7691 		off += roundup(ntohs(h.param_length), 4);
7692 	}
7693 
7694 	return (PF_PASS);
7695 }
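/*
 * pf_multihome_scan() iterates over SCTP TLV parameters: each one
 * starts with an sctp_paramhdr giving its type and length, and the next
 * parameter begins at the current offset plus that length rounded up to
 * a multiple of four.  Address parameters become deferred multihome
 * jobs, while ASCONF ADD/DEL address parameters are handled by
 * recursing into the parameter they wrap.
 */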
7696 
7697 int
7698 pf_multihome_scan_init(int start, int len, struct pf_pdesc *pd)
7699 {
7700 	start += sizeof(struct sctp_init_chunk);
7701 	len -= sizeof(struct sctp_init_chunk);
7702 
7703 	return (pf_multihome_scan(start, len, pd, SCTP_ADD_IP_ADDRESS));
7704 }
7705 
7706 int
7707 pf_multihome_scan_asconf(int start, int len, struct pf_pdesc *pd)
7708 {
7709 	start += sizeof(struct sctp_asconf_chunk);
7710 	len -= sizeof(struct sctp_asconf_chunk);
7711 
7712 	return (pf_multihome_scan(start, len, pd, SCTP_ADD_IP_ADDRESS));
7713 }
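/*
 * Both wrappers above simply skip the fixed chunk header (struct
 * sctp_init_chunk or struct sctp_asconf_chunk) before scanning the
 * variable-length parameters that follow.  Both pass
 * SCTP_ADD_IP_ADDRESS as the operation; deletions only show up as
 * SCTP_DEL_IP_ADDRESS parameters nested inside the scan itself.
 */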
7714 
7715 int
7716 pf_icmp_state_lookup(struct pf_state_key_cmp *key, struct pf_pdesc *pd,
7717     struct pf_kstate **state, u_int16_t icmpid, u_int16_t type, int icmp_dir,
7718     int *iidx, int multi, int inner)
7719 {
7720 	int	 action, direction = pd->dir;
7721 
7722 	key->af = pd->af;
7723 	key->proto = pd->proto;
7724 	if (icmp_dir == PF_IN) {
7725 		*iidx = pd->sidx;
7726 		key->port[pd->sidx] = icmpid;
7727 		key->port[pd->didx] = type;
7728 	} else {
7729 		*iidx = pd->didx;
7730 		key->port[pd->sidx] = type;
7731 		key->port[pd->didx] = icmpid;
7732 	}
7733 	if (pf_state_key_addr_setup(pd, key, multi))
7734 		return (PF_DROP);
7735 
7736 	action = pf_find_state(pd, key, state);
7737 	if (action != PF_MATCH)
7738 		return (action);
7739 
7740 	if ((*state)->state_flags & PFSTATE_SLOPPY)
7741 		return (-1);
7742 
7743 	/* Is this ICMP message flowing in the right direction? */
7744 	if ((*state)->key[PF_SK_WIRE]->af != (*state)->key[PF_SK_STACK]->af)
7745 		direction = (pd->af == (*state)->key[PF_SK_WIRE]->af) ?
7746 		    PF_IN : PF_OUT;
7747 	else
7748 		direction = (*state)->direction;
7749 	if ((*state)->rule->type &&
7750 	    (((!inner && direction == pd->dir) ||
7751 	    (inner && direction != pd->dir)) ?
7752 	    PF_IN : PF_OUT) != icmp_dir) {
7753 		if (V_pf_status.debug >= PF_DEBUG_MISC) {
7754 			printf("pf: icmp type %d in wrong direction (%d): ",
7755 			    ntohs(type), icmp_dir);
7756 			pf_print_state(*state);
7757 			printf("\n");
7758 		}
7759 		PF_STATE_UNLOCK(*state);
7760 		*state = NULL;
7761 		return (PF_DROP);
7762 	}
7763 	return (-1);
7764 }
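/*
 * pf_icmp_state_lookup() returns a regular verdict (PF_DROP, or
 * whatever pf_find_state() returned) when no usable state was found or
 * the message flows in the wrong direction, and -1 when a state was
 * found and left locked for the caller; hence the "ret >= 0" checks at
 * its call sites below.
 */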
7765 
7766 static int
7767 pf_test_state_icmp(struct pf_kstate **state, struct pf_pdesc *pd,
7768     u_short *reason)
7769 {
7770 	struct pf_addr  *saddr = pd->src, *daddr = pd->dst;
7771 	u_int16_t	*icmpsum, virtual_id, virtual_type;
7772 	u_int8_t	 icmptype, icmpcode;
7773 	int		 icmp_dir, iidx, ret;
7774 	struct pf_state_key_cmp key;
7775 #ifdef INET
7776 	u_int16_t	 icmpid;
7777 #endif /* INET */
7778 
7779 	MPASS(*state == NULL);
7780 
7781 	bzero(&key, sizeof(key));
7782 	switch (pd->proto) {
7783 #ifdef INET
7784 	case IPPROTO_ICMP:
7785 		icmptype = pd->hdr.icmp.icmp_type;
7786 		icmpcode = pd->hdr.icmp.icmp_code;
7787 		icmpid = pd->hdr.icmp.icmp_id;
7788 		icmpsum = &pd->hdr.icmp.icmp_cksum;
7789 		break;
7790 #endif /* INET */
7791 #ifdef INET6
7792 	case IPPROTO_ICMPV6:
7793 		icmptype = pd->hdr.icmp6.icmp6_type;
7794 		icmpcode = pd->hdr.icmp6.icmp6_code;
7795 #ifdef INET
7796 		icmpid = pd->hdr.icmp6.icmp6_id;
7797 #endif /* INET */
7798 		icmpsum = &pd->hdr.icmp6.icmp6_cksum;
7799 		break;
7800 #endif /* INET6 */
7801 	default:
7802 		panic("unhandled proto %d", pd->proto);
7803 	}
7804 
7805 	if (pf_icmp_mapping(pd, icmptype, &icmp_dir, &virtual_id,
7806 	    &virtual_type) == 0) {
7807 		/*
7808 		 * ICMP query/reply message not related to a TCP/UDP/SCTP
7809 		 * packet. Search for an ICMP state.
7810 		 */
7811 		ret = pf_icmp_state_lookup(&key, pd, state, virtual_id,
7812 		    virtual_type, icmp_dir, &iidx, 0, 0);
7813 		/* IPv6? try matching a multicast address */
7814 		if (ret == PF_DROP && pd->af == AF_INET6 && icmp_dir == PF_OUT) {
7815 			MPASS(*state == NULL);
7816 			ret = pf_icmp_state_lookup(&key, pd, state,
7817 			    virtual_id, virtual_type,
7818 			    icmp_dir, &iidx, 1, 0);
7819 		}
7820 		if (ret >= 0) {
7821 			MPASS(*state == NULL);
7822 			return (ret);
7823 		}
7824 
7825 		(*state)->expire = pf_get_uptime();
7826 		(*state)->timeout = PFTM_ICMP_ERROR_REPLY;
7827 
7828 		/* translate source/destination address, if necessary */
7829 		if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
7830 			struct pf_state_key	*nk;
7831 			int			 afto, sidx, didx;
7832 
7833 			if (PF_REVERSED_KEY(*state, pd->af))
7834 				nk = (*state)->key[pd->sidx];
7835 			else
7836 				nk = (*state)->key[pd->didx];
7837 
7838 			afto = pd->af != nk->af;
7839 
7840 			if (afto && (*state)->direction == PF_IN) {
7841 				sidx = pd->didx;
7842 				didx = pd->sidx;
7843 				iidx = !iidx;
7844 			} else {
7845 				sidx = pd->sidx;
7846 				didx = pd->didx;
7847 			}
7848 
7849 			switch (pd->af) {
7850 #ifdef INET
7851 			case AF_INET:
7852 #ifdef INET6
7853 				if (afto) {
7854 					if (pf_translate_icmp_af(AF_INET6,
7855 					    &pd->hdr.icmp))
7856 						return (PF_DROP);
7857 					pd->proto = IPPROTO_ICMPV6;
7858 				}
7859 #endif /* INET6 */
7860 				if (!afto &&
7861 				    PF_ANEQ(pd->src, &nk->addr[sidx], AF_INET))
7862 					pf_change_a(&saddr->v4.s_addr,
7863 					    pd->ip_sum,
7864 					    nk->addr[sidx].v4.s_addr,
7865 					    0);
7866 
7867 				if (!afto && PF_ANEQ(pd->dst,
7868 				    &nk->addr[didx], AF_INET))
7869 					pf_change_a(&daddr->v4.s_addr,
7870 					    pd->ip_sum,
7871 					    nk->addr[didx].v4.s_addr, 0);
7872 
7873 				if (nk->port[iidx] !=
7874 				    pd->hdr.icmp.icmp_id) {
7875 					pd->hdr.icmp.icmp_cksum =
7876 					    pf_cksum_fixup(
7877 					    pd->hdr.icmp.icmp_cksum, icmpid,
7878 					    nk->port[iidx], 0);
7879 					pd->hdr.icmp.icmp_id =
7880 					    nk->port[iidx];
7881 				}
7882 
7883 				m_copyback(pd->m, pd->off, ICMP_MINLEN,
7884 				    (caddr_t )&pd->hdr.icmp);
7885 				break;
7886 #endif /* INET */
7887 #ifdef INET6
7888 			case AF_INET6:
7889 #ifdef INET
7890 				if (afto) {
7891 					if (pf_translate_icmp_af(AF_INET,
7892 					    &pd->hdr.icmp6))
7893 						return (PF_DROP);
7894 					pd->proto = IPPROTO_ICMP;
7895 				}
7896 #endif /* INET */
7897 				if (!afto &&
7898 				    PF_ANEQ(pd->src, &nk->addr[sidx], AF_INET6))
7899 					pf_change_a6(saddr,
7900 					    &pd->hdr.icmp6.icmp6_cksum,
7901 					    &nk->addr[sidx], 0);
7902 
7903 				if (!afto && PF_ANEQ(pd->dst,
7904 				    &nk->addr[didx], AF_INET6))
7905 					pf_change_a6(daddr,
7906 					    &pd->hdr.icmp6.icmp6_cksum,
7907 					    &nk->addr[didx], 0);
7908 
7909 				if (nk->port[iidx] != pd->hdr.icmp6.icmp6_id)
7910 					pd->hdr.icmp6.icmp6_id =
7911 					    nk->port[iidx];
7912 
7913 				m_copyback(pd->m, pd->off, sizeof(struct icmp6_hdr),
7914 				    (caddr_t )&pd->hdr.icmp6);
7915 				break;
7916 #endif /* INET6 */
7917 			}
7918 			if (afto) {
7919 				pf_addrcpy(&pd->nsaddr, &nk->addr[sidx],
7920 				    nk->af);
7921 				pf_addrcpy(&pd->ndaddr, &nk->addr[didx],
7922 				    nk->af);
7923 				pd->naf = nk->af;
7924 				return (PF_AFRT);
7925 			}
7926 		}
7927 		return (PF_PASS);
7928 
7929 	} else {
7930 		/*
7931 		 * ICMP error message in response to a TCP/UDP packet.
7932 		 * Extract the inner TCP/UDP header and search for that state.
7933 		 */
7934 
7935 		struct pf_pdesc	pd2;
7936 		bzero(&pd2, sizeof pd2);
7937 #ifdef INET
7938 		struct ip	h2;
7939 #endif /* INET */
7940 #ifdef INET6
7941 		struct ip6_hdr	h2_6;
7942 #endif /* INET6 */
7943 		int		ipoff2 = 0;
7944 
7945 		pd2.af = pd->af;
7946 		pd2.dir = pd->dir;
7947 		/* Payload packet is from the opposite direction. */
7948 		pd2.sidx = (pd->dir == PF_IN) ? 1 : 0;
7949 		pd2.didx = (pd->dir == PF_IN) ? 0 : 1;
7950 		pd2.m = pd->m;
7951 		pd2.pf_mtag = pd->pf_mtag;
7952 		pd2.kif = pd->kif;
7953 		switch (pd->af) {
7954 #ifdef INET
7955 		case AF_INET:
7956 			/* offset of h2 in mbuf chain */
7957 			ipoff2 = pd->off + ICMP_MINLEN;
7958 
7959 			if (!pf_pull_hdr(pd->m, ipoff2, &h2, sizeof(h2),
7960 			    NULL, reason, pd2.af)) {
7961 				DPFPRINTF(PF_DEBUG_MISC,
7962 				    ("pf: ICMP error message too short "
7963 				    "(ip)\n"));
7964 				return (PF_DROP);
7965 			}
7966 			/*
7967 			 * ICMP error messages don't refer to non-first
7968 			 * fragments
7969 			 */
7970 			if (h2.ip_off & htons(IP_OFFMASK)) {
7971 				REASON_SET(reason, PFRES_FRAG);
7972 				return (PF_DROP);
7973 			}
7974 
7975 			/* offset of protocol header that follows h2 */
7976 			pd2.off = ipoff2;
7977 			if (pf_walk_header(&pd2, &h2, reason) != PF_PASS)
7978 				return (PF_DROP);
7979 
7980 			pd2.tot_len = ntohs(h2.ip_len);
7981 			pd2.src = (struct pf_addr *)&h2.ip_src;
7982 			pd2.dst = (struct pf_addr *)&h2.ip_dst;
7983 			pd2.ip_sum = &h2.ip_sum;
7984 			break;
7985 #endif /* INET */
7986 #ifdef INET6
7987 		case AF_INET6:
7988 			ipoff2 = pd->off + sizeof(struct icmp6_hdr);
7989 
7990 			if (!pf_pull_hdr(pd->m, ipoff2, &h2_6, sizeof(h2_6),
7991 			    NULL, reason, pd2.af)) {
7992 				DPFPRINTF(PF_DEBUG_MISC,
7993 				    ("pf: ICMP error message too short "
7994 				    "(ip6)\n"));
7995 				return (PF_DROP);
7996 			}
7997 			pd2.off = ipoff2;
7998 			if (pf_walk_header6(&pd2, &h2_6, reason) != PF_PASS)
7999 				return (PF_DROP);
8000 
8001 			pd2.tot_len = ntohs(h2_6.ip6_plen) +
8002 			    sizeof(struct ip6_hdr);
8003 			pd2.src = (struct pf_addr *)&h2_6.ip6_src;
8004 			pd2.dst = (struct pf_addr *)&h2_6.ip6_dst;
8005 			pd2.ip_sum = NULL;
8006 			break;
8007 #endif /* INET6 */
8008 		default:
8009 			unhandled_af(pd->af);
8010 		}
8011 
8012 		if (PF_ANEQ(pd->dst, pd2.src, pd->af)) {
8013 			if (V_pf_status.debug >= PF_DEBUG_MISC) {
8014 				printf("pf: BAD ICMP %d:%d outer dst: ",
8015 				    icmptype, icmpcode);
8016 				pf_print_host(pd->src, 0, pd->af);
8017 				printf(" -> ");
8018 				pf_print_host(pd->dst, 0, pd->af);
8019 				printf(" inner src: ");
8020 				pf_print_host(pd2.src, 0, pd2.af);
8021 				printf(" -> ");
8022 				pf_print_host(pd2.dst, 0, pd2.af);
8023 				printf("\n");
8024 			}
8025 			REASON_SET(reason, PFRES_BADSTATE);
8026 			return (PF_DROP);
8027 		}
8028 
8029 		switch (pd2.proto) {
8030 		case IPPROTO_TCP: {
8031 			struct tcphdr		*th = &pd2.hdr.tcp;
8032 			u_int32_t		 seq;
8033 			struct pf_state_peer	*src, *dst;
8034 			u_int8_t		 dws;
8035 			int			 copyback = 0;
8036 			int			 action;
8037 
8038 			/*
8039 			 * Only the first 8 bytes of the TCP header can be
8040 			 * expected. Don't access any TCP header fields after
8041 			 * th_seq; an ackskew test is not possible.
8042 			 */
8043 			if (!pf_pull_hdr(pd->m, pd2.off, th, 8, NULL, reason,
8044 			    pd2.af)) {
8045 				DPFPRINTF(PF_DEBUG_MISC,
8046 				    ("pf: ICMP error message too short "
8047 				    "(tcp)\n"));
8048 				return (PF_DROP);
8049 			}
8050 			pd2.pcksum = &pd2.hdr.tcp.th_sum;
8051 
8052 			key.af = pd2.af;
8053 			key.proto = IPPROTO_TCP;
8054 			pf_addrcpy(&key.addr[pd2.sidx], pd2.src, key.af);
8055 			pf_addrcpy(&key.addr[pd2.didx], pd2.dst, key.af);
8056 			key.port[pd2.sidx] = th->th_sport;
8057 			key.port[pd2.didx] = th->th_dport;
8058 
8059 			action = pf_find_state(&pd2, &key, state);
8060 			if (action != PF_MATCH)
8061 				return (action);
8062 
8063 			if (pd->dir == (*state)->direction) {
8064 				if (PF_REVERSED_KEY(*state, pd->af)) {
8065 					src = &(*state)->src;
8066 					dst = &(*state)->dst;
8067 				} else {
8068 					src = &(*state)->dst;
8069 					dst = &(*state)->src;
8070 				}
8071 			} else {
8072 				if (PF_REVERSED_KEY(*state, pd->af)) {
8073 					src = &(*state)->dst;
8074 					dst = &(*state)->src;
8075 				} else {
8076 					src = &(*state)->src;
8077 					dst = &(*state)->dst;
8078 				}
8079 			}
8080 
8081 			if (src->wscale && dst->wscale)
8082 				dws = dst->wscale & PF_WSCALE_MASK;
8083 			else
8084 				dws = 0;
8085 
8086 			/* Demodulate sequence number */
8087 			seq = ntohl(th->th_seq) - src->seqdiff;
8088 			if (src->seqdiff) {
8089 				pf_change_a(&th->th_seq, icmpsum,
8090 				    htonl(seq), 0);
8091 				copyback = 1;
8092 			}
8093 
8094 			if (!((*state)->state_flags & PFSTATE_SLOPPY) &&
8095 			    (!SEQ_GEQ(src->seqhi, seq) ||
8096 			    !SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)))) {
8097 				if (V_pf_status.debug >= PF_DEBUG_MISC) {
8098 					printf("pf: BAD ICMP %d:%d ",
8099 					    icmptype, icmpcode);
8100 					pf_print_host(pd->src, 0, pd->af);
8101 					printf(" -> ");
8102 					pf_print_host(pd->dst, 0, pd->af);
8103 					printf(" state: ");
8104 					pf_print_state(*state);
8105 					printf(" seq=%u\n", seq);
8106 				}
8107 				REASON_SET(reason, PFRES_BADSTATE);
8108 				return (PF_DROP);
8109 			} else {
8110 				if (V_pf_status.debug >= PF_DEBUG_MISC) {
8111 					printf("pf: OK ICMP %d:%d ",
8112 					    icmptype, icmpcode);
8113 					pf_print_host(pd->src, 0, pd->af);
8114 					printf(" -> ");
8115 					pf_print_host(pd->dst, 0, pd->af);
8116 					printf(" state: ");
8117 					pf_print_state(*state);
8118 					printf(" seq=%u\n", seq);
8119 				}
8120 			}
8121 
8122 			/* translate source/destination address, if necessary */
8123 			if ((*state)->key[PF_SK_WIRE] !=
8124 			    (*state)->key[PF_SK_STACK]) {
8125 
8126 				struct pf_state_key	*nk;
8127 
8128 				if (PF_REVERSED_KEY(*state, pd->af))
8129 					nk = (*state)->key[pd->sidx];
8130 				else
8131 					nk = (*state)->key[pd->didx];
8132 
8133 #if defined(INET) && defined(INET6)
8134 				int		 afto, sidx, didx;
8135 
8136 				afto = pd->af != nk->af;
8137 
8138 				if (afto && (*state)->direction == PF_IN) {
8139 					sidx = pd2.didx;
8140 					didx = pd2.sidx;
8141 				} else {
8142 					sidx = pd2.sidx;
8143 					didx = pd2.didx;
8144 				}
8145 
8146 				if (afto) {
8147 					if (pf_translate_icmp_af(nk->af,
8148 					    &pd->hdr.icmp))
8149 						return (PF_DROP);
8150 					m_copyback(pd->m, pd->off,
8151 					    sizeof(struct icmp6_hdr),
8152 					    (c_caddr_t)&pd->hdr.icmp6);
8153 					if (pf_change_icmp_af(pd->m, ipoff2, pd,
8154 					    &pd2, &nk->addr[sidx],
8155 					    &nk->addr[didx], pd->af,
8156 					    nk->af))
8157 						return (PF_DROP);
8158 					pf_addrcpy(&pd->nsaddr,
8159 					    &nk->addr[pd2.sidx], nk->af);
8160 					pf_addrcpy(&pd->ndaddr,
8161 					    &nk->addr[pd2.didx], nk->af);
8162 					if (nk->af == AF_INET) {
8163 						pd->proto = IPPROTO_ICMP;
8164 					} else {
8165 						pd->proto = IPPROTO_ICMPV6;
8166 						/*
8167 						 * IPv4 becomes IPv6 so we must
8168 						 * copy IPv4 src addr to least
8169 						 * 32bits in IPv6 address to
8170 						 * keep traceroute/icmp
8171 						 * working.
8172 						 */
8173 						pd->nsaddr.addr32[3] =
8174 						    pd->src->addr32[0];
8175 					}
8176 					pd->naf = pd2.naf = nk->af;
8177 					pf_change_ap(&pd2, pd2.src, &th->th_sport,
8178 					    &nk->addr[pd2.sidx], nk->port[sidx]);
8179 					pf_change_ap(&pd2, pd2.dst, &th->th_dport,
8180 					    &nk->addr[pd2.didx], nk->port[didx]);
8181 					m_copyback(pd2.m, pd2.off, 8, (c_caddr_t)th);
8182 					return (PF_AFRT);
8183 				}
8184 #endif /* INET && INET6 */
8185 
8186 				if (PF_ANEQ(pd2.src,
8187 				    &nk->addr[pd2.sidx], pd2.af) ||
8188 				    nk->port[pd2.sidx] != th->th_sport)
8189 					pf_change_icmp(pd2.src, &th->th_sport,
8190 					    daddr, &nk->addr[pd2.sidx],
8191 					    nk->port[pd2.sidx], NULL,
8192 					    pd2.ip_sum, icmpsum,
8193 					    pd->ip_sum, 0, pd2.af);
8194 
8195 				if (PF_ANEQ(pd2.dst,
8196 				    &nk->addr[pd2.didx], pd2.af) ||
8197 				    nk->port[pd2.didx] != th->th_dport)
8198 					pf_change_icmp(pd2.dst, &th->th_dport,
8199 					    saddr, &nk->addr[pd2.didx],
8200 					    nk->port[pd2.didx], NULL,
8201 					    pd2.ip_sum, icmpsum,
8202 					    pd->ip_sum, 0, pd2.af);
8203 				copyback = 1;
8204 			}
8205 
8206 			if (copyback) {
8207 				switch (pd2.af) {
8208 #ifdef INET
8209 				case AF_INET:
8210 					m_copyback(pd->m, pd->off, ICMP_MINLEN,
8211 					    (caddr_t )&pd->hdr.icmp);
8212 					m_copyback(pd->m, ipoff2, sizeof(h2),
8213 					    (caddr_t )&h2);
8214 					break;
8215 #endif /* INET */
8216 #ifdef INET6
8217 				case AF_INET6:
8218 					m_copyback(pd->m, pd->off,
8219 					    sizeof(struct icmp6_hdr),
8220 					    (caddr_t )&pd->hdr.icmp6);
8221 					m_copyback(pd->m, ipoff2, sizeof(h2_6),
8222 					    (caddr_t )&h2_6);
8223 					break;
8224 #endif /* INET6 */
8225 				default:
8226 					unhandled_af(pd->af);
8227 				}
8228 				m_copyback(pd->m, pd2.off, 8, (caddr_t)th);
8229 			}
8230 
8231 			return (PF_PASS);
8232 			break;
8233 		}
8234 		case IPPROTO_UDP: {
8235 			struct udphdr		*uh = &pd2.hdr.udp;
8236 			int			 action;
8237 
8238 			if (!pf_pull_hdr(pd->m, pd2.off, uh, sizeof(*uh),
8239 			    NULL, reason, pd2.af)) {
8240 				DPFPRINTF(PF_DEBUG_MISC,
8241 				    ("pf: ICMP error message too short "
8242 				    "(udp)\n"));
8243 				return (PF_DROP);
8244 			}
8245 			pd2.pcksum = &pd2.hdr.udp.uh_sum;
8246 
8247 			key.af = pd2.af;
8248 			key.proto = IPPROTO_UDP;
8249 			pf_addrcpy(&key.addr[pd2.sidx], pd2.src, key.af);
8250 			pf_addrcpy(&key.addr[pd2.didx], pd2.dst, key.af);
8251 			key.port[pd2.sidx] = uh->uh_sport;
8252 			key.port[pd2.didx] = uh->uh_dport;
8253 
8254 			action = pf_find_state(&pd2, &key, state);
8255 			if (action != PF_MATCH)
8256 				return (action);
8257 
8258 			/* translate source/destination address, if necessary */
8259 			if ((*state)->key[PF_SK_WIRE] !=
8260 			    (*state)->key[PF_SK_STACK]) {
8261 				struct pf_state_key	*nk;
8262 
8263 				if (PF_REVERSED_KEY(*state, pd->af))
8264 					nk = (*state)->key[pd->sidx];
8265 				else
8266 					nk = (*state)->key[pd->didx];
8267 
8268 #if defined(INET) && defined(INET6)
8269 				int	 afto, sidx, didx;
8270 
8271 				afto = pd->af != nk->af;
8272 
8273 				if (afto && (*state)->direction == PF_IN) {
8274 					sidx = pd2.didx;
8275 					didx = pd2.sidx;
8276 				} else {
8277 					sidx = pd2.sidx;
8278 					didx = pd2.didx;
8279 				}
8280 
8281 				if (afto) {
8282 					if (pf_translate_icmp_af(nk->af,
8283 					    &pd->hdr.icmp))
8284 						return (PF_DROP);
8285 					m_copyback(pd->m, pd->off,
8286 					    sizeof(struct icmp6_hdr),
8287 					    (c_caddr_t)&pd->hdr.icmp6);
8288 					if (pf_change_icmp_af(pd->m, ipoff2, pd,
8289 					    &pd2, &nk->addr[sidx],
8290 					    &nk->addr[didx], pd->af,
8291 					    nk->af))
8292 						return (PF_DROP);
8293 					pf_addrcpy(&pd->nsaddr,
8294 					    &nk->addr[pd2.sidx], nk->af);
8295 					pf_addrcpy(&pd->ndaddr,
8296 					    &nk->addr[pd2.didx], nk->af);
8297 					if (nk->af == AF_INET) {
8298 						pd->proto = IPPROTO_ICMP;
8299 					} else {
8300 						pd->proto = IPPROTO_ICMPV6;
8301 						/*
8302 						 * IPv4 becomes IPv6 so we must
8303 						 * copy IPv4 src addr to least
8304 						 * 32bits in IPv6 address to
8305 						 * keep traceroute/icmp
8306 						 * working.
8307 						 */
8308 						pd->nsaddr.addr32[3] =
8309 						    pd->src->addr32[0];
8310 					}
8311 					pd->naf = pd2.naf = nk->af;
8312 					pf_change_ap(&pd2, pd2.src, &uh->uh_sport,
8313 					    &nk->addr[pd2.sidx], nk->port[sidx]);
8314 					pf_change_ap(&pd2, pd2.dst, &uh->uh_dport,
8315 					    &nk->addr[pd2.didx], nk->port[didx]);
8316 					m_copyback(pd2.m, pd2.off, sizeof(*uh),
8317 					    (c_caddr_t)uh);
8318 					return (PF_AFRT);
8319 				}
8320 #endif /* INET && INET6 */
8321 
8322 				if (PF_ANEQ(pd2.src,
8323 				    &nk->addr[pd2.sidx], pd2.af) ||
8324 				    nk->port[pd2.sidx] != uh->uh_sport)
8325 					pf_change_icmp(pd2.src, &uh->uh_sport,
8326 					    daddr, &nk->addr[pd2.sidx],
8327 					    nk->port[pd2.sidx], &uh->uh_sum,
8328 					    pd2.ip_sum, icmpsum,
8329 					    pd->ip_sum, 1, pd2.af);
8330 
8331 				if (PF_ANEQ(pd2.dst,
8332 				    &nk->addr[pd2.didx], pd2.af) ||
8333 				    nk->port[pd2.didx] != uh->uh_dport)
8334 					pf_change_icmp(pd2.dst, &uh->uh_dport,
8335 					    saddr, &nk->addr[pd2.didx],
8336 					    nk->port[pd2.didx], &uh->uh_sum,
8337 					    pd2.ip_sum, icmpsum,
8338 					    pd->ip_sum, 1, pd2.af);
8339 
8340 				switch (pd2.af) {
8341 #ifdef INET
8342 				case AF_INET:
8343 					m_copyback(pd->m, pd->off, ICMP_MINLEN,
8344 					    (caddr_t )&pd->hdr.icmp);
8345 					m_copyback(pd->m, ipoff2, sizeof(h2), (caddr_t)&h2);
8346 					break;
8347 #endif /* INET */
8348 #ifdef INET6
8349 				case AF_INET6:
8350 					m_copyback(pd->m, pd->off,
8351 					    sizeof(struct icmp6_hdr),
8352 					    (caddr_t )&pd->hdr.icmp6);
8353 					m_copyback(pd->m, ipoff2, sizeof(h2_6),
8354 					    (caddr_t )&h2_6);
8355 					break;
8356 #endif /* INET6 */
8357 				}
8358 				m_copyback(pd->m, pd2.off, sizeof(*uh), (caddr_t)uh);
8359 			}
8360 			return (PF_PASS);
8361 			break;
8362 		}
8363 #ifdef INET
8364 		case IPPROTO_SCTP: {
8365 			struct sctphdr		*sh = &pd2.hdr.sctp;
8366 			struct pf_state_peer	*src;
8367 			int			 copyback = 0;
8368 			int			 action;
8369 
8370 			if (! pf_pull_hdr(pd->m, pd2.off, sh, sizeof(*sh), NULL, reason,
8371 			    pd2.af)) {
8372 				DPFPRINTF(PF_DEBUG_MISC,
8373 				    ("pf: ICMP error message too short "
8374 				    "(sctp)\n"));
8375 				return (PF_DROP);
8376 			}
8377 			pd2.pcksum = &pd2.sctp_dummy_sum;
8378 
8379 			key.af = pd2.af;
8380 			key.proto = IPPROTO_SCTP;
8381 			pf_addrcpy(&key.addr[pd2.sidx], pd2.src, key.af);
8382 			pf_addrcpy(&key.addr[pd2.didx], pd2.dst, key.af);
8383 			key.port[pd2.sidx] = sh->src_port;
8384 			key.port[pd2.didx] = sh->dest_port;
8385 
8386 			action = pf_find_state(&pd2, &key, state);
8387 			if (action != PF_MATCH)
8388 				return (action);
8389 
8390 			if (pd->dir == (*state)->direction) {
8391 				if (PF_REVERSED_KEY(*state, pd->af))
8392 					src = &(*state)->src;
8393 				else
8394 					src = &(*state)->dst;
8395 			} else {
8396 				if (PF_REVERSED_KEY(*state, pd->af))
8397 					src = &(*state)->dst;
8398 				else
8399 					src = &(*state)->src;
8400 			}
8401 
8402 			if (src->scrub->pfss_v_tag != sh->v_tag) {
8403 				DPFPRINTF(PF_DEBUG_MISC,
8404 				    ("pf: ICMP error message has incorrect "
8405 				    "SCTP v_tag\n"));
8406 				return (PF_DROP);
8407 			}
8408 
8409 			/* translate source/destination address, if necessary */
8410 			if ((*state)->key[PF_SK_WIRE] !=
8411 			    (*state)->key[PF_SK_STACK]) {
8412 
8413 				struct pf_state_key	*nk;
8414 
8415 				if (PF_REVERSED_KEY(*state, pd->af))
8416 					nk = (*state)->key[pd->sidx];
8417 				else
8418 					nk = (*state)->key[pd->didx];
8419 
8420 #if defined(INET) && defined(INET6)
8421 				int	 afto, sidx, didx;
8422 
8423 				afto = pd->af != nk->af;
8424 
8425 				if (afto && (*state)->direction == PF_IN) {
8426 					sidx = pd2.didx;
8427 					didx = pd2.sidx;
8428 				} else {
8429 					sidx = pd2.sidx;
8430 					didx = pd2.didx;
8431 				}
8432 
8433 				if (afto) {
8434 					if (pf_translate_icmp_af(nk->af,
8435 					    &pd->hdr.icmp))
8436 						return (PF_DROP);
8437 					m_copyback(pd->m, pd->off,
8438 					    sizeof(struct icmp6_hdr),
8439 					    (c_caddr_t)&pd->hdr.icmp6);
8440 					if (pf_change_icmp_af(pd->m, ipoff2, pd,
8441 					    &pd2, &nk->addr[sidx],
8442 					    &nk->addr[didx], pd->af,
8443 					    nk->af))
8444 						return (PF_DROP);
8445 					sh->src_port = nk->port[sidx];
8446 					sh->dest_port = nk->port[didx];
8447 					m_copyback(pd2.m, pd2.off, sizeof(*sh), (c_caddr_t)sh);
8448 					pf_addrcpy(&pd->nsaddr,
8449 					    &nk->addr[pd2.sidx], nk->af);
8450 					pf_addrcpy(&pd->ndaddr,
8451 					    &nk->addr[pd2.didx], nk->af);
8452 					if (nk->af == AF_INET) {
8453 						pd->proto = IPPROTO_ICMP;
8454 					} else {
8455 						pd->proto = IPPROTO_ICMPV6;
8456 						/*
8457 						 * IPv4 becomes IPv6 so we must
8458 						 * copy IPv4 src addr to least
8459 						 * 32bits in IPv6 address to
8460 						 * keep traceroute/icmp
8461 						 * working.
8462 						 */
8463 						pd->nsaddr.addr32[3] =
8464 						    pd->src->addr32[0];
8465 					}
8466 					pd->naf = nk->af;
8467 					return (PF_AFRT);
8468 				}
8469 #endif /* INET && INET6 */
8470 
8471 				if (PF_ANEQ(pd2.src,
8472 				    &nk->addr[pd2.sidx], pd2.af) ||
8473 				    nk->port[pd2.sidx] != sh->src_port)
8474 					pf_change_icmp(pd2.src, &sh->src_port,
8475 					    daddr, &nk->addr[pd2.sidx],
8476 					    nk->port[pd2.sidx], NULL,
8477 					    pd2.ip_sum, icmpsum,
8478 					    pd->ip_sum, 0, pd2.af);
8479 
8480 				if (PF_ANEQ(pd2.dst,
8481 				    &nk->addr[pd2.didx], pd2.af) ||
8482 				    nk->port[pd2.didx] != sh->dest_port)
8483 					pf_change_icmp(pd2.dst, &sh->dest_port,
8484 					    saddr, &nk->addr[pd2.didx],
8485 					    nk->port[pd2.didx], NULL,
8486 					    pd2.ip_sum, icmpsum,
8487 					    pd->ip_sum, 0, pd2.af);
8488 				copyback = 1;
8489 			}
8490 
8491 			if (copyback) {
8492 				switch (pd2.af) {
8493 #ifdef INET
8494 				case AF_INET:
8495 					m_copyback(pd->m, pd->off, ICMP_MINLEN,
8496 					    (caddr_t )&pd->hdr.icmp);
8497 					m_copyback(pd->m, ipoff2, sizeof(h2),
8498 					    (caddr_t )&h2);
8499 					break;
8500 #endif /* INET */
8501 #ifdef INET6
8502 				case AF_INET6:
8503 					m_copyback(pd->m, pd->off,
8504 					    sizeof(struct icmp6_hdr),
8505 					    (caddr_t )&pd->hdr.icmp6);
8506 					m_copyback(pd->m, ipoff2, sizeof(h2_6),
8507 					    (caddr_t )&h2_6);
8508 					break;
8509 #endif /* INET6 */
8510 				}
8511 				m_copyback(pd->m, pd2.off, sizeof(*sh), (caddr_t)sh);
8512 			}
8513 
8514 			return (PF_PASS);
8515 			break;
8516 		}
8517 		case IPPROTO_ICMP: {
8518 			struct icmp	*iih = &pd2.hdr.icmp;
8519 
8520 			if (pd2.af != AF_INET) {
8521 				REASON_SET(reason, PFRES_NORM);
8522 				return (PF_DROP);
8523 			}
8524 
8525 			if (!pf_pull_hdr(pd->m, pd2.off, iih, ICMP_MINLEN,
8526 			    NULL, reason, pd2.af)) {
8527 				DPFPRINTF(PF_DEBUG_MISC,
8528 				    ("pf: ICMP error message too short "
8529 				    "(icmp)\n"));
8530 				return (PF_DROP);
8531 			}
8532 			pd2.pcksum = &pd2.hdr.icmp.icmp_cksum;
8533 
8534 			icmpid = iih->icmp_id;
8535 			pf_icmp_mapping(&pd2, iih->icmp_type,
8536 			    &icmp_dir, &virtual_id, &virtual_type);
8537 
8538 			ret = pf_icmp_state_lookup(&key, &pd2, state,
8539 			    virtual_id, virtual_type, icmp_dir, &iidx, 0, 1);
8540 			if (ret >= 0) {
8541 				MPASS(*state == NULL);
8542 				return (ret);
8543 			}
8544 
8545 			/* translate source/destination address, if necessary */
8546 			if ((*state)->key[PF_SK_WIRE] !=
8547 			    (*state)->key[PF_SK_STACK]) {
8548 				struct pf_state_key	*nk;
8549 
8550 				if (PF_REVERSED_KEY(*state, pd->af))
8551 					nk = (*state)->key[pd->sidx];
8552 				else
8553 					nk = (*state)->key[pd->didx];
8554 
8555 #if defined(INET) && defined(INET6)
8556 				int	 afto, sidx, didx;
8557 
8558 				afto = pd->af != nk->af;
8559 
8560 				if (afto && (*state)->direction == PF_IN) {
8561 					sidx = pd2.didx;
8562 					didx = pd2.sidx;
8563 					iidx = !iidx;
8564 				} else {
8565 					sidx = pd2.sidx;
8566 					didx = pd2.didx;
8567 				}
8568 
8569 				if (afto) {
8570 					if (nk->af != AF_INET6)
8571 						return (PF_DROP);
8572 					if (pf_translate_icmp_af(nk->af,
8573 					    &pd->hdr.icmp))
8574 						return (PF_DROP);
8575 					m_copyback(pd->m, pd->off,
8576 					    sizeof(struct icmp6_hdr),
8577 					    (c_caddr_t)&pd->hdr.icmp6);
8578 					if (pf_change_icmp_af(pd->m, ipoff2, pd,
8579 					    &pd2, &nk->addr[sidx],
8580 					    &nk->addr[didx], pd->af,
8581 					    nk->af))
8582 						return (PF_DROP);
8583 					pd->proto = IPPROTO_ICMPV6;
8584 					if (pf_translate_icmp_af(nk->af, iih))
8585 						return (PF_DROP);
8586 					if (virtual_type == htons(ICMP_ECHO) &&
8587 					    nk->port[iidx] != iih->icmp_id)
8588 						iih->icmp_id = nk->port[iidx];
8589 					m_copyback(pd2.m, pd2.off, ICMP_MINLEN,
8590 					    (c_caddr_t)iih);
8591 					pf_addrcpy(&pd->nsaddr,
8592 					    &nk->addr[pd2.sidx], nk->af);
8593 					pf_addrcpy(&pd->ndaddr,
8594 					    &nk->addr[pd2.didx], nk->af);
8595 					/*
8596 					 * IPv4 becomes IPv6 so we must copy
8597 					 * IPv4 src addr to least 32bits in
8598 					 * IPv6 address to keep traceroute
8599 					 * working.
8600 					 */
8601 					pd->nsaddr.addr32[3] =
8602 					    pd->src->addr32[0];
8603 					pd->naf = nk->af;
8604 					return (PF_AFRT);
8605 				}
8606 #endif /* INET && INET6 */
8607 
8608 				if (PF_ANEQ(pd2.src,
8609 				    &nk->addr[pd2.sidx], pd2.af) ||
8610 				    (virtual_type == htons(ICMP_ECHO) &&
8611 				    nk->port[iidx] != iih->icmp_id))
8612 					pf_change_icmp(pd2.src,
8613 					    (virtual_type == htons(ICMP_ECHO)) ?
8614 					    &iih->icmp_id : NULL,
8615 					    daddr, &nk->addr[pd2.sidx],
8616 					    (virtual_type == htons(ICMP_ECHO)) ?
8617 					    nk->port[iidx] : 0, NULL,
8618 					    pd2.ip_sum, icmpsum,
8619 					    pd->ip_sum, 0, AF_INET);
8620 
8621 				if (PF_ANEQ(pd2.dst,
8622 				    &nk->addr[pd2.didx], pd2.af))
8623 					pf_change_icmp(pd2.dst, NULL, NULL,
8624 					    &nk->addr[pd2.didx], 0, NULL,
8625 					    pd2.ip_sum, icmpsum, pd->ip_sum, 0,
8626 					    AF_INET);
8627 
8628 				m_copyback(pd->m, pd->off, ICMP_MINLEN, (caddr_t)&pd->hdr.icmp);
8629 				m_copyback(pd->m, ipoff2, sizeof(h2), (caddr_t)&h2);
8630 				m_copyback(pd->m, pd2.off, ICMP_MINLEN, (caddr_t)iih);
8631 			}
8632 			return (PF_PASS);
8633 			break;
8634 		}
8635 #endif /* INET */
8636 #ifdef INET6
8637 		case IPPROTO_ICMPV6: {
8638 			struct icmp6_hdr	*iih = &pd2.hdr.icmp6;
8639 
8640 			if (pd2.af != AF_INET6) {
8641 				REASON_SET(reason, PFRES_NORM);
8642 				return (PF_DROP);
8643 			}
8644 
8645 			if (!pf_pull_hdr(pd->m, pd2.off, iih,
8646 			    sizeof(struct icmp6_hdr), NULL, reason, pd2.af)) {
8647 				DPFPRINTF(PF_DEBUG_MISC,
8648 				    ("pf: ICMP error message too short "
8649 				    "(icmp6)\n"));
8650 				return (PF_DROP);
8651 			}
8652 			pd2.pcksum = &pd2.hdr.icmp6.icmp6_cksum;
8653 
8654 			pf_icmp_mapping(&pd2, iih->icmp6_type,
8655 			    &icmp_dir, &virtual_id, &virtual_type);
8656 
8657 			ret = pf_icmp_state_lookup(&key, &pd2, state,
8658 			    virtual_id, virtual_type, icmp_dir, &iidx, 0, 1);
8659 			/* IPv6? try matching a multicast address */
8660 			if (ret == PF_DROP && pd2.af == AF_INET6 &&
8661 			    icmp_dir == PF_OUT) {
8662 				MPASS(*state == NULL);
8663 				ret = pf_icmp_state_lookup(&key, &pd2,
8664 				    state, virtual_id, virtual_type,
8665 				    icmp_dir, &iidx, 1, 1);
8666 			}
8667 			if (ret >= 0) {
8668 				MPASS(*state == NULL);
8669 				return (ret);
8670 			}
8671 
8672 			/* translate source/destination address, if necessary */
8673 			if ((*state)->key[PF_SK_WIRE] !=
8674 			    (*state)->key[PF_SK_STACK]) {
8675 				struct pf_state_key	*nk;
8676 
8677 				if (PF_REVERSED_KEY(*state, pd->af))
8678 					nk = (*state)->key[pd->sidx];
8679 				else
8680 					nk = (*state)->key[pd->didx];
8681 
8682 #if defined(INET) && defined(INET6)
8683 				int	 afto, sidx, didx;
8684 
8685 				afto = pd->af != nk->af;
8686 
8687 				if (afto && (*state)->direction == PF_IN) {
8688 					sidx = pd2.didx;
8689 					didx = pd2.sidx;
8690 					iidx = !iidx;
8691 				} else {
8692 					sidx = pd2.sidx;
8693 					didx = pd2.didx;
8694 				}
8695 
8696 				if (afto) {
8697 					if (nk->af != AF_INET)
8698 						return (PF_DROP);
8699 					if (pf_translate_icmp_af(nk->af,
8700 					    &pd->hdr.icmp))
8701 						return (PF_DROP);
8702 					m_copyback(pd->m, pd->off,
8703 					    sizeof(struct icmp6_hdr),
8704 					    (c_caddr_t)&pd->hdr.icmp6);
8705 					if (pf_change_icmp_af(pd->m, ipoff2, pd,
8706 					    &pd2, &nk->addr[sidx],
8707 					    &nk->addr[didx], pd->af,
8708 					    nk->af))
8709 						return (PF_DROP);
8710 					pd->proto = IPPROTO_ICMP;
8711 					if (pf_translate_icmp_af(nk->af, iih))
8712 						return (PF_DROP);
8713 					if (virtual_type ==
8714 					    htons(ICMP6_ECHO_REQUEST) &&
8715 					    nk->port[iidx] != iih->icmp6_id)
8716 						iih->icmp6_id = nk->port[iidx];
8717 					m_copyback(pd2.m, pd2.off,
8718 					    sizeof(struct icmp6_hdr), (c_caddr_t)iih);
8719 					pf_addrcpy(&pd->nsaddr,
8720 					    &nk->addr[pd2.sidx], nk->af);
8721 					pf_addrcpy(&pd->ndaddr,
8722 					    &nk->addr[pd2.didx], nk->af);
8723 					pd->naf = nk->af;
8724 					return (PF_AFRT);
8725 				}
8726 #endif /* INET && INET6 */
8727 
8728 				if (PF_ANEQ(pd2.src,
8729 				    &nk->addr[pd2.sidx], pd2.af) ||
8730 				    ((virtual_type == htons(ICMP6_ECHO_REQUEST)) &&
8731 				    nk->port[pd2.sidx] != iih->icmp6_id))
8732 					pf_change_icmp(pd2.src,
8733 					    (virtual_type == htons(ICMP6_ECHO_REQUEST))
8734 					    ? &iih->icmp6_id : NULL,
8735 					    daddr, &nk->addr[pd2.sidx],
8736 					    (virtual_type == htons(ICMP6_ECHO_REQUEST))
8737 					    ? nk->port[iidx] : 0, NULL,
8738 					    pd2.ip_sum, icmpsum,
8739 					    pd->ip_sum, 0, AF_INET6);
8740 
8741 				if (PF_ANEQ(pd2.dst,
8742 				    &nk->addr[pd2.didx], pd2.af))
8743 					pf_change_icmp(pd2.dst, NULL, NULL,
8744 					    &nk->addr[pd2.didx], 0, NULL,
8745 					    pd2.ip_sum, icmpsum,
8746 					    pd->ip_sum, 0, AF_INET6);
8747 
8748 				m_copyback(pd->m, pd->off, sizeof(struct icmp6_hdr),
8749 				    (caddr_t)&pd->hdr.icmp6);
8750 				m_copyback(pd->m, ipoff2, sizeof(h2_6), (caddr_t)&h2_6);
8751 				m_copyback(pd->m, pd2.off, sizeof(struct icmp6_hdr),
8752 				    (caddr_t)iih);
8753 			}
8754 			return (PF_PASS);
8755 			break;
8756 		}
8757 #endif /* INET6 */
8758 		default: {
8759 			int	action;
8760 
8761 			key.af = pd2.af;
8762 			key.proto = pd2.proto;
8763 			pf_addrcpy(&key.addr[pd2.sidx], pd2.src, key.af);
8764 			pf_addrcpy(&key.addr[pd2.didx], pd2.dst, key.af);
8765 			key.port[0] = key.port[1] = 0;
8766 
8767 			action = pf_find_state(&pd2, &key, state);
8768 			if (action != PF_MATCH)
8769 				return (action);
8770 
8771 			/* translate source/destination address, if necessary */
8772 			if ((*state)->key[PF_SK_WIRE] !=
8773 			    (*state)->key[PF_SK_STACK]) {
8774 				struct pf_state_key *nk =
8775 				    (*state)->key[pd->didx];
8776 
8777 				if (PF_ANEQ(pd2.src,
8778 				    &nk->addr[pd2.sidx], pd2.af))
8779 					pf_change_icmp(pd2.src, NULL, daddr,
8780 					    &nk->addr[pd2.sidx], 0, NULL,
8781 					    pd2.ip_sum, icmpsum,
8782 					    pd->ip_sum, 0, pd2.af);
8783 
8784 				if (PF_ANEQ(pd2.dst,
8785 				    &nk->addr[pd2.didx], pd2.af))
8786 					pf_change_icmp(pd2.dst, NULL, saddr,
8787 					    &nk->addr[pd2.didx], 0, NULL,
8788 					    pd2.ip_sum, icmpsum,
8789 					    pd->ip_sum, 0, pd2.af);
8790 
8791 				switch (pd2.af) {
8792 #ifdef INET
8793 				case AF_INET:
8794 					m_copyback(pd->m, pd->off, ICMP_MINLEN,
8795 					    (caddr_t)&pd->hdr.icmp);
8796 					m_copyback(pd->m, ipoff2, sizeof(h2), (caddr_t)&h2);
8797 					break;
8798 #endif /* INET */
8799 #ifdef INET6
8800 				case AF_INET6:
8801 					m_copyback(pd->m, pd->off,
8802 					    sizeof(struct icmp6_hdr),
8803 					    (caddr_t )&pd->hdr.icmp6);
8804 					m_copyback(pd->m, ipoff2, sizeof(h2_6),
8805 					    (caddr_t )&h2_6);
8806 					break;
8807 #endif /* INET6 */
8808 				}
8809 			}
8810 			return (PF_PASS);
8811 			break;
8812 		}
8813 		}
8814 	}
8815 }
8816 
8817 /*
8818  * ipoff and off are measured from the start of the mbuf chain.
8819  * h must be at "ipoff" on the mbuf chain.
8820  */
8821 void *
8822 pf_pull_hdr(const struct mbuf *m, int off, void *p, int len,
8823     u_short *actionp, u_short *reasonp, sa_family_t af)
8824 {
8825 	int iplen = 0;
8826 	switch (af) {
8827 #ifdef INET
8828 	case AF_INET: {
8829 		const struct ip	*h = mtod(m, struct ip *);
8830 		u_int16_t	 fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
8831 
8832 		if (fragoff) {
8833 			if (fragoff >= len)
8834 				ACTION_SET(actionp, PF_PASS);
8835 			else {
8836 				ACTION_SET(actionp, PF_DROP);
8837 				REASON_SET(reasonp, PFRES_FRAG);
8838 			}
8839 			return (NULL);
8840 		}
8841 		iplen = ntohs(h->ip_len);
8842 		break;
8843 	}
8844 #endif /* INET */
8845 #ifdef INET6
8846 	case AF_INET6: {
8847 		const struct ip6_hdr	*h = mtod(m, struct ip6_hdr *);
8848 
8849 		iplen = ntohs(h->ip6_plen) + sizeof(struct ip6_hdr);
8850 		break;
8851 	}
8852 #endif /* INET6 */
8853 	}
8854 	if (m->m_pkthdr.len < off + len || iplen < off + len) {
8855 		ACTION_SET(actionp, PF_DROP);
8856 		REASON_SET(reasonp, PFRES_SHORT);
8857 		return (NULL);
8858 	}
8859 	m_copydata(m, off, len, p);
8860 	return (p);
8861 }
8862 
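/*
 * Reverse-path check: decide whether an address is considered routable
 * for the given interface and routing table.  Scope-embedded IPv6
 * addresses, the "all" pseudo-interface and enc(4) interfaces always
 * pass; everything else is deferred to fib{4,6}_check_urpf().
 */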
8863 int
8864 pf_routable(struct pf_addr *addr, sa_family_t af, struct pfi_kkif *kif,
8865     int rtableid)
8866 {
8867 	struct ifnet		*ifp;
8868 
8869 	/*
8870 	 * Skip check for addresses with embedded interface scope,
8871 	 * as they would always match anyway.
8872 	 */
8873 	if (af == AF_INET6 && IN6_IS_SCOPE_EMBED(&addr->v6))
8874 		return (1);
8875 
8876 	if (af != AF_INET && af != AF_INET6)
8877 		return (0);
8878 
8879 	if (kif == V_pfi_all)
8880 		return (1);
8881 
8882 	/* Skip checks for ipsec interfaces */
8883 	if (kif != NULL && kif->pfik_ifp->if_type == IFT_ENC)
8884 		return (1);
8885 
8886 	ifp = (kif != NULL) ? kif->pfik_ifp : NULL;
8887 
8888 	switch (af) {
8889 #ifdef INET6
8890 	case AF_INET6:
8891 		return (fib6_check_urpf(rtableid, &addr->v6, 0, NHR_NONE,
8892 		    ifp));
8893 #endif /* INET6 */
8894 #ifdef INET
8895 	case AF_INET:
8896 		return (fib4_check_urpf(rtableid, addr->v4, 0, NHR_NONE,
8897 		    ifp));
8898 #endif /* INET */
8899 	}
8900 
8901 	return (0);
8902 }
8903 
8904 #ifdef INET
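/*
 * IPv4 route-to/reply-to/dup-to output path.  Duplicates the mbuf for
 * dup-to, decrements the TTL on forwarded packets, re-runs pf_test()
 * in the outbound direction where required, finalizes delayed
 * checksums, passes the packet through dummynet and fragments it if
 * necessary before handing it to if_output().
 */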
8905 static void
8906 pf_route(struct pf_krule *r, struct ifnet *oifp,
8907     struct pf_kstate *s, struct pf_pdesc *pd, struct inpcb *inp)
8908 {
8909 	struct mbuf		*m0, *m1, *md;
8910 	struct route		 ro;
8911 	const struct sockaddr	*gw = &ro.ro_dst;
8912 	struct sockaddr_in	*dst;
8913 	struct ip		*ip;
8914 	struct ifnet		*ifp = NULL;
8915 	int			 error = 0;
8916 	uint16_t		 ip_len, ip_off;
8917 	uint16_t		 tmp;
8918 	int			 r_dir;
8919 	bool			 skip_test = false;
8920 
8921 	KASSERT(pd->m && r && oifp, ("%s: invalid parameters", __func__));
8922 
8923 	SDT_PROBE4(pf, ip, route_to, entry, pd->m, pd, s, oifp);
8924 
8925 	if (s) {
8926 		r_dir = s->direction;
8927 	} else {
8928 		r_dir = r->direction;
8929 	}
8930 
8931 	KASSERT(pd->dir == PF_IN || pd->dir == PF_OUT ||
8932 	    r_dir == PF_IN || r_dir == PF_OUT, ("%s: invalid direction",
8933 	    __func__));
8934 
8935 	if ((pd->pf_mtag == NULL &&
8936 	    ((pd->pf_mtag = pf_get_mtag(pd->m)) == NULL)) ||
8937 	    pd->pf_mtag->routed++ > 3) {
8938 		m0 = pd->m;
8939 		pd->m = NULL;
8940 		SDT_PROBE1(pf, ip, route_to, drop, __LINE__);
8941 		goto bad_locked;
8942 	}
8943 
8944 	if (pd->act.rt_kif != NULL)
8945 		ifp = pd->act.rt_kif->pfik_ifp;
8946 
8947 	if (pd->act.rt == PF_DUPTO) {
8948 		if ((pd->pf_mtag->flags & PF_MTAG_FLAG_DUPLICATED)) {
8949 			if (s != NULL) {
8950 				PF_STATE_UNLOCK(s);
8951 			}
8952 			if (ifp == oifp) {
8953 				/* When the 2nd interface is not skipped */
8954 				return;
8955 			} else {
8956 				m0 = pd->m;
8957 				pd->m = NULL;
8958 				SDT_PROBE1(pf, ip, route_to, drop, __LINE__);
8959 				goto bad;
8960 			}
8961 		} else {
8962 			pd->pf_mtag->flags |= PF_MTAG_FLAG_DUPLICATED;
8963 			if (((m0 = m_dup(pd->m, M_NOWAIT)) == NULL)) {
8964 				if (s)
8965 					PF_STATE_UNLOCK(s);
8966 				return;
8967 			}
8968 		}
8969 	} else {
8970 		if ((pd->act.rt == PF_REPLYTO) == (r_dir == pd->dir)) {
8971 			if (pd->af == pd->naf) {
8972 				pf_dummynet(pd, s, r, &pd->m);
8973 				if (s)
8974 					PF_STATE_UNLOCK(s);
8975 				return;
8976 			} else {
8977 				if (r_dir == PF_IN) {
8978 					skip_test = true;
8979 				}
8980 			}
8981 		}
8982 
8983 		/*
8984 		 * Handle the case where we're doing both route-to and af-to
8985 		 * and are in the reply direction.
8986 		 */
8987 		if (pd->act.rt_kif && pd->act.rt_kif->pfik_ifp &&
8988 		    pd->af != pd->naf) {
8989 			if (pd->act.rt == PF_ROUTETO && r->naf != AF_INET) {
8990 				/* Un-set ifp so we do a plain route lookup. */
8991 				ifp = NULL;
8992 			}
8993 			if (pd->act.rt == PF_REPLYTO && r->naf != AF_INET6) {
8994 				/* Un-set ifp so we do a plain route lookup. */
8995 				ifp = NULL;
8996 			}
8997 		}
8998 		m0 = pd->m;
8999 	}
9000 
9001 	ip = mtod(m0, struct ip *);
9002 
9003 	bzero(&ro, sizeof(ro));
9004 	dst = (struct sockaddr_in *)&ro.ro_dst;
9005 	dst->sin_family = AF_INET;
9006 	dst->sin_len = sizeof(struct sockaddr_in);
9007 	dst->sin_addr.s_addr = pd->act.rt_addr.v4.s_addr;
9008 
9009 	if (pd->dir == PF_IN) {
9010 		if (ip->ip_ttl <= IPTTLDEC) {
9011 			if (r->rt != PF_DUPTO)
9012 				pf_send_icmp(m0, ICMP_TIMXCEED,
9013 				    ICMP_TIMXCEED_INTRANS, 0, pd->af, r,
9014 				    pd->act.rtableid);
9015 			goto bad_locked;
9016 		}
9017 		ip->ip_ttl -= IPTTLDEC;
9018 	}
9019 
9020 	if (s != NULL) {
9021 		if (ifp == NULL && (pd->af != pd->naf)) {
9022 			/* We're in the AFTO case. Do a route lookup. */
9023 			const struct nhop_object *nh;
9024 			nh = fib4_lookup(M_GETFIB(m0), ip->ip_dst, 0, NHR_NONE, 0);
9025 			if (nh) {
9026 				ifp = nh->nh_ifp;
9027 
9028 				/* Use the gateway if needed. */
9029 				if (nh->nh_flags & NHF_GATEWAY) {
9030 					gw = &nh->gw_sa;
9031 					ro.ro_flags |= RT_HAS_GW;
9032 				} else {
9033 					dst->sin_addr = ip->ip_dst;
9034 				}
9035 
9036 				/*
9037 				 * Bind to the correct interface if we're
9038 				 * if-bound. We don't know which interface
9039 				 * that will be until here, so we've inserted
9040 				 * the state on V_pf_all. Fix that now.
9041 				 */
9042 				if (s->kif == V_pfi_all && ifp != NULL &&
9043 				    r->rule_flag & PFRULE_IFBOUND)
9044 					s->kif = ifp->if_pf_kif;
9045 			}
9046 		}
9047 
9048 		if (r->rule_flag & PFRULE_IFBOUND &&
9049 		    pd->act.rt == PF_REPLYTO &&
9050 		    s->kif == V_pfi_all) {
9051 			s->kif = pd->act.rt_kif;
9052 			s->orig_kif = oifp->if_pf_kif;
9053 		}
9054 
9055 		PF_STATE_UNLOCK(s);
9056 	}
9057 
9058 	if (ifp == NULL) {
9059 		m0 = pd->m;
9060 		pd->m = NULL;
9061 		SDT_PROBE1(pf, ip, route_to, drop, __LINE__);
9062 		goto bad;
9063 	}
9064 
9065 	if (pd->dir == PF_IN && !skip_test) {
9066 		if (pf_test(AF_INET, PF_OUT, PFIL_FWD, ifp, &m0, inp,
9067 		    &pd->act) != PF_PASS) {
9068 			SDT_PROBE1(pf, ip, route_to, drop, __LINE__);
9069 			goto bad;
9070 		} else if (m0 == NULL) {
9071 			SDT_PROBE1(pf, ip, route_to, drop, __LINE__);
9072 			goto done;
9073 		}
9074 		if (m0->m_len < sizeof(struct ip)) {
9075 			DPFPRINTF(PF_DEBUG_URGENT,
9076 			    ("%s: m0->m_len < sizeof(struct ip)\n", __func__));
9077 			SDT_PROBE1(pf, ip, route_to, drop, __LINE__);
9078 			goto bad;
9079 		}
9080 		ip = mtod(m0, struct ip *);
9081 	}
9082 
9083 	if (ifp->if_flags & IFF_LOOPBACK)
9084 		m0->m_flags |= M_SKIP_FIREWALL;
9085 
9086 	ip_len = ntohs(ip->ip_len);
9087 	ip_off = ntohs(ip->ip_off);
9088 
9089 	/* Copied from FreeBSD 10.0-CURRENT ip_output. */
9090 	m0->m_pkthdr.csum_flags |= CSUM_IP;
9091 	if (m0->m_pkthdr.csum_flags & CSUM_DELAY_DATA & ~ifp->if_hwassist) {
9092 		in_delayed_cksum(m0);
9093 		m0->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
9094 	}
9095 	if (m0->m_pkthdr.csum_flags & CSUM_SCTP & ~ifp->if_hwassist) {
9096 		pf_sctp_checksum(m0, (uint32_t)(ip->ip_hl << 2));
9097 		m0->m_pkthdr.csum_flags &= ~CSUM_SCTP;
9098 	}
9099 
9100 	if (pd->dir == PF_IN) {
9101 		/*
9102 		 * Make sure dummynet gets the correct direction, in case it needs to
9103 		 * re-inject later.
9104 		 */
9105 		pd->dir = PF_OUT;
9106 
9107 		/*
9108 		 * The following processing is actually the rest of the inbound processing, even
9109 		 * though we've marked it as outbound (so we don't look through dummynet) and it
9110 		 * happens after the outbound processing (pf_test(PF_OUT) above).
9111 		 * Swap the dummynet pipe numbers, because it's going to come to the wrong
9112 		 * conclusion about what direction it's processing, and we can't fix it or it
9113 		 * will re-inject incorrectly. Swapping the pipe numbers means that its incorrect
9114 		 * decision will pick the right pipe, and everything will mostly work as expected.
9115 		 */
9116 		tmp = pd->act.dnrpipe;
9117 		pd->act.dnrpipe = pd->act.dnpipe;
9118 		pd->act.dnpipe = tmp;
9119 	}
9120 
9121 	/*
9122 	 * If small enough for interface, or the interface will take
9123 	 * care of the fragmentation for us, we can just send directly.
9124 	 */
9125 	if (ip_len <= ifp->if_mtu ||
9126 	    (m0->m_pkthdr.csum_flags & ifp->if_hwassist & CSUM_TSO) != 0) {
9127 		ip->ip_sum = 0;
9128 		if (m0->m_pkthdr.csum_flags & CSUM_IP & ~ifp->if_hwassist) {
9129 			ip->ip_sum = in_cksum(m0, ip->ip_hl << 2);
9130 			m0->m_pkthdr.csum_flags &= ~CSUM_IP;
9131 		}
9132 		m_clrprotoflags(m0);	/* Avoid confusing lower layers. */
9133 
9134 		md = m0;
9135 		error = pf_dummynet_route(pd, s, r, ifp, gw, &md);
9136 		if (md != NULL) {
9137 			error = (*ifp->if_output)(ifp, md, gw, &ro);
9138 			SDT_PROBE2(pf, ip, route_to, output, ifp, error);
9139 		}
9140 		goto done;
9141 	}
9142 
9143 	/* Balk when the DF bit is set or the interface doesn't support TSO. */
9144 	if ((ip_off & IP_DF) || (m0->m_pkthdr.csum_flags & CSUM_TSO)) {
9145 		error = EMSGSIZE;
9146 		KMOD_IPSTAT_INC(ips_cantfrag);
9147 		if (pd->act.rt != PF_DUPTO) {
9148 			if (s && s->nat_rule != NULL) {
9149 				MPASS(m0 == pd->m);
9150 				PACKET_UNDO_NAT(pd,
9151 				    (ip->ip_hl << 2) + (ip_off & IP_OFFMASK),
9152 				    s);
9153 			}
9154 
9155 			pf_send_icmp(m0, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG,
9156 			   ifp->if_mtu, pd->af, r, pd->act.rtableid);
9157 		}
9158 		SDT_PROBE1(pf, ip, route_to, drop, __LINE__);
9159 		goto bad;
9160 	}
9161 
9162 	error = ip_fragment(ip, &m0, ifp->if_mtu, ifp->if_hwassist);
9163 	if (error) {
9164 		SDT_PROBE1(pf, ip, route_to, drop, __LINE__);
9165 		goto bad;
9166 	}
9167 
9168 	for (; m0; m0 = m1) {
9169 		m1 = m0->m_nextpkt;
9170 		m0->m_nextpkt = NULL;
9171 		if (error == 0) {
9172 			m_clrprotoflags(m0);
9173 			md = m0;
9174 			pd->pf_mtag = pf_find_mtag(md);
9175 			error = pf_dummynet_route(pd, s, r, ifp,
9176 			    gw, &md);
9177 			if (md != NULL) {
9178 				error = (*ifp->if_output)(ifp, md, gw, &ro);
9179 				SDT_PROBE2(pf, ip, route_to, output, ifp, error);
9180 			}
9181 		} else
9182 			m_freem(m0);
9183 	}
9184 
9185 	if (error == 0)
9186 		KMOD_IPSTAT_INC(ips_fragmented);
9187 
9188 done:
9189 	if (pd->act.rt != PF_DUPTO)
9190 		pd->m = NULL;
9191 	return;
9192 
9193 bad_locked:
9194 	if (s)
9195 		PF_STATE_UNLOCK(s);
9196 bad:
9197 	m_freem(m0);
9198 	goto done;
9199 }
9200 #endif /* INET */
9201 
9202 #ifdef INET6
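/*
 * IPv6 route-to/reply-to/dup-to output path.  Mirrors pf_route(): it
 * duplicates for dup-to, decrements the hop limit on forwarded packets,
 * re-runs pf_test() where required, refragments previously reassembled
 * packets and sends an ICMPv6 "packet too big" error when the packet
 * exceeds the outgoing interface MTU.
 */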
9203 static void
9204 pf_route6(struct pf_krule *r, struct ifnet *oifp,
9205     struct pf_kstate *s, struct pf_pdesc *pd, struct inpcb *inp)
9206 {
9207 	struct mbuf		*m0, *md;
9208 	struct m_tag		*mtag;
9209 	struct sockaddr_in6	dst;
9210 	struct ip6_hdr		*ip6;
9211 	struct ifnet		*ifp = NULL;
9212 	int			 r_dir;
9213 	bool			 skip_test = false;
9214 
9215 	KASSERT(pd->m && r && oifp, ("%s: invalid parameters", __func__));
9216 
9217 	SDT_PROBE4(pf, ip6, route_to, entry, pd->m, pd, s, oifp);
9218 
9219 	if (s) {
9220 		r_dir = s->direction;
9221 	} else {
9222 		r_dir = r->direction;
9223 	}
9224 
9225 	KASSERT(pd->dir == PF_IN || pd->dir == PF_OUT ||
9226 	    r_dir == PF_IN || r_dir == PF_OUT, ("%s: invalid direction",
9227 	    __func__));
9228 
9229 	if ((pd->pf_mtag == NULL &&
9230 	    ((pd->pf_mtag = pf_get_mtag(pd->m)) == NULL)) ||
9231 	    pd->pf_mtag->routed++ > 3) {
9232 		m0 = pd->m;
9233 		pd->m = NULL;
9234 		SDT_PROBE1(pf, ip6, route_to, drop, __LINE__);
9235 		goto bad_locked;
9236 	}
9237 
9238 	if (pd->act.rt_kif != NULL)
9239 		ifp = pd->act.rt_kif->pfik_ifp;
9240 
9241 	if (pd->act.rt == PF_DUPTO) {
9242 		if ((pd->pf_mtag->flags & PF_MTAG_FLAG_DUPLICATED)) {
9243 			if (s != NULL) {
9244 				PF_STATE_UNLOCK(s);
9245 			}
9246 			if (ifp == oifp) {
9247 				/* When the 2nd interface is not skipped */
9248 				return;
9249 			} else {
9250 				m0 = pd->m;
9251 				pd->m = NULL;
9252 				SDT_PROBE1(pf, ip6, route_to, drop, __LINE__);
9253 				goto bad;
9254 			}
9255 		} else {
9256 			pd->pf_mtag->flags |= PF_MTAG_FLAG_DUPLICATED;
9257 			if (((m0 = m_dup(pd->m, M_NOWAIT)) == NULL)) {
9258 				if (s)
9259 					PF_STATE_UNLOCK(s);
9260 				return;
9261 			}
9262 		}
9263 	} else {
9264 		if ((pd->act.rt == PF_REPLYTO) == (r_dir == pd->dir)) {
9265 			if (pd->af == pd->naf) {
9266 				pf_dummynet(pd, s, r, &pd->m);
9267 				if (s)
9268 					PF_STATE_UNLOCK(s);
9269 				return;
9270 			} else {
9271 				if (r_dir == PF_IN) {
9272 					skip_test = true;
9273 				}
9274 			}
9275 		}
9276 
9277 		/*
9278 		 * Handle the case where we're doing both route-to and af-to
9279 		 * and are in the reply direction.
9280 		 */
9281 		if (pd->act.rt_kif && pd->act.rt_kif->pfik_ifp &&
9282 		    pd->af != pd->naf) {
9283 			if (pd->act.rt == PF_ROUTETO && r->naf != AF_INET6) {
9284 				/* Un-set ifp so we do a plain route lookup. */
9285 				ifp = NULL;
9286 			}
9287 			if (pd->act.rt == PF_REPLYTO && r->naf != AF_INET) {
9288 				/* Un-set ifp so we do a plain route lookup. */
9289 				ifp = NULL;
9290 			}
9291 		}
9292 		m0 = pd->m;
9293 	}
9294 
9295 	ip6 = mtod(m0, struct ip6_hdr *);
9296 
9297 	bzero(&dst, sizeof(dst));
9298 	dst.sin6_family = AF_INET6;
9299 	dst.sin6_len = sizeof(dst);
9300 	pf_addrcpy((struct pf_addr *)&dst.sin6_addr, &pd->act.rt_addr,
9301 	    AF_INET6);
9302 
9303 	if (pd->dir == PF_IN) {
9304 		if (ip6->ip6_hlim <= IPV6_HLIMDEC) {
9305 			if (r->rt != PF_DUPTO)
9306 				pf_send_icmp(m0, ICMP6_TIME_EXCEEDED,
9307 				    ICMP6_TIME_EXCEED_TRANSIT, 0, pd->af, r,
9308 				    pd->act.rtableid);
9309 			goto bad_locked;
9310 		}
9311 		ip6->ip6_hlim -= IPV6_HLIMDEC;
9312 	}
9313 
9314 	if (s != NULL) {
9315 		if (ifp == NULL && (pd->af != pd->naf)) {
9316 			const struct nhop_object *nh;
9317 			nh = fib6_lookup(M_GETFIB(m0), &ip6->ip6_dst, 0, NHR_NONE, 0);
9318 			if (nh) {
9319 				ifp = nh->nh_ifp;
9320 
9321 				/* Use the gateway if needed. */
9322 				if (nh->nh_flags & NHF_GATEWAY)
9323 					bcopy(&nh->gw6_sa.sin6_addr, &dst.sin6_addr,
9324 					    sizeof(dst.sin6_addr));
9325 				else
9326 					dst.sin6_addr = ip6->ip6_dst;
9327 
9328 				/*
9329 				 * Bind to the correct interface if we're
9330 				 * if-bound. We don't know which interface
9331 				 * that will be until here, so we've inserted
9332 				 * the state on V_pf_all. Fix that now.
9333 				 */
9334 				if (s->kif == V_pfi_all && ifp != NULL &&
9335 				    r->rule_flag & PFRULE_IFBOUND)
9336 					s->kif = ifp->if_pf_kif;
9337 			}
9338 		}
9339 
9340 		if (r->rule_flag & PFRULE_IFBOUND &&
9341 		    pd->act.rt == PF_REPLYTO &&
9342 		    s->kif == V_pfi_all) {
9343 			s->kif = pd->act.rt_kif;
9344 			s->orig_kif = oifp->if_pf_kif;
9345 		}
9346 
9347 		PF_STATE_UNLOCK(s);
9348 	}
9349 
9350 	if (pd->af != pd->naf) {
9351 		struct udphdr *uh = &pd->hdr.udp;
9352 
9353 		if (pd->proto == IPPROTO_UDP && uh->uh_sum == 0) {
9354 			uh->uh_sum = in6_cksum_pseudo(ip6,
9355 			    ntohs(uh->uh_ulen), IPPROTO_UDP, 0);
9356 			m_copyback(m0, pd->off, sizeof(*uh), pd->hdr.any);
9357 		}
9358 	}
9359 
9360 	if (ifp == NULL) {
9361 		m0 = pd->m;
9362 		pd->m = NULL;
9363 		SDT_PROBE1(pf, ip6, route_to, drop, __LINE__);
9364 		goto bad;
9365 	}
9366 
9367 	if (pd->dir == PF_IN && !skip_test) {
9368 		if (pf_test(AF_INET6, PF_OUT, PFIL_FWD | PF_PFIL_NOREFRAGMENT,
9369 		    ifp, &m0, inp, &pd->act) != PF_PASS) {
9370 			SDT_PROBE1(pf, ip6, route_to, drop, __LINE__);
9371 			goto bad;
9372 		} else if (m0 == NULL) {
9373 			SDT_PROBE1(pf, ip6, route_to, drop, __LINE__);
9374 			goto done;
9375 		}
9376 		if (m0->m_len < sizeof(struct ip6_hdr)) {
9377 			DPFPRINTF(PF_DEBUG_URGENT,
9378 			    ("%s: m0->m_len < sizeof(struct ip6_hdr)\n",
9379 			    __func__));
9380 			SDT_PROBE1(pf, ip6, route_to, drop, __LINE__);
9381 			goto bad;
9382 		}
9383 		ip6 = mtod(m0, struct ip6_hdr *);
9384 	}
9385 
9386 	if (ifp->if_flags & IFF_LOOPBACK)
9387 		m0->m_flags |= M_SKIP_FIREWALL;
9388 
9389 	if (m0->m_pkthdr.csum_flags & CSUM_DELAY_DATA_IPV6 &
9390 	    ~ifp->if_hwassist) {
9391 		uint32_t plen = m0->m_pkthdr.len - sizeof(*ip6);
9392 		in6_delayed_cksum(m0, plen, sizeof(struct ip6_hdr));
9393 		m0->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA_IPV6;
9394 	}
9395 
9396 	if (pd->dir == PF_IN) {
9397 		uint16_t	 tmp;
9398 		/*
9399 		 * Make sure dummynet gets the correct direction, in case it needs to
9400 		 * re-inject later.
9401 		 */
9402 		pd->dir = PF_OUT;
9403 
9404 		/*
9405 		 * The following processing is actually the rest of the inbound processing, even
9406 		 * though we've marked it as outbound (so we don't look through dummynet) and it
9407 		 * happens after the outbound processing (pf_test(PF_OUT) above).
9408 		 * Swap the dummynet pipe numbers, because it's going to come to the wrong
9409 		 * conclusion about what direction it's processing, and we can't fix it or it
9410 		 * will re-inject incorrectly. Swapping the pipe numbers means that its incorrect
9411 		 * decision will pick the right pipe, and everything will mostly work as expected.
9412 		 */
9413 		tmp = pd->act.dnrpipe;
9414 		pd->act.dnrpipe = pd->act.dnpipe;
9415 		pd->act.dnpipe = tmp;
9416 	}
9417 
9418 	/*
9419 	 * If the packet is too large for the outgoing interface,
9420 	 * send back an icmp6 error.
9421 	 */
9422 	if (IN6_IS_SCOPE_EMBED(&dst.sin6_addr))
9423 		dst.sin6_addr.s6_addr16[1] = htons(ifp->if_index);
9424 	mtag = m_tag_find(m0, PACKET_TAG_PF_REASSEMBLED, NULL);
9425 	if (mtag != NULL) {
9426 		int ret __sdt_used;
9427 		ret = pf_refragment6(ifp, &m0, mtag, ifp, true);
9428 		SDT_PROBE2(pf, ip6, route_to, output, ifp, ret);
9429 		goto done;
9430 	}
9431 
9432 	if ((u_long)m0->m_pkthdr.len <= ifp->if_mtu) {
9433 		md = m0;
9434 		pf_dummynet_route(pd, s, r, ifp, sintosa(&dst), &md);
9435 		if (md != NULL) {
9436 			int ret __sdt_used;
9437 			ret = nd6_output_ifp(ifp, ifp, md, &dst, NULL);
9438 			SDT_PROBE2(pf, ip6, route_to, output, ifp, ret);
9439 		}
9440 	}
9441 	else {
9442 		in6_ifstat_inc(ifp, ifs6_in_toobig);
9443 		if (pd->act.rt != PF_DUPTO) {
9444 			if (s && s->nat_rule != NULL) {
9445 				MPASS(m0 == pd->m);
9446 				PACKET_UNDO_NAT(pd,
9447 				    ((caddr_t)ip6 - m0->m_data) +
9448 				    sizeof(struct ip6_hdr), s);
9449 			}
9450 
9451 			if (r->rt != PF_DUPTO)
9452 				pf_send_icmp(m0, ICMP6_PACKET_TOO_BIG, 0,
9453 				    ifp->if_mtu, pd->af, r, pd->act.rtableid);
9454 		}
9455 		SDT_PROBE1(pf, ip6, route_to, drop, __LINE__);
9456 		goto bad;
9457 	}
9458 
9459 done:
9460 	if (pd->act.rt != PF_DUPTO)
9461 		pd->m = NULL;
9462 	return;
9463 
9464 bad_locked:
9465 	if (s)
9466 		PF_STATE_UNLOCK(s);
9467 bad:
9468 	m_freem(m0);
9469 	goto done;
9470 }
9471 #endif /* INET6 */
9472 
9473 /*
9474  * FreeBSD supports cksum offloads for the following drivers.
9475  *  em(4), fxp(4), lge(4), nge(4), re(4), ti(4), txp(4), xl(4)
9476  *
9477  * CSUM_DATA_VALID | CSUM_PSEUDO_HDR :
9478  *  network driver performed cksum including pseudo header, need to verify
9479  *   csum_data
9480  * CSUM_DATA_VALID :
9481  *  network driver performed cksum, needs an additional pseudo header
9482  *  cksum computation with partial csum_data (i.e. lack of H/W support for
9483  *  pseudo header, for instance sk(4) and possibly gem(4))
9484  *
9485  * After validating the cksum of packet, set both flag CSUM_DATA_VALID and
9486  * CSUM_PSEUDO_HDR in order to avoid recomputation of the cksum in upper
9487  * TCP/UDP layer.
9488  * Also, set csum_data to 0xffff to force cksum validation.
9489  */
9490 static int
9491 pf_check_proto_cksum(struct mbuf *m, int off, int len, u_int8_t p, sa_family_t af)
9492 {
9493 	u_int16_t sum = 0;
9494 	int hw_assist = 0;
9495 	struct ip *ip;
9496 
9497 	if (off < sizeof(struct ip) || len < sizeof(struct udphdr))
9498 		return (1);
9499 	if (m->m_pkthdr.len < off + len)
9500 		return (1);
9501 
9502 	switch (p) {
9503 	case IPPROTO_TCP:
9504 		if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
9505 			if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
9506 				sum = m->m_pkthdr.csum_data;
9507 			} else {
9508 				ip = mtod(m, struct ip *);
9509 				sum = in_pseudo(ip->ip_src.s_addr,
9510 				ip->ip_dst.s_addr, htonl((u_short)len +
9511 				m->m_pkthdr.csum_data + IPPROTO_TCP));
9512 			}
9513 			sum ^= 0xffff;
9514 			++hw_assist;
9515 		}
9516 		break;
9517 	case IPPROTO_UDP:
9518 		if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
9519 			if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
9520 				sum = m->m_pkthdr.csum_data;
9521 			} else {
9522 				ip = mtod(m, struct ip *);
9523 				sum = in_pseudo(ip->ip_src.s_addr,
9524 				ip->ip_dst.s_addr, htonl((u_short)len +
9525 				m->m_pkthdr.csum_data + IPPROTO_UDP));
9526 			}
9527 			sum ^= 0xffff;
9528 			++hw_assist;
9529 		}
9530 		break;
9531 	case IPPROTO_ICMP:
9532 #ifdef INET6
9533 	case IPPROTO_ICMPV6:
9534 #endif /* INET6 */
9535 		break;
9536 	default:
9537 		return (1);
9538 	}
9539 
9540 	if (!hw_assist) {
9541 		switch (af) {
9542 		case AF_INET:
9543 			if (m->m_len < sizeof(struct ip))
9544 				return (1);
9545 			sum = in4_cksum(m, (p == IPPROTO_ICMP ? 0 : p), off, len);
9546 			break;
9547 #ifdef INET6
9548 		case AF_INET6:
9549 			if (m->m_len < sizeof(struct ip6_hdr))
9550 				return (1);
9551 			sum = in6_cksum(m, p, off, len);
9552 			break;
9553 #endif /* INET6 */
9554 		}
9555 	}
9556 	if (sum) {
9557 		switch (p) {
9558 		case IPPROTO_TCP:
9559 		    {
9560 			KMOD_TCPSTAT_INC(tcps_rcvbadsum);
9561 			break;
9562 		    }
9563 		case IPPROTO_UDP:
9564 		    {
9565 			KMOD_UDPSTAT_INC(udps_badsum);
9566 			break;
9567 		    }
9568 #ifdef INET
9569 		case IPPROTO_ICMP:
9570 		    {
9571 			KMOD_ICMPSTAT_INC(icps_checksum);
9572 			break;
9573 		    }
9574 #endif
9575 #ifdef INET6
9576 		case IPPROTO_ICMPV6:
9577 		    {
9578 			KMOD_ICMP6STAT_INC(icp6s_checksum);
9579 			break;
9580 		    }
9581 #endif /* INET6 */
9582 		}
9583 		return (1);
9584 	} else {
9585 		if (p == IPPROTO_TCP || p == IPPROTO_UDP) {
9586 			m->m_pkthdr.csum_flags |=
9587 			    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
9588 			m->m_pkthdr.csum_data = 0xffff;
9589 		}
9590 	}
9591 	return (0);
9592 }
9593 
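/*
 * Convert the pf packet description into a dummynet ip_fw_args flow.
 * Returns false if the packet has already been through dummynet or no
 * pipe/queue is configured for its direction.
 */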
9594 static bool
9595 pf_pdesc_to_dnflow(const struct pf_pdesc *pd, const struct pf_krule *r,
9596     const struct pf_kstate *s, struct ip_fw_args *dnflow)
9597 {
9598 	int dndir = r->direction;
9599 
9600 	if (s && dndir == PF_INOUT) {
9601 		dndir = s->direction;
9602 	} else if (dndir == PF_INOUT) {
9603 		/* Assume primary direction. Happens when we've set dnpipe in
9604 		 * the ethernet level code. */
9605 		dndir = pd->dir;
9606 	}
9607 
9608 	if (pd->pf_mtag->flags & PF_MTAG_FLAG_DUMMYNETED)
9609 		return (false);
9610 
9611 	memset(dnflow, 0, sizeof(*dnflow));
9612 
9613 	if (pd->dport != NULL)
9614 		dnflow->f_id.dst_port = ntohs(*pd->dport);
9615 	if (pd->sport != NULL)
9616 		dnflow->f_id.src_port = ntohs(*pd->sport);
9617 
9618 	if (pd->dir == PF_IN)
9619 		dnflow->flags |= IPFW_ARGS_IN;
9620 	else
9621 		dnflow->flags |= IPFW_ARGS_OUT;
9622 
9623 	if (pd->dir != dndir && pd->act.dnrpipe) {
9624 		dnflow->rule.info = pd->act.dnrpipe;
9625 	}
9626 	else if (pd->dir == dndir && pd->act.dnpipe) {
9627 		dnflow->rule.info = pd->act.dnpipe;
9628 	}
9629 	else {
9630 		return (false);
9631 	}
9632 
9633 	dnflow->rule.info |= IPFW_IS_DUMMYNET;
9634 	if (r->free_flags & PFRULE_DN_IS_PIPE || pd->act.flags & PFSTATE_DN_IS_PIPE)
9635 		dnflow->rule.info |= IPFW_IS_PIPE;
9636 
9637 	dnflow->f_id.proto = pd->proto;
9638 	dnflow->f_id.extra = dnflow->rule.info;
9639 	switch (pd->naf) {
9640 	case AF_INET:
9641 		dnflow->f_id.addr_type = 4;
9642 		dnflow->f_id.src_ip = ntohl(pd->src->v4.s_addr);
9643 		dnflow->f_id.dst_ip = ntohl(pd->dst->v4.s_addr);
9644 		break;
9645 	case AF_INET6:
9646 		dnflow->flags |= IPFW_ARGS_IP6;
9647 		dnflow->f_id.addr_type = 6;
9648 		dnflow->f_id.src_ip6 = pd->src->v6;
9649 		dnflow->f_id.dst_ip6 = pd->dst->v6;
9650 		break;
9651 	}
9652 
9653 	return (true);
9654 }
9655 
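/*
 * Ethernet-layer entry point.  Only stateless layer 2 rules are
 * evaluated here; the packet is made writable first and the usual skip
 * conditions (pf disabled, PFI_IFLAG_SKIP, M_SKIP_FIREWALL) apply.
 */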
9656 int
9657 pf_test_eth(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0,
9658     struct inpcb *inp)
9659 {
9660 	struct pfi_kkif		*kif;
9661 	struct mbuf		*m = *m0;
9662 
9663 	M_ASSERTPKTHDR(m);
9664 	MPASS(ifp->if_vnet == curvnet);
9665 	NET_EPOCH_ASSERT();
9666 
9667 	if (!V_pf_status.running)
9668 		return (PF_PASS);
9669 
9670 	kif = (struct pfi_kkif *)ifp->if_pf_kif;
9671 
9672 	if (kif == NULL) {
9673 		DPFPRINTF(PF_DEBUG_URGENT,
9674 		    ("%s: kif == NULL, if_xname %s\n", __func__, ifp->if_xname));
9675 		return (PF_DROP);
9676 	}
9677 	if (kif->pfik_flags & PFI_IFLAG_SKIP)
9678 		return (PF_PASS);
9679 
9680 	if (m->m_flags & M_SKIP_FIREWALL)
9681 		return (PF_PASS);
9682 
9683 	if (__predict_false(! M_WRITABLE(*m0))) {
9684 		m = *m0 = m_unshare(*m0, M_NOWAIT);
9685 		if (*m0 == NULL)
9686 			return (PF_DROP);
9687 	}
9688 
9689 	/* Stateless! */
9690 	return (pf_test_eth_rule(dir, kif, m0));
9691 }
9692 
9693 static __inline void
9694 pf_dummynet_flag_remove(struct mbuf *m, struct pf_mtag *pf_mtag)
9695 {
9696 	struct m_tag *mtag;
9697 
9698 	pf_mtag->flags &= ~PF_MTAG_FLAG_DUMMYNET;
9699 
9700 	/* dummynet adds this tag, but pf does not need it,
9701 	 * and keeping it creates unexpected behavior,
9702 	 * e.g. in case of divert(4) usage right after dummynet. */
9703 	mtag = m_tag_locate(m, MTAG_IPFW_RULE, 0, NULL);
9704 	if (mtag != NULL)
9705 		m_tag_delete(m, mtag);
9706 }
9707 
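/* Convenience wrapper: dummynet processing without a route-to target. */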
9708 static int
9709 pf_dummynet(struct pf_pdesc *pd, struct pf_kstate *s,
9710     struct pf_krule *r, struct mbuf **m0)
9711 {
9712 	return (pf_dummynet_route(pd, s, r, NULL, NULL, m0));
9713 }
9714 
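/*
 * Hand the packet to dummynet if a pipe or queue is configured.  When
 * an outgoing interface is given, it and the destination address are
 * recorded in the pf mtag so the packet can still be sent out the
 * chosen interface once dummynet re-injects it.
 */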
9715 static int
9716 pf_dummynet_route(struct pf_pdesc *pd, struct pf_kstate *s,
9717     struct pf_krule *r, struct ifnet *ifp, const struct sockaddr *sa,
9718     struct mbuf **m0)
9719 {
9720 	struct ip_fw_args dnflow;
9721 
9722 	NET_EPOCH_ASSERT();
9723 
9724 	if (pd->act.dnpipe == 0 && pd->act.dnrpipe == 0)
9725 		return (0);
9726 
9727 	if (ip_dn_io_ptr == NULL) {
9728 		m_freem(*m0);
9729 		*m0 = NULL;
9730 		return (ENOMEM);
9731 	}
9732 
9733 	if (pd->pf_mtag == NULL &&
9734 	    ((pd->pf_mtag = pf_get_mtag(*m0)) == NULL)) {
9735 		m_freem(*m0);
9736 		*m0 = NULL;
9737 		return (ENOMEM);
9738 	}
9739 
9740 	if (ifp != NULL) {
9741 		pd->pf_mtag->flags |= PF_MTAG_FLAG_ROUTE_TO;
9742 
9743 		pd->pf_mtag->if_index = ifp->if_index;
9744 		pd->pf_mtag->if_idxgen = ifp->if_idxgen;
9745 
9746 		MPASS(sa != NULL);
9747 
9748 		switch (sa->sa_family) {
9749 		case AF_INET:
9750 			memcpy(&pd->pf_mtag->dst, sa,
9751 			    sizeof(struct sockaddr_in));
9752 			break;
9753 		case AF_INET6:
9754 			memcpy(&pd->pf_mtag->dst, sa,
9755 			    sizeof(struct sockaddr_in6));
9756 			break;
9757 		}
9758 	}
9759 
9760 	if (s != NULL && s->nat_rule != NULL &&
9761 	    s->nat_rule->action == PF_RDR &&
9762 	    (
9763 #ifdef INET
9764 	    (pd->af == AF_INET && IN_LOOPBACK(ntohl(pd->dst->v4.s_addr))) ||
9765 #endif /* INET */
9766 	    (pd->af == AF_INET6 && IN6_IS_ADDR_LOOPBACK(&pd->dst->v6)))) {
9767 		/*
9768 		 * If we're redirecting to loopback mark this packet
9769 		 * as being local. Otherwise it might get dropped
9770 		 * if dummynet re-injects.
9771 		 */
9772 		(*m0)->m_pkthdr.rcvif = V_loif;
9773 	}
9774 
9775 	if (pf_pdesc_to_dnflow(pd, r, s, &dnflow)) {
9776 		pd->pf_mtag->flags |= PF_MTAG_FLAG_DUMMYNET;
9777 		pd->pf_mtag->flags |= PF_MTAG_FLAG_DUMMYNETED;
9778 		ip_dn_io_ptr(m0, &dnflow);
9779 		if (*m0 != NULL) {
9780 			pd->pf_mtag->flags &= ~PF_MTAG_FLAG_ROUTE_TO;
9781 			pf_dummynet_flag_remove(*m0, pd->pf_mtag);
9782 		}
9783 	}
9784 
9785 	return (0);
9786 }
9787 
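/*
 * Walk the IPv4 header and any authentication headers that follow,
 * advancing pd->off and pd->proto to the transport header.  Non-initial
 * fragments end the walk; an excessive chain of AH headers is dropped.
 */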
9788 static int
9789 pf_walk_header(struct pf_pdesc *pd, struct ip *h, u_short *reason)
9790 {
9791 	struct ah	 ext;
9792 	u_int32_t	 hlen, end;
9793 	int		 hdr_cnt;
9794 
9795 	hlen = h->ip_hl << 2;
9796 	if (hlen < sizeof(struct ip) || hlen > ntohs(h->ip_len)) {
9797 		REASON_SET(reason, PFRES_SHORT);
9798 		return (PF_DROP);
9799 	}
9800 	if (hlen != sizeof(struct ip))
9801 		pd->badopts++;
9802 	end = pd->off + ntohs(h->ip_len);
9803 	pd->off += hlen;
9804 	pd->proto = h->ip_p;
9805 	/* stop walking over non-initial fragments */
9806 	if ((h->ip_off & htons(IP_OFFMASK)) != 0)
9807 		return (PF_PASS);
9808 	for (hdr_cnt = 0; hdr_cnt < PF_HDR_LIMIT; hdr_cnt++) {
9809 		switch (pd->proto) {
9810 		case IPPROTO_AH:
9811 			/* fragments may be short */
9812 			if ((h->ip_off & htons(IP_MF | IP_OFFMASK)) != 0 &&
9813 			    end < pd->off + sizeof(ext))
9814 				return (PF_PASS);
9815 			if (!pf_pull_hdr(pd->m, pd->off, &ext, sizeof(ext),
9816 				NULL, reason, AF_INET)) {
9817 				DPFPRINTF(PF_DEBUG_MISC, ("IP short exthdr"));
9818 				return (PF_DROP);
9819 			}
9820 			pd->off += (ext.ah_len + 2) * 4;
9821 			pd->proto = ext.ah_nxt;
9822 			break;
9823 		default:
9824 			return (PF_PASS);
9825 		}
9826 	}
9827 	DPFPRINTF(PF_DEBUG_MISC, ("IPv4 nested authentication header limit"));
9828 	REASON_SET(reason, PFRES_IPOPTIONS);
9829 	return (PF_DROP);
9830 }
9831 
9832 #ifdef INET6
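/*
 * Walk the options of an IPv6 extension header between 'off' and 'end',
 * validating option lengths and the jumbo payload option (at most one,
 * only with a zero ip6_plen, and at least IPV6_MAXPACKET long).
 */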
9833 static int
9834 pf_walk_option6(struct pf_pdesc *pd, struct ip6_hdr *h, int off, int end,
9835     u_short *reason)
9836 {
9837 	struct ip6_opt		 opt;
9838 	struct ip6_opt_jumbo	 jumbo;
9839 
9840 	while (off < end) {
9841 		if (!pf_pull_hdr(pd->m, off, &opt.ip6o_type,
9842 		    sizeof(opt.ip6o_type), NULL, reason, AF_INET6)) {
9843 			DPFPRINTF(PF_DEBUG_MISC, ("IPv6 short opt type"));
9844 			return (PF_DROP);
9845 		}
9846 		if (opt.ip6o_type == IP6OPT_PAD1) {
9847 			off++;
9848 			continue;
9849 		}
9850 		if (!pf_pull_hdr(pd->m, off, &opt, sizeof(opt), NULL,
9851 		    reason, AF_INET6)) {
9852 			DPFPRINTF(PF_DEBUG_MISC, ("IPv6 short opt"));
9853 			return (PF_DROP);
9854 		}
9855 		if (off + sizeof(opt) + opt.ip6o_len > end) {
9856 			DPFPRINTF(PF_DEBUG_MISC, ("IPv6 long opt"));
9857 			REASON_SET(reason, PFRES_IPOPTIONS);
9858 			return (PF_DROP);
9859 		}
9860 		switch (opt.ip6o_type) {
9861 		case IP6OPT_JUMBO:
9862 			if (pd->jumbolen != 0) {
9863 				DPFPRINTF(PF_DEBUG_MISC, ("IPv6 multiple jumbo"));
9864 				REASON_SET(reason, PFRES_IPOPTIONS);
9865 				return (PF_DROP);
9866 			}
9867 			if (ntohs(h->ip6_plen) != 0) {
9868 				DPFPRINTF(PF_DEBUG_MISC, ("IPv6 bad jumbo plen"));
9869 				REASON_SET(reason, PFRES_IPOPTIONS);
9870 				return (PF_DROP);
9871 			}
9872 			if (!pf_pull_hdr(pd->m, off, &jumbo, sizeof(jumbo), NULL,
9873 				reason, AF_INET6)) {
9874 				DPFPRINTF(PF_DEBUG_MISC, ("IPv6 short jumbo"));
9875 				return (PF_DROP);
9876 			}
9877 			memcpy(&pd->jumbolen, jumbo.ip6oj_jumbo_len,
9878 			    sizeof(pd->jumbolen));
9879 			pd->jumbolen = ntohl(pd->jumbolen);
9880 			if (pd->jumbolen < IPV6_MAXPACKET) {
9881 				DPFPRINTF(PF_DEBUG_MISC, ("IPv6 short jumbolen"));
9882 				REASON_SET(reason, PFRES_IPOPTIONS);
9883 				return (PF_DROP);
9884 			}
9885 			break;
9886 		default:
9887 			break;
9888 		}
9889 		off += sizeof(opt) + opt.ip6o_len;
9890 	}
9891 
9892 	return (PF_PASS);
9893 }
9894 
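/*
 * Walk the IPv6 extension header chain, recording the fragment and
 * extension header offsets (pd->fragoff, pd->extoff) and leaving
 * pd->off/pd->proto at the transport header.  Malformed chains
 * (multiple fragment headers, routing header type 0, misplaced
 * hop-by-hop options, too many headers) are dropped.
 */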
9895 int
9896 pf_walk_header6(struct pf_pdesc *pd, struct ip6_hdr *h, u_short *reason)
9897 {
9898 	struct ip6_frag		 frag;
9899 	struct ip6_ext		 ext;
9900 	struct ip6_rthdr	 rthdr;
9901 	uint32_t		 end;
9902 	int			 hdr_cnt, fraghdr_cnt = 0, rthdr_cnt = 0;
9903 
9904 	pd->off += sizeof(struct ip6_hdr);
9905 	end = pd->off + ntohs(h->ip6_plen);
9906 	pd->fragoff = pd->extoff = pd->jumbolen = 0;
9907 	pd->proto = h->ip6_nxt;
9908 	for (hdr_cnt = 0; hdr_cnt < PF_HDR_LIMIT; hdr_cnt++) {
9909 		switch (pd->proto) {
9910 		case IPPROTO_ROUTING:
9911 		case IPPROTO_HOPOPTS:
9912 		case IPPROTO_DSTOPTS:
9913 			pd->badopts++;
9914 			break;
9915 		}
9916 		switch (pd->proto) {
9917 		case IPPROTO_FRAGMENT:
9918 			if (fraghdr_cnt++) {
9919 				DPFPRINTF(PF_DEBUG_MISC, ("IPv6 multiple fragment"));
9920 				REASON_SET(reason, PFRES_FRAG);
9921 				return (PF_DROP);
9922 			}
9923 			/* jumbo payload packets cannot be fragmented */
9924 			if (pd->jumbolen != 0) {
9925 				DPFPRINTF(PF_DEBUG_MISC, ("IPv6 fragmented jumbo"));
9926 				REASON_SET(reason, PFRES_FRAG);
9927 				return (PF_DROP);
9928 			}
9929 			if (!pf_pull_hdr(pd->m, pd->off, &frag, sizeof(frag),
9930 			    NULL, reason, AF_INET6)) {
9931 				DPFPRINTF(PF_DEBUG_MISC, ("IPv6 short fragment"));
9932 				return (PF_DROP);
9933 			}
9934 			/* stop walking over non-initial fragments */
9935 			if (ntohs((frag.ip6f_offlg & IP6F_OFF_MASK)) != 0) {
9936 				pd->fragoff = pd->off;
9937 				return (PF_PASS);
9938 			}
9939 			/* RFC6946: reassemble only non-atomic fragments */
9940 			if (frag.ip6f_offlg & IP6F_MORE_FRAG)
9941 				pd->fragoff = pd->off;
9942 			pd->off += sizeof(frag);
9943 			pd->proto = frag.ip6f_nxt;
9944 			break;
9945 		case IPPROTO_ROUTING:
9946 			if (rthdr_cnt++) {
9947 				DPFPRINTF(PF_DEBUG_MISC, ("IPv6 multiple rthdr"));
9948 				REASON_SET(reason, PFRES_IPOPTIONS);
9949 				return (PF_DROP);
9950 			}
9951 			/* fragments may be short */
9952 			if (pd->fragoff != 0 && end < pd->off + sizeof(rthdr)) {
9953 				pd->off = pd->fragoff;
9954 				pd->proto = IPPROTO_FRAGMENT;
9955 				return (PF_PASS);
9956 			}
9957 			if (!pf_pull_hdr(pd->m, pd->off, &rthdr, sizeof(rthdr),
9958 			    NULL, reason, AF_INET6)) {
9959 				DPFPRINTF(PF_DEBUG_MISC, ("IPv6 short rthdr"));
9960 				return (PF_DROP);
9961 			}
9962 			if (rthdr.ip6r_type == IPV6_RTHDR_TYPE_0) {
9963 				DPFPRINTF(PF_DEBUG_MISC, ("IPv6 rthdr0"));
9964 				REASON_SET(reason, PFRES_IPOPTIONS);
9965 				return (PF_DROP);
9966 			}
9967 			/* FALLTHROUGH */
9968 		case IPPROTO_HOPOPTS:
9969 			/* RFC2460 4.1:  Hop-by-Hop only after IPv6 header */
9970 			if (pd->proto == IPPROTO_HOPOPTS && hdr_cnt > 0) {
9971 				DPFPRINTF(PF_DEBUG_MISC, ("IPv6 hopopts not first"));
9972 				REASON_SET(reason, PFRES_IPOPTIONS);
9973 				return (PF_DROP);
9974 			}
9975 			/* FALLTHROUGH */
9976 		case IPPROTO_AH:
9977 		case IPPROTO_DSTOPTS:
9978 			if (!pf_pull_hdr(pd->m, pd->off, &ext, sizeof(ext),
9979 			    NULL, reason, AF_INET6)) {
9980 				DPFPRINTF(PF_DEBUG_MISC, ("IPv6 short exthdr"));
9981 				return (PF_DROP);
9982 			}
9983 			/* fragments may be short */
9984 			if (pd->fragoff != 0 && end < pd->off + sizeof(ext)) {
9985 				pd->off = pd->fragoff;
9986 				pd->proto = IPPROTO_FRAGMENT;
9987 				return (PF_PASS);
9988 			}
9989 			/* reassembly needs the ext header before the frag */
9990 			if (pd->fragoff == 0)
9991 				pd->extoff = pd->off;
9992 			if (pd->proto == IPPROTO_HOPOPTS && pd->fragoff == 0) {
9993 				if (pf_walk_option6(pd, h,
9994 				    pd->off + sizeof(ext),
9995 				    pd->off + (ext.ip6e_len + 1) * 8, reason)
9996 				    != PF_PASS)
9997 					return (PF_DROP);
9998 				if (ntohs(h->ip6_plen) == 0 && pd->jumbolen != 0) {
9999 					DPFPRINTF(PF_DEBUG_MISC,
10000 					    ("IPv6 missing jumbo"));
10001 					REASON_SET(reason, PFRES_IPOPTIONS);
10002 					return (PF_DROP);
10003 				}
10004 			}
10005 			if (pd->proto == IPPROTO_AH)
10006 				pd->off += (ext.ip6e_len + 2) * 4;
10007 			else
10008 				pd->off += (ext.ip6e_len + 1) * 8;
10009 			pd->proto = ext.ip6e_nxt;
10010 			break;
10011 		case IPPROTO_TCP:
10012 		case IPPROTO_UDP:
10013 		case IPPROTO_SCTP:
10014 		case IPPROTO_ICMPV6:
10015 			/* fragments may be short, ignore inner header then */
10016 			if (pd->fragoff != 0 && end < pd->off +
10017 			    (pd->proto == IPPROTO_TCP ? sizeof(struct tcphdr) :
10018 			    pd->proto == IPPROTO_UDP ? sizeof(struct udphdr) :
10019 			    pd->proto == IPPROTO_SCTP ? sizeof(struct sctphdr) :
10020 			    sizeof(struct icmp6_hdr))) {
10021 				pd->off = pd->fragoff;
10022 				pd->proto = IPPROTO_FRAGMENT;
10023 			}
10024 			/* FALLTHROUGH */
10025 		default:
10026 			return (PF_PASS);
10027 		}
10028 	}
10029 	DPFPRINTF(PF_DEBUG_MISC, ("IPv6 nested extension header limit"));
10030 	REASON_SET(reason, PFRES_IPOPTIONS);
10031 	return (PF_DROP);
10032 }
10033 #endif /* INET6 */
10034 
10035 static void
10036 pf_init_pdesc(struct pf_pdesc *pd, struct mbuf *m)
10037 {
10038 	memset(pd, 0, sizeof(*pd));
10039 	pd->pf_mtag = pf_find_mtag(m);
10040 	pd->m = m;
10041 }
10042 
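/*
 * Fill in the packet descriptor for an IPv4/IPv6 packet: normalize and
 * reassemble, walk the headers and pull up the transport header (TCP,
 * UDP, SCTP, ICMP or ICMPv6).  Returns -1 with *action (and usually
 * *reason) set when the caller should stop processing the packet.
 */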
10043 static int
10044 pf_setup_pdesc(sa_family_t af, int dir, struct pf_pdesc *pd, struct mbuf **m0,
10045     u_short *action, u_short *reason, struct pfi_kkif *kif,
10046     struct pf_rule_actions *default_actions)
10047 {
10048 	pd->dir = dir;
10049 	pd->kif = kif;
10050 	pd->m = *m0;
10051 	pd->sidx = (dir == PF_IN) ? 0 : 1;
10052 	pd->didx = (dir == PF_IN) ? 1 : 0;
10053 	pd->af = pd->naf = af;
10054 
10055 	TAILQ_INIT(&pd->sctp_multihome_jobs);
10056 	if (default_actions != NULL)
10057 		memcpy(&pd->act, default_actions, sizeof(pd->act));
10058 
10059 	if (pd->pf_mtag && pd->pf_mtag->dnpipe) {
10060 		pd->act.dnpipe = pd->pf_mtag->dnpipe;
10061 		pd->act.flags = pd->pf_mtag->dnflags;
10062 	}
10063 
10064 	switch (af) {
10065 #ifdef INET
10066 	case AF_INET: {
10067 		struct ip *h;
10068 
10069 		if (__predict_false((*m0)->m_len < sizeof(struct ip)) &&
10070 		    (pd->m = *m0 = m_pullup(*m0, sizeof(struct ip))) == NULL) {
10071 			DPFPRINTF(PF_DEBUG_URGENT,
10072 			    ("%s: m_len < sizeof(struct ip), pullup failed\n",
10073 			    __func__));
10074 			*action = PF_DROP;
10075 			REASON_SET(reason, PFRES_SHORT);
10076 			return (-1);
10077 		}
10078 
10079 		if (pf_normalize_ip(reason, pd) != PF_PASS) {
10080 			/* We do IP header normalization and packet reassembly here */
10081 			*m0 = pd->m;
10082 			*action = PF_DROP;
10083 			return (-1);
10084 		}
10085 		*m0 = pd->m;
10086 
10087 		h = mtod(pd->m, struct ip *);
10088 		if (pd->m->m_pkthdr.len < ntohs(h->ip_len)) {
10089 			*action = PF_DROP;
10090 			REASON_SET(reason, PFRES_SHORT);
10091 			return (-1);
10092 		}
10093 
10094 		if (pf_walk_header(pd, h, reason) != PF_PASS) {
10095 			*action = PF_DROP;
10096 			return (-1);
10097 		}
10098 
10099 		pd->src = (struct pf_addr *)&h->ip_src;
10100 		pd->dst = (struct pf_addr *)&h->ip_dst;
10101 		pf_addrcpy(&pd->osrc, pd->src, af);
10102 		pf_addrcpy(&pd->odst, pd->dst, af);
10103 		pd->ip_sum = &h->ip_sum;
10104 		pd->tos = h->ip_tos & ~IPTOS_ECN_MASK;
10105 		pd->ttl = h->ip_ttl;
10106 		pd->tot_len = ntohs(h->ip_len);
10107 		pd->act.rtableid = -1;
10108 		pd->df = h->ip_off & htons(IP_DF);
10109 		pd->virtual_proto = (h->ip_off & htons(IP_MF | IP_OFFMASK)) ?
10110 		    PF_VPROTO_FRAGMENT : pd->proto;
10111 
10112 		break;
10113 	}
10114 #endif /* INET */
10115 #ifdef INET6
10116 	case AF_INET6: {
10117 		struct ip6_hdr *h;
10118 
10119 		if (__predict_false((*m0)->m_len < sizeof(struct ip6_hdr)) &&
10120 		    (pd->m = *m0 = m_pullup(*m0, sizeof(struct ip6_hdr))) == NULL) {
10121 			DPFPRINTF(PF_DEBUG_URGENT,
10122 			    ("%s: m_len < sizeof(struct ip6_hdr)"
10123 			     ", pullup failed\n", __func__));
10124 			*action = PF_DROP;
10125 			REASON_SET(reason, PFRES_SHORT);
10126 			return (-1);
10127 		}
10128 
10129 		h = mtod(pd->m, struct ip6_hdr *);
10130 
10131 		if (pf_walk_header6(pd, h, reason) != PF_PASS) {
10132 			*action = PF_DROP;
10133 			return (-1);
10134 		}
10135 
10136 		h = mtod(pd->m, struct ip6_hdr *);
10137 		pd->src = (struct pf_addr *)&h->ip6_src;
10138 		pd->dst = (struct pf_addr *)&h->ip6_dst;
10139 		pf_addrcpy(&pd->osrc, pd->src, af);
10140 		pf_addrcpy(&pd->odst, pd->dst, af);
10141 		pd->ip_sum = NULL;
10142 		pd->tos = IPV6_DSCP(h);
10143 		pd->ttl = h->ip6_hlim;
10144 		pd->tot_len = ntohs(h->ip6_plen) + sizeof(struct ip6_hdr);
10145 		pd->act.rtableid = -1;
10146 
10147 		pd->virtual_proto = (pd->fragoff != 0) ?
10148 		    PF_VPROTO_FRAGMENT : pd->proto;
10149 
10150 		/*
10151 		 * We do not support jumbograms.  If we keep going, a zero ip6_plen
10152 		 * will throw off the length calculations, so drop the packet for now.
10153 		 */
10154 		if (htons(h->ip6_plen) == 0) {
10155 			*action = PF_DROP;
10156 			return (-1);
10157 		}
10158 
10159 		/* We do IP header normalization and packet reassembly here */
10160 		if (pf_normalize_ip6(pd->fragoff, reason, pd) !=
10161 		    PF_PASS) {
10162 			*m0 = pd->m;
10163 			*action = PF_DROP;
10164 			return (-1);
10165 		}
10166 		*m0 = pd->m;
10167 		if (pd->m == NULL) {
10168 			/* packet sits in reassembly queue, no error */
10169 			*action = PF_PASS;
10170 			return (-1);
10171 		}
10172 
10173 		/* Update pointers into the packet. */
10174 		h = mtod(pd->m, struct ip6_hdr *);
10175 		pd->src = (struct pf_addr *)&h->ip6_src;
10176 		pd->dst = (struct pf_addr *)&h->ip6_dst;
10177 
10178 		pd->off = 0;
10179 
10180 		if (pf_walk_header6(pd, h, reason) != PF_PASS) {
10181 			*action = PF_DROP;
10182 			return (-1);
10183 		}
10184 
10185 		if (m_tag_find(pd->m, PACKET_TAG_PF_REASSEMBLED, NULL) != NULL) {
10186 			/*
10187 			 * Reassembly may have changed the next protocol from
10188 			 * fragment to something else, so update.
10189 			 */
10190 			pd->virtual_proto = pd->proto;
10191 			MPASS(pd->fragoff == 0);
10192 		}
10193 
10194 		if (pd->fragoff != 0)
10195 			pd->virtual_proto = PF_VPROTO_FRAGMENT;
10196 
10197 		break;
10198 	}
10199 #endif /* INET6 */
10200 	default:
10201 		panic("pf_setup_pdesc called with illegal af %u", af);
10202 	}
10203 
10204 	switch (pd->virtual_proto) {
10205 	case IPPROTO_TCP: {
10206 		struct tcphdr *th = &pd->hdr.tcp;
10207 
10208 		if (!pf_pull_hdr(pd->m, pd->off, th, sizeof(*th), action,
10209 			reason, af)) {
10210 			*action = PF_DROP;
10211 			REASON_SET(reason, PFRES_SHORT);
10212 			return (-1);
10213 		}
10214 		pd->hdrlen = sizeof(*th);
10215 		pd->p_len = pd->tot_len - pd->off - (th->th_off << 2);
10216 		pd->sport = &th->th_sport;
10217 		pd->dport = &th->th_dport;
10218 		pd->pcksum = &th->th_sum;
10219 		break;
10220 	}
10221 	case IPPROTO_UDP: {
10222 		struct udphdr *uh = &pd->hdr.udp;
10223 
10224 		if (!pf_pull_hdr(pd->m, pd->off, uh, sizeof(*uh), action,
10225 			reason, af)) {
10226 			*action = PF_DROP;
10227 			REASON_SET(reason, PFRES_SHORT);
10228 			return (-1);
10229 		}
10230 		pd->hdrlen = sizeof(*uh);
10231 		if (uh->uh_dport == 0 ||
10232 		    ntohs(uh->uh_ulen) > pd->m->m_pkthdr.len - pd->off ||
10233 		    ntohs(uh->uh_ulen) < sizeof(struct udphdr)) {
10234 			*action = PF_DROP;
10235 			REASON_SET(reason, PFRES_SHORT);
10236 			return (-1);
10237 		}
10238 		pd->sport = &uh->uh_sport;
10239 		pd->dport = &uh->uh_dport;
10240 		pd->pcksum = &uh->uh_sum;
10241 		break;
10242 	}
10243 	case IPPROTO_SCTP: {
10244 		if (!pf_pull_hdr(pd->m, pd->off, &pd->hdr.sctp, sizeof(pd->hdr.sctp),
10245 		    action, reason, af)) {
10246 			*action = PF_DROP;
10247 			REASON_SET(reason, PFRES_SHORT);
10248 			return (-1);
10249 		}
10250 		pd->hdrlen = sizeof(pd->hdr.sctp);
10251 		pd->p_len = pd->tot_len - pd->off;
10252 
10253 		pd->sport = &pd->hdr.sctp.src_port;
10254 		pd->dport = &pd->hdr.sctp.dest_port;
10255 		if (pd->hdr.sctp.src_port == 0 || pd->hdr.sctp.dest_port == 0) {
10256 			*action = PF_DROP;
10257 			REASON_SET(reason, PFRES_SHORT);
10258 			return (-1);
10259 		}
10260 		if (pf_scan_sctp(pd) != PF_PASS) {
10261 			*action = PF_DROP;
10262 			REASON_SET(reason, PFRES_SHORT);
10263 			return (-1);
10264 		}
10265 		/*
10266 		 * Placeholder. The SCTP checksum is 32-bits, but
10267 		 * pf_test_state() expects to update a 16-bit checksum.
10268 		 * Provide a dummy value which we'll subsequently ignore.
10269 		 */
10270 		pd->pcksum = &pd->sctp_dummy_sum;
10271 		break;
10272 	}
10273 	case IPPROTO_ICMP: {
10274 		if (!pf_pull_hdr(pd->m, pd->off, &pd->hdr.icmp, ICMP_MINLEN,
10275 			action, reason, af)) {
10276 			*action = PF_DROP;
10277 			REASON_SET(reason, PFRES_SHORT);
10278 			return (-1);
10279 		}
10280 		pd->pcksum = &pd->hdr.icmp.icmp_cksum;
10281 		pd->hdrlen = ICMP_MINLEN;
10282 		break;
10283 	}
10284 #ifdef INET6
10285 	case IPPROTO_ICMPV6: {
10286 		size_t icmp_hlen = sizeof(struct icmp6_hdr);
10287 
10288 		if (!pf_pull_hdr(pd->m, pd->off, &pd->hdr.icmp6, icmp_hlen,
10289 			action, reason, af)) {
10290 			*action = PF_DROP;
10291 			REASON_SET(reason, PFRES_SHORT);
10292 			return (-1);
10293 		}
10294 		/* ICMP headers we look further into to match state */
10295 		switch (pd->hdr.icmp6.icmp6_type) {
10296 		case MLD_LISTENER_QUERY:
10297 		case MLD_LISTENER_REPORT:
10298 			icmp_hlen = sizeof(struct mld_hdr);
10299 			break;
10300 		case ND_NEIGHBOR_SOLICIT:
10301 		case ND_NEIGHBOR_ADVERT:
10302 			icmp_hlen = sizeof(struct nd_neighbor_solicit);
10303 			/* FALLTHROUGH */
10304 		case ND_ROUTER_SOLICIT:
10305 		case ND_ROUTER_ADVERT:
10306 		case ND_REDIRECT:
10307 			if (pd->ttl != 255) {
10308 				REASON_SET(reason, PFRES_NORM);
10309 				return (PF_DROP);
10310 			}
10311 			break;
10312 		}
10313 		if (icmp_hlen > sizeof(struct icmp6_hdr) &&
10314 		    !pf_pull_hdr(pd->m, pd->off, &pd->hdr.icmp6, icmp_hlen,
10315 			action, reason, af)) {
10316 			*action = PF_DROP;
10317 			REASON_SET(reason, PFRES_SHORT);
10318 			return (-1);
10319 		}
10320 		pd->hdrlen = icmp_hlen;
10321 		pd->pcksum = &pd->hdr.icmp6.icmp6_cksum;
10322 		break;
10323 	}
10324 #endif /* INET6 */
10325 	}
10326 
10327 	if (pd->sport)
10328 		pd->osport = pd->nsport = *pd->sport;
10329 	if (pd->dport)
10330 		pd->odport = pd->ndport = *pd->dport;
10331 
10332 	return (0);
10333 }
10334 
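/*
 * Update interface, rule, anchor, state, source node and table
 * statistics for a processed packet.
 */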
10335 static void
10336 pf_counters_inc(int action, struct pf_pdesc *pd,
10337     struct pf_kstate *s, struct pf_krule *r, struct pf_krule *a)
10338 {
10339 	struct pf_krule		*tr;
10340 	int			 dir = pd->dir;
10341 	int			 dirndx;
10342 
10343 	pf_counter_u64_critical_enter();
10344 	pf_counter_u64_add_protected(
10345 	    &pd->kif->pfik_bytes[pd->af == AF_INET6][dir == PF_OUT][action != PF_PASS],
10346 	    pd->tot_len);
10347 	pf_counter_u64_add_protected(
10348 	    &pd->kif->pfik_packets[pd->af == AF_INET6][dir == PF_OUT][action != PF_PASS],
10349 	    1);
10350 
10351 	if (action == PF_PASS || action == PF_AFRT || r->action == PF_DROP) {
10352 		dirndx = (dir == PF_OUT);
10353 		pf_counter_u64_add_protected(&r->packets[dirndx], 1);
10354 		pf_counter_u64_add_protected(&r->bytes[dirndx], pd->tot_len);
10355 		pf_update_timestamp(r);
10356 
10357 		if (a != NULL) {
10358 			pf_counter_u64_add_protected(&a->packets[dirndx], 1);
10359 			pf_counter_u64_add_protected(&a->bytes[dirndx], pd->tot_len);
10360 		}
10361 		if (s != NULL) {
10362 			struct pf_krule_item	*ri;
10363 
10364 			if (s->nat_rule != NULL) {
10365 				pf_counter_u64_add_protected(&s->nat_rule->packets[dirndx],
10366 				    1);
10367 				pf_counter_u64_add_protected(&s->nat_rule->bytes[dirndx],
10368 				    pd->tot_len);
10369 			}
10370 			/*
10371 			 * Source nodes are accessed unlocked here.
10372 			 * But since we are operating with stateful tracking
10373 			 * and the state is locked, those SNs could not have
10374 			 * been freed.
10375 			 */
10376 			for (pf_sn_types_t sn_type=0; sn_type<PF_SN_MAX; sn_type++) {
10377 				if (s->sns[sn_type] != NULL) {
10378 					counter_u64_add(
10379 					    s->sns[sn_type]->packets[dirndx],
10380 					    1);
10381 					counter_u64_add(
10382 					    s->sns[sn_type]->bytes[dirndx],
10383 					    pd->tot_len);
10384 				}
10385 			}
10386 			dirndx = (dir == s->direction) ? 0 : 1;
10387 			s->packets[dirndx]++;
10388 			s->bytes[dirndx] += pd->tot_len;
10389 
10390 			SLIST_FOREACH(ri, &s->match_rules, entry) {
10391 				pf_counter_u64_add_protected(&ri->r->packets[dirndx], 1);
10392 				pf_counter_u64_add_protected(&ri->r->bytes[dirndx], pd->tot_len);
10393 
10394 				if (ri->r->src.addr.type == PF_ADDR_TABLE)
10395 					pfr_update_stats(ri->r->src.addr.p.tbl,
10396 					    (s == NULL) ? pd->src :
10397 					    &s->key[(s->direction == PF_IN)]->
10398 						addr[(s->direction == PF_OUT)],
10399 					    pd->af, pd->tot_len, dir == PF_OUT,
10400 					    r->action == PF_PASS, ri->r->src.neg);
10401 				if (ri->r->dst.addr.type == PF_ADDR_TABLE)
10402 					pfr_update_stats(ri->r->dst.addr.p.tbl,
10403 					    (s == NULL) ? pd->dst :
10404 					    &s->key[(s->direction == PF_IN)]->
10405 						addr[(s->direction == PF_IN)],
10406 					    pd->af, pd->tot_len, dir == PF_OUT,
10407 					    r->action == PF_PASS, ri->r->dst.neg);
10408 			}
10409 		}
10410 
10411 		tr = r;
10412 		if (s != NULL && s->nat_rule != NULL &&
10413 		    r == &V_pf_default_rule)
10414 			tr = s->nat_rule;
10415 
10416 		if (tr->src.addr.type == PF_ADDR_TABLE)
10417 			pfr_update_stats(tr->src.addr.p.tbl,
10418 			    (s == NULL) ? pd->src :
10419 			    &s->key[(s->direction == PF_IN)]->
10420 				addr[(s->direction == PF_OUT)],
10421 			    pd->af, pd->tot_len, dir == PF_OUT,
10422 			    r->action == PF_PASS, tr->src.neg);
10423 		if (tr->dst.addr.type == PF_ADDR_TABLE)
10424 			pfr_update_stats(tr->dst.addr.p.tbl,
10425 			    (s == NULL) ? pd->dst :
10426 			    &s->key[(s->direction == PF_IN)]->
10427 				addr[(s->direction == PF_IN)],
10428 			    pd->af, pd->tot_len, dir == PF_OUT,
10429 			    r->action == PF_PASS, tr->dst.neg);
10430 	}
10431 	pf_counter_u64_critical_exit();
10432 }
10433 static void
10434 pf_log_matches(struct pf_pdesc *pd, struct pf_krule *rm,
10435     struct pf_krule *am, struct pf_kruleset *ruleset,
10436     struct pf_krule_slist *matchrules)
10437 {
10438 	struct pf_krule_item	*ri;
10439 
10440 	/* if this is the log(matches) rule, packet has been logged already */
10441 	if (rm->log & PF_LOG_MATCHES)
10442 		return;
10443 
10444 	SLIST_FOREACH(ri, matchrules, entry)
10445 		if (ri->r->log & PF_LOG_MATCHES)
10446 			PFLOG_PACKET(rm->action, PFRES_MATCH, rm, am,
10447 			    ruleset, pd, 1, ri->r);
10448 }
10449 
10450 #if defined(INET) || defined(INET6)
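/*
 * Main IPv4/IPv6 inspection routine: sets up the packet descriptor,
 * handles route-to and dummynet re-injection tags, and dispatches to
 * the per-protocol rule and state tests.
 */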
10451 int
10452 pf_test(sa_family_t af, int dir, int pflags, struct ifnet *ifp, struct mbuf **m0,
10453     struct inpcb *inp, struct pf_rule_actions *default_actions)
10454 {
10455 	struct pfi_kkif		*kif;
10456 	u_short			 action, reason = 0;
10457 	struct m_tag		*mtag;
10458 	struct pf_krule		*a = NULL, *r = &V_pf_default_rule;
10459 	struct pf_kstate	*s = NULL;
10460 	struct pf_kruleset	*ruleset = NULL;
10461 	struct pf_pdesc		 pd;
10462 	int			 use_2nd_queue = 0;
10463 	uint16_t		 tag;
10464 
10465 	PF_RULES_RLOCK_TRACKER;
10466 	KASSERT(dir == PF_IN || dir == PF_OUT, ("%s: bad direction %d\n", __func__, dir));
10467 	M_ASSERTPKTHDR(*m0);
10468 
10469 	if (!V_pf_status.running)
10470 		return (PF_PASS);
10471 
10472 	PF_RULES_RLOCK();
10473 
10474 	kif = (struct pfi_kkif *)ifp->if_pf_kif;
10475 
10476 	if (__predict_false(kif == NULL)) {
10477 		DPFPRINTF(PF_DEBUG_URGENT,
10478 		    ("%s: kif == NULL, if_xname %s\n",
10479 		    __func__, ifp->if_xname));
10480 		PF_RULES_RUNLOCK();
10481 		return (PF_DROP);
10482 	}
10483 	if (kif->pfik_flags & PFI_IFLAG_SKIP) {
10484 		PF_RULES_RUNLOCK();
10485 		return (PF_PASS);
10486 	}
10487 
10488 	if ((*m0)->m_flags & M_SKIP_FIREWALL) {
10489 		PF_RULES_RUNLOCK();
10490 		return (PF_PASS);
10491 	}
10492 
10493 	if (__predict_false(! M_WRITABLE(*m0))) {
10494 		*m0 = m_unshare(*m0, M_NOWAIT);
10495 		if (*m0 == NULL) {
10496 			PF_RULES_RUNLOCK();
10497 			return (PF_DROP);
10498 		}
10499 	}
10500 
10501 	pf_init_pdesc(&pd, *m0);
10502 
10503 	if (pd.pf_mtag != NULL && (pd.pf_mtag->flags & PF_MTAG_FLAG_ROUTE_TO)) {
10504 		pd.pf_mtag->flags &= ~PF_MTAG_FLAG_ROUTE_TO;
10505 
10506 		ifp = ifnet_byindexgen(pd.pf_mtag->if_index,
10507 		    pd.pf_mtag->if_idxgen);
10508 		if (ifp == NULL || ifp->if_flags & IFF_DYING) {
10509 			PF_RULES_RUNLOCK();
10510 			m_freem(*m0);
10511 			*m0 = NULL;
10512 			return (PF_PASS);
10513 		}
10514 		PF_RULES_RUNLOCK();
10515 		(ifp->if_output)(ifp, *m0, sintosa(&pd.pf_mtag->dst), NULL);
10516 		*m0 = NULL;
10517 		return (PF_PASS);
10518 	}
10519 
10520 	if (ip_dn_io_ptr != NULL && pd.pf_mtag != NULL &&
10521 	    pd.pf_mtag->flags & PF_MTAG_FLAG_DUMMYNET) {
10522 		/* Dummynet re-injects packets after they've
10523 		 * completed their delay. We've already
10524 		 * processed them, so pass unconditionally. */
10525 
10526 		/* But only once. We may see the packet multiple times (e.g.
10527 		 * PFIL_IN/PFIL_OUT). */
10528 		pf_dummynet_flag_remove(pd.m, pd.pf_mtag);
10529 		PF_RULES_RUNLOCK();
10530 
10531 		return (PF_PASS);
10532 	}
10533 
10534 	if (pf_setup_pdesc(af, dir, &pd, m0, &action, &reason,
10535 		kif, default_actions) == -1) {
10536 		if (action != PF_PASS)
10537 			pd.act.log |= PF_LOG_FORCE;
10538 		goto done;
10539 	}
10540 
10541 #ifdef INET
10542 	if (af == AF_INET && dir == PF_OUT && pflags & PFIL_FWD &&
10543 	    pd.df && (*m0)->m_pkthdr.len > ifp->if_mtu) {
10544 		PF_RULES_RUNLOCK();
10545 		icmp_error(*m0, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG,
10546 			0, ifp->if_mtu);
10547 		*m0 = NULL;
10548 		return (PF_DROP);
10549 	}
10550 #endif /* INET */
10551 #ifdef INET6
10552 	/*
10553 	 * If we end up changing IP addresses (e.g. binat) the stack may get
10554 	 * confused and fail to send the icmp6 packet too big error. Just send
10555 	 * it here, before we do any NAT.
10556 	 */
10557 	if (af == AF_INET6 && dir == PF_OUT && pflags & PFIL_FWD &&
10558 	    IN6_LINKMTU(ifp) < pf_max_frag_size(*m0)) {
10559 		PF_RULES_RUNLOCK();
10560 		icmp6_error(*m0, ICMP6_PACKET_TOO_BIG, 0, IN6_LINKMTU(ifp));
10561 		*m0 = NULL;
10562 		return (PF_DROP);
10563 	}
10564 #endif /* INET6 */
10565 
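	/*
	 * Packets re-injected by a divert(4) socket carry a divert tag.
	 * Note that they already looped through the socket (so they are not
	 * diverted again below) and strip any leftover ipfw rule tag.
	 */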
10566 	if (__predict_false(ip_divert_ptr != NULL) &&
10567 	    ((mtag = m_tag_locate(pd.m, MTAG_PF_DIVERT, 0, NULL)) != NULL)) {
10568 		struct pf_divert_mtag *dt = (struct pf_divert_mtag *)(mtag+1);
10569 		if ((dt->idir == PF_DIVERT_MTAG_DIR_IN && dir == PF_IN) ||
10570 		    (dt->idir == PF_DIVERT_MTAG_DIR_OUT && dir == PF_OUT)) {
10571 			if (pd.pf_mtag == NULL &&
10572 			    ((pd.pf_mtag = pf_get_mtag(pd.m)) == NULL)) {
10573 				action = PF_DROP;
10574 				goto done;
10575 			}
10576 			pd.pf_mtag->flags |= PF_MTAG_FLAG_PACKET_LOOPED;
10577 		}
10578 		if (pd.pf_mtag && pd.pf_mtag->flags & PF_MTAG_FLAG_FASTFWD_OURS_PRESENT) {
10579 			pd.m->m_flags |= M_FASTFWD_OURS;
10580 			pd.pf_mtag->flags &= ~PF_MTAG_FLAG_FASTFWD_OURS_PRESENT;
10581 		}
10582 		m_tag_delete(pd.m, mtag);
10583 
10584 		mtag = m_tag_locate(pd.m, MTAG_IPFW_RULE, 0, NULL);
10585 		if (mtag != NULL)
10586 			m_tag_delete(pd.m, mtag);
10587 	}
10588 
10589 	switch (pd.virtual_proto) {
10590 	case PF_VPROTO_FRAGMENT:
10591 		/*
10592 		 * handle fragments that aren't reassembled by
10593 		 * normalization
10594 		 */
10595 		if (kif == NULL || r == NULL) /* pflog */
10596 			action = PF_DROP;
10597 		else
10598 			action = pf_test_rule(&r, &s, &pd, &a,
10599 			    &ruleset, &reason, inp);
10600 		if (action != PF_PASS)
10601 			REASON_SET(&reason, PFRES_FRAG);
10602 		break;
10603 
10604 	case IPPROTO_TCP: {
10605 		/* Respond to SYN with a syncookie. */
10606 		if ((tcp_get_flags(&pd.hdr.tcp) & (TH_SYN|TH_ACK|TH_RST)) == TH_SYN &&
10607 		    pd.dir == PF_IN && pf_synflood_check(&pd)) {
10608 			pf_syncookie_send(&pd);
10609 			action = PF_DROP;
10610 			break;
10611 		}
10612 
10613 		if ((tcp_get_flags(&pd.hdr.tcp) & TH_ACK) && pd.p_len == 0)
10614 			use_2nd_queue = 1;
10615 		action = pf_normalize_tcp(&pd);
10616 		if (action == PF_DROP)
10617 			break;
10618 		action = pf_test_state(&s, &pd, &reason);
10619 		if (action == PF_PASS || action == PF_AFRT) {
10620 			if (V_pfsync_update_state_ptr != NULL)
10621 				V_pfsync_update_state_ptr(s);
10622 			r = s->rule;
10623 			a = s->anchor;
10624 		} else if (s == NULL) {
10625 			/* Validate remote SYN|ACK, re-create original SYN if
10626 			 * valid. */
10627 			if ((tcp_get_flags(&pd.hdr.tcp) & (TH_SYN|TH_ACK|TH_RST)) ==
10628 			    TH_ACK && pf_syncookie_validate(&pd) &&
10629 			    pd.dir == PF_IN) {
10630 				struct mbuf *msyn;
10631 
10632 				msyn = pf_syncookie_recreate_syn(&pd);
10633 				if (msyn == NULL) {
10634 					action = PF_DROP;
10635 					break;
10636 				}
10637 
10638 				action = pf_test(af, dir, pflags, ifp, &msyn, inp,
10639 				    &pd.act);
10640 				m_freem(msyn);
10641 				if (action != PF_PASS)
10642 					break;
10643 
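				/* The recursive pf_test() call above created
				 * state for the recreated SYN; look that state
				 * up so this cookie ACK can complete the proxy
				 * handshake. */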
10644 				action = pf_test_state(&s, &pd, &reason);
10645 				if (action != PF_PASS || s == NULL) {
10646 					action = PF_DROP;
10647 					break;
10648 				}
10649 
10650 				s->src.seqhi = ntohl(pd.hdr.tcp.th_ack) - 1;
10651 				s->src.seqlo = ntohl(pd.hdr.tcp.th_seq) - 1;
10652 				pf_set_protostate(s, PF_PEER_SRC, PF_TCPS_PROXY_DST);
10653 				action = pf_synproxy(&pd, s, &reason);
10654 				break;
10655 			} else {
10656 				action = pf_test_rule(&r, &s, &pd,
10657 				    &a, &ruleset, &reason, inp);
10658 			}
10659 		}
10660 		break;
10661 	}
10662 
10663 	case IPPROTO_SCTP:
10664 		action = pf_normalize_sctp(&pd);
10665 		if (action == PF_DROP)
10666 			break;
10667 		/* fallthrough */
10668 	case IPPROTO_UDP:
10669 	default:
10670 		action = pf_test_state(&s, &pd, &reason);
10671 		if (action == PF_PASS || action == PF_AFRT) {
10672 			if (V_pfsync_update_state_ptr != NULL)
10673 				V_pfsync_update_state_ptr(s);
10674 			r = s->rule;
10675 			a = s->anchor;
10676 		} else if (s == NULL) {
10677 			action = pf_test_rule(&r, &s,
10678 			    &pd, &a, &ruleset, &reason, inp);
10679 		}
10680 		break;
10681 
10682 	case IPPROTO_ICMP:
10683 	case IPPROTO_ICMPV6: {
10684 		if (pd.virtual_proto == IPPROTO_ICMP && af != AF_INET) {
10685 			action = PF_DROP;
10686 			REASON_SET(&reason, PFRES_NORM);
10687 			DPFPRINTF(PF_DEBUG_MISC,
10688 			    ("pf: dropping IPv6 packet with ICMPv4 payload\n"));
10689 			break;
10690 		}
10691 		if (pd.virtual_proto == IPPROTO_ICMPV6 && af != AF_INET6) {
10692 			action = PF_DROP;
10693 			REASON_SET(&reason, PFRES_NORM);
10694 			DPFPRINTF(PF_DEBUG_MISC,
10695 			    ("pf: dropping IPv4 packet with ICMPv6 payload\n"));
10696 			break;
10697 		}
10698 		action = pf_test_state_icmp(&s, &pd, &reason);
10699 		if (action == PF_PASS || action == PF_AFRT) {
10700 			if (V_pfsync_update_state_ptr != NULL)
10701 				V_pfsync_update_state_ptr(s);
10702 			r = s->rule;
10703 			a = s->anchor;
10704 		} else if (s == NULL)
10705 			action = pf_test_rule(&r, &s, &pd,
10706 			    &a, &ruleset, &reason, inp);
10707 		break;
10708 	}
10709 
10710 	}
10711 
10712 done:
10713 	PF_RULES_RUNLOCK();
10714 
10715 	if (pd.m == NULL)
10716 		goto eat_pkt;
10717 
10718 	if (s)
10719 		memcpy(&pd.act, &s->act, sizeof(s->act));
10720 
10721 	if (action == PF_PASS && pd.badopts && !pd.act.allow_opts) {
10722 		action = PF_DROP;
10723 		REASON_SET(&reason, PFRES_IPOPTIONS);
10724 		pd.act.log = PF_LOG_FORCE;
10725 		DPFPRINTF(PF_DEBUG_MISC,
10726 		    ("pf: dropping packet with dangerous headers\n"));
10727 	}
10728 
10729 	if (pd.act.max_pkt_size &&
10730 	    pd.tot_len > pd.act.max_pkt_size) {
10731 		action = PF_DROP;
10732 		REASON_SET(&reason, PFRES_NORM);
10733 		pd.act.log = PF_LOG_FORCE;
10734 		DPFPRINTF(PF_DEBUG_MISC,
10735 		    ("pf: dropping overly long packet\n"));
10736 	}
10737 
10738 	if (s) {
10739 		uint8_t log = pd.act.log;
10740 		memcpy(&pd.act, &s->act, sizeof(struct pf_rule_actions));
10741 		pd.act.log |= log;
10742 		tag = s->tag;
10743 	} else {
10744 		tag = r->tag;
10745 	}
10746 
10747 	if (tag > 0 && pf_tag_packet(&pd, tag)) {
10748 		action = PF_DROP;
10749 		REASON_SET(&reason, PFRES_MEMORY);
10750 	}
10751 
10752 	pf_scrub(&pd);
10753 	if (pd.proto == IPPROTO_TCP && pd.act.max_mss)
10754 		pf_normalize_mss(&pd);
10755 
10756 	if (pd.act.rtableid >= 0)
10757 		M_SETFIB(pd.m, pd.act.rtableid);
10758 
10759 	if (pd.act.flags & PFSTATE_SETPRIO) {
10760 		if (pd.tos & IPTOS_LOWDELAY)
10761 			use_2nd_queue = 1;
10762 		if (vlan_set_pcp(pd.m, pd.act.set_prio[use_2nd_queue])) {
10763 			action = PF_DROP;
10764 			REASON_SET(&reason, PFRES_MEMORY);
10765 			pd.act.log = PF_LOG_FORCE;
10766 			DPFPRINTF(PF_DEBUG_MISC,
10767 			    ("pf: failed to allocate 802.1q mtag\n"));
10768 		}
10769 	}
10770 
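	/*
	 * Record the queue chosen by the matching rule so ALTQ can classify
	 * the packet on output; pure ACKs and low-delay traffic are steered
	 * to the priority queue.
	 */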
10771 #ifdef ALTQ
10772 	if (action == PF_PASS && pd.act.qid) {
10773 		if (pd.pf_mtag == NULL &&
10774 		    ((pd.pf_mtag = pf_get_mtag(pd.m)) == NULL)) {
10775 			action = PF_DROP;
10776 			REASON_SET(&reason, PFRES_MEMORY);
10777 		} else {
10778 			if (s != NULL)
10779 				pd.pf_mtag->qid_hash = pf_state_hash(s);
10780 			if (use_2nd_queue || (pd.tos & IPTOS_LOWDELAY))
10781 				pd.pf_mtag->qid = pd.act.pqid;
10782 			else
10783 				pd.pf_mtag->qid = pd.act.qid;
10784 			/* Add hints for ecn. */
10785 			pd.pf_mtag->hdr = mtod(pd.m, void *);
10786 		}
10787 	}
10788 #endif /* ALTQ */
10789 
10790 	/*
10791 	 * connections redirected to loopback should not match sockets
10792 	 * bound specifically to loopback due to security implications,
10793 	 * see tcp_input() and in_pcblookup_listen().
10794 	 */
10795 	if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
10796 	    pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule != NULL &&
10797 	    (s->nat_rule->action == PF_RDR ||
10798 	    s->nat_rule->action == PF_BINAT) &&
10799 	    pf_is_loopback(af, pd.dst))
10800 		pd.m->m_flags |= M_SKIP_FIREWALL;
10801 
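	/*
	 * If the matching rule requests divert and the packet has not already
	 * looped through a divert(4) socket, tag it and hand it off; the
	 * socket will re-inject it later.
	 */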
10802 	if (af == AF_INET && __predict_false(ip_divert_ptr != NULL) &&
10803 	    action == PF_PASS && r->divert.port && !PACKET_LOOPED(&pd)) {
10804 		mtag = m_tag_alloc(MTAG_PF_DIVERT, 0,
10805 		    sizeof(struct pf_divert_mtag), M_NOWAIT | M_ZERO);
10806 		if (mtag != NULL) {
10807 			((struct pf_divert_mtag *)(mtag+1))->port =
10808 			    ntohs(r->divert.port);
10809 			((struct pf_divert_mtag *)(mtag+1))->idir =
10810 			    (dir == PF_IN) ? PF_DIVERT_MTAG_DIR_IN :
10811 			    PF_DIVERT_MTAG_DIR_OUT;
10812 
10813 			if (s)
10814 				PF_STATE_UNLOCK(s);
10815 
10816 			m_tag_prepend(pd.m, mtag);
10817 			if (pd.m->m_flags & M_FASTFWD_OURS) {
10818 				if (pd.pf_mtag == NULL &&
10819 				    ((pd.pf_mtag = pf_get_mtag(pd.m)) == NULL)) {
10820 					action = PF_DROP;
10821 					REASON_SET(&reason, PFRES_MEMORY);
10822 					pd.act.log = PF_LOG_FORCE;
10823 					DPFPRINTF(PF_DEBUG_MISC,
10824 					    ("pf: failed to allocate tag\n"));
10825 				} else {
10826 					pd.pf_mtag->flags |=
10827 					    PF_MTAG_FLAG_FASTFWD_OURS_PRESENT;
10828 					pd.m->m_flags &= ~M_FASTFWD_OURS;
10829 				}
10830 			}
10831 			ip_divert_ptr(*m0, dir == PF_IN);
10832 			*m0 = NULL;
10833 
10834 			return (action);
10835 		} else {
10836 			/* XXX: ipfw has the same behaviour! */
10837 			action = PF_DROP;
10838 			REASON_SET(&reason, PFRES_MEMORY);
10839 			pd.act.log = PF_LOG_FORCE;
10840 			DPFPRINTF(PF_DEBUG_MISC,
10841 			    ("pf: failed to allocate divert tag\n"));
10842 		}
10843 	}
10844 	/* XXX: Anybody working on it?! */
10845 	if (af == AF_INET6 && r->divert.port)
10846 		printf("pf: divert(9) is not supported for IPv6\n");
10847 
10848 	/* this flag will need revising if the pkt is forwarded */
10849 	if (pd.pf_mtag)
10850 		pd.pf_mtag->flags &= ~PF_MTAG_FLAG_PACKET_LOOPED;
10851 
10852 	if (pd.act.log) {
10853 		struct pf_krule		*lr;
10854 		struct pf_krule_item	*ri;
10855 
10856 		if (s != NULL && s->nat_rule != NULL &&
10857 		    s->nat_rule->log & PF_LOG_ALL)
10858 			lr = s->nat_rule;
10859 		else
10860 			lr = r;
10861 
10862 		if (pd.act.log & PF_LOG_FORCE || lr->log & PF_LOG_ALL)
10863 			PFLOG_PACKET(action, reason, lr, a,
10864 			    ruleset, &pd, (s == NULL), NULL);
10865 		if (s) {
10866 			SLIST_FOREACH(ri, &s->match_rules, entry)
10867 				if (ri->r->log & PF_LOG_ALL)
10868 					PFLOG_PACKET(action,
10869 					    reason, ri->r, a, ruleset, &pd, 0, NULL);
10870 		}
10871 	}
10872 
10873 	pf_counters_inc(action, &pd, s, r, a);
10874 
10875 	switch (action) {
10876 	case PF_SYNPROXY_DROP:
10877 		m_freem(*m0);
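		/* FALLTHROUGH */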
10878 	case PF_DEFER:
10879 		*m0 = NULL;
10880 		action = PF_PASS;
10881 		break;
10882 	case PF_DROP:
10883 		m_freem(*m0);
10884 		*m0 = NULL;
10885 		break;
10886 	case PF_AFRT:
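		/*
		 * An af-to rule matched: translate the packet to its new
		 * address family and route it out directly.
		 */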
10887 		if (pf_translate_af(&pd)) {
10888 			*m0 = pd.m;
10889 			action = PF_DROP;
10890 			break;
10891 		}
10892 #ifdef INET
10893 		if (pd.naf == AF_INET)
10894 			pf_route(r, kif->pfik_ifp, s, &pd, inp);
10895 #endif /* INET */
10896 #ifdef INET6
10897 		if (pd.naf == AF_INET6)
10898 			pf_route6(r, kif->pfik_ifp, s, &pd, inp);
10899 #endif /* INET6 */
10900 		*m0 = pd.m;
10901 		action = PF_PASS;
10902 		goto out;
10903 		break;
10904 	default:
10905 		if (pd.act.rt) {
10906 			switch (af) {
10907 #ifdef INET
10908 			case AF_INET:
10909 				/* pf_route() returns unlocked. */
10910 				pf_route(r, kif->pfik_ifp, s, &pd, inp);
10911 				break;
10912 #endif /* INET */
10913 #ifdef INET6
10914 			case AF_INET6:
10915 				/* pf_route6() returns unlocked. */
10916 				pf_route6(r, kif->pfik_ifp, s, &pd, inp);
10917 				break;
10918 #endif /* INET6 */
10919 			}
10920 			*m0 = pd.m;
10921 			goto out;
10922 		}
10923 		if (pf_dummynet(&pd, s, r, m0) != 0) {
10924 			action = PF_DROP;
10925 			REASON_SET(&reason, PFRES_MEMORY);
10926 		}
10927 		break;
10928 	}
10929 
10930 eat_pkt:
10931 	SDT_PROBE4(pf, ip, test, done, action, reason, r, s);
10932 
10933 	if (s && action != PF_DROP) {
10934 		if (!s->if_index_in && dir == PF_IN)
10935 			s->if_index_in = ifp->if_index;
10936 		else if (!s->if_index_out && dir == PF_OUT)
10937 			s->if_index_out = ifp->if_index;
10938 	}
10939 
10940 	if (s)
10941 		PF_STATE_UNLOCK(s);
10942 
10943 out:
10944 #ifdef INET6
10945 	/* If reassembled packet passed, create new fragments. */
10946 	if (af == AF_INET6 && action == PF_PASS && *m0 && dir == PF_OUT &&
10947 	    (! (pflags & PF_PFIL_NOREFRAGMENT)) &&
10948 	    (mtag = m_tag_find(pd.m, PACKET_TAG_PF_REASSEMBLED, NULL)) != NULL)
10949 		action = pf_refragment6(ifp, m0, mtag, NULL, pflags & PFIL_FWD);
10950 #endif /* INET6 */
10951 
10952 	pf_sctp_multihome_delayed(&pd, kif, s, action);
10953 
10954 	return (action);
10955 }
10956 #endif /* INET || INET6 */
10957