xref: /freebsd/sys/netpfil/pf/pf_ioctl.c (revision ae96ff302f8ae50903a96d3a1857f9acf243f3c4)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2001 Daniel Hartmeier
5  * Copyright (c) 2002,2003 Henning Brauer
6  * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  *
13  *    - Redistributions of source code must retain the above copyright
14  *      notice, this list of conditions and the following disclaimer.
15  *    - Redistributions in binary form must reproduce the above
16  *      copyright notice, this list of conditions and the following
17  *      disclaimer in the documentation and/or other materials provided
18  *      with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
30  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  *
33  * Effort sponsored in part by the Defense Advanced Research Projects
34  * Agency (DARPA) and Air Force Research Laboratory, Air Force
35  * Materiel Command, USAF, under agreement number F30602-01-2-0537.
36  *
37  *	$OpenBSD: pf_ioctl.c,v 1.213 2009/02/15 21:46:12 mbalmer Exp $
38  */
39 
40 #include <sys/cdefs.h>
41 #include "opt_inet.h"
42 #include "opt_inet6.h"
43 #include "opt_bpf.h"
44 #include "opt_pf.h"
45 
46 #include <sys/param.h>
47 #include <sys/_bitset.h>
48 #include <sys/bitset.h>
49 #include <sys/bus.h>
50 #include <sys/conf.h>
51 #include <sys/endian.h>
52 #include <sys/fcntl.h>
53 #include <sys/filio.h>
54 #include <sys/hash.h>
55 #include <sys/interrupt.h>
56 #include <sys/jail.h>
57 #include <sys/kernel.h>
58 #include <sys/kthread.h>
59 #include <sys/lock.h>
60 #include <sys/mbuf.h>
61 #include <sys/module.h>
62 #include <sys/nv.h>
63 #include <sys/proc.h>
64 #include <sys/sdt.h>
65 #include <sys/smp.h>
66 #include <sys/socket.h>
67 #include <sys/sysctl.h>
68 #include <sys/md5.h>
69 #include <sys/ucred.h>
70 
71 #include <net/if.h>
72 #include <net/if_var.h>
73 #include <net/if_private.h>
74 #include <net/vnet.h>
75 #include <net/route.h>
76 #include <net/pfil.h>
77 #include <net/pfvar.h>
78 #include <net/if_pfsync.h>
79 #include <net/if_pflog.h>
80 
81 #include <netinet/in.h>
82 #include <netinet/ip.h>
83 #include <netinet/ip_var.h>
84 #include <netinet6/ip6_var.h>
85 #include <netinet/ip_icmp.h>
86 #include <netpfil/pf/pf_nl.h>
87 #include <netpfil/pf/pf_nv.h>
88 
89 #ifdef INET6
90 #include <netinet/ip6.h>
91 #endif /* INET6 */
92 
93 #ifdef ALTQ
94 #include <net/altq/altq.h>
95 #endif
96 
97 SDT_PROBE_DEFINE3(pf, ioctl, ioctl, error, "int", "int", "int");
98 SDT_PROBE_DEFINE3(pf, ioctl, function, error, "char *", "int", "int");
99 SDT_PROBE_DEFINE2(pf, ioctl, addrule, error, "int", "int");
100 SDT_PROBE_DEFINE2(pf, ioctl, nvchk, error, "int", "int");
101 
102 static struct pf_kpool	*pf_get_kpool(const char *, u_int32_t, u_int8_t,
103 			    u_int32_t, u_int8_t, u_int8_t, u_int8_t, int);
104 
105 static void		 pf_mv_kpool(struct pf_kpalist *, struct pf_kpalist *);
106 static void		 pf_empty_kpool(struct pf_kpalist *);
107 static int		 pfioctl(struct cdev *, u_long, caddr_t, int,
108 			    struct thread *);
109 static int		 pf_begin_eth(uint32_t *, const char *);
110 static int		 pf_rollback_eth(uint32_t, const char *);
111 static int		 pf_commit_eth(uint32_t, const char *);
112 static void		 pf_free_eth_rule(struct pf_keth_rule *);
113 #ifdef ALTQ
114 static int		 pf_begin_altq(u_int32_t *);
115 static int		 pf_rollback_altq(u_int32_t);
116 static int		 pf_commit_altq(u_int32_t);
117 static int		 pf_enable_altq(struct pf_altq *);
118 static int		 pf_disable_altq(struct pf_altq *);
119 static void		 pf_qid_unref(uint16_t);
120 #endif /* ALTQ */
121 static int		 pf_begin_rules(u_int32_t *, int, const char *);
122 static int		 pf_rollback_rules(u_int32_t, int, char *);
123 static int		 pf_setup_pfsync_matching(struct pf_kruleset *);
124 static void		 pf_hash_rule_rolling(MD5_CTX *, struct pf_krule *);
125 static void		 pf_hash_rule(struct pf_krule *);
126 static void		 pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
127 static int		 pf_commit_rules(u_int32_t, int, char *);
128 static int		 pf_addr_setup(struct pf_kruleset *,
129 			    struct pf_addr_wrap *, sa_family_t);
130 static void		 pf_src_node_copy(const struct pf_ksrc_node *,
131 			    struct pf_src_node *);
132 #ifdef ALTQ
133 static int		 pf_export_kaltq(struct pf_altq *,
134 			    struct pfioc_altq_v1 *, size_t);
135 static int		 pf_import_kaltq(struct pfioc_altq_v1 *,
136 			    struct pf_altq *, size_t);
137 #endif /* ALTQ */
138 
139 VNET_DEFINE(struct pf_krule,	pf_default_rule);
140 
141 static __inline int             pf_krule_compare(struct pf_krule *,
142 				    struct pf_krule *);
143 
144 RB_GENERATE(pf_krule_global, pf_krule, entry_global, pf_krule_compare);
145 
146 #ifdef ALTQ
147 VNET_DEFINE_STATIC(int,		pf_altq_running);
148 #define	V_pf_altq_running	VNET(pf_altq_running)
149 #endif
150 
151 #define	TAGID_MAX	 50000
152 struct pf_tagname {
153 	TAILQ_ENTRY(pf_tagname)	namehash_entries;
154 	TAILQ_ENTRY(pf_tagname)	taghash_entries;
155 	char			name[PF_TAG_NAME_SIZE];
156 	uint16_t		tag;
157 	int			ref;
158 };
159 
160 struct pf_tagset {
161 	TAILQ_HEAD(, pf_tagname)	*namehash;
162 	TAILQ_HEAD(, pf_tagname)	*taghash;
163 	unsigned int			 mask;
164 	uint32_t			 seed;
165 	BITSET_DEFINE(, TAGID_MAX)	 avail;
166 };
167 
168 VNET_DEFINE(struct pf_tagset, pf_tags);
169 #define	V_pf_tags	VNET(pf_tags)
170 static unsigned int	pf_rule_tag_hashsize;
171 #define	PF_RULE_TAG_HASH_SIZE_DEFAULT	128
172 SYSCTL_UINT(_net_pf, OID_AUTO, rule_tag_hashsize, CTLFLAG_RDTUN,
173     &pf_rule_tag_hashsize, PF_RULE_TAG_HASH_SIZE_DEFAULT,
174     "Size of pf(4) rule tag hashtable");
175 
176 #ifdef ALTQ
177 VNET_DEFINE(struct pf_tagset, pf_qids);
178 #define	V_pf_qids	VNET(pf_qids)
179 static unsigned int	pf_queue_tag_hashsize;
180 #define	PF_QUEUE_TAG_HASH_SIZE_DEFAULT	128
181 SYSCTL_UINT(_net_pf, OID_AUTO, queue_tag_hashsize, CTLFLAG_RDTUN,
182     &pf_queue_tag_hashsize, PF_QUEUE_TAG_HASH_SIZE_DEFAULT,
183     "Size of pf(4) queue tag hashtable");
184 #endif
185 VNET_DEFINE(uma_zone_t,	 pf_tag_z);
186 #define	V_pf_tag_z		 VNET(pf_tag_z)
187 static MALLOC_DEFINE(M_PFALTQ, "pf_altq", "pf(4) altq configuration db");
188 static MALLOC_DEFINE(M_PFRULE, "pf_rule", "pf(4) rules");
189 MALLOC_DEFINE(M_PF, "pf", "pf(4)");
190 
191 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
192 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
193 #endif
194 
195 VNET_DEFINE_STATIC(bool, pf_filter_local) = false;
196 #define V_pf_filter_local	VNET(pf_filter_local)
197 SYSCTL_BOOL(_net_pf, OID_AUTO, filter_local, CTLFLAG_VNET | CTLFLAG_RW,
198     &VNET_NAME(pf_filter_local), false,
199     "Enable filtering for packets delivered to local network stack");
200 
201 #ifdef PF_DEFAULT_TO_DROP
202 VNET_DEFINE_STATIC(bool, default_to_drop) = true;
203 #else
204 VNET_DEFINE_STATIC(bool, default_to_drop);
205 #endif
206 #define	V_default_to_drop VNET(default_to_drop)
207 SYSCTL_BOOL(_net_pf, OID_AUTO, default_to_drop, CTLFLAG_RDTUN | CTLFLAG_VNET,
208     &VNET_NAME(default_to_drop), false,
209     "Make the default rule drop all packets.");
210 
211 static void		 pf_init_tagset(struct pf_tagset *, unsigned int *,
212 			    unsigned int);
213 static void		 pf_cleanup_tagset(struct pf_tagset *);
214 static uint16_t		 tagname2hashindex(const struct pf_tagset *, const char *);
215 static uint16_t		 tag2hashindex(const struct pf_tagset *, uint16_t);
216 static u_int16_t	 tagname2tag(struct pf_tagset *, const char *, bool);
217 static void		 tag_unref(struct pf_tagset *, u_int16_t);
218 
219 struct cdev *pf_dev;
220 
221 /*
222  * XXX - These are new and need to be checked when moving to a new version
223  */
224 static void		 pf_clear_all_states(void);
225 static int		 pf_killstates_row(struct pf_kstate_kill *,
226 			    struct pf_idhash *);
227 static int		 pf_killstates_nv(struct pfioc_nv *);
228 static int		 pf_clearstates_nv(struct pfioc_nv *);
229 static int		 pf_getstate(struct pfioc_nv *);
230 static int		 pf_getstatus(struct pfioc_nv *);
231 static int		 pf_clear_tables(void);
232 static void		 pf_kill_srcnodes(struct pfioc_src_node_kill *);
233 static int		 pf_keepcounters(struct pfioc_nv *);
234 static void		 pf_tbladdr_copyout(struct pf_addr_wrap *);
235 
236 /*
237  * Wrapper functions for pfil(9) hooks
238  */
239 static pfil_return_t pf_eth_check_in(struct mbuf **m, struct ifnet *ifp,
240     int flags, void *ruleset __unused, struct inpcb *inp);
241 static pfil_return_t pf_eth_check_out(struct mbuf **m, struct ifnet *ifp,
242     int flags, void *ruleset __unused, struct inpcb *inp);
243 #ifdef INET
244 static pfil_return_t pf_check_in(struct mbuf **m, struct ifnet *ifp,
245     int flags, void *ruleset __unused, struct inpcb *inp);
246 static pfil_return_t pf_check_out(struct mbuf **m, struct ifnet *ifp,
247     int flags, void *ruleset __unused, struct inpcb *inp);
248 #endif
249 #ifdef INET6
250 static pfil_return_t pf_check6_in(struct mbuf **m, struct ifnet *ifp,
251     int flags, void *ruleset __unused, struct inpcb *inp);
252 static pfil_return_t pf_check6_out(struct mbuf **m, struct ifnet *ifp,
253     int flags, void *ruleset __unused, struct inpcb *inp);
254 #endif
255 
256 static void		hook_pf_eth(void);
257 static void		hook_pf(void);
258 static void		dehook_pf_eth(void);
259 static void		dehook_pf(void);
260 static int		shutdown_pf(void);
261 static int		pf_load(void);
262 static void		pf_unload(void *);
263 
264 static struct cdevsw pf_cdevsw = {
265 	.d_ioctl =	pfioctl,
266 	.d_name =	PF_NAME,
267 	.d_version =	D_VERSION,
268 };
269 
270 VNET_DEFINE_STATIC(bool, pf_pfil_hooked);
271 #define V_pf_pfil_hooked	VNET(pf_pfil_hooked)
272 VNET_DEFINE_STATIC(bool, pf_pfil_eth_hooked);
273 #define V_pf_pfil_eth_hooked	VNET(pf_pfil_eth_hooked)
274 
275 /*
276  * We need a flag that is neither hooked nor running to know when
277  * the VNET is "valid".  We primarily need this to control (global)
278  * external events, e.g., eventhandlers.
279  */
280 VNET_DEFINE(int, pf_vnet_active);
281 #define V_pf_vnet_active	VNET(pf_vnet_active)
282 
283 int pf_end_threads;
284 struct proc *pf_purge_proc;
285 
286 VNET_DEFINE(struct rmlock, pf_rules_lock);
287 VNET_DEFINE(struct rmlock, pf_tags_lock);
288 VNET_DEFINE_STATIC(struct sx, pf_ioctl_lock);
289 #define	V_pf_ioctl_lock		VNET(pf_ioctl_lock)
290 struct sx			pf_end_lock;
291 
292 /* pfsync */
293 VNET_DEFINE(pfsync_state_import_t *, pfsync_state_import_ptr);
294 VNET_DEFINE(pfsync_insert_state_t *, pfsync_insert_state_ptr);
295 VNET_DEFINE(pfsync_update_state_t *, pfsync_update_state_ptr);
296 VNET_DEFINE(pfsync_delete_state_t *, pfsync_delete_state_ptr);
297 VNET_DEFINE(pfsync_clear_states_t *, pfsync_clear_states_ptr);
298 VNET_DEFINE(pfsync_defer_t *, pfsync_defer_ptr);
299 VNET_DEFINE(pflow_export_state_t *, pflow_export_state_ptr);
300 pfsync_detach_ifnet_t *pfsync_detach_ifnet_ptr;
301 
302 /* pflog */
303 pflog_packet_t			*pflog_packet_ptr = NULL;
304 
305 /*
306  * Copy a user-provided string, returning an error if truncation would occur.
307  * Avoid scanning past "sz" bytes in the source string since there's no
308  * guarantee that it's nul-terminated.
309  */
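/*
 * For example, with sz == 4 a 3-character string plus its NUL copies fine,
 * while a 4-character string has no NUL within the first 4 bytes, so
 * strnlen() returns sz and EINVAL is returned instead of silently
 * truncating.
 */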
310 static int
311 pf_user_strcpy(char *dst, const char *src, size_t sz)
312 {
313 	if (strnlen(src, sz) == sz)
314 		return (EINVAL);
315 	(void)strlcpy(dst, src, sz);
316 	return (0);
317 }
318 
319 static void
320 pfattach_vnet(void)
321 {
322 	u_int32_t *my_timeout = V_pf_default_rule.timeout;
323 
324 	bzero(&V_pf_status, sizeof(V_pf_status));
325 
326 	pf_initialize();
327 	pfr_initialize();
328 	pfi_initialize_vnet();
329 	pf_normalize_init();
330 	pf_syncookies_init();
331 
332 	V_pf_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
333 	V_pf_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT;
334 	V_pf_limits[PF_LIMIT_ANCHORS].limit = PF_ANCHOR_HIWAT;
335 	V_pf_limits[PF_LIMIT_ETH_ANCHORS].limit = PF_ANCHOR_HIWAT;
336 
337 	RB_INIT(&V_pf_anchors);
338 	pf_init_kruleset(&pf_main_ruleset);
339 
340 	pf_init_keth(V_pf_keth);
341 
342 	/* default rule should never be garbage collected */
343 	V_pf_default_rule.entries.tqe_prev = &V_pf_default_rule.entries.tqe_next;
344 	V_pf_default_rule.action = V_default_to_drop ? PF_DROP : PF_PASS;
345 	V_pf_default_rule.nr = (uint32_t)-1;
346 	V_pf_default_rule.rtableid = -1;
347 
348 	pf_counter_u64_init(&V_pf_default_rule.evaluations, M_WAITOK);
349 	for (int i = 0; i < 2; i++) {
350 		pf_counter_u64_init(&V_pf_default_rule.packets[i], M_WAITOK);
351 		pf_counter_u64_init(&V_pf_default_rule.bytes[i], M_WAITOK);
352 	}
353 	V_pf_default_rule.states_cur = counter_u64_alloc(M_WAITOK);
354 	V_pf_default_rule.states_tot = counter_u64_alloc(M_WAITOK);
355 	for (pf_sn_types_t sn_type = 0; sn_type<PF_SN_MAX; sn_type++)
356 		V_pf_default_rule.src_nodes[sn_type] = counter_u64_alloc(M_WAITOK);
357 
358 	V_pf_default_rule.timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
359 	    M_WAITOK | M_ZERO);
360 
361 #ifdef PF_WANT_32_TO_64_COUNTER
362 	V_pf_kifmarker = malloc(sizeof(*V_pf_kifmarker), PFI_MTYPE, M_WAITOK | M_ZERO);
363 	V_pf_rulemarker = malloc(sizeof(*V_pf_rulemarker), M_PFRULE, M_WAITOK | M_ZERO);
364 	PF_RULES_WLOCK();
365 	LIST_INSERT_HEAD(&V_pf_allkiflist, V_pf_kifmarker, pfik_allkiflist);
366 	LIST_INSERT_HEAD(&V_pf_allrulelist, &V_pf_default_rule, allrulelist);
367 	V_pf_allrulecount++;
368 	LIST_INSERT_HEAD(&V_pf_allrulelist, V_pf_rulemarker, allrulelist);
369 	PF_RULES_WUNLOCK();
370 #endif
371 
372 	/* initialize default timeouts */
373 	my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
374 	my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
375 	my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
376 	my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
377 	my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
378 	my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
379 	my_timeout[PFTM_SCTP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
380 	my_timeout[PFTM_SCTP_OPENING] = PFTM_TCP_OPENING_VAL;
381 	my_timeout[PFTM_SCTP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
382 	my_timeout[PFTM_SCTP_CLOSING] = PFTM_TCP_CLOSING_VAL;
383 	my_timeout[PFTM_SCTP_CLOSED] = PFTM_TCP_CLOSED_VAL;
384 	my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
385 	my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
386 	my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
387 	my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
388 	my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
389 	my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
390 	my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
391 	my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
392 	my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
393 	my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
394 	my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
395 	my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
396 	my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
397 	my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;
398 
399 	V_pf_status.debug = PF_DEBUG_URGENT;
400 	/*
401 	 * XXX This is different from OpenBSD, where reassembly is enabled by
402 	 * default. In FreeBSD we expect people to still use scrub rules and
403 	 * switch to the new syntax later. Only once they switch must they
404 	 * explicitly enable reassembly. We could change the default once the
405 	 * scrub rule functionality is hopefully removed some day in the future.
406 	 */
407 	V_pf_status.reass = 0;
408 
409 	V_pf_pfil_hooked = false;
410 	V_pf_pfil_eth_hooked = false;
411 
412 	/* XXX do our best to avoid a conflict */
413 	V_pf_status.hostid = arc4random();
414 
415 	for (int i = 0; i < PFRES_MAX; i++)
416 		V_pf_status.counters[i] = counter_u64_alloc(M_WAITOK);
417 	for (int i = 0; i < KLCNT_MAX; i++)
418 		V_pf_status.lcounters[i] = counter_u64_alloc(M_WAITOK);
419 	for (int i = 0; i < FCNT_MAX; i++)
420 		pf_counter_u64_init(&V_pf_status.fcounters[i], M_WAITOK);
421 	for (int i = 0; i < SCNT_MAX; i++)
422 		V_pf_status.scounters[i] = counter_u64_alloc(M_WAITOK);
423 	for (int i = 0; i < NCNT_MAX; i++)
424 		V_pf_status.ncounters[i] = counter_u64_alloc(M_WAITOK);
425 
426 	if (swi_add(&V_pf_swi_ie, "pf send", pf_intr, curvnet, SWI_NET,
427 	    INTR_MPSAFE, &V_pf_swi_cookie) != 0)
428 		/* XXXGL: leaked all above. */
429 		return;
430 }
431 
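/*
 * Look up the NAT, RDR or route pool (selected by "which") of the rule
 * identified by anchor, action and rule number (or the last rule when
 * r_last is set), optionally validating the ruleset ticket.  Returns NULL
 * if the ruleset, ticket or rule cannot be found.
 */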
432 static struct pf_kpool *
433 pf_get_kpool(const char *anchor, u_int32_t ticket, u_int8_t rule_action,
434     u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
435     u_int8_t check_ticket, int which)
436 {
437 	struct pf_kruleset	*ruleset;
438 	struct pf_krule		*rule;
439 	int			 rs_num;
440 
441 	MPASS(which == PF_RDR || which == PF_NAT || which == PF_RT);
442 
443 	ruleset = pf_find_kruleset(anchor);
444 	if (ruleset == NULL)
445 		return (NULL);
446 	rs_num = pf_get_ruleset_number(rule_action);
447 	if (rs_num >= PF_RULESET_MAX)
448 		return (NULL);
449 	if (active) {
450 		if (check_ticket && ticket !=
451 		    ruleset->rules[rs_num].active.ticket)
452 			return (NULL);
453 		if (r_last)
454 			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
455 			    pf_krulequeue);
456 		else
457 			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
458 	} else {
459 		if (check_ticket && ticket !=
460 		    ruleset->rules[rs_num].inactive.ticket)
461 			return (NULL);
462 		if (r_last)
463 			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
464 			    pf_krulequeue);
465 		else
466 			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
467 	}
468 	if (!r_last) {
469 		while ((rule != NULL) && (rule->nr != rule_number))
470 			rule = TAILQ_NEXT(rule, entries);
471 	}
472 	if (rule == NULL)
473 		return (NULL);
474 
475 	switch (which) {
476 	case PF_RDR:
477 		return (&rule->rdr);
478 	case PF_NAT:
479 		return (&rule->nat);
480 	case PF_RT:
481 		return (&rule->route);
482 	default:
483 		panic("Unknown pool type %d", which);
484 	}
485 }
486 
487 static void
488 pf_mv_kpool(struct pf_kpalist *poola, struct pf_kpalist *poolb)
489 {
490 	struct pf_kpooladdr	*mv_pool_pa;
491 
492 	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
493 		TAILQ_REMOVE(poola, mv_pool_pa, entries);
494 		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
495 	}
496 }
497 
498 static void
499 pf_empty_kpool(struct pf_kpalist *poola)
500 {
501 	struct pf_kpooladdr *pa;
502 
503 	while ((pa = TAILQ_FIRST(poola)) != NULL) {
504 		switch (pa->addr.type) {
505 		case PF_ADDR_DYNIFTL:
506 			pfi_dynaddr_remove(pa->addr.p.dyn);
507 			break;
508 		case PF_ADDR_TABLE:
509 			/* XXX: this could be unfinished pooladdr on pabuf */
510 			if (pa->addr.p.tbl != NULL)
511 				pfr_detach_table(pa->addr.p.tbl);
512 			break;
513 		}
514 		if (pa->kif)
515 			pfi_kkif_unref(pa->kif);
516 		TAILQ_REMOVE(poola, pa, entries);
517 		free(pa, M_PFRULE);
518 	}
519 }
520 
521 static void
522 pf_unlink_rule_locked(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
523 {
524 
525 	PF_RULES_WASSERT();
526 	PF_UNLNKDRULES_ASSERT();
527 
528 	TAILQ_REMOVE(rulequeue, rule, entries);
529 
530 	rule->rule_ref |= PFRULE_REFS;
531 	TAILQ_INSERT_TAIL(&V_pf_unlinked_rules, rule, entries);
532 }
533 
534 static void
535 pf_unlink_rule(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
536 {
537 
538 	PF_RULES_WASSERT();
539 
540 	PF_UNLNKDRULES_LOCK();
541 	pf_unlink_rule_locked(rulequeue, rule);
542 	PF_UNLNKDRULES_UNLOCK();
543 }
544 
545 static void
546 pf_free_eth_rule(struct pf_keth_rule *rule)
547 {
548 	PF_RULES_WASSERT();
549 
550 	if (rule == NULL)
551 		return;
552 
553 	if (rule->tag)
554 		tag_unref(&V_pf_tags, rule->tag);
555 	if (rule->match_tag)
556 		tag_unref(&V_pf_tags, rule->match_tag);
557 #ifdef ALTQ
558 	pf_qid_unref(rule->qid);
559 #endif
560 
561 	if (rule->bridge_to)
562 		pfi_kkif_unref(rule->bridge_to);
563 	if (rule->kif)
564 		pfi_kkif_unref(rule->kif);
565 
566 	if (rule->ipsrc.addr.type == PF_ADDR_TABLE)
567 		pfr_detach_table(rule->ipsrc.addr.p.tbl);
568 	if (rule->ipdst.addr.type == PF_ADDR_TABLE)
569 		pfr_detach_table(rule->ipdst.addr.p.tbl);
570 
571 	counter_u64_free(rule->evaluations);
572 	for (int i = 0; i < 2; i++) {
573 		counter_u64_free(rule->packets[i]);
574 		counter_u64_free(rule->bytes[i]);
575 	}
576 	uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp);
577 	pf_keth_anchor_remove(rule);
578 
579 	free(rule, M_PFRULE);
580 }
581 
582 void
583 pf_free_rule(struct pf_krule *rule)
584 {
585 
586 	PF_RULES_WASSERT();
587 	PF_CONFIG_ASSERT();
588 
589 	if (rule->tag)
590 		tag_unref(&V_pf_tags, rule->tag);
591 	if (rule->match_tag)
592 		tag_unref(&V_pf_tags, rule->match_tag);
593 #ifdef ALTQ
594 	if (rule->pqid != rule->qid)
595 		pf_qid_unref(rule->pqid);
596 	pf_qid_unref(rule->qid);
597 #endif
598 	switch (rule->src.addr.type) {
599 	case PF_ADDR_DYNIFTL:
600 		pfi_dynaddr_remove(rule->src.addr.p.dyn);
601 		break;
602 	case PF_ADDR_TABLE:
603 		pfr_detach_table(rule->src.addr.p.tbl);
604 		break;
605 	}
606 	switch (rule->dst.addr.type) {
607 	case PF_ADDR_DYNIFTL:
608 		pfi_dynaddr_remove(rule->dst.addr.p.dyn);
609 		break;
610 	case PF_ADDR_TABLE:
611 		pfr_detach_table(rule->dst.addr.p.tbl);
612 		break;
613 	}
614 	if (rule->overload_tbl)
615 		pfr_detach_table(rule->overload_tbl);
616 	if (rule->kif)
617 		pfi_kkif_unref(rule->kif);
618 	if (rule->rcv_kif)
619 		pfi_kkif_unref(rule->rcv_kif);
620 	pf_remove_kanchor(rule);
621 	pf_empty_kpool(&rule->rdr.list);
622 	pf_empty_kpool(&rule->nat.list);
623 	pf_empty_kpool(&rule->route.list);
624 
625 	pf_krule_free(rule);
626 }
627 
628 static void
629 pf_init_tagset(struct pf_tagset *ts, unsigned int *tunable_size,
630     unsigned int default_size)
631 {
632 	unsigned int i;
633 	unsigned int hashsize;
634 
635 	if (*tunable_size == 0 || !powerof2(*tunable_size))
636 		*tunable_size = default_size;
637 
638 	hashsize = *tunable_size;
639 	ts->namehash = mallocarray(hashsize, sizeof(*ts->namehash), M_PFHASH,
640 	    M_WAITOK);
641 	ts->taghash = mallocarray(hashsize, sizeof(*ts->taghash), M_PFHASH,
642 	    M_WAITOK);
643 	ts->mask = hashsize - 1;
644 	ts->seed = arc4random();
645 	for (i = 0; i < hashsize; i++) {
646 		TAILQ_INIT(&ts->namehash[i]);
647 		TAILQ_INIT(&ts->taghash[i]);
648 	}
649 	BIT_FILL(TAGID_MAX, &ts->avail);
650 }
651 
652 static void
653 pf_cleanup_tagset(struct pf_tagset *ts)
654 {
655 	unsigned int i;
656 	unsigned int hashsize;
657 	struct pf_tagname *t, *tmp;
658 
659 	/*
660 	 * Only need to clean up one of the hashes as each tag is hashed
661 	 * into each table.
662 	 */
663 	hashsize = ts->mask + 1;
664 	for (i = 0; i < hashsize; i++)
665 		TAILQ_FOREACH_SAFE(t, &ts->namehash[i], namehash_entries, tmp)
666 			uma_zfree(V_pf_tag_z, t);
667 
668 	free(ts->namehash, M_PFHASH);
669 	free(ts->taghash, M_PFHASH);
670 }
671 
672 static uint16_t
673 tagname2hashindex(const struct pf_tagset *ts, const char *tagname)
674 {
675 	size_t len;
676 
677 	len = strnlen(tagname, PF_TAG_NAME_SIZE - 1);
678 	return (murmur3_32_hash(tagname, len, ts->seed) & ts->mask);
679 }
680 
681 static uint16_t
682 tag2hashindex(const struct pf_tagset *ts, uint16_t tag)
683 {
684 
685 	return (tag & ts->mask);
686 }
687 
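/*
 * Map a tag name to a numeric tag, taking a reference.  The common case is
 * served under the read lock; if the name is unknown and add_new is true,
 * the lookup is retried under the write lock and the lowest free tag id is
 * allocated.
 */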
688 static u_int16_t
689 tagname2tag(struct pf_tagset *ts, const char *tagname, bool add_new)
690 {
691 	struct pf_tagname	*tag;
692 	u_int32_t		 index;
693 	u_int16_t		 new_tagid;
694 
695 	PF_TAGS_RLOCK_TRACKER;
696 
697 	PF_TAGS_RLOCK();
698 
699 	index = tagname2hashindex(ts, tagname);
700 	TAILQ_FOREACH(tag, &ts->namehash[index], namehash_entries)
701 		if (strcmp(tagname, tag->name) == 0) {
702 			tag->ref++;
703 			new_tagid = tag->tag;
704 			PF_TAGS_RUNLOCK();
705 			return (new_tagid);
706 		}
707 
708 	/*
709 	 * When used for pfsync with queues we must not create new entries.
710 	 * Pf tags can be created just fine by this function, but queues
711 	 * require additional configuration. If they are missing on the target
712 	 * system we just ignore them.
713 	 */
714 	if (add_new == false) {
715 		printf("%s: Not creating a new tag\n", __func__);
716 		PF_TAGS_RUNLOCK();
717 		return (0);
718 	}
719 
720 	/*
721 	 * If a new entry must be created do it under a write lock.
722 	 * But first search again, somebody could have created the tag
723 	 * between unlocking the read lock and locking the write lock.
724 	 */
725 	PF_TAGS_RUNLOCK();
726 	PF_TAGS_WLOCK();
727 	TAILQ_FOREACH(tag, &ts->namehash[index], namehash_entries)
728 		if (strcmp(tagname, tag->name) == 0) {
729 			tag->ref++;
730 			new_tagid = tag->tag;
731 			PF_TAGS_WUNLOCK();
732 			return (new_tagid);
733 		}
734 
735 	/*
736 	 * new entry
737 	 *
738 	 * to avoid fragmentation, we do a linear search from the beginning
739 	 * and take the first free slot we find.
740 	 */
741 	new_tagid = BIT_FFS(TAGID_MAX, &ts->avail);
742 	/*
743 	 * Tags are 1-based, with valid tags in the range [1..TAGID_MAX].
744 	 * BIT_FFS() returns a 1-based bit number, with 0 indicating no bits
745 	 * set.  It may also return a bit number greater than TAGID_MAX due
746 	 * to rounding of the number of bits in the vector up to a multiple
747 	 * of the vector word size at declaration/allocation time.
748 	 */
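	/*
	 * For instance, with TAGID_MAX of 50000 and 64-bit bitset words
	 * (LP64), the vector is rounded up to 50048 bits, so values in the
	 * range 50001..50048 are possible here and must be rejected.
	 */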
749 	if ((new_tagid == 0) || (new_tagid > TAGID_MAX)) {
750 		PF_TAGS_WUNLOCK();
751 		return (0);
752 	}
753 
754 	/* Mark the tag as in use.  Bits are 0-based for BIT_CLR() */
755 	BIT_CLR(TAGID_MAX, new_tagid - 1, &ts->avail);
756 
757 	/* allocate and fill new struct pf_tagname */
758 	tag = uma_zalloc(V_pf_tag_z, M_NOWAIT);
759 	if (tag == NULL) {
760 		PF_TAGS_WUNLOCK();
761 		return (0);
762 	}
763 	strlcpy(tag->name, tagname, sizeof(tag->name));
764 	tag->tag = new_tagid;
765 	tag->ref = 1;
766 
767 	/* Insert into namehash */
768 	TAILQ_INSERT_TAIL(&ts->namehash[index], tag, namehash_entries);
769 
770 	/* Insert into taghash */
771 	index = tag2hashindex(ts, new_tagid);
772 	TAILQ_INSERT_TAIL(&ts->taghash[index], tag, taghash_entries);
773 
774 	PF_TAGS_WUNLOCK();
775 	return (new_tagid);
776 }
777 
778 static char *
779 tag2tagname(struct pf_tagset *ts, u_int16_t tag)
780 {
781 	struct pf_tagname	*t;
782 	uint16_t		 index;
783 
784 	PF_TAGS_RLOCK_TRACKER;
785 
786 	PF_TAGS_RLOCK();
787 
788 	index = tag2hashindex(ts, tag);
789 	TAILQ_FOREACH(t, &ts->taghash[index], taghash_entries)
790 		if (tag == t->tag) {
791 			PF_TAGS_RUNLOCK();
792 			return (t->name);
793 		}
794 
795 	PF_TAGS_RUNLOCK();
796 	return (NULL);
797 }
798 
799 static void
800 tag_unref(struct pf_tagset *ts, u_int16_t tag)
801 {
802 	struct pf_tagname	*t;
803 	uint16_t		 index;
804 
805 	PF_TAGS_WLOCK();
806 
807 	index = tag2hashindex(ts, tag);
808 	TAILQ_FOREACH(t, &ts->taghash[index], taghash_entries)
809 		if (tag == t->tag) {
810 			if (--t->ref == 0) {
811 				TAILQ_REMOVE(&ts->taghash[index], t,
812 				    taghash_entries);
813 				index = tagname2hashindex(ts, t->name);
814 				TAILQ_REMOVE(&ts->namehash[index], t,
815 				    namehash_entries);
816 				/* Bits are 0-based for BIT_SET() */
817 				BIT_SET(TAGID_MAX, tag - 1, &ts->avail);
818 				uma_zfree(V_pf_tag_z, t);
819 			}
820 			break;
821 		}
822 
823 	PF_TAGS_WUNLOCK();
824 }
825 
826 uint16_t
827 pf_tagname2tag(const char *tagname)
828 {
829 	return (tagname2tag(&V_pf_tags, tagname, true));
830 }
831 
832 static const char *
833 pf_tag2tagname(uint16_t tag)
834 {
835 	return (tag2tagname(&V_pf_tags, tag));
836 }
837 
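/*
 * Ethernet ruleset updates follow the same begin/rollback/commit pattern
 * as the IP rulesets: pf_begin_eth() purges the stale inactive list and
 * hands out a ticket, pf_rollback_eth() discards an open transaction, and
 * pf_commit_eth() swaps the inactive and active rule lists.
 */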
838 static int
839 pf_begin_eth(uint32_t *ticket, const char *anchor)
840 {
841 	struct pf_keth_rule *rule, *tmp;
842 	struct pf_keth_ruleset *rs;
843 
844 	PF_RULES_WASSERT();
845 
846 	rs = pf_find_or_create_keth_ruleset(anchor);
847 	if (rs == NULL)
848 		return (EINVAL);
849 
850 	/* Purge old inactive rules. */
851 	TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries,
852 	    tmp) {
853 		TAILQ_REMOVE(rs->inactive.rules, rule,
854 		    entries);
855 		pf_free_eth_rule(rule);
856 	}
857 
858 	*ticket = ++rs->inactive.ticket;
859 	rs->inactive.open = 1;
860 
861 	return (0);
862 }
863 
864 static int
865 pf_rollback_eth(uint32_t ticket, const char *anchor)
866 {
867 	struct pf_keth_rule *rule, *tmp;
868 	struct pf_keth_ruleset *rs;
869 
870 	PF_RULES_WASSERT();
871 
872 	rs = pf_find_keth_ruleset(anchor);
873 	if (rs == NULL)
874 		return (EINVAL);
875 
876 	if (!rs->inactive.open ||
877 	    ticket != rs->inactive.ticket)
878 		return (0);
879 
880 	/* Purge old inactive rules. */
881 	TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries,
882 	    tmp) {
883 		TAILQ_REMOVE(rs->inactive.rules, rule, entries);
884 		pf_free_eth_rule(rule);
885 	}
886 
887 	rs->inactive.open = 0;
888 
889 	pf_remove_if_empty_keth_ruleset(rs);
890 
891 	return (0);
892 }
893 
894 #define	PF_SET_SKIP_STEPS(i)					\
895 	do {							\
896 		while (head[i] != cur) {			\
897 			head[i]->skip[i].ptr = cur;		\
898 			head[i] = TAILQ_NEXT(head[i], entries);	\
899 		}						\
900 	} while (0)
901 
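/*
 * Pre-compute skip steps for the Ethernet ruleset: for each field covered
 * by PFE_SKIP_*, a run of consecutive rules with an identical value gets a
 * pointer to the first rule where that value changes, letting evaluation
 * jump over the whole run instead of testing every rule.
 */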
902 static void
903 pf_eth_calc_skip_steps(struct pf_keth_ruleq *rules)
904 {
905 	struct pf_keth_rule *cur, *prev, *head[PFE_SKIP_COUNT];
906 	int i;
907 
908 	cur = TAILQ_FIRST(rules);
909 	prev = cur;
910 	for (i = 0; i < PFE_SKIP_COUNT; ++i)
911 		head[i] = cur;
912 	while (cur != NULL) {
913 		if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
914 			PF_SET_SKIP_STEPS(PFE_SKIP_IFP);
915 		if (cur->direction != prev->direction)
916 			PF_SET_SKIP_STEPS(PFE_SKIP_DIR);
917 		if (cur->proto != prev->proto)
918 			PF_SET_SKIP_STEPS(PFE_SKIP_PROTO);
919 		if (memcmp(&cur->src, &prev->src, sizeof(cur->src)) != 0)
920 			PF_SET_SKIP_STEPS(PFE_SKIP_SRC_ADDR);
921 		if (memcmp(&cur->dst, &prev->dst, sizeof(cur->dst)) != 0)
922 			PF_SET_SKIP_STEPS(PFE_SKIP_DST_ADDR);
923 		if (cur->ipsrc.neg != prev->ipsrc.neg ||
924 		    pf_addr_wrap_neq(&cur->ipsrc.addr, &prev->ipsrc.addr))
925 			PF_SET_SKIP_STEPS(PFE_SKIP_SRC_IP_ADDR);
926 		if (cur->ipdst.neg != prev->ipdst.neg ||
927 		    pf_addr_wrap_neq(&cur->ipdst.addr, &prev->ipdst.addr))
928 			PF_SET_SKIP_STEPS(PFE_SKIP_DST_IP_ADDR);
929 
930 		prev = cur;
931 		cur = TAILQ_NEXT(cur, entries);
932 	}
933 	for (i = 0; i < PFE_SKIP_COUNT; ++i)
934 		PF_SET_SKIP_STEPS(i);
935 }
936 
937 static int
938 pf_commit_eth(uint32_t ticket, const char *anchor)
939 {
940 	struct pf_keth_ruleq *rules;
941 	struct pf_keth_ruleset *rs;
942 
943 	rs = pf_find_keth_ruleset(anchor);
944 	if (rs == NULL) {
945 		return (EINVAL);
946 	}
947 
948 	if (!rs->inactive.open ||
949 	    ticket != rs->inactive.ticket)
950 		return (EBUSY);
951 
952 	PF_RULES_WASSERT();
953 
954 	pf_eth_calc_skip_steps(rs->inactive.rules);
955 
956 	rules = rs->active.rules;
957 	atomic_store_ptr(&rs->active.rules, rs->inactive.rules);
958 	rs->inactive.rules = rules;
959 	rs->inactive.ticket = rs->active.ticket;
960 
961 	return (pf_rollback_eth(rs->inactive.ticket,
962 	    rs->anchor ? rs->anchor->path : ""));
963 }
964 
965 #ifdef ALTQ
966 uint16_t
967 pf_qname2qid(const char *qname, bool add_new)
968 {
969 	return (tagname2tag(&V_pf_qids, qname, add_new));
970 }
971 
972 static const char *
973 pf_qid2qname(uint16_t qid)
974 {
975 	return (tag2tagname(&V_pf_qids, qid));
976 }
977 
978 static void
979 pf_qid_unref(uint16_t qid)
980 {
981 	tag_unref(&V_pf_qids, qid);
982 }
983 
984 static int
985 pf_begin_altq(u_int32_t *ticket)
986 {
987 	struct pf_altq	*altq, *tmp;
988 	int		 error = 0;
989 
990 	PF_RULES_WASSERT();
991 
992 	/* Purge the old altq lists */
993 	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
994 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
995 			/* detach and destroy the discipline */
996 			error = altq_remove(altq);
997 		}
998 		free(altq, M_PFALTQ);
999 	}
1000 	TAILQ_INIT(V_pf_altq_ifs_inactive);
1001 	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
1002 		pf_qid_unref(altq->qid);
1003 		free(altq, M_PFALTQ);
1004 	}
1005 	TAILQ_INIT(V_pf_altqs_inactive);
1006 	if (error)
1007 		return (error);
1008 	*ticket = ++V_ticket_altqs_inactive;
1009 	V_altqs_inactive_open = 1;
1010 	return (0);
1011 }
1012 
1013 static int
1014 pf_rollback_altq(u_int32_t ticket)
1015 {
1016 	struct pf_altq	*altq, *tmp;
1017 	int		 error = 0;
1018 
1019 	PF_RULES_WASSERT();
1020 
1021 	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
1022 		return (0);
1023 	/* Purge the old altq lists */
1024 	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
1025 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
1026 			/* detach and destroy the discipline */
1027 			error = altq_remove(altq);
1028 		}
1029 		free(altq, M_PFALTQ);
1030 	}
1031 	TAILQ_INIT(V_pf_altq_ifs_inactive);
1032 	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
1033 		pf_qid_unref(altq->qid);
1034 		free(altq, M_PFALTQ);
1035 	}
1036 	TAILQ_INIT(V_pf_altqs_inactive);
1037 	V_altqs_inactive_open = 0;
1038 	return (error);
1039 }
1040 
1041 static int
1042 pf_commit_altq(u_int32_t ticket)
1043 {
1044 	struct pf_altqqueue	*old_altqs, *old_altq_ifs;
1045 	struct pf_altq		*altq, *tmp;
1046 	int			 err, error = 0;
1047 
1048 	PF_RULES_WASSERT();
1049 
1050 	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
1051 		return (EBUSY);
1052 
1053 	/* swap altqs, keep the old. */
1054 	old_altqs = V_pf_altqs_active;
1055 	old_altq_ifs = V_pf_altq_ifs_active;
1056 	V_pf_altqs_active = V_pf_altqs_inactive;
1057 	V_pf_altq_ifs_active = V_pf_altq_ifs_inactive;
1058 	V_pf_altqs_inactive = old_altqs;
1059 	V_pf_altq_ifs_inactive = old_altq_ifs;
1060 	V_ticket_altqs_active = V_ticket_altqs_inactive;
1061 
1062 	/* Attach new disciplines */
1063 	TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
1064 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
1065 			/* attach the discipline */
1066 			error = altq_pfattach(altq);
1067 			if (error == 0 && V_pf_altq_running)
1068 				error = pf_enable_altq(altq);
1069 			if (error != 0)
1070 				return (error);
1071 		}
1072 	}
1073 
1074 	/* Purge the old altq lists */
1075 	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
1076 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
1077 			/* detach and destroy the discipline */
1078 			if (V_pf_altq_running)
1079 				error = pf_disable_altq(altq);
1080 			err = altq_pfdetach(altq);
1081 			if (err != 0 && error == 0)
1082 				error = err;
1083 			err = altq_remove(altq);
1084 			if (err != 0 && error == 0)
1085 				error = err;
1086 		}
1087 		free(altq, M_PFALTQ);
1088 	}
1089 	TAILQ_INIT(V_pf_altq_ifs_inactive);
1090 	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
1091 		pf_qid_unref(altq->qid);
1092 		free(altq, M_PFALTQ);
1093 	}
1094 	TAILQ_INIT(V_pf_altqs_inactive);
1095 
1096 	V_altqs_inactive_open = 0;
1097 	return (error);
1098 }
1099 
1100 static int
1101 pf_enable_altq(struct pf_altq *altq)
1102 {
1103 	struct ifnet		*ifp;
1104 	struct tb_profile	 tb;
1105 	int			 error = 0;
1106 
1107 	if ((ifp = ifunit(altq->ifname)) == NULL)
1108 		return (EINVAL);
1109 
1110 	if (ifp->if_snd.altq_type != ALTQT_NONE)
1111 		error = altq_enable(&ifp->if_snd);
1112 
1113 	/* set tokenbucket regulator */
1114 	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
1115 		tb.rate = altq->ifbandwidth;
1116 		tb.depth = altq->tbrsize;
1117 		error = tbr_set(&ifp->if_snd, &tb);
1118 	}
1119 
1120 	return (error);
1121 }
1122 
1123 static int
1124 pf_disable_altq(struct pf_altq *altq)
1125 {
1126 	struct ifnet		*ifp;
1127 	struct tb_profile	 tb;
1128 	int			 error;
1129 
1130 	if ((ifp = ifunit(altq->ifname)) == NULL)
1131 		return (EINVAL);
1132 
1133 	/*
1134 	 * If the discipline is no longer referenced, it was overridden
1135 	 * by a new one; in that case, just return.
1136 	 */
1137 	if (altq->altq_disc != ifp->if_snd.altq_disc)
1138 		return (0);
1139 
1140 	error = altq_disable(&ifp->if_snd);
1141 
1142 	if (error == 0) {
1143 		/* clear tokenbucket regulator */
1144 		tb.rate = 0;
1145 		error = tbr_set(&ifp->if_snd, &tb);
1146 	}
1147 
1148 	return (error);
1149 }
1150 
1151 static int
1152 pf_altq_ifnet_event_add(struct ifnet *ifp, int remove, u_int32_t ticket,
1153     struct pf_altq *altq)
1154 {
1155 	struct ifnet	*ifp1;
1156 	int		 error = 0;
1157 
1158 	/* Deactivate the interface in question */
1159 	altq->local_flags &= ~PFALTQ_FLAG_IF_REMOVED;
1160 	if ((ifp1 = ifunit(altq->ifname)) == NULL ||
1161 	    (remove && ifp1 == ifp)) {
1162 		altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
1163 	} else {
1164 		error = altq_add(ifp1, altq);
1165 
1166 		if (ticket != V_ticket_altqs_inactive)
1167 			error = EBUSY;
1168 
1169 		if (error)
1170 			free(altq, M_PFALTQ);
1171 	}
1172 
1173 	return (error);
1174 }
1175 
1176 void
1177 pf_altq_ifnet_event(struct ifnet *ifp, int remove)
1178 {
1179 	struct pf_altq	*a1, *a2, *a3;
1180 	u_int32_t	 ticket;
1181 	int		 error = 0;
1182 
1183 	/*
1184 	 * No need to re-evaluate the configuration for events on interfaces
1185 	 * that do not support ALTQ, as it's not possible for such
1186 	 * interfaces to be part of the configuration.
1187 	 */
1188 	if (!ALTQ_IS_READY(&ifp->if_snd))
1189 		return;
1190 
1191 	/* Interrupt userland queue modifications */
1192 	if (V_altqs_inactive_open)
1193 		pf_rollback_altq(V_ticket_altqs_inactive);
1194 
1195 	/* Start new altq ruleset */
1196 	if (pf_begin_altq(&ticket))
1197 		return;
1198 
1199 	/* Copy the current active set */
1200 	TAILQ_FOREACH(a1, V_pf_altq_ifs_active, entries) {
1201 		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
1202 		if (a2 == NULL) {
1203 			error = ENOMEM;
1204 			break;
1205 		}
1206 		bcopy(a1, a2, sizeof(struct pf_altq));
1207 
1208 		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
1209 		if (error)
1210 			break;
1211 
1212 		TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, a2, entries);
1213 	}
1214 	if (error)
1215 		goto out;
1216 	TAILQ_FOREACH(a1, V_pf_altqs_active, entries) {
1217 		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
1218 		if (a2 == NULL) {
1219 			error = ENOMEM;
1220 			break;
1221 		}
1222 		bcopy(a1, a2, sizeof(struct pf_altq));
1223 
1224 		if ((a2->qid = pf_qname2qid(a2->qname, true)) == 0) {
1225 			error = EBUSY;
1226 			free(a2, M_PFALTQ);
1227 			break;
1228 		}
1229 		a2->altq_disc = NULL;
1230 		TAILQ_FOREACH(a3, V_pf_altq_ifs_inactive, entries) {
1231 			if (strncmp(a3->ifname, a2->ifname,
1232 				IFNAMSIZ) == 0) {
1233 				a2->altq_disc = a3->altq_disc;
1234 				break;
1235 			}
1236 		}
1237 		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
1238 		if (error)
1239 			break;
1240 
1241 		TAILQ_INSERT_TAIL(V_pf_altqs_inactive, a2, entries);
1242 	}
1243 
1244 out:
1245 	if (error != 0)
1246 		pf_rollback_altq(ticket);
1247 	else
1248 		pf_commit_altq(ticket);
1249 }
1250 #endif /* ALTQ */
1251 
1252 static struct pf_krule_global *
1253 pf_rule_tree_alloc(int flags)
1254 {
1255 	struct pf_krule_global *tree;
1256 
1257 	tree = malloc(sizeof(struct pf_krule_global), M_PF, flags);
1258 	if (tree == NULL)
1259 		return (NULL);
1260 	RB_INIT(tree);
1261 	return (tree);
1262 }
1263 
1264 void
1265 pf_rule_tree_free(struct pf_krule_global *tree)
1266 {
1267 
1268 	free(tree, M_PF);
1269 }
1270 
1271 static int
1272 pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
1273 {
1274 	struct pf_krule_global *tree;
1275 	struct pf_kruleset	*rs;
1276 	struct pf_krule		*rule;
1277 
1278 	PF_RULES_WASSERT();
1279 
1280 	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1281 		return (EINVAL);
1282 	tree = pf_rule_tree_alloc(M_NOWAIT);
1283 	if (tree == NULL)
1284 		return (ENOMEM);
1285 	rs = pf_find_or_create_kruleset(anchor);
1286 	if (rs == NULL) {
1287 		pf_rule_tree_free(tree);
1288 		return (EINVAL);
1289 	}
1290 	pf_rule_tree_free(rs->rules[rs_num].inactive.tree);
1291 	rs->rules[rs_num].inactive.tree = tree;
1292 
1293 	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
1294 		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
1295 		rs->rules[rs_num].inactive.rcount--;
1296 	}
1297 	*ticket = ++rs->rules[rs_num].inactive.ticket;
1298 	rs->rules[rs_num].inactive.open = 1;
1299 	return (0);
1300 }
1301 
1302 static int
1303 pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
1304 {
1305 	struct pf_kruleset	*rs;
1306 	struct pf_krule		*rule;
1307 
1308 	PF_RULES_WASSERT();
1309 
1310 	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1311 		return (EINVAL);
1312 	rs = pf_find_kruleset(anchor);
1313 	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
1314 	    rs->rules[rs_num].inactive.ticket != ticket)
1315 		return (0);
1316 	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
1317 		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
1318 		rs->rules[rs_num].inactive.rcount--;
1319 	}
1320 	rs->rules[rs_num].inactive.open = 0;
1321 	return (0);
1322 }
1323 
1324 #define PF_MD5_UPD(st, elm)						\
1325 		MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))
1326 
1327 #define PF_MD5_UPD_STR(st, elm)						\
1328 		MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))
1329 
1330 #define PF_MD5_UPD_HTONL(st, elm, stor) do {				\
1331 		(stor) = htonl((st)->elm);				\
1332 		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
1333 } while (0)
1334 
1335 #define PF_MD5_UPD_HTONS(st, elm, stor) do {				\
1336 		(stor) = htons((st)->elm);				\
1337 		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
1338 } while (0)
1339 
1340 static void
1341 pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
1342 {
1343 	PF_MD5_UPD(pfr, addr.type);
1344 	switch (pfr->addr.type) {
1345 		case PF_ADDR_DYNIFTL:
1346 			PF_MD5_UPD(pfr, addr.v.ifname);
1347 			PF_MD5_UPD(pfr, addr.iflags);
1348 			break;
1349 		case PF_ADDR_TABLE:
1350 			if (strncmp(pfr->addr.v.tblname, PF_OPTIMIZER_TABLE_PFX,
1351 			    strlen(PF_OPTIMIZER_TABLE_PFX)))
1352 				PF_MD5_UPD(pfr, addr.v.tblname);
1353 			break;
1354 		case PF_ADDR_ADDRMASK:
1355 			/* XXX ignore af? */
1356 			PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
1357 			PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
1358 			break;
1359 	}
1360 
1361 	PF_MD5_UPD(pfr, port[0]);
1362 	PF_MD5_UPD(pfr, port[1]);
1363 	PF_MD5_UPD(pfr, neg);
1364 	PF_MD5_UPD(pfr, port_op);
1365 }
1366 
1367 static void
1368 pf_hash_rule_rolling(MD5_CTX *ctx, struct pf_krule *rule)
1369 {
1370 	u_int16_t x;
1371 	u_int32_t y;
1372 
1373 	pf_hash_rule_addr(ctx, &rule->src);
1374 	pf_hash_rule_addr(ctx, &rule->dst);
1375 	for (int i = 0; i < PF_RULE_MAX_LABEL_COUNT; i++)
1376 		PF_MD5_UPD_STR(rule, label[i]);
1377 	PF_MD5_UPD_STR(rule, ifname);
1378 	PF_MD5_UPD_STR(rule, rcv_ifname);
1379 	PF_MD5_UPD_STR(rule, match_tagname);
1380 	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
1381 	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
1382 	PF_MD5_UPD_HTONL(rule, prob, y);
1383 	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
1384 	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
1385 	PF_MD5_UPD(rule, uid.op);
1386 	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
1387 	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
1388 	PF_MD5_UPD(rule, gid.op);
1389 	PF_MD5_UPD_HTONL(rule, rule_flag, y);
1390 	PF_MD5_UPD(rule, action);
1391 	PF_MD5_UPD(rule, direction);
1392 	PF_MD5_UPD(rule, af);
1393 	PF_MD5_UPD(rule, quick);
1394 	PF_MD5_UPD(rule, ifnot);
1395 	PF_MD5_UPD(rule, rcvifnot);
1396 	PF_MD5_UPD(rule, match_tag_not);
1397 	PF_MD5_UPD(rule, natpass);
1398 	PF_MD5_UPD(rule, keep_state);
1399 	PF_MD5_UPD(rule, proto);
1400 	PF_MD5_UPD(rule, type);
1401 	PF_MD5_UPD(rule, code);
1402 	PF_MD5_UPD(rule, flags);
1403 	PF_MD5_UPD(rule, flagset);
1404 	PF_MD5_UPD(rule, allow_opts);
1405 	PF_MD5_UPD(rule, rt);
1406 	PF_MD5_UPD(rule, tos);
1407 	PF_MD5_UPD(rule, scrub_flags);
1408 	PF_MD5_UPD(rule, min_ttl);
1409 	PF_MD5_UPD(rule, set_tos);
1410 	if (rule->anchor != NULL)
1411 		PF_MD5_UPD_STR(rule, anchor->path);
1412 }
1413 
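/*
 * The MD5 digest computed here doubles as the ordering key for the
 * pf_krule_global RB tree (see pf_krule_compare() below), which is how
 * pf_commit_rules() matches new rules to old ones when carrying counters
 * across a ruleset reload.
 */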
1414 static void
1415 pf_hash_rule(struct pf_krule *rule)
1416 {
1417 	MD5_CTX		ctx;
1418 
1419 	MD5Init(&ctx);
1420 	pf_hash_rule_rolling(&ctx, rule);
1421 	MD5Final(rule->md5sum, &ctx);
1422 }
1423 
1424 static int
1425 pf_krule_compare(struct pf_krule *a, struct pf_krule *b)
1426 {
1427 
1428 	return (memcmp(a->md5sum, b->md5sum, PF_MD5_DIGEST_LENGTH));
1429 }
1430 
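/*
 * Swap the inactive ruleset built under "ticket" into place.  The old
 * active rules are moved to the unlinked-rules list for deferred
 * destruction, counters are rolled over to new rules whose MD5 digest
 * matches an old one (when keep_counters is set), and skip steps are
 * recalculated for the new active list.
 */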
1431 static int
1432 pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
1433 {
1434 	struct pf_kruleset	*rs;
1435 	struct pf_krule		*rule, *old_rule;
1436 	struct pf_krulequeue	*old_rules;
1437 	struct pf_krule_global  *old_tree;
1438 	int			 error;
1439 	u_int32_t		 old_rcount;
1440 
1441 	PF_RULES_WASSERT();
1442 
1443 	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1444 		return (EINVAL);
1445 	rs = pf_find_kruleset(anchor);
1446 	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
1447 	    ticket != rs->rules[rs_num].inactive.ticket)
1448 		return (EBUSY);
1449 
1450 	/* Calculate checksum for the main ruleset */
1451 	if (rs == &pf_main_ruleset) {
1452 		error = pf_setup_pfsync_matching(rs);
1453 		if (error != 0)
1454 			return (error);
1455 	}
1456 
1457 	/* Swap rules, keep the old. */
1458 	old_rules = rs->rules[rs_num].active.ptr;
1459 	old_rcount = rs->rules[rs_num].active.rcount;
1460 	old_tree = rs->rules[rs_num].active.tree;
1461 
1462 	rs->rules[rs_num].active.ptr =
1463 	    rs->rules[rs_num].inactive.ptr;
1464 	rs->rules[rs_num].active.tree =
1465 	    rs->rules[rs_num].inactive.tree;
1466 	rs->rules[rs_num].active.rcount =
1467 	    rs->rules[rs_num].inactive.rcount;
1468 
1469 	/* Attempt to preserve counter information. */
1470 	if (V_pf_status.keep_counters && old_tree != NULL) {
1471 		TAILQ_FOREACH(rule, rs->rules[rs_num].active.ptr,
1472 		    entries) {
1473 			old_rule = RB_FIND(pf_krule_global, old_tree, rule);
1474 			if (old_rule == NULL) {
1475 				continue;
1476 			}
1477 			pf_counter_u64_critical_enter();
1478 			pf_counter_u64_rollup_protected(&rule->evaluations,
1479 			    pf_counter_u64_fetch(&old_rule->evaluations));
1480 			pf_counter_u64_rollup_protected(&rule->packets[0],
1481 			    pf_counter_u64_fetch(&old_rule->packets[0]));
1482 			pf_counter_u64_rollup_protected(&rule->packets[1],
1483 			    pf_counter_u64_fetch(&old_rule->packets[1]));
1484 			pf_counter_u64_rollup_protected(&rule->bytes[0],
1485 			    pf_counter_u64_fetch(&old_rule->bytes[0]));
1486 			pf_counter_u64_rollup_protected(&rule->bytes[1],
1487 			    pf_counter_u64_fetch(&old_rule->bytes[1]));
1488 			pf_counter_u64_critical_exit();
1489 		}
1490 	}
1491 
1492 	rs->rules[rs_num].inactive.ptr = old_rules;
1493 	rs->rules[rs_num].inactive.tree = NULL; /* important for pf_ioctl_addrule */
1494 	rs->rules[rs_num].inactive.rcount = old_rcount;
1495 
1496 	rs->rules[rs_num].active.ticket =
1497 	    rs->rules[rs_num].inactive.ticket;
1498 	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);
1499 
1500 	/* Purge the old rule list. */
1501 	PF_UNLNKDRULES_LOCK();
1502 	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
1503 		pf_unlink_rule_locked(old_rules, rule);
1504 	PF_UNLNKDRULES_UNLOCK();
1505 	rs->rules[rs_num].inactive.rcount = 0;
1506 	rs->rules[rs_num].inactive.open = 0;
1507 	pf_remove_if_empty_kruleset(rs);
1508 	pf_rule_tree_free(old_tree);
1509 
1510 	return (0);
1511 }
1512 
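/*
 * Compute an MD5 checksum over the inactive rulesets (scrub excluded) and
 * store it in V_pf_status.pf_chksum; pfsync uses it to check that both
 * peers are running the same ruleset.
 */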
1513 static int
1514 pf_setup_pfsync_matching(struct pf_kruleset *rs)
1515 {
1516 	MD5_CTX			 ctx;
1517 	struct pf_krule		*rule;
1518 	int			 rs_cnt;
1519 	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];
1520 
1521 	MD5Init(&ctx);
1522 	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
1523 		/* XXX PF_RULESET_SCRUB as well? */
1524 		if (rs_cnt == PF_RULESET_SCRUB)
1525 			continue;
1526 
1527 		if (rs->rules[rs_cnt].inactive.rcount) {
1528 			TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
1529 			    entries) {
1530 				pf_hash_rule_rolling(&ctx, rule);
1531 			}
1532 		}
1533 	}
1534 
1535 	MD5Final(digest, &ctx);
1536 	memcpy(V_pf_status.pf_chksum, digest, sizeof(V_pf_status.pf_chksum));
1537 	return (0);
1538 }
1539 
1540 static int
1541 pf_eth_addr_setup(struct pf_keth_ruleset *ruleset, struct pf_addr_wrap *addr)
1542 {
1543 	int error = 0;
1544 
1545 	switch (addr->type) {
1546 	case PF_ADDR_TABLE:
1547 		addr->p.tbl = pfr_eth_attach_table(ruleset, addr->v.tblname);
1548 		if (addr->p.tbl == NULL)
1549 			error = ENOMEM;
1550 		break;
1551 	default:
1552 		error = EINVAL;
1553 	}
1554 
1555 	return (error);
1556 }
1557 
1558 static int
1559 pf_addr_setup(struct pf_kruleset *ruleset, struct pf_addr_wrap *addr,
1560     sa_family_t af)
1561 {
1562 	int error = 0;
1563 
1564 	switch (addr->type) {
1565 	case PF_ADDR_TABLE:
1566 		addr->p.tbl = pfr_attach_table(ruleset, addr->v.tblname);
1567 		if (addr->p.tbl == NULL)
1568 			error = ENOMEM;
1569 		break;
1570 	case PF_ADDR_DYNIFTL:
1571 		error = pfi_dynaddr_setup(addr, af);
1572 		break;
1573 	}
1574 
1575 	return (error);
1576 }
1577 
1578 void
1579 pf_addr_copyout(struct pf_addr_wrap *addr)
1580 {
1581 
1582 	switch (addr->type) {
1583 	case PF_ADDR_DYNIFTL:
1584 		pfi_dynaddr_copyout(addr);
1585 		break;
1586 	case PF_ADDR_TABLE:
1587 		pf_tbladdr_copyout(addr);
1588 		break;
1589 	}
1590 }
1591 
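/*
 * Flatten a kernel source node into the exported pf_src_node layout:
 * per-CPU counters become plain integers and the creation/expire times
 * are converted from absolute uptime into seconds relative to now.
 */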
1592 static void
1593 pf_src_node_copy(const struct pf_ksrc_node *in, struct pf_src_node *out)
1594 {
1595 	int	secs = time_uptime;
1596 
1597 	bzero(out, sizeof(struct pf_src_node));
1598 
1599 	bcopy(&in->addr, &out->addr, sizeof(struct pf_addr));
1600 	bcopy(&in->raddr, &out->raddr, sizeof(struct pf_addr));
1601 
1602 	if (in->rule != NULL)
1603 		out->rule.nr = in->rule->nr;
1604 
1605 	for (int i = 0; i < 2; i++) {
1606 		out->bytes[i] = counter_u64_fetch(in->bytes[i]);
1607 		out->packets[i] = counter_u64_fetch(in->packets[i]);
1608 	}
1609 
1610 	out->states = in->states;
1611 	out->conn = in->conn;
1612 	out->af = in->af;
1613 	out->ruletype = in->ruletype;
1614 
1615 	out->creation = secs - in->creation;
1616 	if (out->expire > secs)
1617 		out->expire -= secs;
1618 	else
1619 		out->expire = 0;
1620 
1621 	/* Adjust the connection rate estimate. */
1622 	out->conn_rate.limit = in->conn_rate.limit;
1623 	out->conn_rate.seconds = in->conn_rate.seconds;
1624 	/* If there's no limit there's no counter_rate. */
1625 	if (in->conn_rate.cr != NULL)
1626 		out->conn_rate.count = counter_rate_get(in->conn_rate.cr);
1627 }
1628 
1629 #ifdef ALTQ
1630 /*
1631  * Handle export of struct pf_kaltq to user binaries that may be using any
1632  * version of struct pf_altq.
1633  */
1634 static int
1635 pf_export_kaltq(struct pf_altq *q, struct pfioc_altq_v1 *pa, size_t ioc_size)
1636 {
1637 	u_int32_t version;
1638 
1639 	if (ioc_size == sizeof(struct pfioc_altq_v0))
1640 		version = 0;
1641 	else
1642 		version = pa->version;
1643 
1644 	if (version > PFIOC_ALTQ_VERSION)
1645 		return (EINVAL);
1646 
1647 #define ASSIGN(x) exported_q->x = q->x
1648 #define COPY(x) \
1649 	bcopy(&q->x, &exported_q->x, min(sizeof(q->x), sizeof(exported_q->x)))
1650 #define SATU16(x) (u_int32_t)uqmin((x), USHRT_MAX)
1651 #define SATU32(x) (u_int32_t)uqmin((x), UINT_MAX)
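/* The v0 ABI stored some of these fields in 16 or 32 bits; larger kernel
 * values are saturated at USHRT_MAX/UINT_MAX rather than truncated. */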
1652 
1653 	switch (version) {
1654 	case 0: {
1655 		struct pf_altq_v0 *exported_q =
1656 		    &((struct pfioc_altq_v0 *)pa)->altq;
1657 
1658 		COPY(ifname);
1659 
1660 		ASSIGN(scheduler);
1661 		ASSIGN(tbrsize);
1662 		exported_q->tbrsize = SATU16(q->tbrsize);
1663 		exported_q->ifbandwidth = SATU32(q->ifbandwidth);
1664 
1665 		COPY(qname);
1666 		COPY(parent);
1667 		ASSIGN(parent_qid);
1668 		exported_q->bandwidth = SATU32(q->bandwidth);
1669 		ASSIGN(priority);
1670 		ASSIGN(local_flags);
1671 
1672 		ASSIGN(qlimit);
1673 		ASSIGN(flags);
1674 
1675 		if (q->scheduler == ALTQT_HFSC) {
1676 #define ASSIGN_OPT(x) exported_q->pq_u.hfsc_opts.x = q->pq_u.hfsc_opts.x
1677 #define ASSIGN_OPT_SATU32(x) exported_q->pq_u.hfsc_opts.x = \
1678 			    SATU32(q->pq_u.hfsc_opts.x)
1679 
1680 			ASSIGN_OPT_SATU32(rtsc_m1);
1681 			ASSIGN_OPT(rtsc_d);
1682 			ASSIGN_OPT_SATU32(rtsc_m2);
1683 
1684 			ASSIGN_OPT_SATU32(lssc_m1);
1685 			ASSIGN_OPT(lssc_d);
1686 			ASSIGN_OPT_SATU32(lssc_m2);
1687 
1688 			ASSIGN_OPT_SATU32(ulsc_m1);
1689 			ASSIGN_OPT(ulsc_d);
1690 			ASSIGN_OPT_SATU32(ulsc_m2);
1691 
1692 			ASSIGN_OPT(flags);
1693 
1694 #undef ASSIGN_OPT
1695 #undef ASSIGN_OPT_SATU32
1696 		} else
1697 			COPY(pq_u);
1698 
1699 		ASSIGN(qid);
1700 		break;
1701 	}
1702 	case 1:	{
1703 		struct pf_altq_v1 *exported_q =
1704 		    &((struct pfioc_altq_v1 *)pa)->altq;
1705 
1706 		COPY(ifname);
1707 
1708 		ASSIGN(scheduler);
1709 		ASSIGN(tbrsize);
1710 		ASSIGN(ifbandwidth);
1711 
1712 		COPY(qname);
1713 		COPY(parent);
1714 		ASSIGN(parent_qid);
1715 		ASSIGN(bandwidth);
1716 		ASSIGN(priority);
1717 		ASSIGN(local_flags);
1718 
1719 		ASSIGN(qlimit);
1720 		ASSIGN(flags);
1721 		COPY(pq_u);
1722 
1723 		ASSIGN(qid);
1724 		break;
1725 	}
1726 	default:
1727 		panic("%s: unhandled struct pfioc_altq version", __func__);
1728 		break;
1729 	}
1730 
1731 #undef ASSIGN
1732 #undef COPY
1733 #undef SATU16
1734 #undef SATU32
1735 
1736 	return (0);
1737 }
1738 
1739 /*
1740  * Handle import to struct pf_kaltq of struct pf_altq from user binaries
1741  * that may be using any version of it.
1742  */
1743 static int
1744 pf_import_kaltq(struct pfioc_altq_v1 *pa, struct pf_altq *q, size_t ioc_size)
1745 {
1746 	u_int32_t version;
1747 
1748 	if (ioc_size == sizeof(struct pfioc_altq_v0))
1749 		version = 0;
1750 	else
1751 		version = pa->version;
1752 
1753 	if (version > PFIOC_ALTQ_VERSION)
1754 		return (EINVAL);
1755 
1756 #define ASSIGN(x) q->x = imported_q->x
1757 #define COPY(x) \
1758 	bcopy(&imported_q->x, &q->x, min(sizeof(imported_q->x), sizeof(q->x)))
1759 
1760 	switch (version) {
1761 	case 0: {
1762 		struct pf_altq_v0 *imported_q =
1763 		    &((struct pfioc_altq_v0 *)pa)->altq;
1764 
1765 		COPY(ifname);
1766 
1767 		ASSIGN(scheduler);
1768 		ASSIGN(tbrsize); /* 16-bit -> 32-bit */
1769 		ASSIGN(ifbandwidth); /* 32-bit -> 64-bit */
1770 
1771 		COPY(qname);
1772 		COPY(parent);
1773 		ASSIGN(parent_qid);
1774 		ASSIGN(bandwidth); /* 32-bit -> 64-bit */
1775 		ASSIGN(priority);
1776 		ASSIGN(local_flags);
1777 
1778 		ASSIGN(qlimit);
1779 		ASSIGN(flags);
1780 
1781 		if (imported_q->scheduler == ALTQT_HFSC) {
1782 #define ASSIGN_OPT(x) q->pq_u.hfsc_opts.x = imported_q->pq_u.hfsc_opts.x
1783 
1784 			/*
1785 			 * The m1 and m2 parameters are being copied from
1786 			 * 32-bit to 64-bit.
1787 			 */
1788 			ASSIGN_OPT(rtsc_m1);
1789 			ASSIGN_OPT(rtsc_d);
1790 			ASSIGN_OPT(rtsc_m2);
1791 
1792 			ASSIGN_OPT(lssc_m1);
1793 			ASSIGN_OPT(lssc_d);
1794 			ASSIGN_OPT(lssc_m2);
1795 
1796 			ASSIGN_OPT(ulsc_m1);
1797 			ASSIGN_OPT(ulsc_d);
1798 			ASSIGN_OPT(ulsc_m2);
1799 
1800 			ASSIGN_OPT(flags);
1801 
1802 #undef ASSIGN_OPT
1803 		} else
1804 			COPY(pq_u);
1805 
1806 		ASSIGN(qid);
1807 		break;
1808 	}
1809 	case 1: {
1810 		struct pf_altq_v1 *imported_q =
1811 		    &((struct pfioc_altq_v1 *)pa)->altq;
1812 
1813 		COPY(ifname);
1814 
1815 		ASSIGN(scheduler);
1816 		ASSIGN(tbrsize);
1817 		ASSIGN(ifbandwidth);
1818 
1819 		COPY(qname);
1820 		COPY(parent);
1821 		ASSIGN(parent_qid);
1822 		ASSIGN(bandwidth);
1823 		ASSIGN(priority);
1824 		ASSIGN(local_flags);
1825 
1826 		ASSIGN(qlimit);
1827 		ASSIGN(flags);
1828 		COPY(pq_u);
1829 
1830 		ASSIGN(qid);
1831 		break;
1832 	}
1833 	default:
1834 		panic("%s: unhandled struct pfioc_altq version", __func__);
1835 		break;
1836 	}
1837 
1838 #undef ASSIGN
1839 #undef COPY
1840 
1841 	return (0);
1842 }
1843 
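/*
 * The same version handshake seen from userland, as a minimal sketch
 * (assuming the DIOCADDALTQV0/DIOCADDALTQV1 ioctls from <net/pfvar.h>
 * and an open /dev/pf descriptor dev; error handling omitted): an old
 * binary passes the smaller v0 structure, which is how the kernel
 * infers version 0 without a version field, while v1 callers set
 * pa->version explicitly:
 *
 *	struct pfioc_altq_v0 pa0;		(ioc_size == sizeof(v0))
 *	ioctl(dev, DIOCADDALTQV0, &pa0);
 *
 *	struct pfioc_altq_v1 pa1 = { .version = PFIOC_ALTQ_VERSION };
 *	ioctl(dev, DIOCADDALTQV1, &pa1);
 */
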
1844 static struct pf_altq *
1845 pf_altq_get_nth_active(u_int32_t n)
1846 {
1847 	struct pf_altq		*altq;
1848 	u_int32_t		 nr;
1849 
1850 	nr = 0;
1851 	TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
1852 		if (nr == n)
1853 			return (altq);
1854 		nr++;
1855 	}
1856 
1857 	TAILQ_FOREACH(altq, V_pf_altqs_active, entries) {
1858 		if (nr == n)
1859 			return (altq);
1860 		nr++;
1861 	}
1862 
1863 	return (NULL);
1864 }
1865 #endif /* ALTQ */
1866 
1867 struct pf_krule *
1868 pf_krule_alloc(void)
1869 {
1870 	struct pf_krule *rule;
1871 
1872 	rule = malloc(sizeof(struct pf_krule), M_PFRULE, M_WAITOK | M_ZERO);
1873 	mtx_init(&rule->nat.mtx, "pf_krule_nat_pool", NULL, MTX_DEF);
1874 	mtx_init(&rule->rdr.mtx, "pf_krule_rdr_pool", NULL, MTX_DEF);
1875 	mtx_init(&rule->route.mtx, "pf_krule_route_pool", NULL, MTX_DEF);
1876 	rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
1877 	    M_WAITOK | M_ZERO);
1878 	return (rule);
1879 }
1880 
1881 void
1882 pf_krule_free(struct pf_krule *rule)
1883 {
1884 #ifdef PF_WANT_32_TO_64_COUNTER
1885 	bool wowned;
1886 #endif
1887 
1888 	if (rule == NULL)
1889 		return;
1890 
1891 #ifdef PF_WANT_32_TO_64_COUNTER
1892 	if (rule->allrulelinked) {
1893 		wowned = PF_RULES_WOWNED();
1894 		if (!wowned)
1895 			PF_RULES_WLOCK();
1896 		LIST_REMOVE(rule, allrulelist);
1897 		V_pf_allrulecount--;
1898 		if (!wowned)
1899 			PF_RULES_WUNLOCK();
1900 	}
1901 #endif
1902 
1903 	pf_counter_u64_deinit(&rule->evaluations);
1904 	for (int i = 0; i < 2; i++) {
1905 		pf_counter_u64_deinit(&rule->packets[i]);
1906 		pf_counter_u64_deinit(&rule->bytes[i]);
1907 	}
1908 	counter_u64_free(rule->states_cur);
1909 	counter_u64_free(rule->states_tot);
1910 	for (pf_sn_types_t sn_type = 0; sn_type < PF_SN_MAX; sn_type++)
1911 		counter_u64_free(rule->src_nodes[sn_type]);
1912 	uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp);
1913 
1914 	mtx_destroy(&rule->nat.mtx);
1915 	mtx_destroy(&rule->rdr.mtx);
1916 	mtx_destroy(&rule->route.mtx);
1917 	free(rule, M_PFRULE);
1918 }
1919 
1920 void
1921 pf_krule_clear_counters(struct pf_krule *rule)
1922 {
1923 	pf_counter_u64_zero(&rule->evaluations);
1924 	for (int i = 0; i < 2; i++) {
1925 		pf_counter_u64_zero(&rule->packets[i]);
1926 		pf_counter_u64_zero(&rule->bytes[i]);
1927 	}
1928 	counter_u64_zero(rule->states_tot);
1929 }
1930 
1931 static void
1932 pf_kpooladdr_to_pooladdr(const struct pf_kpooladdr *kpool,
1933     struct pf_pooladdr *pool)
1934 {
1935 
1936 	bzero(pool, sizeof(*pool));
1937 	bcopy(&kpool->addr, &pool->addr, sizeof(pool->addr));
1938 	strlcpy(pool->ifname, kpool->ifname, sizeof(pool->ifname));
1939 }
1940 
1941 static int
1942 pf_pooladdr_to_kpooladdr(const struct pf_pooladdr *pool,
1943     struct pf_kpooladdr *kpool)
1944 {
1945 	int ret;
1946 
1947 	bzero(kpool, sizeof(*kpool));
1948 	bcopy(&pool->addr, &kpool->addr, sizeof(kpool->addr));
1949 	ret = pf_user_strcpy(kpool->ifname, pool->ifname,
1950 	    sizeof(kpool->ifname));
1951 	return (ret);
1952 }
1953 
1954 static void
1955 pf_pool_to_kpool(const struct pf_pool *pool, struct pf_kpool *kpool)
1956 {
1957 	_Static_assert(sizeof(pool->key) == sizeof(kpool->key), "");
1958 	_Static_assert(sizeof(pool->counter) == sizeof(kpool->counter), "");
1959 
1960 	bcopy(&pool->key, &kpool->key, sizeof(kpool->key));
1961 	bcopy(&pool->counter, &kpool->counter, sizeof(kpool->counter));
1962 
1963 	kpool->tblidx = pool->tblidx;
1964 	kpool->proxy_port[0] = pool->proxy_port[0];
1965 	kpool->proxy_port[1] = pool->proxy_port[1];
1966 	kpool->opts = pool->opts;
1967 }
1968 
1969 static int
1970 pf_rule_to_krule(const struct pf_rule *rule, struct pf_krule *krule)
1971 {
1972 	int ret;
1973 
1974 #ifndef INET
1975 	if (rule->af == AF_INET) {
1976 		return (EAFNOSUPPORT);
1977 	}
1978 #endif /* INET */
1979 #ifndef INET6
1980 	if (rule->af == AF_INET6) {
1981 		return (EAFNOSUPPORT);
1982 	}
1983 #endif /* INET6 */
1984 
1985 	ret = pf_check_rule_addr(&rule->src);
1986 	if (ret != 0)
1987 		return (ret);
1988 	ret = pf_check_rule_addr(&rule->dst);
1989 	if (ret != 0)
1990 		return (ret);
1991 
1992 	bcopy(&rule->src, &krule->src, sizeof(rule->src));
1993 	bcopy(&rule->dst, &krule->dst, sizeof(rule->dst));
1994 
1995 	ret = pf_user_strcpy(krule->label[0], rule->label, sizeof(rule->label));
1996 	if (ret != 0)
1997 		return (ret);
1998 	ret = pf_user_strcpy(krule->ifname, rule->ifname, sizeof(rule->ifname));
1999 	if (ret != 0)
2000 		return (ret);
2001 	ret = pf_user_strcpy(krule->qname, rule->qname, sizeof(rule->qname));
2002 	if (ret != 0)
2003 		return (ret);
2004 	ret = pf_user_strcpy(krule->pqname, rule->pqname, sizeof(rule->pqname));
2005 	if (ret != 0)
2006 		return (ret);
2007 	ret = pf_user_strcpy(krule->tagname, rule->tagname,
2008 	    sizeof(rule->tagname));
2009 	if (ret != 0)
2010 		return (ret);
2011 	ret = pf_user_strcpy(krule->match_tagname, rule->match_tagname,
2012 	    sizeof(rule->match_tagname));
2013 	if (ret != 0)
2014 		return (ret);
2015 	ret = pf_user_strcpy(krule->overload_tblname, rule->overload_tblname,
2016 	    sizeof(rule->overload_tblname));
2017 	if (ret != 0)
2018 		return (ret);
2019 
2020 	pf_pool_to_kpool(&rule->rpool, &krule->rdr);
2021 
2022 	/* Don't allow userspace to set evaluations, packets or bytes. */
2023 	/* kif, anchor, overload_tbl are not copied over. */
2024 
2025 	krule->os_fingerprint = rule->os_fingerprint;
2026 
2027 	krule->rtableid = rule->rtableid;
2028 	/* pf_rule->timeout is smaller than pf_krule->timeout */
2029 	bcopy(rule->timeout, krule->timeout, sizeof(rule->timeout));
2030 	krule->max_states = rule->max_states;
2031 	krule->max_src_nodes = rule->max_src_nodes;
2032 	krule->max_src_states = rule->max_src_states;
2033 	krule->max_src_conn = rule->max_src_conn;
2034 	krule->max_src_conn_rate.limit = rule->max_src_conn_rate.limit;
2035 	krule->max_src_conn_rate.seconds = rule->max_src_conn_rate.seconds;
2036 	krule->qid = rule->qid;
2037 	krule->pqid = rule->pqid;
2038 	krule->nr = rule->nr;
2039 	krule->prob = rule->prob;
2040 	krule->cuid = rule->cuid;
2041 	krule->cpid = rule->cpid;
2042 
2043 	krule->return_icmp = rule->return_icmp;
2044 	krule->return_icmp6 = rule->return_icmp6;
2045 	krule->max_mss = rule->max_mss;
2046 	krule->tag = rule->tag;
2047 	krule->match_tag = rule->match_tag;
2048 	krule->scrub_flags = rule->scrub_flags;
2049 
2050 	bcopy(&rule->uid, &krule->uid, sizeof(krule->uid));
2051 	bcopy(&rule->gid, &krule->gid, sizeof(krule->gid));
2052 
2053 	krule->rule_flag = rule->rule_flag;
2054 	krule->action = rule->action;
2055 	krule->direction = rule->direction;
2056 	krule->log = rule->log;
2057 	krule->logif = rule->logif;
2058 	krule->quick = rule->quick;
2059 	krule->ifnot = rule->ifnot;
2060 	krule->match_tag_not = rule->match_tag_not;
2061 	krule->natpass = rule->natpass;
2062 
2063 	krule->keep_state = rule->keep_state;
2064 	krule->af = rule->af;
2065 	krule->proto = rule->proto;
2066 	krule->type = rule->type;
2067 	krule->code = rule->code;
2068 	krule->flags = rule->flags;
2069 	krule->flagset = rule->flagset;
2070 	krule->min_ttl = rule->min_ttl;
2071 	krule->allow_opts = rule->allow_opts;
2072 	krule->rt = rule->rt;
2073 	krule->return_ttl = rule->return_ttl;
2074 	krule->tos = rule->tos;
2075 	krule->set_tos = rule->set_tos;
2076 
2077 	krule->flush = rule->flush;
2078 	krule->prio = rule->prio;
2079 	krule->set_prio[0] = rule->set_prio[0];
2080 	krule->set_prio[1] = rule->set_prio[1];
2081 
2082 	bcopy(&rule->divert, &krule->divert, sizeof(krule->divert));
2083 
2084 	return (0);
2085 }
2086 
2087 int
2088 pf_ioctl_getrules(struct pfioc_rule *pr)
2089 {
2090 	PF_RULES_RLOCK_TRACKER;
2091 	struct pf_kruleset	*ruleset;
2092 	struct pf_krule		*tail;
2093 	int			 rs_num;
2094 
2095 	PF_RULES_RLOCK();
2096 	ruleset = pf_find_kruleset(pr->anchor);
2097 	if (ruleset == NULL) {
2098 		PF_RULES_RUNLOCK();
2099 		return (EINVAL);
2100 	}
2101 	rs_num = pf_get_ruleset_number(pr->rule.action);
2102 	if (rs_num >= PF_RULESET_MAX) {
2103 		PF_RULES_RUNLOCK();
2104 		return (EINVAL);
2105 	}
2106 	tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
2107 	    pf_krulequeue);
2108 	if (tail)
2109 		pr->nr = tail->nr + 1;
2110 	else
2111 		pr->nr = 0;
2112 	pr->ticket = ruleset->rules[rs_num].active.ticket;
2113 	PF_RULES_RUNLOCK();
2114 
2115 	return (0);
2116 }
2117 
2118 static int
2119 pf_rule_checkaf(struct pf_krule *r)
2120 {
2121 	switch (r->af) {
2122 	case 0:
2123 		if (r->rule_flag & PFRULE_AFTO)
2124 			return (EPFNOSUPPORT);
2125 		break;
2126 	case AF_INET:
2127 		if ((r->rule_flag & PFRULE_AFTO) && r->naf != AF_INET6)
2128 			return (EPFNOSUPPORT);
2129 		break;
2130 #ifdef INET6
2131 	case AF_INET6:
2132 		if ((r->rule_flag & PFRULE_AFTO) && r->naf != AF_INET)
2133 			return (EPFNOSUPPORT);
2134 		break;
2135 #endif /* INET6 */
2136 	default:
2137 		return (EPFNOSUPPORT);
2138 	}
2139 
2140 	if ((r->rule_flag & PFRULE_AFTO) == 0 && r->naf != 0)
2141 		return (EPFNOSUPPORT);
2142 
2143 	return (0);
2144 }
2145 
2146 static int
2147 pf_validate_range(uint8_t op, uint16_t port[2])
2148 {
2149 	uint16_t a = ntohs(port[0]);
2150 	uint16_t b = ntohs(port[1]);
2151 
2152 	if ((op == PF_OP_RRG && a > b) ||  /* 34:12,  i.e. none */
2153 	    (op == PF_OP_IRG && a >= b) || /* 34><12, i.e. none */
2154 	    (op == PF_OP_XRG && a > b))	   /* 34<>22, i.e. all */
2155 		return (1);
2156 	return (0);
2157 }
2158 
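/*
 * Worked examples of the checks above, with ports shown in host byte
 * order for readability (callers pass them in network order):
 *
 *	PF_OP_RRG, 34:12   -> rejected, the range matches nothing
 *	PF_OP_IRG, 34><34  -> rejected, the range matches nothing
 *	PF_OP_XRG, 34<>22  -> rejected, the range matches everything
 *	PF_OP_RRG, 12:34   -> accepted
 */
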
2159 int
2160 pf_ioctl_addrule(struct pf_krule *rule, uint32_t ticket,
2161     uint32_t pool_ticket, const char *anchor, const char *anchor_call,
2162     uid_t uid, pid_t pid)
2163 {
2164 	struct pf_kruleset	*ruleset;
2165 	struct pf_krule		*tail;
2166 	struct pf_kpooladdr	*pa;
2167 	struct pfi_kkif		*kif = NULL, *rcv_kif = NULL;
2168 	int			 rs_num;
2169 	int			 error = 0;
2170 
2171 #define	ERROUT(x)		ERROUT_FUNCTION(errout, x)
2172 #define	ERROUT_UNLOCKED(x)	ERROUT_FUNCTION(errout_unlocked, x)
2173 
2174 	if ((rule->return_icmp >> 8) > ICMP_MAXTYPE)
2175 		ERROUT_UNLOCKED(EINVAL);
2176 
2177 	if ((error = pf_rule_checkaf(rule)))
2178 		ERROUT_UNLOCKED(error);
2179 	if (pf_validate_range(rule->src.port_op, rule->src.port))
2180 		ERROUT_UNLOCKED(EINVAL);
2181 	if (pf_validate_range(rule->dst.port_op, rule->dst.port))
2182 		ERROUT_UNLOCKED(EINVAL);
2183 
2184 	if (rule->ifname[0])
2185 		kif = pf_kkif_create(M_WAITOK);
2186 	if (rule->rcv_ifname[0])
2187 		rcv_kif = pf_kkif_create(M_WAITOK);
2188 	pf_counter_u64_init(&rule->evaluations, M_WAITOK);
2189 	for (int i = 0; i < 2; i++) {
2190 		pf_counter_u64_init(&rule->packets[i], M_WAITOK);
2191 		pf_counter_u64_init(&rule->bytes[i], M_WAITOK);
2192 	}
2193 	rule->states_cur = counter_u64_alloc(M_WAITOK);
2194 	rule->states_tot = counter_u64_alloc(M_WAITOK);
2195 	for (pf_sn_types_t sn_type = 0; sn_type < PF_SN_MAX; sn_type++)
2196 		rule->src_nodes[sn_type] = counter_u64_alloc(M_WAITOK);
2197 	rule->cuid = uid;
2198 	rule->cpid = pid;
2199 	TAILQ_INIT(&rule->rdr.list);
2200 	TAILQ_INIT(&rule->nat.list);
2201 	TAILQ_INIT(&rule->route.list);
2202 
2203 	PF_CONFIG_LOCK();
2204 	PF_RULES_WLOCK();
2205 #ifdef PF_WANT_32_TO_64_COUNTER
2206 	LIST_INSERT_HEAD(&V_pf_allrulelist, rule, allrulelist);
2207 	MPASS(!rule->allrulelinked);
2208 	rule->allrulelinked = true;
2209 	V_pf_allrulecount++;
2210 #endif
2211 	ruleset = pf_find_kruleset(anchor);
2212 	if (ruleset == NULL)
2213 		ERROUT(EINVAL);
2214 	rs_num = pf_get_ruleset_number(rule->action);
2215 	if (rs_num >= PF_RULESET_MAX)
2216 		ERROUT(EINVAL);
2217 	if (ticket != ruleset->rules[rs_num].inactive.ticket) {
2218 		DPFPRINTF(PF_DEBUG_MISC,
2219 		    "ticket: %d != [%d]%d", ticket, rs_num,
2220 		    ruleset->rules[rs_num].inactive.ticket);
2221 		ERROUT(EBUSY);
2222 	}
2223 	if (pool_ticket != V_ticket_pabuf) {
2224 		DPFPRINTF(PF_DEBUG_MISC,
2225 		    "pool_ticket: %d != %d", pool_ticket,
2226 		    V_ticket_pabuf);
2227 		ERROUT(EBUSY);
2228 	}
2229 	/*
2230 	 * XXXMJG hack: there is no mechanism to ensure the caller started
2231 	 * the transaction. The ticket checked above may happen to match by
2232 	 * accident, even if nobody called DIOCXBEGIN, let alone this process.
2233 	 * Partially work around it by checking if the RB tree got allocated,
2234 	 * see pf_begin_rules.
2235 	 */
2236 	if (ruleset->rules[rs_num].inactive.tree == NULL) {
2237 		ERROUT(EINVAL);
2238 	}
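
	/*
	 * For reference, a minimal sketch of the transaction that issues
	 * the tickets checked above (pfioc_trans interface, error
	 * handling omitted):
	 *
	 *	struct pfioc_trans_e te = { .rs_num = PF_RULESET_FILTER };
	 *	struct pfioc_trans t = { .size = 1, .esize = sizeof(te),
	 *	    .array = &te };
	 *	ioctl(dev, DIOCXBEGIN, &t);	(allocates the inactive tree)
	 *	...				(DIOCADDRULE carries te.ticket)
	 *	ioctl(dev, DIOCXCOMMIT, &t);	(swaps inactive -> active)
	 */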
2239 
2240 	tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
2241 	    pf_krulequeue);
2242 	if (tail)
2243 		rule->nr = tail->nr + 1;
2244 	else
2245 		rule->nr = 0;
2246 	if (rule->ifname[0]) {
2247 		rule->kif = pfi_kkif_attach(kif, rule->ifname);
2248 		kif = NULL;
2249 		pfi_kkif_ref(rule->kif);
2250 	} else
2251 		rule->kif = NULL;
2252 
2253 	if (rule->rcv_ifname[0]) {
2254 		rule->rcv_kif = pfi_kkif_attach(rcv_kif, rule->rcv_ifname);
2255 		rcv_kif = NULL;
2256 		pfi_kkif_ref(rule->rcv_kif);
2257 	} else
2258 		rule->rcv_kif = NULL;
2259 
2260 	if (rule->rtableid > 0 && rule->rtableid >= rt_numfibs)
2261 		ERROUT(EBUSY);
2262 #ifdef ALTQ
2263 	/* set queue IDs */
2264 	if (rule->qname[0] != 0) {
2265 		if ((rule->qid = pf_qname2qid(rule->qname, true)) == 0)
2266 			ERROUT(EBUSY);
2267 		else if (rule->pqname[0] != 0) {
2268 			if ((rule->pqid =
2269 			    pf_qname2qid(rule->pqname, true)) == 0)
2270 				ERROUT(EBUSY);
2271 		} else
2272 			rule->pqid = rule->qid;
2273 	}
2274 #endif
2275 	if (rule->tagname[0])
2276 		if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
2277 			ERROUT(EBUSY);
2278 	if (rule->match_tagname[0])
2279 		if ((rule->match_tag =
2280 		    pf_tagname2tag(rule->match_tagname)) == 0)
2281 			ERROUT(EBUSY);
2282 	if (rule->rt && !rule->direction)
2283 		ERROUT(EINVAL);
2284 	if (!rule->log)
2285 		rule->logif = 0;
2286 	if (! pf_init_threshold(&rule->pktrate, rule->pktrate.limit,
2287 	   rule->pktrate.seconds))
2288 		ERROUT(ENOMEM);
2289 	if (pf_addr_setup(ruleset, &rule->src.addr, rule->af))
2290 		ERROUT(ENOMEM);
2291 	if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af))
2292 		ERROUT(ENOMEM);
2293 	if (pf_kanchor_setup(rule, ruleset, anchor_call))
2294 		ERROUT(EINVAL);
2295 	if (rule->scrub_flags & PFSTATE_SETPRIO &&
2296 	    (rule->set_prio[0] > PF_PRIO_MAX ||
2297 	    rule->set_prio[1] > PF_PRIO_MAX))
2298 		ERROUT(EINVAL);
2299 	for (int i = 0; i < 3; i++) {
2300 		TAILQ_FOREACH(pa, &V_pf_pabuf[i], entries)
2301 			if (pa->addr.type == PF_ADDR_TABLE) {
2302 				pa->addr.p.tbl = pfr_attach_table(ruleset,
2303 				    pa->addr.v.tblname);
2304 				if (pa->addr.p.tbl == NULL)
2305 					ERROUT(ENOMEM);
2306 			}
2307 	}
2308 
2309 	rule->overload_tbl = NULL;
2310 	if (rule->overload_tblname[0]) {
2311 		if ((rule->overload_tbl = pfr_attach_table(ruleset,
2312 		    rule->overload_tblname)) == NULL)
2313 			ERROUT(EINVAL);
2314 		else
2315 			rule->overload_tbl->pfrkt_flags |=
2316 			    PFR_TFLAG_ACTIVE;
2317 	}
2318 
2319 	pf_mv_kpool(&V_pf_pabuf[0], &rule->nat.list);
2320 
2321 	/*
2322 	 * Old versions of pfctl provided route-to redirection pools in the
2323 	 * single common redirection pool rdr. New versions use rdr only for
2324 	 * rdr-to rules.
2325 	 */
2326 	if (rule->rt > PF_NOPFROUTE && TAILQ_EMPTY(&V_pf_pabuf[2])) {
2327 		pf_mv_kpool(&V_pf_pabuf[1], &rule->route.list);
2328 	} else {
2329 		pf_mv_kpool(&V_pf_pabuf[1], &rule->rdr.list);
2330 		pf_mv_kpool(&V_pf_pabuf[2], &rule->route.list);
2331 	}
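
	/*
	 * Resulting mapping of the staging buffers (a summary of the
	 * moves above):
	 *	V_pf_pabuf[0] -> rule->nat.list   (nat-to)
	 *	V_pf_pabuf[1] -> rule->rdr.list   (rdr-to), or
	 *			 rule->route.list when an old pfctl staged
	 *			 route-to addresses there
	 *	V_pf_pabuf[2] -> rule->route.list (route-to/reply-to/dup-to)
	 */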
2332 
2333 	if (((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
2334 	    (rule->action == PF_BINAT))	&& rule->anchor == NULL &&
2335 	    TAILQ_FIRST(&rule->rdr.list) == NULL) {
2336 		ERROUT(EINVAL);
2337 	}
2338 
2339 	if (rule->rt > PF_NOPFROUTE && (TAILQ_FIRST(&rule->route.list) == NULL)) {
2340 		ERROUT(EINVAL);
2341 	}
2342 
2343 	if (rule->action == PF_PASS && (rule->rdr.opts & PF_POOL_STICKYADDR ||
2344 	    rule->nat.opts & PF_POOL_STICKYADDR) && !rule->keep_state) {
2345 		ERROUT(EINVAL);
2346 	}
2347 
2348 	MPASS(error == 0);
2349 
2350 	rule->nat.cur = TAILQ_FIRST(&rule->nat.list);
2351 	rule->rdr.cur = TAILQ_FIRST(&rule->rdr.list);
2352 	rule->route.cur = TAILQ_FIRST(&rule->route.list);
2353 	rule->route.ipv6_nexthop_af = AF_INET6;
2354 	TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
2355 	    rule, entries);
2356 	ruleset->rules[rs_num].inactive.rcount++;
2357 
2358 	PF_RULES_WUNLOCK();
2359 	pf_hash_rule(rule);
2360 	if (RB_INSERT(pf_krule_global, ruleset->rules[rs_num].inactive.tree, rule) != NULL) {
2361 		PF_RULES_WLOCK();
2362 		TAILQ_REMOVE(ruleset->rules[rs_num].inactive.ptr, rule, entries);
2363 		ruleset->rules[rs_num].inactive.rcount--;
2364 		pf_free_rule(rule);
2365 		rule = NULL;
2366 		ERROUT(EEXIST);
2367 	}
2368 	PF_CONFIG_UNLOCK();
2369 
2370 	return (0);
2371 
2372 #undef ERROUT
2373 #undef ERROUT_UNLOCKED
2374 errout:
2375 	PF_RULES_WUNLOCK();
2376 	PF_CONFIG_UNLOCK();
2377 errout_unlocked:
2378 	pf_kkif_free(rcv_kif);
2379 	pf_kkif_free(kif);
2380 	pf_krule_free(rule);
2381 	return (error);
2382 }
2383 
2384 static bool
2385 pf_label_match(const struct pf_krule *rule, const char *label)
2386 {
2387 	int i = 0;
2388 
2389 	while (*rule->label[i]) {
2390 		if (strcmp(rule->label[i], label) == 0)
2391 			return (true);
2392 		i++;
2393 	}
2394 
2395 	return (false);
2396 }
2397 
2398 static unsigned int
2399 pf_kill_matching_state(struct pf_state_key_cmp *key, int dir)
2400 {
2401 	struct pf_kstate *s;
2402 	int more = 0;
2403 
2404 	s = pf_find_state_all(key, dir, &more);
2405 	if (s == NULL)
2406 		return (0);
2407 
2408 	if (more) {
2409 		PF_STATE_UNLOCK(s);
2410 		return (0);
2411 	}
2412 
2413 	pf_remove_state(s);
2414 	return (1);
2415 }
2416 
2417 static int
2418 pf_killstates_row(struct pf_kstate_kill *psk, struct pf_idhash *ih)
2419 {
2420 	struct pf_kstate	*s;
2421 	struct pf_state_key	*sk;
2422 	struct pf_addr		*srcaddr, *dstaddr;
2423 	struct pf_state_key_cmp	 match_key;
2424 	int			 idx, killed = 0;
2425 	unsigned int		 dir;
2426 	u_int16_t		 srcport, dstport;
2427 	struct pfi_kkif		*kif;
2428 
2429 relock_DIOCKILLSTATES:
2430 	PF_HASHROW_LOCK(ih);
2431 	LIST_FOREACH(s, &ih->states, entry) {
2432 		/* For floating states look at the original kif. */
2433 		kif = s->kif == V_pfi_all ? s->orig_kif : s->kif;
2434 
2435 		sk = s->key[psk->psk_nat ? PF_SK_STACK : PF_SK_WIRE];
2436 		if (s->direction == PF_OUT) {
2437 			srcaddr = &sk->addr[1];
2438 			dstaddr = &sk->addr[0];
2439 			srcport = sk->port[1];
2440 			dstport = sk->port[0];
2441 		} else {
2442 			srcaddr = &sk->addr[0];
2443 			dstaddr = &sk->addr[1];
2444 			srcport = sk->port[0];
2445 			dstport = sk->port[1];
2446 		}
2447 
2448 		if (psk->psk_af && sk->af != psk->psk_af)
2449 			continue;
2450 
2451 		if (psk->psk_proto && psk->psk_proto != sk->proto)
2452 			continue;
2453 
2454 		if (! pf_match_addr(psk->psk_src.neg,
2455 		    &psk->psk_src.addr.v.a.addr,
2456 		    &psk->psk_src.addr.v.a.mask, srcaddr, sk->af))
2457 			continue;
2458 
2459 		if (! pf_match_addr(psk->psk_dst.neg,
2460 		    &psk->psk_dst.addr.v.a.addr,
2461 		    &psk->psk_dst.addr.v.a.mask, dstaddr, sk->af))
2462 			continue;
2463 
2464 		if (! pf_match_addr(psk->psk_rt_addr.neg,
2465 		    &psk->psk_rt_addr.addr.v.a.addr,
2466 		    &psk->psk_rt_addr.addr.v.a.mask,
2467 		    &s->act.rt_addr, sk->af))
2468 			continue;
2469 
2470 		if (psk->psk_src.port_op != 0 &&
2471 		    ! pf_match_port(psk->psk_src.port_op,
2472 		    psk->psk_src.port[0], psk->psk_src.port[1], srcport))
2473 			continue;
2474 
2475 		if (psk->psk_dst.port_op != 0 &&
2476 		    ! pf_match_port(psk->psk_dst.port_op,
2477 		    psk->psk_dst.port[0], psk->psk_dst.port[1], dstport))
2478 			continue;
2479 
2480 		if (psk->psk_label[0] &&
2481 		    ! pf_label_match(s->rule, psk->psk_label))
2482 			continue;
2483 
2484 		if (psk->psk_ifname[0] && strcmp(psk->psk_ifname,
2485 		    kif->pfik_name))
2486 			continue;
2487 
2488 		if (psk->psk_kill_match) {
2489 			/* Create the key to find matching states, with lock
2490 			 * held. */
2491 
2492 			bzero(&match_key, sizeof(match_key));
2493 
2494 			if (s->direction == PF_OUT) {
2495 				dir = PF_IN;
2496 				idx = psk->psk_nat ? PF_SK_WIRE : PF_SK_STACK;
2497 			} else {
2498 				dir = PF_OUT;
2499 				idx = psk->psk_nat ? PF_SK_STACK : PF_SK_WIRE;
2500 			}
2501 
2502 			match_key.af = s->key[idx]->af;
2503 			match_key.proto = s->key[idx]->proto;
2504 			pf_addrcpy(&match_key.addr[0],
2505 			    &s->key[idx]->addr[1], match_key.af);
2506 			match_key.port[0] = s->key[idx]->port[1];
2507 			pf_addrcpy(&match_key.addr[1],
2508 			    &s->key[idx]->addr[0], match_key.af);
2509 			match_key.port[1] = s->key[idx]->port[0];
2510 		}
2511 
2512 		pf_remove_state(s);
2513 		killed++;
2514 
2515 		if (psk->psk_kill_match)
2516 			killed += pf_kill_matching_state(&match_key, dir);
2517 
2518 		goto relock_DIOCKILLSTATES;
2519 	}
2520 	PF_HASHROW_UNLOCK(ih);
2521 
2522 	return (killed);
2523 }
2524 
2525 int
2526 pf_start(void)
2527 {
2528 	int error = 0;
2529 
2530 	sx_xlock(&V_pf_ioctl_lock);
2531 	if (V_pf_status.running)
2532 		error = EEXIST;
2533 	else {
2534 		hook_pf();
2535 		if (! TAILQ_EMPTY(V_pf_keth->active.rules))
2536 			hook_pf_eth();
2537 		V_pf_status.running = 1;
2538 		V_pf_status.since = time_uptime;
2539 		new_unrhdr64(&V_pf_stateid, time_second);
2540 
2541 		DPFPRINTF(PF_DEBUG_MISC, "pf: started");
2542 	}
2543 	sx_xunlock(&V_pf_ioctl_lock);
2544 
2545 	return (error);
2546 }
2547 
2548 int
2549 pf_stop(void)
2550 {
2551 	int error = 0;
2552 
2553 	sx_xlock(&V_pf_ioctl_lock);
2554 	if (!V_pf_status.running)
2555 		error = ENOENT;
2556 	else {
2557 		V_pf_status.running = 0;
2558 		dehook_pf();
2559 		dehook_pf_eth();
2560 		V_pf_status.since = time_uptime;
2561 		DPFPRINTF(PF_DEBUG_MISC, "pf: stopped");
2562 	}
2563 	sx_xunlock(&V_pf_ioctl_lock);
2564 
2565 	return (error);
2566 }
2567 
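/*
 * From userland these two functions back DIOCSTART/DIOCSTOP; a minimal
 * sketch (dev is an open /dev/pf descriptor) showing the error codes a
 * caller must tolerate:
 *
 *	if (ioctl(dev, DIOCSTART) == -1 && errno != EEXIST)
 *		err(1, "DIOCSTART");	(EEXIST: pf already running)
 *	if (ioctl(dev, DIOCSTOP) == -1 && errno != ENOENT)
 *		err(1, "DIOCSTOP");	(ENOENT: pf was not running)
 */
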
2568 void
2569 pf_ioctl_clear_status(void)
2570 {
2571 	PF_RULES_WLOCK();
2572 	for (int i = 0; i < PFRES_MAX; i++)
2573 		counter_u64_zero(V_pf_status.counters[i]);
2574 	for (int i = 0; i < FCNT_MAX; i++)
2575 		pf_counter_u64_zero(&V_pf_status.fcounters[i]);
2576 	for (int i = 0; i < SCNT_MAX; i++)
2577 		counter_u64_zero(V_pf_status.scounters[i]);
2578 	for (int i = 0; i < NCNT_MAX; i++)
2579 		counter_u64_zero(V_pf_status.ncounters[i]);
2580 	for (int i = 0; i < KLCNT_MAX; i++)
2581 		counter_u64_zero(V_pf_status.lcounters[i]);
2582 	V_pf_status.since = time_uptime;
2583 	if (*V_pf_status.ifname)
2584 		pfi_update_status(V_pf_status.ifname, NULL);
2585 	PF_RULES_WUNLOCK();
2586 }
2587 
2588 int
2589 pf_ioctl_set_timeout(int timeout, int seconds, int *prev_seconds)
2590 {
2591 	uint32_t old;
2592 
2593 	if (timeout < 0 || timeout >= PFTM_MAX ||
2594 	    seconds < 0)
2595 		return (EINVAL);
2596 
2597 	PF_RULES_WLOCK();
2598 	old = V_pf_default_rule.timeout[timeout];
2599 	if (timeout == PFTM_INTERVAL && seconds == 0)
2600 		seconds = 1;
2601 	V_pf_default_rule.timeout[timeout] = seconds;
2602 	if (timeout == PFTM_INTERVAL && seconds < old)
2603 		wakeup(pf_purge_thread);
2604 
2605 	if (prev_seconds != NULL)
2606 		*prev_seconds = old;
2607 
2608 	PF_RULES_WUNLOCK();
2609 
2610 	return (0);
2611 }
2612 
2613 int
2614 pf_ioctl_get_timeout(int timeout, int *seconds)
2615 {
2616 	PF_RULES_RLOCK_TRACKER;
2617 
2618 	if (timeout < 0 || timeout >= PFTM_MAX)
2619 		return (EINVAL);
2620 
2621 	PF_RULES_RLOCK();
2622 	*seconds = V_pf_default_rule.timeout[timeout];
2623 	PF_RULES_RUNLOCK();
2624 
2625 	return (0);
2626 }
2627 
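/*
 * These two back DIOCSETTIMEOUT/DIOCGETTIMEOUT (struct pfioc_tm); a
 * minimal sketch, noting the PFTM_INTERVAL special cases above (zero
 * is bumped to one second, and lowering the interval wakes the purge
 * thread early):
 *
 *	struct pfioc_tm pt = { .timeout = PFTM_TCP_ESTABLISHED,
 *	    .seconds = 3600 };
 *	ioctl(dev, DIOCSETTIMEOUT, &pt);	(pt.seconds returns the
 *						 previous value)
 */
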
2628 int
2629 pf_ioctl_set_limit(int index, unsigned int limit, unsigned int *old_limit)
2630 {
2631 
2632 	PF_RULES_WLOCK();
2633 	if (index < 0 || index >= PF_LIMIT_MAX ||
2634 	    V_pf_limits[index].zone == NULL) {
2635 		PF_RULES_WUNLOCK();
2636 		return (EINVAL);
2637 	}
2638 	uma_zone_set_max(V_pf_limits[index].zone,
2639 	    limit == 0 ? INT_MAX : limit);
2640 	if (old_limit != NULL)
2641 		*old_limit = V_pf_limits[index].limit;
2642 	V_pf_limits[index].limit = limit;
2643 	PF_RULES_WUNLOCK();
2644 
2645 	return (0);
2646 }
2647 
2648 int
2649 pf_ioctl_get_limit(int index, unsigned int *limit)
2650 {
2651 	PF_RULES_RLOCK_TRACKER;
2652 
2653 	if (index < 0 || index >= PF_LIMIT_MAX)
2654 		return (EINVAL);
2655 
2656 	PF_RULES_RLOCK();
2657 	*limit = V_pf_limits[index].limit;
2658 	PF_RULES_RUNLOCK();
2659 
2660 	return (0);
2661 }
2662 
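/*
 * These two back DIOCSETLIMIT/DIOCGETLIMIT (struct pfioc_limit); note
 * that a limit of 0 means "unlimited" (the zone max becomes INT_MAX)
 * while the recorded limit stays 0. A minimal sketch:
 *
 *	struct pfioc_limit pl = { .index = PF_LIMIT_STATES,
 *	    .limit = 100000 };
 *	ioctl(dev, DIOCSETLIMIT, &pl);
 */
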
2663 int
2664 pf_ioctl_begin_addrs(uint32_t *ticket)
2665 {
2666 	PF_RULES_WLOCK();
2667 	pf_empty_kpool(&V_pf_pabuf[0]);
2668 	pf_empty_kpool(&V_pf_pabuf[1]);
2669 	pf_empty_kpool(&V_pf_pabuf[2]);
2670 	*ticket = ++V_ticket_pabuf;
2671 	PF_RULES_WUNLOCK();
2672 
2673 	return (0);
2674 }
2675 
2676 int
2677 pf_ioctl_add_addr(struct pf_nl_pooladdr *pp)
2678 {
2679 	struct pf_kpooladdr	*pa = NULL;
2680 	struct pfi_kkif		*kif = NULL;
2681 	int error;
2682 
2683 	if (pp->which != PF_RDR && pp->which != PF_NAT &&
2684 	    pp->which != PF_RT)
2685 		return (EINVAL);
2686 
2687 	switch (pp->af) {
2688 #ifdef INET
2689 	case AF_INET:
2690 		/* FALLTHROUGH */
2691 #endif /* INET */
2692 #ifdef INET6
2693 	case AF_INET6:
2694 		/* FALLTHROUGH */
2695 #endif /* INET6 */
2696 	case AF_UNSPEC:
2697 		break;
2698 	default:
2699 		return (EAFNOSUPPORT);
2700 	}
2701 
2702 	if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
2703 	    pp->addr.addr.type != PF_ADDR_DYNIFTL &&
2704 	    pp->addr.addr.type != PF_ADDR_TABLE)
2705 		return (EINVAL);
2706 
2707 	if (pp->addr.addr.p.dyn != NULL)
2708 		return (EINVAL);
2709 
2710 	pa = malloc(sizeof(*pa), M_PFRULE, M_WAITOK);
2711 	error = pf_pooladdr_to_kpooladdr(&pp->addr, pa);
2712 	if (error != 0)
2713 		goto out;
2714 	if (pa->ifname[0])
2715 		kif = pf_kkif_create(M_WAITOK);
2716 	PF_RULES_WLOCK();
2717 	if (pp->ticket != V_ticket_pabuf) {
2718 		PF_RULES_WUNLOCK();
2719 		if (pa->ifname[0])
2720 			pf_kkif_free(kif);
2721 		error = EBUSY;
2722 		goto out;
2723 	}
2724 	if (pa->ifname[0]) {
2725 		pa->kif = pfi_kkif_attach(kif, pa->ifname);
2726 		kif = NULL;
2727 		pfi_kkif_ref(pa->kif);
2728 	} else
2729 		pa->kif = NULL;
2730 	if (pa->addr.type == PF_ADDR_DYNIFTL && ((error =
2731 	    pfi_dynaddr_setup(&pa->addr, pp->af)) != 0)) {
2732 		if (pa->ifname[0])
2733 			pfi_kkif_unref(pa->kif);
2734 		PF_RULES_WUNLOCK();
2735 		goto out;
2736 	}
2737 	pa->af = pp->af;
2738 	switch (pp->which) {
2739 	case PF_NAT:
2740 		TAILQ_INSERT_TAIL(&V_pf_pabuf[0], pa, entries);
2741 		break;
2742 	case PF_RDR:
2743 		TAILQ_INSERT_TAIL(&V_pf_pabuf[1], pa, entries);
2744 		break;
2745 	case PF_RT:
2746 		TAILQ_INSERT_TAIL(&V_pf_pabuf[2], pa, entries);
2747 		break;
2748 	}
2749 	PF_RULES_WUNLOCK();
2750 
2751 	return (0);
2752 
2753 out:
2754 	free(pa, M_PFRULE);
2755 	return (error);
2756 }
2757 
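/*
 * Together with pf_ioctl_begin_addrs() this implements the pool
 * address staging sequence; a hedged sketch of the classic ioctl side
 * (struct pfioc_pooladdr; the compat path supplies pp->which itself):
 *
 *	struct pfioc_pooladdr pp = { 0 };
 *	ioctl(dev, DIOCBEGINADDRS, &pp);	(resets pabuf, returns
 *						 pp.ticket)
 *	pp.addr.addr.type = PF_ADDR_ADDRMASK;	(then fill in addr/mask)
 *	ioctl(dev, DIOCADDADDR, &pp);		(appends under pp.ticket)
 */
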
2758 int
2759 pf_ioctl_get_addrs(struct pf_nl_pooladdr *pp)
2760 {
2761 	struct pf_kpool		*pool;
2762 	struct pf_kpooladdr	*pa;
2763 
2764 	PF_RULES_RLOCK_TRACKER;
2765 
2766 	if (pp->which != PF_RDR && pp->which != PF_NAT &&
2767 	    pp->which != PF_RT)
2768 		return (EINVAL);
2769 
2770 	pp->anchor[sizeof(pp->anchor) - 1] = 0;
2771 	pp->nr = 0;
2772 
2773 	PF_RULES_RLOCK();
2774 	pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
2775 	    pp->r_num, 0, 1, 0, pp->which);
2776 	if (pool == NULL) {
2777 		PF_RULES_RUNLOCK();
2778 		return (EBUSY);
2779 	}
2780 	TAILQ_FOREACH(pa, &pool->list, entries)
2781 		pp->nr++;
2782 	PF_RULES_RUNLOCK();
2783 
2784 	return (0);
2785 }
2786 
2787 int
2788 pf_ioctl_get_addr(struct pf_nl_pooladdr *pp)
2789 {
2790 	struct pf_kpool		*pool;
2791 	struct pf_kpooladdr	*pa;
2792 	u_int32_t		 nr = 0;
2793 
2794 	if (pp->which != PF_RDR && pp->which != PF_NAT &&
2795 	    pp->which != PF_RT)
2796 		return (EINVAL);
2797 
2798 	PF_RULES_RLOCK_TRACKER;
2799 
2800 	pp->anchor[sizeof(pp->anchor) - 1] = '\0';
2801 
2802 	PF_RULES_RLOCK();
2803 	pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
2804 	    pp->r_num, 0, 1, 1, pp->which);
2805 	if (pool == NULL) {
2806 		PF_RULES_RUNLOCK();
2807 		return (EBUSY);
2808 	}
2809 	pa = TAILQ_FIRST(&pool->list);
2810 	while ((pa != NULL) && (nr < pp->nr)) {
2811 		pa = TAILQ_NEXT(pa, entries);
2812 		nr++;
2813 	}
2814 	if (pa == NULL) {
2815 		PF_RULES_RUNLOCK();
2816 		return (EBUSY);
2817 	}
2818 	pf_kpooladdr_to_pooladdr(pa, &pp->addr);
2819 	pp->af = pa->af;
2820 	pf_addr_copyout(&pp->addr.addr);
2821 	PF_RULES_RUNLOCK();
2822 
2823 	return (0);
2824 }
2825 
2826 int
2827 pf_ioctl_get_rulesets(struct pfioc_ruleset *pr)
2828 {
2829 	struct pf_kruleset	*ruleset;
2830 	struct pf_kanchor	*anchor;
2831 
2832 	PF_RULES_RLOCK_TRACKER;
2833 
2834 	pr->path[sizeof(pr->path) - 1] = '\0';
2835 
2836 	PF_RULES_RLOCK();
2837 	if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
2838 		PF_RULES_RUNLOCK();
2839 		return (ENOENT);
2840 	}
2841 	pr->nr = 0;
2842 	if (ruleset == &pf_main_ruleset) {
2843 		/* XXX kludge for pf_main_ruleset */
2844 		RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
2845 			if (anchor->parent == NULL)
2846 				pr->nr++;
2847 	} else {
2848 		RB_FOREACH(anchor, pf_kanchor_node,
2849 		    &ruleset->anchor->children)
2850 			pr->nr++;
2851 	}
2852 	PF_RULES_RUNLOCK();
2853 
2854 	return (0);
2855 }
2856 
2857 int
2858 pf_ioctl_get_ruleset(struct pfioc_ruleset *pr)
2859 {
2860 	struct pf_kruleset	*ruleset;
2861 	struct pf_kanchor	*anchor;
2862 	u_int32_t		 nr = 0;
2863 	int			 error = 0;
2864 
2865 	PF_RULES_RLOCK_TRACKER;
2866 
2867 	PF_RULES_RLOCK();
2868 	if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
2869 		PF_RULES_RUNLOCK();
2870 		return (ENOENT);
2871 	}
2872 
2873 	pr->name[0] = '\0';
2874 	if (ruleset == &pf_main_ruleset) {
2875 		/* XXX kludge for pf_main_ruleset */
2876 		RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
2877 			if (anchor->parent == NULL && nr++ == pr->nr) {
2878 				strlcpy(pr->name, anchor->name,
2879 				    sizeof(pr->name));
2880 				break;
2881 			}
2882 	} else {
2883 		RB_FOREACH(anchor, pf_kanchor_node,
2884 		    &ruleset->anchor->children)
2885 			if (nr++ == pr->nr) {
2886 				strlcpy(pr->name, anchor->name,
2887 				    sizeof(pr->name));
2888 				break;
2889 			}
2890 	}
2891 	if (!pr->name[0])
2892 		error = EBUSY;
2893 	PF_RULES_RUNLOCK();
2894 
2895 	return (error);
2896 }
2897 
2898 int
2899 pf_ioctl_natlook(struct pfioc_natlook *pnl)
2900 {
2901 	struct pf_state_key	*sk;
2902 	struct pf_kstate	*state;
2903 	struct pf_state_key_cmp	 key;
2904 	int			 m = 0, direction = pnl->direction;
2905 	int			 sidx, didx;
2906 
2907 	/* NATLOOK src and dst are reversed, so reverse sidx/didx */
2908 	sidx = (direction == PF_IN) ? 1 : 0;
2909 	didx = (direction == PF_IN) ? 0 : 1;
2910 
2911 	if (!pnl->proto ||
2912 	    PF_AZERO(&pnl->saddr, pnl->af) ||
2913 	    PF_AZERO(&pnl->daddr, pnl->af) ||
2914 	    ((pnl->proto == IPPROTO_TCP ||
2915 	    pnl->proto == IPPROTO_UDP) &&
2916 	    (!pnl->dport || !pnl->sport)))
2917 		return (EINVAL);
2918 
2919 	switch (pnl->direction) {
2920 	case PF_IN:
2921 	case PF_OUT:
2922 	case PF_INOUT:
2923 		break;
2924 	default:
2925 		return (EINVAL);
2926 	}
2927 
2928 	switch (pnl->af) {
2929 #ifdef INET
2930 	case AF_INET:
2931 		break;
2932 #endif /* INET */
2933 #ifdef INET6
2934 	case AF_INET6:
2935 		break;
2936 #endif /* INET6 */
2937 	default:
2938 		return (EAFNOSUPPORT);
2939 	}
2940 
2941 	bzero(&key, sizeof(key));
2942 	key.af = pnl->af;
2943 	key.proto = pnl->proto;
2944 	pf_addrcpy(&key.addr[sidx], &pnl->saddr, pnl->af);
2945 	key.port[sidx] = pnl->sport;
2946 	pf_addrcpy(&key.addr[didx], &pnl->daddr, pnl->af);
2947 	key.port[didx] = pnl->dport;
2948 
2949 	state = pf_find_state_all(&key, direction, &m);
2950 	if (state == NULL)
2951 		return (ENOENT);
2952 
2953 	if (m > 1) {
2954 		PF_STATE_UNLOCK(state);
2955 		return (E2BIG);	/* more than one state */
2956 	}
2957 
2958 	sk = state->key[sidx];
2959 	pf_addrcpy(&pnl->rsaddr,
2960 	    &sk->addr[sidx], sk->af);
2961 	pnl->rsport = sk->port[sidx];
2962 	pf_addrcpy(&pnl->rdaddr,
2963 	    &sk->addr[didx], sk->af);
2964 	pnl->rdport = sk->port[didx];
2965 	PF_STATE_UNLOCK(state);
2966 
2967 	return (0);
2968 }
2969 
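/*
 * The classic consumer of this lookup is a transparent proxy
 * recovering the pre-rdr destination of an accepted connection; a
 * hedged sketch, with saddr/sport taken from getpeername() and
 * daddr/dport from getsockname():
 *
 *	struct pfioc_natlook nl = { .af = AF_INET,
 *	    .proto = IPPROTO_TCP, .direction = PF_OUT };
 *	(fill in nl.saddr/nl.sport and nl.daddr/nl.dport)
 *	if (ioctl(dev, DIOCNATLOOK, &nl) == 0)
 *		(nl.rdaddr and nl.rdport hold the original destination)
 */
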
2970 static int
2971 pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
2972 {
2973 	int			 error = 0;
2974 	PF_RULES_RLOCK_TRACKER;
2975 
2976 #define	ERROUT_IOCTL(target, x)					\
2977     do {								\
2978 	    error = (x);						\
2979 	    SDT_PROBE3(pf, ioctl, ioctl, error, cmd, error, __LINE__);	\
2980 	    goto target;						\
2981     } while (0)
2982 
2983 
2984 	/* XXX keep in sync with switch() below */
2985 	if (securelevel_gt(td->td_ucred, 2))
2986 		switch (cmd) {
2987 		case DIOCGETRULES:
2988 		case DIOCGETRULENV:
2989 		case DIOCGETADDRS:
2990 		case DIOCGETADDR:
2991 		case DIOCGETSTATE:
2992 		case DIOCGETSTATENV:
2993 		case DIOCSETSTATUSIF:
2994 		case DIOCGETSTATUSNV:
2995 		case DIOCCLRSTATUS:
2996 		case DIOCNATLOOK:
2997 		case DIOCSETDEBUG:
2998 #ifdef COMPAT_FREEBSD14
2999 		case DIOCGETSTATES:
3000 		case DIOCGETSTATESV2:
3001 #endif
3002 		case DIOCGETTIMEOUT:
3003 		case DIOCCLRRULECTRS:
3004 		case DIOCGETLIMIT:
3005 		case DIOCGETALTQSV0:
3006 		case DIOCGETALTQSV1:
3007 		case DIOCGETALTQV0:
3008 		case DIOCGETALTQV1:
3009 		case DIOCGETQSTATSV0:
3010 		case DIOCGETQSTATSV1:
3011 		case DIOCGETRULESETS:
3012 		case DIOCGETRULESET:
3013 		case DIOCRGETTABLES:
3014 		case DIOCRGETTSTATS:
3015 		case DIOCRCLRTSTATS:
3016 		case DIOCRCLRADDRS:
3017 		case DIOCRADDADDRS:
3018 		case DIOCRDELADDRS:
3019 		case DIOCRSETADDRS:
3020 		case DIOCRGETADDRS:
3021 		case DIOCRGETASTATS:
3022 		case DIOCRCLRASTATS:
3023 		case DIOCRTSTADDRS:
3024 		case DIOCOSFPGET:
3025 		case DIOCGETSRCNODES:
3026 		case DIOCCLRSRCNODES:
3027 		case DIOCGETSYNCOOKIES:
3028 		case DIOCIGETIFACES:
3029 		case DIOCGIFSPEEDV0:
3030 		case DIOCGIFSPEEDV1:
3031 		case DIOCSETIFFLAG:
3032 		case DIOCCLRIFFLAG:
3033 		case DIOCGETETHRULES:
3034 		case DIOCGETETHRULE:
3035 		case DIOCGETETHRULESETS:
3036 		case DIOCGETETHRULESET:
3037 			break;
3038 		case DIOCRCLRTABLES:
3039 		case DIOCRADDTABLES:
3040 		case DIOCRDELTABLES:
3041 		case DIOCRSETTFLAGS:
3042 			if (((struct pfioc_table *)addr)->pfrio_flags &
3043 			    PFR_FLAG_DUMMY)
3044 				break; /* dummy operation ok */
3045 			return (EPERM);
3046 		default:
3047 			return (EPERM);
3048 		}
3049 
3050 	if (!(flags & FWRITE))
3051 		switch (cmd) {
3052 		case DIOCGETRULES:
3053 		case DIOCGETADDRS:
3054 		case DIOCGETADDR:
3055 		case DIOCGETSTATE:
3056 		case DIOCGETSTATENV:
3057 		case DIOCGETSTATUSNV:
3058 #ifdef COMPAT_FREEBSD14
3059 		case DIOCGETSTATES:
3060 		case DIOCGETSTATESV2:
3061 #endif
3062 		case DIOCGETTIMEOUT:
3063 		case DIOCGETLIMIT:
3064 		case DIOCGETALTQSV0:
3065 		case DIOCGETALTQSV1:
3066 		case DIOCGETALTQV0:
3067 		case DIOCGETALTQV1:
3068 		case DIOCGETQSTATSV0:
3069 		case DIOCGETQSTATSV1:
3070 		case DIOCGETRULESETS:
3071 		case DIOCGETRULESET:
3072 		case DIOCNATLOOK:
3073 		case DIOCRGETTABLES:
3074 		case DIOCRGETTSTATS:
3075 		case DIOCRGETADDRS:
3076 		case DIOCRGETASTATS:
3077 		case DIOCRTSTADDRS:
3078 		case DIOCOSFPGET:
3079 		case DIOCGETSRCNODES:
3080 		case DIOCGETSYNCOOKIES:
3081 		case DIOCIGETIFACES:
3082 		case DIOCGIFSPEEDV1:
3083 		case DIOCGIFSPEEDV0:
3084 		case DIOCGETRULENV:
3085 		case DIOCGETETHRULES:
3086 		case DIOCGETETHRULE:
3087 		case DIOCGETETHRULESETS:
3088 		case DIOCGETETHRULESET:
3089 			break;
3090 		case DIOCRCLRTABLES:
3091 		case DIOCRADDTABLES:
3092 		case DIOCRDELTABLES:
3093 		case DIOCRCLRTSTATS:
3094 		case DIOCRCLRADDRS:
3095 		case DIOCRADDADDRS:
3096 		case DIOCRDELADDRS:
3097 		case DIOCRSETADDRS:
3098 		case DIOCRSETTFLAGS:
3099 			if (((struct pfioc_table *)addr)->pfrio_flags &
3100 			    PFR_FLAG_DUMMY) {
3101 				flags |= FWRITE; /* need write lock for dummy */
3102 				break; /* dummy operation ok */
3103 			}
3104 			return (EACCES);
3105 		default:
3106 			return (EACCES);
3107 		}
3108 
3109 	CURVNET_SET(TD_TO_VNET(td));
3110 
3111 	switch (cmd) {
3112 #ifdef COMPAT_FREEBSD14
3113 	case DIOCSTART:
3114 		error = pf_start();
3115 		break;
3116 
3117 	case DIOCSTOP:
3118 		error = pf_stop();
3119 		break;
3120 #endif
3121 
3122 	case DIOCGETETHRULES: {
3123 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3124 		nvlist_t		*nvl;
3125 		void			*packed;
3126 		struct pf_keth_rule	*tail;
3127 		struct pf_keth_ruleset	*rs;
3128 		u_int32_t		 ticket, nr;
3129 		const char		*anchor = "";
3130 
3131 		nvl = NULL;
3132 		packed = NULL;
3133 
3134 #define	ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULES_error, x)
3135 
3136 		if (nv->len > pf_ioctl_maxcount)
3137 			ERROUT(ENOMEM);
3138 
3139 		/* Copy the request in */
3140 		packed = malloc(nv->len, M_NVLIST, M_WAITOK);
3141 		error = copyin(nv->data, packed, nv->len);
3142 		if (error)
3143 			ERROUT(error);
3144 
3145 		nvl = nvlist_unpack(packed, nv->len, 0);
3146 		if (nvl == NULL)
3147 			ERROUT(EBADMSG);
3148 
3149 		if (! nvlist_exists_string(nvl, "anchor"))
3150 			ERROUT(EBADMSG);
3151 
3152 		anchor = nvlist_get_string(nvl, "anchor");
3153 
3154 		rs = pf_find_keth_ruleset(anchor);
3155 
3156 		nvlist_destroy(nvl);
3157 		nvl = NULL;
3158 		free(packed, M_NVLIST);
3159 		packed = NULL;
3160 
3161 		if (rs == NULL)
3162 			ERROUT(ENOENT);
3163 
3164 		/* Reply */
3165 		nvl = nvlist_create(0);
3166 		if (nvl == NULL)
3167 			ERROUT(ENOMEM);
3168 
3169 		PF_RULES_RLOCK();
3170 
3171 		ticket = rs->active.ticket;
3172 		tail = TAILQ_LAST(rs->active.rules, pf_keth_ruleq);
3173 		if (tail)
3174 			nr = tail->nr + 1;
3175 		else
3176 			nr = 0;
3177 
3178 		PF_RULES_RUNLOCK();
3179 
3180 		nvlist_add_number(nvl, "ticket", ticket);
3181 		nvlist_add_number(nvl, "nr", nr);
3182 
3183 		packed = nvlist_pack(nvl, &nv->len);
3184 		if (packed == NULL)
3185 			ERROUT(ENOMEM);
3186 
3187 		if (nv->size == 0)
3188 			ERROUT(0);
3189 		else if (nv->size < nv->len)
3190 			ERROUT(ENOSPC);
3191 
3192 		error = copyout(packed, nv->data, nv->len);
3193 
3194 #undef ERROUT
3195 DIOCGETETHRULES_error:
3196 		free(packed, M_NVLIST);
3197 		nvlist_destroy(nvl);
3198 		break;
3199 	}
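
	/*
	 * All nvlist-based ioctls in this switch share the pfioc_nv
	 * convention used above: the request nvlist is packed into
	 * nv->data/nv->len, nv->size advertises the reply buffer, and
	 * nv->size == 0 just queries the required length. A userland
	 * sketch with libnv (error handling omitted):
	 *
	 *	nvlist_t *nvl = nvlist_create(0);
	 *	nvlist_add_string(nvl, "anchor", "");
	 *	struct pfioc_nv nv;
	 *	nv.data = nvlist_pack(nvl, &nv.len);
	 *	nv.size = nv.len;	(undersized replies fail ENOSPC)
	 *	ioctl(dev, DIOCGETETHRULES, &nv);
	 */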
3200 
3201 	case DIOCGETETHRULE: {
3202 		struct epoch_tracker	 et;
3203 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3204 		nvlist_t		*nvl = NULL;
3205 		void			*nvlpacked = NULL;
3206 		struct pf_keth_rule	*rule = NULL;
3207 		struct pf_keth_ruleset	*rs;
3208 		u_int32_t		 ticket, nr;
3209 		bool			 clear = false;
3210 		const char		*anchor;
3211 
3212 #define ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULE_error, x)
3213 
3214 		if (nv->len > pf_ioctl_maxcount)
3215 			ERROUT(ENOMEM);
3216 
3217 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3218 		error = copyin(nv->data, nvlpacked, nv->len);
3219 		if (error)
3220 			ERROUT(error);
3221 
3222 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3223 		if (nvl == NULL)
3224 			ERROUT(EBADMSG);
3225 		if (! nvlist_exists_number(nvl, "ticket"))
3226 			ERROUT(EBADMSG);
3227 		ticket = nvlist_get_number(nvl, "ticket");
3228 		if (! nvlist_exists_string(nvl, "anchor"))
3229 			ERROUT(EBADMSG);
3230 		anchor = nvlist_get_string(nvl, "anchor");
3231 
3232 		if (nvlist_exists_bool(nvl, "clear"))
3233 			clear = nvlist_get_bool(nvl, "clear");
3234 
3235 		if (clear && !(flags & FWRITE))
3236 			ERROUT(EACCES);
3237 
3238 		if (! nvlist_exists_number(nvl, "nr"))
3239 			ERROUT(EBADMSG);
3240 		nr = nvlist_get_number(nvl, "nr");
3241 
3242 		PF_RULES_RLOCK();
3243 		rs = pf_find_keth_ruleset(anchor);
3244 		if (rs == NULL) {
3245 			PF_RULES_RUNLOCK();
3246 			ERROUT(ENOENT);
3247 		}
3248 		if (ticket != rs->active.ticket) {
3249 			PF_RULES_RUNLOCK();
3250 			ERROUT(EBUSY);
3251 		}
3252 
3253 		nvlist_destroy(nvl);
3254 		nvl = NULL;
3255 		free(nvlpacked, M_NVLIST);
3256 		nvlpacked = NULL;
3257 
3258 		rule = TAILQ_FIRST(rs->active.rules);
3259 		while ((rule != NULL) && (rule->nr != nr))
3260 			rule = TAILQ_NEXT(rule, entries);
3261 		if (rule == NULL) {
3262 			PF_RULES_RUNLOCK();
3263 			ERROUT(ENOENT);
3264 		}
3265 		/* Make sure rule can't go away. */
3266 		NET_EPOCH_ENTER(et);
3267 		PF_RULES_RUNLOCK();
3268 		nvl = pf_keth_rule_to_nveth_rule(rule);
3269 		if (pf_keth_anchor_nvcopyout(rs, rule, nvl)) {
3270 			NET_EPOCH_EXIT(et);
3271 			ERROUT(EBUSY);
3272 		}
3273 		NET_EPOCH_EXIT(et);
3274 		if (nvl == NULL)
3275 			ERROUT(ENOMEM);
3276 
3277 		nvlpacked = nvlist_pack(nvl, &nv->len);
3278 		if (nvlpacked == NULL)
3279 			ERROUT(ENOMEM);
3280 
3281 		if (nv->size == 0)
3282 			ERROUT(0);
3283 		else if (nv->size < nv->len)
3284 			ERROUT(ENOSPC);
3285 
3286 		error = copyout(nvlpacked, nv->data, nv->len);
3287 		if (error == 0 && clear) {
3288 			counter_u64_zero(rule->evaluations);
3289 			for (int i = 0; i < 2; i++) {
3290 				counter_u64_zero(rule->packets[i]);
3291 				counter_u64_zero(rule->bytes[i]);
3292 			}
3293 		}
3294 
3295 #undef ERROUT
3296 DIOCGETETHRULE_error:
3297 		free(nvlpacked, M_NVLIST);
3298 		nvlist_destroy(nvl);
3299 		break;
3300 	}
3301 
3302 	case DIOCADDETHRULE: {
3303 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3304 		nvlist_t		*nvl = NULL;
3305 		void			*nvlpacked = NULL;
3306 		struct pf_keth_rule	*rule = NULL, *tail = NULL;
3307 		struct pf_keth_ruleset	*ruleset = NULL;
3308 		struct pfi_kkif		*kif = NULL, *bridge_to_kif = NULL;
3309 		const char		*anchor = "", *anchor_call = "";
3310 
3311 #define ERROUT(x)	ERROUT_IOCTL(DIOCADDETHRULE_error, x)
3312 
3313 		if (nv->len > pf_ioctl_maxcount)
3314 			ERROUT(ENOMEM);
3315 
3316 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3317 		error = copyin(nv->data, nvlpacked, nv->len);
3318 		if (error)
3319 			ERROUT(error);
3320 
3321 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3322 		if (nvl == NULL)
3323 			ERROUT(EBADMSG);
3324 
3325 		if (! nvlist_exists_number(nvl, "ticket"))
3326 			ERROUT(EBADMSG);
3327 
3328 		if (nvlist_exists_string(nvl, "anchor"))
3329 			anchor = nvlist_get_string(nvl, "anchor");
3330 		if (nvlist_exists_string(nvl, "anchor_call"))
3331 			anchor_call = nvlist_get_string(nvl, "anchor_call");
3332 
3333 		ruleset = pf_find_keth_ruleset(anchor);
3334 		if (ruleset == NULL)
3335 			ERROUT(EINVAL);
3336 
3337 		if (nvlist_get_number(nvl, "ticket") !=
3338 		    ruleset->inactive.ticket) {
3339 			DPFPRINTF(PF_DEBUG_MISC,
3340 			    "ticket: %d != %d",
3341 			    (u_int32_t)nvlist_get_number(nvl, "ticket"),
3342 			    ruleset->inactive.ticket);
3343 			ERROUT(EBUSY);
3344 		}
3345 
3346 		rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK);
3347 		rule->timestamp = NULL;
3348 
3349 		error = pf_nveth_rule_to_keth_rule(nvl, rule);
3350 		if (error != 0)
3351 			ERROUT(error);
3352 
3353 		if (rule->ifname[0])
3354 			kif = pf_kkif_create(M_WAITOK);
3355 		if (rule->bridge_to_name[0])
3356 			bridge_to_kif = pf_kkif_create(M_WAITOK);
3357 		rule->evaluations = counter_u64_alloc(M_WAITOK);
3358 		for (int i = 0; i < 2; i++) {
3359 			rule->packets[i] = counter_u64_alloc(M_WAITOK);
3360 			rule->bytes[i] = counter_u64_alloc(M_WAITOK);
3361 		}
3362 		rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
3363 		    M_WAITOK | M_ZERO);
3364 
3365 		PF_RULES_WLOCK();
3366 
3367 		if (rule->ifname[0]) {
3368 			rule->kif = pfi_kkif_attach(kif, rule->ifname);
3369 			pfi_kkif_ref(rule->kif);
3370 		} else
3371 			rule->kif = NULL;
3372 		if (rule->bridge_to_name[0]) {
3373 			rule->bridge_to = pfi_kkif_attach(bridge_to_kif,
3374 			    rule->bridge_to_name);
3375 			pfi_kkif_ref(rule->bridge_to);
3376 		} else
3377 			rule->bridge_to = NULL;
3378 
3379 #ifdef ALTQ
3380 		/* set queue IDs */
3381 		if (rule->qname[0] != 0) {
3382 			if ((rule->qid = pf_qname2qid(rule->qname, true)) == 0)
3383 				error = EBUSY;
3386 		}
3387 #endif
3388 		if (rule->tagname[0])
3389 			if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
3390 				error = EBUSY;
3391 		if (rule->match_tagname[0])
3392 			if ((rule->match_tag = pf_tagname2tag(
3393 			    rule->match_tagname)) == 0)
3394 				error = EBUSY;
3395 
3396 		if (error == 0 && rule->ipdst.addr.type == PF_ADDR_TABLE)
3397 			error = pf_eth_addr_setup(ruleset, &rule->ipdst.addr);
3398 		if (error == 0 && rule->ipsrc.addr.type == PF_ADDR_TABLE)
3399 			error = pf_eth_addr_setup(ruleset, &rule->ipsrc.addr);
3400 
3401 		if (error) {
3402 			pf_free_eth_rule(rule);
3403 			PF_RULES_WUNLOCK();
3404 			ERROUT(error);
3405 		}
3406 
3407 		if (pf_keth_anchor_setup(rule, ruleset, anchor_call)) {
3408 			pf_free_eth_rule(rule);
3409 			PF_RULES_WUNLOCK();
3410 			ERROUT(EINVAL);
3411 		}
3412 
3413 		tail = TAILQ_LAST(ruleset->inactive.rules, pf_keth_ruleq);
3414 		if (tail)
3415 			rule->nr = tail->nr + 1;
3416 		else
3417 			rule->nr = 0;
3418 
3419 		TAILQ_INSERT_TAIL(ruleset->inactive.rules, rule, entries);
3420 
3421 		PF_RULES_WUNLOCK();
3422 
3423 #undef ERROUT
3424 DIOCADDETHRULE_error:
3425 		nvlist_destroy(nvl);
3426 		free(nvlpacked, M_NVLIST);
3427 		break;
3428 	}
3429 
3430 	case DIOCGETETHRULESETS: {
3431 		struct epoch_tracker	 et;
3432 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3433 		nvlist_t		*nvl = NULL;
3434 		void			*nvlpacked = NULL;
3435 		struct pf_keth_ruleset	*ruleset;
3436 		struct pf_keth_anchor	*anchor;
3437 		int			 nr = 0;
3438 
3439 #define ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULESETS_error, x)
3440 
3441 		if (nv->len > pf_ioctl_maxcount)
3442 			ERROUT(ENOMEM);
3443 
3444 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3445 		error = copyin(nv->data, nvlpacked, nv->len);
3446 		if (error)
3447 			ERROUT(error);
3448 
3449 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3450 		if (nvl == NULL)
3451 			ERROUT(EBADMSG);
3452 		if (! nvlist_exists_string(nvl, "path"))
3453 			ERROUT(EBADMSG);
3454 
3455 		NET_EPOCH_ENTER(et);
3456 
3457 		if ((ruleset = pf_find_keth_ruleset(
3458 		    nvlist_get_string(nvl, "path"))) == NULL) {
3459 			NET_EPOCH_EXIT(et);
3460 			ERROUT(ENOENT);
3461 		}
3462 
3463 		if (ruleset->anchor == NULL) {
3464 			RB_FOREACH(anchor, pf_keth_anchor_global, &V_pf_keth_anchors)
3465 				if (anchor->parent == NULL)
3466 					nr++;
3467 		} else {
3468 			RB_FOREACH(anchor, pf_keth_anchor_node,
3469 			    &ruleset->anchor->children)
3470 				nr++;
3471 		}
3472 
3473 		NET_EPOCH_EXIT(et);
3474 
3475 		nvlist_destroy(nvl);
3476 		nvl = NULL;
3477 		free(nvlpacked, M_NVLIST);
3478 		nvlpacked = NULL;
3479 
3480 		nvl = nvlist_create(0);
3481 		if (nvl == NULL)
3482 			ERROUT(ENOMEM);
3483 
3484 		nvlist_add_number(nvl, "nr", nr);
3485 
3486 		nvlpacked = nvlist_pack(nvl, &nv->len);
3487 		if (nvlpacked == NULL)
3488 			ERROUT(ENOMEM);
3489 
3490 		if (nv->size == 0)
3491 			ERROUT(0);
3492 		else if (nv->size < nv->len)
3493 			ERROUT(ENOSPC);
3494 
3495 		error = copyout(nvlpacked, nv->data, nv->len);
3496 
3497 #undef ERROUT
3498 DIOCGETETHRULESETS_error:
3499 		free(nvlpacked, M_NVLIST);
3500 		nvlist_destroy(nvl);
3501 		break;
3502 	}
3503 
3504 	case DIOCGETETHRULESET: {
3505 		struct epoch_tracker	 et;
3506 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3507 		nvlist_t		*nvl = NULL;
3508 		void			*nvlpacked = NULL;
3509 		struct pf_keth_ruleset	*ruleset;
3510 		struct pf_keth_anchor	*anchor;
3511 		int			 nr = 0, req_nr = 0;
3512 		bool			 found = false;
3513 
3514 #define ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULESET_error, x)
3515 
3516 		if (nv->len > pf_ioctl_maxcount)
3517 			ERROUT(ENOMEM);
3518 
3519 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3520 		error = copyin(nv->data, nvlpacked, nv->len);
3521 		if (error)
3522 			ERROUT(error);
3523 
3524 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3525 		if (nvl == NULL)
3526 			ERROUT(EBADMSG);
3527 		if (! nvlist_exists_string(nvl, "path"))
3528 			ERROUT(EBADMSG);
3529 		if (! nvlist_exists_number(nvl, "nr"))
3530 			ERROUT(EBADMSG);
3531 
3532 		req_nr = nvlist_get_number(nvl, "nr");
3533 
3534 		NET_EPOCH_ENTER(et);
3535 
3536 		if ((ruleset = pf_find_keth_ruleset(
3537 		    nvlist_get_string(nvl, "path"))) == NULL) {
3538 			NET_EPOCH_EXIT(et);
3539 			ERROUT(ENOENT);
3540 		}
3541 
3542 		nvlist_destroy(nvl);
3543 		nvl = NULL;
3544 		free(nvlpacked, M_NVLIST);
3545 		nvlpacked = NULL;
3546 
3547 		nvl = nvlist_create(0);
3548 		if (nvl == NULL) {
3549 			NET_EPOCH_EXIT(et);
3550 			ERROUT(ENOMEM);
3551 		}
3552 
3553 		if (ruleset->anchor == NULL) {
3554 			RB_FOREACH(anchor, pf_keth_anchor_global,
3555 			    &V_pf_keth_anchors) {
3556 				if (anchor->parent == NULL && nr++ == req_nr) {
3557 					found = true;
3558 					break;
3559 				}
3560 			}
3561 		} else {
3562 			RB_FOREACH(anchor, pf_keth_anchor_node,
3563 			     &ruleset->anchor->children) {
3564 				if (nr++ == req_nr) {
3565 					found = true;
3566 					break;
3567 				}
3568 			}
3569 		}
3570 
3571 		NET_EPOCH_EXIT(et);
3572 		if (found) {
3573 			nvlist_add_number(nvl, "nr", nr);
3574 			nvlist_add_string(nvl, "name", anchor->name);
3575 			if (ruleset->anchor)
3576 				nvlist_add_string(nvl, "path",
3577 				    ruleset->anchor->path);
3578 			else
3579 				nvlist_add_string(nvl, "path", "");
3580 		} else {
3581 			ERROUT(EBUSY);
3582 		}
3583 
3584 		nvlpacked = nvlist_pack(nvl, &nv->len);
3585 		if (nvlpacked == NULL)
3586 			ERROUT(ENOMEM);
3587 
3588 		if (nv->size == 0)
3589 			ERROUT(0);
3590 		else if (nv->size < nv->len)
3591 			ERROUT(ENOSPC);
3592 
3593 		error = copyout(nvlpacked, nv->data, nv->len);
3594 
3595 #undef ERROUT
3596 DIOCGETETHRULESET_error:
3597 		free(nvlpacked, M_NVLIST);
3598 		nvlist_destroy(nvl);
3599 		break;
3600 	}
3601 
3602 	case DIOCADDRULENV: {
3603 		struct pfioc_nv	*nv = (struct pfioc_nv *)addr;
3604 		nvlist_t	*nvl = NULL;
3605 		void		*nvlpacked = NULL;
3606 		struct pf_krule	*rule = NULL;
3607 		const char	*anchor = "", *anchor_call = "";
3608 		uint32_t	 ticket = 0, pool_ticket = 0;
3609 
3610 #define	ERROUT(x)	ERROUT_IOCTL(DIOCADDRULENV_error, x)
3611 
3612 		if (nv->len > pf_ioctl_maxcount)
3613 			ERROUT(ENOMEM);
3614 
3615 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3616 		error = copyin(nv->data, nvlpacked, nv->len);
3617 		if (error)
3618 			ERROUT(error);
3619 
3620 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3621 		if (nvl == NULL)
3622 			ERROUT(EBADMSG);
3623 
3624 		if (! nvlist_exists_number(nvl, "ticket"))
3625 			ERROUT(EINVAL);
3626 		ticket = nvlist_get_number(nvl, "ticket");
3627 
3628 		if (! nvlist_exists_number(nvl, "pool_ticket"))
3629 			ERROUT(EINVAL);
3630 		pool_ticket = nvlist_get_number(nvl, "pool_ticket");
3631 
3632 		if (! nvlist_exists_nvlist(nvl, "rule"))
3633 			ERROUT(EINVAL);
3634 
3635 		rule = pf_krule_alloc();
3636 		error = pf_nvrule_to_krule(nvlist_get_nvlist(nvl, "rule"),
3637 		    rule);
3638 		if (error)
3639 			ERROUT(error);
3640 
3641 		if (nvlist_exists_string(nvl, "anchor"))
3642 			anchor = nvlist_get_string(nvl, "anchor");
3643 		if (nvlist_exists_string(nvl, "anchor_call"))
3644 			anchor_call = nvlist_get_string(nvl, "anchor_call");
3645 
3646 		if ((error = nvlist_error(nvl)))
3647 			ERROUT(error);
3648 
3649 		/* Frees rule on error */
3650 		error = pf_ioctl_addrule(rule, ticket, pool_ticket, anchor,
3651 		    anchor_call, td->td_ucred->cr_ruid,
3652 		    td->td_proc ? td->td_proc->p_pid : 0);
3653 
3654 		nvlist_destroy(nvl);
3655 		free(nvlpacked, M_NVLIST);
3656 		break;
3657 #undef ERROUT
3658 DIOCADDRULENV_error:
3659 		pf_krule_free(rule);
3660 		nvlist_destroy(nvl);
3661 		free(nvlpacked, M_NVLIST);
3662 
3663 		break;
3664 	}
3665 	case DIOCADDRULE: {
3666 		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
3667 		struct pf_krule		*rule;
3668 
3669 		rule = pf_krule_alloc();
3670 		error = pf_rule_to_krule(&pr->rule, rule);
3671 		if (error != 0) {
3672 			pf_krule_free(rule);
3673 			goto fail;
3674 		}
3675 
3676 		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
3677 
3678 		/* Frees rule on error */
3679 		error = pf_ioctl_addrule(rule, pr->ticket, pr->pool_ticket,
3680 		    pr->anchor, pr->anchor_call, td->td_ucred->cr_ruid,
3681 		    td->td_proc ? td->td_proc->p_pid : 0);
3682 		break;
3683 	}
3684 
3685 	case DIOCGETRULES: {
3686 		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
3687 
3688 		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
3689 
3690 		error = pf_ioctl_getrules(pr);
3691 
3692 		break;
3693 	}
3694 
3695 	case DIOCGETRULENV: {
3696 		PF_RULES_RLOCK_TRACKER;
3697 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3698 		nvlist_t		*nvrule = NULL;
3699 		nvlist_t		*nvl = NULL;
3700 		struct pf_kruleset	*ruleset;
3701 		struct pf_krule		*rule;
3702 		void			*nvlpacked = NULL;
3703 		int			 rs_num, nr;
3704 		bool			 clear_counter = false;
3705 
3706 #define	ERROUT(x)	ERROUT_IOCTL(DIOCGETRULENV_error, x)
3707 #define	ERROUT_LOCKED(x) do {			\
3708 	if (clear_counter)			\
3709 		PF_RULES_WUNLOCK();		\
3710 	else					\
3711 		PF_RULES_RUNLOCK();		\
3712 	ERROUT(x);				\
3713 } while (0)
3714 
3715 		if (nv->len > pf_ioctl_maxcount)
3716 			ERROUT(ENOMEM);
3717 
3718 		/* Copy the request in */
3719 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3720 		error = copyin(nv->data, nvlpacked, nv->len);
3721 		if (error)
3722 			ERROUT(error);
3723 
3724 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3725 		if (nvl == NULL)
3726 			ERROUT(EBADMSG);
3727 
3728 		if (! nvlist_exists_string(nvl, "anchor"))
3729 			ERROUT(EBADMSG);
3730 		if (! nvlist_exists_number(nvl, "ruleset"))
3731 			ERROUT(EBADMSG);
3732 		if (! nvlist_exists_number(nvl, "ticket"))
3733 			ERROUT(EBADMSG);
3734 		if (! nvlist_exists_number(nvl, "nr"))
3735 			ERROUT(EBADMSG);
3736 
3737 		if (nvlist_exists_bool(nvl, "clear_counter"))
3738 			clear_counter = nvlist_get_bool(nvl, "clear_counter");
3739 
3740 		if (clear_counter && !(flags & FWRITE))
3741 			ERROUT(EACCES);
3742 
3743 		nr = nvlist_get_number(nvl, "nr");
3744 
3745 		if (clear_counter)
3746 			PF_RULES_WLOCK();
3747 		else
3748 			PF_RULES_RLOCK();
3749 		ruleset = pf_find_kruleset(nvlist_get_string(nvl, "anchor"));
3750 		if (ruleset == NULL)
3751 			ERROUT_LOCKED(ENOENT);
3752 
3753 		rs_num = pf_get_ruleset_number(nvlist_get_number(nvl, "ruleset"));
3754 		if (rs_num >= PF_RULESET_MAX)
3755 			ERROUT_LOCKED(EINVAL);
3756 
3757 		if (nvlist_get_number(nvl, "ticket") !=
3758 		    ruleset->rules[rs_num].active.ticket)
3759 			ERROUT_LOCKED(EBUSY);
3760 
3761 		if ((error = nvlist_error(nvl)))
3762 			ERROUT_LOCKED(error);
3763 
3764 		rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
3765 		while ((rule != NULL) && (rule->nr != nr))
3766 			rule = TAILQ_NEXT(rule, entries);
3767 		if (rule == NULL)
3768 			ERROUT_LOCKED(EBUSY);
3769 
3770 		nvrule = pf_krule_to_nvrule(rule);
3771 
3772 		nvlist_destroy(nvl);
3773 		nvl = nvlist_create(0);
3774 		if (nvl == NULL)
3775 			ERROUT_LOCKED(ENOMEM);
3776 		nvlist_add_number(nvl, "nr", nr);
3777 		nvlist_add_nvlist(nvl, "rule", nvrule);
3778 		nvlist_destroy(nvrule);
3779 		nvrule = NULL;
3780 		if (pf_kanchor_nvcopyout(ruleset, rule, nvl))
3781 			ERROUT_LOCKED(EBUSY);
3782 
3783 		free(nvlpacked, M_NVLIST);
3784 		nvlpacked = nvlist_pack(nvl, &nv->len);
3785 		if (nvlpacked == NULL)
3786 			ERROUT_LOCKED(ENOMEM);
3787 
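		/*
		 * A zero nv->size means the caller is only probing for the
		 * required buffer length, which is returned in nv->len;
		 * ENOSPC tells it that the supplied buffer is too small.
		 */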
3788 		if (nv->size == 0)
3789 			ERROUT_LOCKED(0);
3790 		else if (nv->size < nv->len)
3791 			ERROUT_LOCKED(ENOSPC);
3792 
3793 		if (clear_counter) {
3794 			pf_krule_clear_counters(rule);
3795 			PF_RULES_WUNLOCK();
3796 		} else {
3797 			PF_RULES_RUNLOCK();
3798 		}
3799 
3800 		error = copyout(nvlpacked, nv->data, nv->len);
3801 
3802 #undef ERROUT_LOCKED
3803 #undef ERROUT
3804 DIOCGETRULENV_error:
3805 		free(nvlpacked, M_NVLIST);
3806 		nvlist_destroy(nvrule);
3807 		nvlist_destroy(nvl);
3808 
3809 		break;
3810 	}
3811 
3812 	case DIOCCHANGERULE: {
3813 		struct pfioc_rule	*pcr = (struct pfioc_rule *)addr;
3814 		struct pf_kruleset	*ruleset;
3815 		struct pf_krule		*oldrule = NULL, *newrule = NULL;
3816 		struct pfi_kkif		*kif = NULL;
3817 		struct pf_kpooladdr	*pa;
3818 		u_int32_t		 nr = 0;
3819 		int			 rs_num;
3820 
3821 		pcr->anchor[sizeof(pcr->anchor) - 1] = '\0';
3822 
3823 		if (pcr->action < PF_CHANGE_ADD_HEAD ||
3824 		    pcr->action > PF_CHANGE_GET_TICKET) {
3825 			error = EINVAL;
3826 			goto fail;
3827 		}
3828 		if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
3829 			error = EINVAL;
3830 			goto fail;
3831 		}
3832 
3833 		if (pcr->action != PF_CHANGE_REMOVE) {
3834 			newrule = pf_krule_alloc();
3835 			error = pf_rule_to_krule(&pcr->rule, newrule);
3836 			if (error != 0) {
3837 				pf_krule_free(newrule);
3838 				goto fail;
3839 			}
3840 
3841 			if ((error = pf_rule_checkaf(newrule))) {
3842 				pf_krule_free(newrule);
3843 				goto fail;
3844 			}
3845 			if (newrule->ifname[0])
3846 				kif = pf_kkif_create(M_WAITOK);
3847 			pf_counter_u64_init(&newrule->evaluations, M_WAITOK);
3848 			for (int i = 0; i < 2; i++) {
3849 				pf_counter_u64_init(&newrule->packets[i], M_WAITOK);
3850 				pf_counter_u64_init(&newrule->bytes[i], M_WAITOK);
3851 			}
3852 			newrule->states_cur = counter_u64_alloc(M_WAITOK);
3853 			newrule->states_tot = counter_u64_alloc(M_WAITOK);
3854 			for (pf_sn_types_t sn_type=0; sn_type<PF_SN_MAX; sn_type++)
3855 				newrule->src_nodes[sn_type] = counter_u64_alloc(M_WAITOK);
3856 			newrule->cuid = td->td_ucred->cr_ruid;
3857 			newrule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
3858 			TAILQ_INIT(&newrule->nat.list);
3859 			TAILQ_INIT(&newrule->rdr.list);
3860 			TAILQ_INIT(&newrule->route.list);
3861 		}
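		/*
		 * The M_WAITOK allocations above may sleep, so they are done
		 * before the config and rules write locks are taken below.
		 */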
3862 #define	ERROUT(x)	ERROUT_IOCTL(DIOCCHANGERULE_error, x)
3863 
3864 		PF_CONFIG_LOCK();
3865 		PF_RULES_WLOCK();
3866 #ifdef PF_WANT_32_TO_64_COUNTER
3867 		if (newrule != NULL) {
3868 			LIST_INSERT_HEAD(&V_pf_allrulelist, newrule, allrulelist);
3869 			newrule->allrulelinked = true;
3870 			V_pf_allrulecount++;
3871 		}
3872 #endif
3873 
3874 		if (!(pcr->action == PF_CHANGE_REMOVE ||
3875 		    pcr->action == PF_CHANGE_GET_TICKET) &&
3876 		    pcr->pool_ticket != V_ticket_pabuf)
3877 			ERROUT(EBUSY);
3878 
3879 		ruleset = pf_find_kruleset(pcr->anchor);
3880 		if (ruleset == NULL)
3881 			ERROUT(EINVAL);
3882 
3883 		rs_num = pf_get_ruleset_number(pcr->rule.action);
3884 		if (rs_num >= PF_RULESET_MAX)
3885 			ERROUT(EINVAL);
3886 
3887 		/*
3888 		 * XXXMJG: there is no guarantee that the ruleset was
3889 		 * created by the usual route of calling DIOCXBEGIN.
3890 		 * As a result it is possible the rule tree will not
3891 		 * be allocated yet. Hack around it by doing it here.
3892 		 * Note it is fine to let the tree persist in case of
3893 		 * error as it will be freed down the road on future
3894 		 * updates (if need be).
3895 		 */
3896 		if (ruleset->rules[rs_num].active.tree == NULL) {
3897 			ruleset->rules[rs_num].active.tree = pf_rule_tree_alloc(M_NOWAIT);
3898 			if (ruleset->rules[rs_num].active.tree == NULL) {
3899 				ERROUT(ENOMEM);
3900 			}
3901 		}
3902 
3903 		if (pcr->action == PF_CHANGE_GET_TICKET) {
3904 			pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
3905 			ERROUT(0);
3906 		} else if (pcr->ticket !=
3907 			    ruleset->rules[rs_num].active.ticket)
3908 				ERROUT(EINVAL);
3909 
3910 		if (pcr->action != PF_CHANGE_REMOVE) {
3911 			if (newrule->ifname[0]) {
3912 				newrule->kif = pfi_kkif_attach(kif,
3913 				    newrule->ifname);
3914 				kif = NULL;
3915 				pfi_kkif_ref(newrule->kif);
3916 			} else
3917 				newrule->kif = NULL;
3918 
3919 			if (newrule->rtableid > 0 &&
3920 			    newrule->rtableid >= rt_numfibs)
3921 				error = EBUSY;
3922 
3923 #ifdef ALTQ
3924 			/* set queue IDs */
3925 			if (newrule->qname[0] != 0) {
3926 				if ((newrule->qid =
3927 				    pf_qname2qid(newrule->qname, true)) == 0)
3928 					error = EBUSY;
3929 				else if (newrule->pqname[0] != 0) {
3930 					if ((newrule->pqid =
3931 					    pf_qname2qid(newrule->pqname, true)) == 0)
3932 						error = EBUSY;
3933 				} else
3934 					newrule->pqid = newrule->qid;
3935 			}
3936 #endif /* ALTQ */
3937 			if (newrule->tagname[0])
3938 				if ((newrule->tag =
3939 				    pf_tagname2tag(newrule->tagname)) == 0)
3940 					error = EBUSY;
3941 			if (newrule->match_tagname[0])
3942 				if ((newrule->match_tag = pf_tagname2tag(
3943 				    newrule->match_tagname)) == 0)
3944 					error = EBUSY;
3945 			if (newrule->rt && !newrule->direction)
3946 				error = EINVAL;
3947 			if (!newrule->log)
3948 				newrule->logif = 0;
3949 			if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af))
3950 				error = ENOMEM;
3951 			if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af))
3952 				error = ENOMEM;
3953 			if (pf_kanchor_setup(newrule, ruleset, pcr->anchor_call))
3954 				error = EINVAL;
3955 			for (int i = 0; i < 3; i++) {
3956 				TAILQ_FOREACH(pa, &V_pf_pabuf[i], entries)
3957 					if (pa->addr.type == PF_ADDR_TABLE) {
3958 						pa->addr.p.tbl =
3959 						    pfr_attach_table(ruleset,
3960 						    pa->addr.v.tblname);
3961 						if (pa->addr.p.tbl == NULL)
3962 							error = ENOMEM;
3963 					}
3964 			}
3965 
3966 			newrule->overload_tbl = NULL;
3967 			if (newrule->overload_tblname[0]) {
3968 				if ((newrule->overload_tbl = pfr_attach_table(
3969 				    ruleset, newrule->overload_tblname)) ==
3970 				    NULL)
3971 					error = EINVAL;
3972 				else
3973 					newrule->overload_tbl->pfrkt_flags |=
3974 					    PFR_TFLAG_ACTIVE;
3975 			}
3976 
3977 			pf_mv_kpool(&V_pf_pabuf[0], &newrule->nat.list);
3978 			pf_mv_kpool(&V_pf_pabuf[1], &newrule->rdr.list);
3979 			pf_mv_kpool(&V_pf_pabuf[2], &newrule->route.list);
3980 			if (((((newrule->action == PF_NAT) ||
3981 			    (newrule->action == PF_RDR) ||
3982 			    (newrule->action == PF_BINAT) ||
3983 			    (newrule->rt > PF_NOPFROUTE)) &&
3984 			    !newrule->anchor)) &&
3985 			    (TAILQ_FIRST(&newrule->rdr.list) == NULL))
3986 				error = EINVAL;
3987 
3988 			if (error) {
3989 				pf_free_rule(newrule);
3990 				PF_RULES_WUNLOCK();
3991 				PF_CONFIG_UNLOCK();
3992 				goto fail;
3993 			}
3994 
3995 			newrule->nat.cur = TAILQ_FIRST(&newrule->nat.list);
3996 			newrule->rdr.cur = TAILQ_FIRST(&newrule->rdr.list);
3997 		}
3998 		pf_empty_kpool(&V_pf_pabuf[0]);
3999 		pf_empty_kpool(&V_pf_pabuf[1]);
4000 		pf_empty_kpool(&V_pf_pabuf[2]);
4001 
4002 		if (pcr->action == PF_CHANGE_ADD_HEAD)
4003 			oldrule = TAILQ_FIRST(
4004 			    ruleset->rules[rs_num].active.ptr);
4005 		else if (pcr->action == PF_CHANGE_ADD_TAIL)
4006 			oldrule = TAILQ_LAST(
4007 			    ruleset->rules[rs_num].active.ptr, pf_krulequeue);
4008 		else {
4009 			oldrule = TAILQ_FIRST(
4010 			    ruleset->rules[rs_num].active.ptr);
4011 			while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
4012 				oldrule = TAILQ_NEXT(oldrule, entries);
4013 			if (oldrule == NULL) {
4014 				if (newrule != NULL)
4015 					pf_free_rule(newrule);
4016 				PF_RULES_WUNLOCK();
4017 				PF_CONFIG_UNLOCK();
4018 				error = EINVAL;
4019 				goto fail;
4020 			}
4021 		}
4022 
4023 		if (pcr->action == PF_CHANGE_REMOVE) {
4024 			pf_unlink_rule(ruleset->rules[rs_num].active.ptr,
4025 			    oldrule);
4026 			RB_REMOVE(pf_krule_global,
4027 			    ruleset->rules[rs_num].active.tree, oldrule);
4028 			ruleset->rules[rs_num].active.rcount--;
4029 		} else {
4030 			pf_hash_rule(newrule);
4031 			if (RB_INSERT(pf_krule_global,
4032 			    ruleset->rules[rs_num].active.tree, newrule) != NULL) {
4033 				pf_free_rule(newrule);
4034 				PF_RULES_WUNLOCK();
4035 				PF_CONFIG_UNLOCK();
4036 				error = EEXIST;
4037 				goto fail;
4038 			}
4039 
4040 			if (oldrule == NULL)
4041 				TAILQ_INSERT_TAIL(
4042 				    ruleset->rules[rs_num].active.ptr,
4043 				    newrule, entries);
4044 			else if (pcr->action == PF_CHANGE_ADD_HEAD ||
4045 			    pcr->action == PF_CHANGE_ADD_BEFORE)
4046 				TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
4047 			else
4048 				TAILQ_INSERT_AFTER(
4049 				    ruleset->rules[rs_num].active.ptr,
4050 				    oldrule, newrule, entries);
4051 			ruleset->rules[rs_num].active.rcount++;
4052 		}
4053 
4054 		nr = 0;
4055 		TAILQ_FOREACH(oldrule,
4056 		    ruleset->rules[rs_num].active.ptr, entries)
4057 			oldrule->nr = nr++;
4058 
4059 		ruleset->rules[rs_num].active.ticket++;
4060 
4061 		pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
4062 		pf_remove_if_empty_kruleset(ruleset);
4063 
4064 		PF_RULES_WUNLOCK();
4065 		PF_CONFIG_UNLOCK();
4066 		break;
4067 
4068 #undef ERROUT
4069 DIOCCHANGERULE_error:
4070 		PF_RULES_WUNLOCK();
4071 		PF_CONFIG_UNLOCK();
4072 		pf_krule_free(newrule);
4073 		pf_kkif_free(kif);
4074 		break;
4075 	}
4076 
4077 	case DIOCCLRSTATESNV: {
4078 		error = pf_clearstates_nv((struct pfioc_nv *)addr);
4079 		break;
4080 	}
4081 
4082 	case DIOCKILLSTATESNV: {
4083 		error = pf_killstates_nv((struct pfioc_nv *)addr);
4084 		break;
4085 	}
4086 
4087 	case DIOCADDSTATE: {
4088 		struct pfioc_state		*ps = (struct pfioc_state *)addr;
4089 		struct pfsync_state_1301	*sp = &ps->state;
4090 
4091 		if (sp->timeout >= PFTM_MAX) {
4092 			error = EINVAL;
4093 			goto fail;
4094 		}
4095 		if (V_pfsync_state_import_ptr != NULL) {
4096 			PF_RULES_RLOCK();
4097 			error = V_pfsync_state_import_ptr(
4098 			    (union pfsync_state_union *)sp, PFSYNC_SI_IOCTL,
4099 			    PFSYNC_MSG_VERSION_1301);
4100 			PF_RULES_RUNLOCK();
4101 		} else
4102 			error = EOPNOTSUPP;
4103 		break;
4104 	}
4105 
4106 	case DIOCGETSTATE: {
4107 		struct pfioc_state	*ps = (struct pfioc_state *)addr;
4108 		struct pf_kstate	*s;
4109 
4110 		s = pf_find_state_byid(ps->state.id, ps->state.creatorid);
4111 		if (s == NULL) {
4112 			error = ENOENT;
4113 			goto fail;
4114 		}
4115 
4116 		pfsync_state_export_1301(&ps->state, s);
4117 		PF_STATE_UNLOCK(s);
4118 		break;
4119 	}
4120 
4121 	case DIOCGETSTATENV: {
4122 		error = pf_getstate((struct pfioc_nv *)addr);
4123 		break;
4124 	}
4125 
4126 #ifdef COMPAT_FREEBSD14
4127 	case DIOCGETSTATES: {
4128 		struct pfioc_states	*ps = (struct pfioc_states *)addr;
4129 		struct pf_kstate	*s;
4130 		struct pfsync_state_1301	*pstore, *p;
4131 		int			 i, nr;
4132 		size_t			 slice_count = 16, count;
4133 		void			*out;
4134 
4135 		if (ps->ps_len <= 0) {
4136 			nr = uma_zone_get_cur(V_pf_state_z);
4137 			ps->ps_len = sizeof(struct pfsync_state_1301) * nr;
4138 			break;
4139 		}
4140 
4141 		out = ps->ps_states;
4142 		pstore = mallocarray(slice_count,
4143 		    sizeof(struct pfsync_state_1301), M_PF, M_WAITOK | M_ZERO);
4144 		nr = 0;
4145 
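		/*
		 * Walk every state hash row: count the live states under the
		 * row lock, grow the temporary slice buffer and retry if the
		 * row holds more states than currently fit, then export the
		 * row and copy it out before moving on to the next one.
		 */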
4146 		for (i = 0; i <= V_pf_hashmask; i++) {
4147 			struct pf_idhash *ih = &V_pf_idhash[i];
4148 
4149 DIOCGETSTATES_retry:
4150 			p = pstore;
4151 
4152 			if (LIST_EMPTY(&ih->states))
4153 				continue;
4154 
4155 			PF_HASHROW_LOCK(ih);
4156 			count = 0;
4157 			LIST_FOREACH(s, &ih->states, entry) {
4158 				if (s->timeout == PFTM_UNLINKED)
4159 					continue;
4160 				count++;
4161 			}
4162 
4163 			if (count > slice_count) {
4164 				PF_HASHROW_UNLOCK(ih);
4165 				free(pstore, M_PF);
4166 				slice_count = count * 2;
4167 				pstore = mallocarray(slice_count,
4168 				    sizeof(struct pfsync_state_1301), M_PF,
4169 				    M_WAITOK | M_ZERO);
4170 				goto DIOCGETSTATES_retry;
4171 			}
4172 
4173 			if ((nr+count) * sizeof(*p) > ps->ps_len) {
4174 				PF_HASHROW_UNLOCK(ih);
4175 				goto DIOCGETSTATES_full;
4176 			}
4177 
4178 			LIST_FOREACH(s, &ih->states, entry) {
4179 				if (s->timeout == PFTM_UNLINKED)
4180 					continue;
4181 
4182 				pfsync_state_export_1301(p, s);
4183 				p++;
4184 				nr++;
4185 			}
4186 			PF_HASHROW_UNLOCK(ih);
4187 			error = copyout(pstore, out,
4188 			    sizeof(struct pfsync_state_1301) * count);
4189 			if (error) {
4190 				free(pstore, M_PF);
4191 				goto fail;
4192 			}
4193 			out = ps->ps_states + nr;
4194 		}
4195 DIOCGETSTATES_full:
4196 		ps->ps_len = sizeof(struct pfsync_state_1301) * nr;
4197 		free(pstore, M_PF);
4198 
4199 		break;
4200 	}
4201 
4202 	case DIOCGETSTATESV2: {
4203 		struct pfioc_states_v2	*ps = (struct pfioc_states_v2 *)addr;
4204 		struct pf_kstate	*s;
4205 		struct pf_state_export	*pstore, *p;
4206 		int i, nr;
4207 		size_t slice_count = 16, count;
4208 		void *out;
4209 
4210 		if (ps->ps_req_version > PF_STATE_VERSION) {
4211 			error = ENOTSUP;
4212 			goto fail;
4213 		}
4214 
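		/*
		 * A non-positive ps_len is a size probe: report how much
		 * space the current state table needs and return.  A typical
		 * caller (userland sketch only, not part of this file) does
		 * roughly:
		 *
		 *	struct pfioc_states_v2 ps = { 0 };
		 *	ps.ps_req_version = PF_STATE_VERSION;
		 *	ioctl(dev, DIOCGETSTATESV2, &ps);   // learn ps_len
		 *	ps.ps_states = malloc(ps.ps_len);
		 *	ioctl(dev, DIOCGETSTATESV2, &ps);   // fetch the states
		 */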
4215 		if (ps->ps_len <= 0) {
4216 			nr = uma_zone_get_cur(V_pf_state_z);
4217 			ps->ps_len = sizeof(struct pf_state_export) * nr;
4218 			break;
4219 		}
4220 
4221 		out = ps->ps_states;
4222 		pstore = mallocarray(slice_count,
4223 		    sizeof(struct pf_state_export), M_PF, M_WAITOK | M_ZERO);
4224 		nr = 0;
4225 
4226 		for (i = 0; i <= V_pf_hashmask; i++) {
4227 			struct pf_idhash *ih = &V_pf_idhash[i];
4228 
4229 DIOCGETSTATESV2_retry:
4230 			p = pstore;
4231 
4232 			if (LIST_EMPTY(&ih->states))
4233 				continue;
4234 
4235 			PF_HASHROW_LOCK(ih);
4236 			count = 0;
4237 			LIST_FOREACH(s, &ih->states, entry) {
4238 				if (s->timeout == PFTM_UNLINKED)
4239 					continue;
4240 				count++;
4241 			}
4242 
4243 			if (count > slice_count) {
4244 				PF_HASHROW_UNLOCK(ih);
4245 				free(pstore, M_PF);
4246 				slice_count = count * 2;
4247 				pstore = mallocarray(slice_count,
4248 				    sizeof(struct pf_state_export), M_PF,
4249 				    M_WAITOK | M_ZERO);
4250 				goto DIOCGETSTATESV2_retry;
4251 			}
4252 
4253 			if ((nr+count) * sizeof(*p) > ps->ps_len) {
4254 				PF_HASHROW_UNLOCK(ih);
4255 				goto DIOCGETSTATESV2_full;
4256 			}
4257 
4258 			LIST_FOREACH(s, &ih->states, entry) {
4259 				if (s->timeout == PFTM_UNLINKED)
4260 					continue;
4261 
4262 				pf_state_export(p, s);
4263 				p++;
4264 				nr++;
4265 			}
4266 			PF_HASHROW_UNLOCK(ih);
4267 			error = copyout(pstore, out,
4268 			    sizeof(struct pf_state_export) * count);
4269 			if (error) {
4270 				free(pstore, M_PF);
4271 				goto fail;
4272 			}
4273 			out = ps->ps_states + nr;
4274 		}
4275 DIOCGETSTATESV2_full:
4276 		ps->ps_len = nr * sizeof(struct pf_state_export);
4277 		free(pstore, M_PF);
4278 
4279 		break;
4280 	}
4281 #endif
4282 	case DIOCGETSTATUSNV: {
4283 		error = pf_getstatus((struct pfioc_nv *)addr);
4284 		break;
4285 	}
4286 
4287 	case DIOCSETSTATUSIF: {
4288 		struct pfioc_if	*pi = (struct pfioc_if *)addr;
4289 
4290 		if (pi->ifname[0] == 0) {
4291 			bzero(V_pf_status.ifname, IFNAMSIZ);
4292 			break;
4293 		}
4294 		PF_RULES_WLOCK();
4295 		error = pf_user_strcpy(V_pf_status.ifname, pi->ifname, IFNAMSIZ);
4296 		PF_RULES_WUNLOCK();
4297 		break;
4298 	}
4299 
4300 	case DIOCCLRSTATUS: {
4301 		pf_ioctl_clear_status();
4302 		break;
4303 	}
4304 
4305 	case DIOCNATLOOK: {
4306 		struct pfioc_natlook	*pnl = (struct pfioc_natlook *)addr;
4307 
4308 		error = pf_ioctl_natlook(pnl);
4309 		break;
4310 	}
4311 
4312 	case DIOCSETTIMEOUT: {
4313 		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;
4314 
4315 		error = pf_ioctl_set_timeout(pt->timeout, pt->seconds,
4316 		    &pt->seconds);
4317 		break;
4318 	}
4319 
4320 	case DIOCGETTIMEOUT: {
4321 		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;
4322 
4323 		error = pf_ioctl_get_timeout(pt->timeout, &pt->seconds);
4324 		break;
4325 	}
4326 
4327 	case DIOCGETLIMIT: {
4328 		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;
4329 
4330 		error = pf_ioctl_get_limit(pl->index, &pl->limit);
4331 		break;
4332 	}
4333 
4334 	case DIOCSETLIMIT: {
4335 		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;
4336 		unsigned int old_limit;
4337 
4338 		error = pf_ioctl_set_limit(pl->index, pl->limit, &old_limit);
4339 		pl->limit = old_limit;
4340 		break;
4341 	}
4342 
4343 	case DIOCSETDEBUG: {
4344 		u_int32_t	*level = (u_int32_t *)addr;
4345 
4346 		PF_RULES_WLOCK();
4347 		V_pf_status.debug = *level;
4348 		PF_RULES_WUNLOCK();
4349 		break;
4350 	}
4351 
4352 	case DIOCCLRRULECTRS: {
4353 		/* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
4354 		struct pf_kruleset	*ruleset = &pf_main_ruleset;
4355 		struct pf_krule		*rule;
4356 
4357 		PF_RULES_WLOCK();
4358 		TAILQ_FOREACH(rule,
4359 		    ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
4360 			pf_counter_u64_zero(&rule->evaluations);
4361 			for (int i = 0; i < 2; i++) {
4362 				pf_counter_u64_zero(&rule->packets[i]);
4363 				pf_counter_u64_zero(&rule->bytes[i]);
4364 			}
4365 		}
4366 		PF_RULES_WUNLOCK();
4367 		break;
4368 	}
4369 
4370 	case DIOCGIFSPEEDV0:
4371 	case DIOCGIFSPEEDV1: {
4372 		struct pf_ifspeed_v1	*psp = (struct pf_ifspeed_v1 *)addr;
4373 		struct pf_ifspeed_v1	ps;
4374 		struct ifnet		*ifp;
4375 
4376 		if (psp->ifname[0] == '\0') {
4377 			error = EINVAL;
4378 			goto fail;
4379 		}
4380 
4381 		error = pf_user_strcpy(ps.ifname, psp->ifname, IFNAMSIZ);
4382 		if (error != 0)
4383 			goto fail;
4384 		ifp = ifunit(ps.ifname);
4385 		if (ifp != NULL) {
4386 			psp->baudrate32 =
4387 			    (u_int32_t)uqmin(ifp->if_baudrate, UINT_MAX);
4388 			if (cmd == DIOCGIFSPEEDV1)
4389 				psp->baudrate = ifp->if_baudrate;
4390 		} else {
4391 			error = EINVAL;
4392 		}
4393 		break;
4394 	}
4395 
4396 #ifdef ALTQ
4397 	case DIOCSTARTALTQ: {
4398 		struct pf_altq		*altq;
4399 
4400 		PF_RULES_WLOCK();
4401 		/* enable all altq interfaces on active list */
4402 		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
4403 			if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
4404 				error = pf_enable_altq(altq);
4405 				if (error != 0)
4406 					break;
4407 			}
4408 		}
4409 		if (error == 0)
4410 			V_pf_altq_running = 1;
4411 		PF_RULES_WUNLOCK();
4412 		DPFPRINTF(PF_DEBUG_MISC, "altq: started");
4413 		break;
4414 	}
4415 
4416 	case DIOCSTOPALTQ: {
4417 		struct pf_altq		*altq;
4418 
4419 		PF_RULES_WLOCK();
4420 		/* disable all altq interfaces on active list */
4421 		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
4422 			if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
4423 				error = pf_disable_altq(altq);
4424 				if (error != 0)
4425 					break;
4426 			}
4427 		}
4428 		if (error == 0)
4429 			V_pf_altq_running = 0;
4430 		PF_RULES_WUNLOCK();
4431 		DPFPRINTF(PF_DEBUG_MISC, "altq: stopped");
4432 		break;
4433 	}
4434 
4435 	case DIOCADDALTQV0:
4436 	case DIOCADDALTQV1: {
4437 		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
4438 		struct pf_altq		*altq, *a;
4439 		struct ifnet		*ifp;
4440 
4441 		altq = malloc(sizeof(*altq), M_PFALTQ, M_WAITOK | M_ZERO);
4442 		error = pf_import_kaltq(pa, altq, IOCPARM_LEN(cmd));
4443 		if (error)
4444 			goto fail;
4445 		altq->local_flags = 0;
4446 
4447 		PF_RULES_WLOCK();
4448 		if (pa->ticket != V_ticket_altqs_inactive) {
4449 			PF_RULES_WUNLOCK();
4450 			free(altq, M_PFALTQ);
4451 			error = EBUSY;
4452 			goto fail;
4453 		}
4454 
4455 		/*
4456 		 * if this is for a queue, find the discipline and
4457 		 * copy the necessary fields
4458 		 */
4459 		if (altq->qname[0] != 0) {
4460 			if ((altq->qid = pf_qname2qid(altq->qname, true)) == 0) {
4461 				PF_RULES_WUNLOCK();
4462 				error = EBUSY;
4463 				free(altq, M_PFALTQ);
4464 				goto fail;
4465 			}
4466 			altq->altq_disc = NULL;
4467 			TAILQ_FOREACH(a, V_pf_altq_ifs_inactive, entries) {
4468 				if (strncmp(a->ifname, altq->ifname,
4469 				    IFNAMSIZ) == 0) {
4470 					altq->altq_disc = a->altq_disc;
4471 					break;
4472 				}
4473 			}
4474 		}
4475 
4476 		if ((ifp = ifunit(altq->ifname)) == NULL)
4477 			altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
4478 		else
4479 			error = altq_add(ifp, altq);
4480 
4481 		if (error) {
4482 			PF_RULES_WUNLOCK();
4483 			free(altq, M_PFALTQ);
4484 			goto fail;
4485 		}
4486 
4487 		if (altq->qname[0] != 0)
4488 			TAILQ_INSERT_TAIL(V_pf_altqs_inactive, altq, entries);
4489 		else
4490 			TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, altq, entries);
4491 		/* version error check done on import above */
4492 		pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
4493 		PF_RULES_WUNLOCK();
4494 		break;
4495 	}
4496 
4497 	case DIOCGETALTQSV0:
4498 	case DIOCGETALTQSV1: {
4499 		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
4500 		struct pf_altq		*altq;
4501 
4502 		PF_RULES_RLOCK();
4503 		pa->nr = 0;
4504 		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries)
4505 			pa->nr++;
4506 		TAILQ_FOREACH(altq, V_pf_altqs_active, entries)
4507 			pa->nr++;
4508 		pa->ticket = V_ticket_altqs_active;
4509 		PF_RULES_RUNLOCK();
4510 		break;
4511 	}
4512 
4513 	case DIOCGETALTQV0:
4514 	case DIOCGETALTQV1: {
4515 		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
4516 		struct pf_altq		*altq;
4517 
4518 		PF_RULES_RLOCK();
4519 		if (pa->ticket != V_ticket_altqs_active) {
4520 			PF_RULES_RUNLOCK();
4521 			error = EBUSY;
4522 			goto fail;
4523 		}
4524 		altq = pf_altq_get_nth_active(pa->nr);
4525 		if (altq == NULL) {
4526 			PF_RULES_RUNLOCK();
4527 			error = EBUSY;
4528 			goto fail;
4529 		}
4530 		pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
4531 		PF_RULES_RUNLOCK();
4532 		break;
4533 	}
4534 
4535 	case DIOCCHANGEALTQV0:
4536 	case DIOCCHANGEALTQV1:
4537 		/* CHANGEALTQ not supported yet! */
4538 		error = ENODEV;
4539 		break;
4540 
4541 	case DIOCGETQSTATSV0:
4542 	case DIOCGETQSTATSV1: {
4543 		struct pfioc_qstats_v1	*pq = (struct pfioc_qstats_v1 *)addr;
4544 		struct pf_altq		*altq;
4545 		int			 nbytes;
4546 		u_int32_t		 version;
4547 
4548 		PF_RULES_RLOCK();
4549 		if (pq->ticket != V_ticket_altqs_active) {
4550 			PF_RULES_RUNLOCK();
4551 			error = EBUSY;
4552 			goto fail;
4553 		}
4554 		nbytes = pq->nbytes;
4555 		altq = pf_altq_get_nth_active(pq->nr);
4556 		if (altq == NULL) {
4557 			PF_RULES_RUNLOCK();
4558 			error = EBUSY;
4559 			goto fail;
4560 		}
4561 
4562 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) != 0) {
4563 			PF_RULES_RUNLOCK();
4564 			error = ENXIO;
4565 			goto fail;
4566 		}
4567 		PF_RULES_RUNLOCK();
4568 		if (cmd == DIOCGETQSTATSV0)
4569 			version = 0;  /* DIOCGETQSTATSV0 means stats struct v0 */
4570 		else
4571 			version = pq->version;
4572 		error = altq_getqstats(altq, pq->buf, &nbytes, version);
4573 		if (error == 0) {
4574 			pq->scheduler = altq->scheduler;
4575 			pq->nbytes = nbytes;
4576 		}
4577 		break;
4578 	}
4579 #endif /* ALTQ */
4580 
4581 	case DIOCBEGINADDRS: {
4582 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4583 
4584 		error = pf_ioctl_begin_addrs(&pp->ticket);
4585 		break;
4586 	}
4587 
4588 	case DIOCADDADDR: {
4589 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4590 		struct pf_nl_pooladdr npp = {};
4591 
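		/*
		 * The legacy pooladdr ioctls are funnelled through the newer
		 * pf_nl_pooladdr path; its extra 'which' member selects the
		 * pool and is set to PF_RDR for these compat requests.
		 */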
4592 		npp.which = PF_RDR;
4593 		memcpy(&npp, pp, sizeof(*pp));
4594 		error = pf_ioctl_add_addr(&npp);
4595 		break;
4596 	}
4597 
4598 	case DIOCGETADDRS: {
4599 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4600 		struct pf_nl_pooladdr npp = {};
4601 
4602 		npp.which = PF_RDR;
4603 		memcpy(&npp, pp, sizeof(*pp));
4604 		error = pf_ioctl_get_addrs(&npp);
4605 		memcpy(pp, &npp, sizeof(*pp));
4606 
4607 		break;
4608 	}
4609 
4610 	case DIOCGETADDR: {
4611 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4612 		struct pf_nl_pooladdr npp = {};
4613 
4614 		npp.which = PF_RDR;
4615 		memcpy(&npp, pp, sizeof(*pp));
4616 		error = pf_ioctl_get_addr(&npp);
4617 		memcpy(pp, &npp, sizeof(*pp));
4618 
4619 		break;
4620 	}
4621 
4622 	case DIOCCHANGEADDR: {
4623 		struct pfioc_pooladdr	*pca = (struct pfioc_pooladdr *)addr;
4624 		struct pf_kpool		*pool;
4625 		struct pf_kpooladdr	*oldpa = NULL, *newpa = NULL;
4626 		struct pf_kruleset	*ruleset;
4627 		struct pfi_kkif		*kif = NULL;
4628 
4629 		pca->anchor[sizeof(pca->anchor) - 1] = '\0';
4630 
4631 		if (pca->action < PF_CHANGE_ADD_HEAD ||
4632 		    pca->action > PF_CHANGE_REMOVE) {
4633 			error = EINVAL;
4634 			goto fail;
4635 		}
4636 		if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
4637 		    pca->addr.addr.type != PF_ADDR_DYNIFTL &&
4638 		    pca->addr.addr.type != PF_ADDR_TABLE) {
4639 			error = EINVAL;
4640 			goto fail;
4641 		}
4642 		if (pca->addr.addr.p.dyn != NULL) {
4643 			error = EINVAL;
4644 			goto fail;
4645 		}
4646 
4647 		if (pca->action != PF_CHANGE_REMOVE) {
4648 #ifndef INET
4649 			if (pca->af == AF_INET) {
4650 				error = EAFNOSUPPORT;
4651 				goto fail;
4652 			}
4653 #endif /* INET */
4654 #ifndef INET6
4655 			if (pca->af == AF_INET6) {
4656 				error = EAFNOSUPPORT;
4657 				goto fail;
4658 			}
4659 #endif /* INET6 */
4660 			newpa = malloc(sizeof(*newpa), M_PFRULE, M_WAITOK);
4661 			bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
4662 			if (newpa->ifname[0])
4663 				kif = pf_kkif_create(M_WAITOK);
4664 			newpa->kif = NULL;
4665 		}
4666 #define	ERROUT(x)	ERROUT_IOCTL(DIOCCHANGEADDR_error, x)
4667 		PF_RULES_WLOCK();
4668 		ruleset = pf_find_kruleset(pca->anchor);
4669 		if (ruleset == NULL)
4670 			ERROUT(EBUSY);
4671 
4672 		pool = pf_get_kpool(pca->anchor, pca->ticket, pca->r_action,
4673 		    pca->r_num, pca->r_last, 1, 1, PF_RDR);
4674 		if (pool == NULL)
4675 			ERROUT(EBUSY);
4676 
4677 		if (pca->action != PF_CHANGE_REMOVE) {
4678 			if (newpa->ifname[0]) {
4679 				newpa->kif = pfi_kkif_attach(kif, newpa->ifname);
4680 				pfi_kkif_ref(newpa->kif);
4681 				kif = NULL;
4682 			}
4683 
4684 			switch (newpa->addr.type) {
4685 			case PF_ADDR_DYNIFTL:
4686 				error = pfi_dynaddr_setup(&newpa->addr,
4687 				    pca->af);
4688 				break;
4689 			case PF_ADDR_TABLE:
4690 				newpa->addr.p.tbl = pfr_attach_table(ruleset,
4691 				    newpa->addr.v.tblname);
4692 				if (newpa->addr.p.tbl == NULL)
4693 					error = ENOMEM;
4694 				break;
4695 			}
4696 			if (error)
4697 				goto DIOCCHANGEADDR_error;
4698 		}
4699 
4700 		switch (pca->action) {
4701 		case PF_CHANGE_ADD_HEAD:
4702 			oldpa = TAILQ_FIRST(&pool->list);
4703 			break;
4704 		case PF_CHANGE_ADD_TAIL:
4705 			oldpa = TAILQ_LAST(&pool->list, pf_kpalist);
4706 			break;
4707 		default:
4708 			oldpa = TAILQ_FIRST(&pool->list);
4709 			for (int i = 0; oldpa && i < pca->nr; i++)
4710 				oldpa = TAILQ_NEXT(oldpa, entries);
4711 
4712 			if (oldpa == NULL)
4713 				ERROUT(EINVAL);
4714 		}
4715 
4716 		if (pca->action == PF_CHANGE_REMOVE) {
4717 			TAILQ_REMOVE(&pool->list, oldpa, entries);
4718 			switch (oldpa->addr.type) {
4719 			case PF_ADDR_DYNIFTL:
4720 				pfi_dynaddr_remove(oldpa->addr.p.dyn);
4721 				break;
4722 			case PF_ADDR_TABLE:
4723 				pfr_detach_table(oldpa->addr.p.tbl);
4724 				break;
4725 			}
4726 			if (oldpa->kif)
4727 				pfi_kkif_unref(oldpa->kif);
4728 			free(oldpa, M_PFRULE);
4729 		} else {
4730 			if (oldpa == NULL)
4731 				TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
4732 			else if (pca->action == PF_CHANGE_ADD_HEAD ||
4733 			    pca->action == PF_CHANGE_ADD_BEFORE)
4734 				TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
4735 			else
4736 				TAILQ_INSERT_AFTER(&pool->list, oldpa,
4737 				    newpa, entries);
4738 		}
4739 
4740 		pool->cur = TAILQ_FIRST(&pool->list);
4741 		pf_addrcpy(&pool->counter, &pool->cur->addr.v.a.addr, pca->af);
4742 		PF_RULES_WUNLOCK();
4743 		break;
4744 
4745 #undef ERROUT
4746 DIOCCHANGEADDR_error:
4747 		if (newpa != NULL) {
4748 			if (newpa->kif)
4749 				pfi_kkif_unref(newpa->kif);
4750 			free(newpa, M_PFRULE);
4751 		}
4752 		PF_RULES_WUNLOCK();
4753 		pf_kkif_free(kif);
4754 		break;
4755 	}
4756 
4757 	case DIOCGETRULESETS: {
4758 		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
4759 
4760 		pr->path[sizeof(pr->path) - 1] = '\0';
4761 
4762 		error = pf_ioctl_get_rulesets(pr);
4763 		break;
4764 	}
4765 
4766 	case DIOCGETRULESET: {
4767 		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
4768 
4769 		pr->path[sizeof(pr->path) - 1] = '\0';
4770 
4771 		error = pf_ioctl_get_ruleset(pr);
4772 		break;
4773 	}
4774 
4775 	case DIOCRCLRTABLES: {
4776 		struct pfioc_table *io = (struct pfioc_table *)addr;
4777 
4778 		if (io->pfrio_esize != 0) {
4779 			error = ENODEV;
4780 			goto fail;
4781 		}
4782 		if (strnlen(io->pfrio_table.pfrt_anchor, MAXPATHLEN)
4783 		    == MAXPATHLEN) {
4784 			error = EINVAL;
4785 			goto fail;
4786 		}
4787 		if (strnlen(io->pfrio_table.pfrt_name, PF_TABLE_NAME_SIZE)
4788 		    == PF_TABLE_NAME_SIZE) {
4789 			error = EINVAL;
4790 			goto fail;
4791 		}
4792 
4793 		PF_RULES_WLOCK();
4794 		error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
4795 		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
4796 		PF_RULES_WUNLOCK();
4797 		break;
4798 	}
4799 
4800 	case DIOCRADDTABLES: {
4801 		struct pfioc_table *io = (struct pfioc_table *)addr;
4802 		struct pfr_table *pfrts;
4803 		size_t totlen;
4804 
4805 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4806 			error = ENODEV;
4807 			goto fail;
4808 		}
4809 
4810 		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4811 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4812 			error = ENOMEM;
4813 			goto fail;
4814 		}
4815 
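		/*
		 * pfrio_size has been bounded by pf_ioctl_maxcount and checked
		 * for multiplication overflow above, so an attacker-supplied
		 * size cannot produce an oversized copyin buffer here.
		 */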
4816 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4817 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4818 		    M_PF, M_WAITOK);
4819 		error = copyin(io->pfrio_buffer, pfrts, totlen);
4820 		if (error) {
4821 			free(pfrts, M_PF);
4822 			goto fail;
4823 		}
4824 		PF_RULES_WLOCK();
4825 		error = pfr_add_tables(pfrts, io->pfrio_size,
4826 		    &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4827 		PF_RULES_WUNLOCK();
4828 		free(pfrts, M_PF);
4829 		break;
4830 	}
4831 
4832 	case DIOCRDELTABLES: {
4833 		struct pfioc_table *io = (struct pfioc_table *)addr;
4834 		struct pfr_table *pfrts;
4835 		size_t totlen;
4836 
4837 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4838 			error = ENODEV;
4839 			goto fail;
4840 		}
4841 
4842 		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4843 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4844 			error = ENOMEM;
4845 			goto fail;
4846 		}
4847 
4848 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4849 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4850 		    M_PF, M_WAITOK);
4851 		error = copyin(io->pfrio_buffer, pfrts, totlen);
4852 		if (error) {
4853 			free(pfrts, M_PF);
4854 			goto fail;
4855 		}
4856 		PF_RULES_WLOCK();
4857 		error = pfr_del_tables(pfrts, io->pfrio_size,
4858 		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4859 		PF_RULES_WUNLOCK();
4860 		free(pfrts, M_PF);
4861 		break;
4862 	}
4863 
4864 	case DIOCRGETTABLES: {
4865 		struct pfioc_table *io = (struct pfioc_table *)addr;
4866 		struct pfr_table *pfrts;
4867 		size_t totlen;
4868 		int n;
4869 
4870 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4871 			error = ENODEV;
4872 			goto fail;
4873 		}
4874 		PF_RULES_RLOCK();
4875 		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4876 		if (n < 0) {
4877 			PF_RULES_RUNLOCK();
4878 			error = EINVAL;
4879 			goto fail;
4880 		}
4881 		io->pfrio_size = min(io->pfrio_size, n);
4882 
4883 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4884 
4885 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4886 		    M_PF, M_NOWAIT | M_ZERO);
4887 		if (pfrts == NULL) {
4888 			error = ENOMEM;
4889 			PF_RULES_RUNLOCK();
4890 			goto fail;
4891 		}
4892 		error = pfr_get_tables(&io->pfrio_table, pfrts,
4893 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4894 		PF_RULES_RUNLOCK();
4895 		if (error == 0)
4896 			error = copyout(pfrts, io->pfrio_buffer, totlen);
4897 		free(pfrts, M_PF);
4898 		break;
4899 	}
4900 
4901 	case DIOCRGETTSTATS: {
4902 		struct pfioc_table *io = (struct pfioc_table *)addr;
4903 		struct pfr_tstats *pfrtstats;
4904 		size_t totlen;
4905 		int n;
4906 
4907 		if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
4908 			error = ENODEV;
4909 			goto fail;
4910 		}
4911 		PF_TABLE_STATS_LOCK();
4912 		PF_RULES_RLOCK();
4913 		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4914 		if (n < 0) {
4915 			PF_RULES_RUNLOCK();
4916 			PF_TABLE_STATS_UNLOCK();
4917 			error = EINVAL;
4918 			goto fail;
4919 		}
4920 		io->pfrio_size = min(io->pfrio_size, n);
4921 
4922 		totlen = io->pfrio_size * sizeof(struct pfr_tstats);
4923 		pfrtstats = mallocarray(io->pfrio_size,
4924 		    sizeof(struct pfr_tstats), M_PF, M_NOWAIT | M_ZERO);
4925 		if (pfrtstats == NULL) {
4926 			error = ENOMEM;
4927 			PF_RULES_RUNLOCK();
4928 			PF_TABLE_STATS_UNLOCK();
4929 			goto fail;
4930 		}
4931 		error = pfr_get_tstats(&io->pfrio_table, pfrtstats,
4932 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4933 		PF_RULES_RUNLOCK();
4934 		PF_TABLE_STATS_UNLOCK();
4935 		if (error == 0)
4936 			error = copyout(pfrtstats, io->pfrio_buffer, totlen);
4937 		free(pfrtstats, M_PF);
4938 		break;
4939 	}
4940 
4941 	case DIOCRCLRTSTATS: {
4942 		struct pfioc_table *io = (struct pfioc_table *)addr;
4943 		struct pfr_table *pfrts;
4944 		size_t totlen;
4945 
4946 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4947 			error = ENODEV;
4948 			goto fail;
4949 		}
4950 
4951 		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4952 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4953 			/* We used to count tables and use the minimum required
4954 			 * size, so we didn't fail on overly large requests.
4955 			 * Keep doing so. */
4956 			io->pfrio_size = pf_ioctl_maxcount;
4957 			goto fail;
4958 		}
4959 
4960 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4961 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4962 		    M_PF, M_WAITOK);
4963 		error = copyin(io->pfrio_buffer, pfrts, totlen);
4964 		if (error) {
4965 			free(pfrts, M_PF);
4966 			goto fail;
4967 		}
4968 
4969 		PF_TABLE_STATS_LOCK();
4970 		PF_RULES_RLOCK();
4971 		error = pfr_clr_tstats(pfrts, io->pfrio_size,
4972 		    &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4973 		PF_RULES_RUNLOCK();
4974 		PF_TABLE_STATS_UNLOCK();
4975 		free(pfrts, M_PF);
4976 		break;
4977 	}
4978 
4979 	case DIOCRSETTFLAGS: {
4980 		struct pfioc_table *io = (struct pfioc_table *)addr;
4981 		struct pfr_table *pfrts;
4982 		size_t totlen;
4983 		int n;
4984 
4985 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4986 			error = ENODEV;
4987 			goto fail;
4988 		}
4989 
4990 		PF_RULES_RLOCK();
4991 		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4992 		if (n < 0) {
4993 			PF_RULES_RUNLOCK();
4994 			error = EINVAL;
4995 			goto fail;
4996 		}
4997 
4998 		io->pfrio_size = min(io->pfrio_size, n);
4999 		PF_RULES_RUNLOCK();
5000 
5001 		totlen = io->pfrio_size * sizeof(struct pfr_table);
5002 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
5003 		    M_PF, M_WAITOK);
5004 		error = copyin(io->pfrio_buffer, pfrts, totlen);
5005 		if (error) {
5006 			free(pfrts, M_PF);
5007 			goto fail;
5008 		}
5009 		PF_RULES_WLOCK();
5010 		error = pfr_set_tflags(pfrts, io->pfrio_size,
5011 		    io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
5012 		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
5013 		PF_RULES_WUNLOCK();
5014 		free(pfrts, M_PF);
5015 		break;
5016 	}
5017 
5018 	case DIOCRCLRADDRS: {
5019 		struct pfioc_table *io = (struct pfioc_table *)addr;
5020 
5021 		if (io->pfrio_esize != 0) {
5022 			error = ENODEV;
5023 			goto fail;
5024 		}
5025 		PF_RULES_WLOCK();
5026 		error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
5027 		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
5028 		PF_RULES_WUNLOCK();
5029 		break;
5030 	}
5031 
5032 	case DIOCRADDADDRS: {
5033 		struct pfioc_table *io = (struct pfioc_table *)addr;
5034 		struct pfr_addr *pfras;
5035 		size_t totlen;
5036 
5037 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
5038 			error = ENODEV;
5039 			goto fail;
5040 		}
5041 		if (io->pfrio_size < 0 ||
5042 		    io->pfrio_size > pf_ioctl_maxcount ||
5043 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
5044 			error = EINVAL;
5045 			goto fail;
5046 		}
5047 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
5048 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
5049 		    M_PF, M_WAITOK);
5050 		error = copyin(io->pfrio_buffer, pfras, totlen);
5051 		if (error) {
5052 			free(pfras, M_PF);
5053 			goto fail;
5054 		}
5055 		PF_RULES_WLOCK();
5056 		io->pfrio_nadd = 0;
5057 		error = pfr_add_addrs(&io->pfrio_table, pfras,
5058 		    io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
5059 		    PFR_FLAG_USERIOCTL);
5060 		PF_RULES_WUNLOCK();
5061 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
5062 			error = copyout(pfras, io->pfrio_buffer, totlen);
5063 		free(pfras, M_PF);
5064 		break;
5065 	}
5066 
5067 	case DIOCRDELADDRS: {
5068 		struct pfioc_table *io = (struct pfioc_table *)addr;
5069 		struct pfr_addr *pfras;
5070 		size_t totlen;
5071 
5072 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
5073 			error = ENODEV;
5074 			goto fail;
5075 		}
5076 		if (io->pfrio_size < 0 ||
5077 		    io->pfrio_size > pf_ioctl_maxcount ||
5078 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
5079 			error = EINVAL;
5080 			goto fail;
5081 		}
5082 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
5083 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
5084 		    M_PF, M_WAITOK);
5085 		error = copyin(io->pfrio_buffer, pfras, totlen);
5086 		if (error) {
5087 			free(pfras, M_PF);
5088 			goto fail;
5089 		}
5090 		PF_RULES_WLOCK();
5091 		error = pfr_del_addrs(&io->pfrio_table, pfras,
5092 		    io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
5093 		    PFR_FLAG_USERIOCTL);
5094 		PF_RULES_WUNLOCK();
5095 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
5096 			error = copyout(pfras, io->pfrio_buffer, totlen);
5097 		free(pfras, M_PF);
5098 		break;
5099 	}
5100 
5101 	case DIOCRSETADDRS: {
5102 		struct pfioc_table *io = (struct pfioc_table *)addr;
5103 		struct pfr_addr *pfras;
5104 		size_t totlen, count;
5105 
5106 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
5107 			error = ENODEV;
5108 			goto fail;
5109 		}
5110 		if (io->pfrio_size < 0 || io->pfrio_size2 < 0) {
5111 			error = EINVAL;
5112 			goto fail;
5113 		}
5114 		count = max(io->pfrio_size, io->pfrio_size2);
5115 		if (count > pf_ioctl_maxcount ||
5116 		    WOULD_OVERFLOW(count, sizeof(struct pfr_addr))) {
5117 			error = EINVAL;
5118 			goto fail;
5119 		}
5120 		totlen = count * sizeof(struct pfr_addr);
5121 		pfras = mallocarray(count, sizeof(struct pfr_addr), M_PF,
5122 		    M_WAITOK);
5123 		error = copyin(io->pfrio_buffer, pfras, totlen);
5124 		if (error) {
5125 			free(pfras, M_PF);
5126 			goto fail;
5127 		}
5128 		PF_RULES_WLOCK();
5129 		error = pfr_set_addrs(&io->pfrio_table, pfras,
5130 		    io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
5131 		    &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
5132 		    PFR_FLAG_START | PFR_FLAG_DONE | PFR_FLAG_USERIOCTL, 0);
5133 		PF_RULES_WUNLOCK();
5134 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
5135 			error = copyout(pfras, io->pfrio_buffer, totlen);
5136 		free(pfras, M_PF);
5137 		break;
5138 	}
5139 
5140 	case DIOCRGETADDRS: {
5141 		struct pfioc_table *io = (struct pfioc_table *)addr;
5142 		struct pfr_addr *pfras;
5143 		size_t totlen;
5144 
5145 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
5146 			error = ENODEV;
5147 			goto fail;
5148 		}
5149 		if (io->pfrio_size < 0 ||
5150 		    io->pfrio_size > pf_ioctl_maxcount ||
5151 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
5152 			error = EINVAL;
5153 			goto fail;
5154 		}
5155 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
5156 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
5157 		    M_PF, M_WAITOK | M_ZERO);
5158 		PF_RULES_RLOCK();
5159 		error = pfr_get_addrs(&io->pfrio_table, pfras,
5160 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
5161 		PF_RULES_RUNLOCK();
5162 		if (error == 0)
5163 			error = copyout(pfras, io->pfrio_buffer, totlen);
5164 		free(pfras, M_PF);
5165 		break;
5166 	}
5167 
5168 	case DIOCRGETASTATS: {
5169 		struct pfioc_table *io = (struct pfioc_table *)addr;
5170 		struct pfr_astats *pfrastats;
5171 		size_t totlen;
5172 
5173 		if (io->pfrio_esize != sizeof(struct pfr_astats)) {
5174 			error = ENODEV;
5175 			goto fail;
5176 		}
5177 		if (io->pfrio_size < 0 ||
5178 		    io->pfrio_size > pf_ioctl_maxcount ||
5179 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_astats))) {
5180 			error = EINVAL;
5181 			goto fail;
5182 		}
5183 		totlen = io->pfrio_size * sizeof(struct pfr_astats);
5184 		pfrastats = mallocarray(io->pfrio_size,
5185 		    sizeof(struct pfr_astats), M_PF, M_WAITOK | M_ZERO);
5186 		PF_RULES_RLOCK();
5187 		error = pfr_get_astats(&io->pfrio_table, pfrastats,
5188 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
5189 		PF_RULES_RUNLOCK();
5190 		if (error == 0)
5191 			error = copyout(pfrastats, io->pfrio_buffer, totlen);
5192 		free(pfrastats, M_PF);
5193 		break;
5194 	}
5195 
5196 	case DIOCRCLRASTATS: {
5197 		struct pfioc_table *io = (struct pfioc_table *)addr;
5198 		struct pfr_addr *pfras;
5199 		size_t totlen;
5200 
5201 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
5202 			error = ENODEV;
5203 			goto fail;
5204 		}
5205 		if (io->pfrio_size < 0 ||
5206 		    io->pfrio_size > pf_ioctl_maxcount ||
5207 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
5208 			error = EINVAL;
5209 			goto fail;
5210 		}
5211 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
5212 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
5213 		    M_PF, M_WAITOK);
5214 		error = copyin(io->pfrio_buffer, pfras, totlen);
5215 		if (error) {
5216 			free(pfras, M_PF);
5217 			goto fail;
5218 		}
5219 		PF_RULES_WLOCK();
5220 		error = pfr_clr_astats(&io->pfrio_table, pfras,
5221 		    io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
5222 		    PFR_FLAG_USERIOCTL);
5223 		PF_RULES_WUNLOCK();
5224 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
5225 			error = copyout(pfras, io->pfrio_buffer, totlen);
5226 		free(pfras, M_PF);
5227 		break;
5228 	}
5229 
5230 	case DIOCRTSTADDRS: {
5231 		struct pfioc_table *io = (struct pfioc_table *)addr;
5232 		struct pfr_addr *pfras;
5233 		size_t totlen;
5234 
5235 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
5236 			error = ENODEV;
5237 			goto fail;
5238 		}
5239 		if (io->pfrio_size < 0 ||
5240 		    io->pfrio_size > pf_ioctl_maxcount ||
5241 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
5242 			error = EINVAL;
5243 			goto fail;
5244 		}
5245 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
5246 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
5247 		    M_PF, M_WAITOK);
5248 		error = copyin(io->pfrio_buffer, pfras, totlen);
5249 		if (error) {
5250 			free(pfras, M_PF);
5251 			goto fail;
5252 		}
5253 		PF_RULES_RLOCK();
5254 		error = pfr_tst_addrs(&io->pfrio_table, pfras,
5255 		    io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
5256 		    PFR_FLAG_USERIOCTL);
5257 		PF_RULES_RUNLOCK();
5258 		if (error == 0)
5259 			error = copyout(pfras, io->pfrio_buffer, totlen);
5260 		free(pfras, M_PF);
5261 		break;
5262 	}
5263 
5264 	case DIOCRINADEFINE: {
5265 		struct pfioc_table *io = (struct pfioc_table *)addr;
5266 		struct pfr_addr *pfras;
5267 		size_t totlen;
5268 
5269 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
5270 			error = ENODEV;
5271 			goto fail;
5272 		}
5273 		if (io->pfrio_size < 0 ||
5274 		    io->pfrio_size > pf_ioctl_maxcount ||
5275 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
5276 			error = EINVAL;
5277 			goto fail;
5278 		}
5279 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
5280 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
5281 		    M_PF, M_WAITOK);
5282 		error = copyin(io->pfrio_buffer, pfras, totlen);
5283 		if (error) {
5284 			free(pfras, M_PF);
5285 			goto fail;
5286 		}
5287 		PF_RULES_WLOCK();
5288 		error = pfr_ina_define(&io->pfrio_table, pfras,
5289 		    io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
5290 		    io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
5291 		PF_RULES_WUNLOCK();
5292 		free(pfras, M_PF);
5293 		break;
5294 	}
5295 
5296 	case DIOCOSFPADD: {
5297 		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
5298 		PF_RULES_WLOCK();
5299 		error = pf_osfp_add(io);
5300 		PF_RULES_WUNLOCK();
5301 		break;
5302 	}
5303 
5304 	case DIOCOSFPGET: {
5305 		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
5306 		PF_RULES_RLOCK();
5307 		error = pf_osfp_get(io);
5308 		PF_RULES_RUNLOCK();
5309 		break;
5310 	}
5311 
5312 	case DIOCXBEGIN: {
5313 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
5314 		struct pfioc_trans_e	*ioes, *ioe;
5315 		size_t			 totlen;
5316 		int			 i;
5317 
5318 		if (io->esize != sizeof(*ioe)) {
5319 			error = ENODEV;
5320 			goto fail;
5321 		}
5322 		if (io->size < 0 ||
5323 		    io->size > pf_ioctl_maxcount ||
5324 		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
5325 			error = EINVAL;
5326 			goto fail;
5327 		}
5328 		totlen = sizeof(struct pfioc_trans_e) * io->size;
5329 		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
5330 		    M_PF, M_WAITOK);
5331 		error = copyin(io->array, ioes, totlen);
5332 		if (error) {
5333 			free(ioes, M_PF);
5334 			goto fail;
5335 		}
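		/*
		 * Open an inactive ("transaction") copy of every ruleset named
		 * in the request and hand the resulting tickets back to the
		 * caller; DIOCXCOMMIT or DIOCXROLLBACK later consumes them.
		 */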
5336 		PF_RULES_WLOCK();
5337 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5338 			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
5339 			switch (ioe->rs_num) {
5340 			case PF_RULESET_ETH:
5341 				if ((error = pf_begin_eth(&ioe->ticket, ioe->anchor))) {
5342 					PF_RULES_WUNLOCK();
5343 					free(ioes, M_PF);
5344 					goto fail;
5345 				}
5346 				break;
5347 #ifdef ALTQ
5348 			case PF_RULESET_ALTQ:
5349 				if (ioe->anchor[0]) {
5350 					PF_RULES_WUNLOCK();
5351 					free(ioes, M_PF);
5352 					error = EINVAL;
5353 					goto fail;
5354 				}
5355 				if ((error = pf_begin_altq(&ioe->ticket))) {
5356 					PF_RULES_WUNLOCK();
5357 					free(ioes, M_PF);
5358 					goto fail;
5359 				}
5360 				break;
5361 #endif /* ALTQ */
5362 			case PF_RULESET_TABLE:
5363 			    {
5364 				struct pfr_table table;
5365 
5366 				bzero(&table, sizeof(table));
5367 				strlcpy(table.pfrt_anchor, ioe->anchor,
5368 				    sizeof(table.pfrt_anchor));
5369 				if ((error = pfr_ina_begin(&table,
5370 				    &ioe->ticket, NULL, 0))) {
5371 					PF_RULES_WUNLOCK();
5372 					free(ioes, M_PF);
5373 					goto fail;
5374 				}
5375 				break;
5376 			    }
5377 			default:
5378 				if ((error = pf_begin_rules(&ioe->ticket,
5379 				    ioe->rs_num, ioe->anchor))) {
5380 					PF_RULES_WUNLOCK();
5381 					free(ioes, M_PF);
5382 					goto fail;
5383 				}
5384 				break;
5385 			}
5386 		}
5387 		PF_RULES_WUNLOCK();
5388 		error = copyout(ioes, io->array, totlen);
5389 		free(ioes, M_PF);
5390 		break;
5391 	}
5392 
5393 	case DIOCXROLLBACK: {
5394 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
5395 		struct pfioc_trans_e	*ioe, *ioes;
5396 		size_t			 totlen;
5397 		int			 i;
5398 
5399 		if (io->esize != sizeof(*ioe)) {
5400 			error = ENODEV;
5401 			goto fail;
5402 		}
5403 		if (io->size < 0 ||
5404 		    io->size > pf_ioctl_maxcount ||
5405 		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
5406 			error = EINVAL;
5407 			goto fail;
5408 		}
5409 		totlen = sizeof(struct pfioc_trans_e) * io->size;
5410 		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
5411 		    M_PF, M_WAITOK);
5412 		error = copyin(io->array, ioes, totlen);
5413 		if (error) {
5414 			free(ioes, M_PF);
5415 			goto fail;
5416 		}
5417 		PF_RULES_WLOCK();
5418 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5419 			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
5420 			switch (ioe->rs_num) {
5421 			case PF_RULESET_ETH:
5422 				if ((error = pf_rollback_eth(ioe->ticket,
5423 				    ioe->anchor))) {
5424 					PF_RULES_WUNLOCK();
5425 					free(ioes, M_PF);
5426 					goto fail; /* really bad */
5427 				}
5428 				break;
5429 #ifdef ALTQ
5430 			case PF_RULESET_ALTQ:
5431 				if (ioe->anchor[0]) {
5432 					PF_RULES_WUNLOCK();
5433 					free(ioes, M_PF);
5434 					error = EINVAL;
5435 					goto fail;
5436 				}
5437 				if ((error = pf_rollback_altq(ioe->ticket))) {
5438 					PF_RULES_WUNLOCK();
5439 					free(ioes, M_PF);
5440 					goto fail; /* really bad */
5441 				}
5442 				break;
5443 #endif /* ALTQ */
5444 			case PF_RULESET_TABLE:
5445 			    {
5446 				struct pfr_table table;
5447 
5448 				bzero(&table, sizeof(table));
5449 				strlcpy(table.pfrt_anchor, ioe->anchor,
5450 				    sizeof(table.pfrt_anchor));
5451 				if ((error = pfr_ina_rollback(&table,
5452 				    ioe->ticket, NULL, 0))) {
5453 					PF_RULES_WUNLOCK();
5454 					free(ioes, M_PF);
5455 					goto fail; /* really bad */
5456 				}
5457 				break;
5458 			    }
5459 			default:
5460 				if ((error = pf_rollback_rules(ioe->ticket,
5461 				    ioe->rs_num, ioe->anchor))) {
5462 					PF_RULES_WUNLOCK();
5463 					free(ioes, M_PF);
5464 					goto fail; /* really bad */
5465 				}
5466 				break;
5467 			}
5468 		}
5469 		PF_RULES_WUNLOCK();
5470 		free(ioes, M_PF);
5471 		break;
5472 	}
5473 
5474 	case DIOCXCOMMIT: {
5475 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
5476 		struct pfioc_trans_e	*ioe, *ioes;
5477 		struct pf_kruleset	*rs;
5478 		struct pf_keth_ruleset	*ers;
5479 		size_t			 totlen;
5480 		int			 i;
5481 
5482 		if (io->esize != sizeof(*ioe)) {
5483 			error = ENODEV;
5484 			goto fail;
5485 		}
5486 
5487 		if (io->size < 0 ||
5488 		    io->size > pf_ioctl_maxcount ||
5489 		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
5490 			error = EINVAL;
5491 			goto fail;
5492 		}
5493 
5494 		totlen = sizeof(struct pfioc_trans_e) * io->size;
5495 		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
5496 		    M_PF, M_WAITOK);
5497 		error = copyin(io->array, ioes, totlen);
5498 		if (error) {
5499 			free(ioes, M_PF);
5500 			goto fail;
5501 		}
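		/*
		 * Commit in two passes under a single write lock: validate
		 * every ticket first, then apply the changes, so a stale
		 * ticket cannot leave the rulesets half committed.
		 */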
5502 		PF_RULES_WLOCK();
5503 		/* First, make sure everything will succeed. */
5504 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5505 			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
5506 			switch (ioe->rs_num) {
5507 			case PF_RULESET_ETH:
5508 				ers = pf_find_keth_ruleset(ioe->anchor);
5509 				if (ers == NULL || ioe->ticket == 0 ||
5510 				    ioe->ticket != ers->inactive.ticket) {
5511 					PF_RULES_WUNLOCK();
5512 					free(ioes, M_PF);
5513 					error = EINVAL;
5514 					goto fail;
5515 				}
5516 				break;
5517 #ifdef ALTQ
5518 			case PF_RULESET_ALTQ:
5519 				if (ioe->anchor[0]) {
5520 					PF_RULES_WUNLOCK();
5521 					free(ioes, M_PF);
5522 					error = EINVAL;
5523 					goto fail;
5524 				}
5525 				if (!V_altqs_inactive_open || ioe->ticket !=
5526 				    V_ticket_altqs_inactive) {
5527 					PF_RULES_WUNLOCK();
5528 					free(ioes, M_PF);
5529 					error = EBUSY;
5530 					goto fail;
5531 				}
5532 				break;
5533 #endif /* ALTQ */
5534 			case PF_RULESET_TABLE:
5535 				rs = pf_find_kruleset(ioe->anchor);
5536 				if (rs == NULL || !rs->topen || ioe->ticket !=
5537 				    rs->tticket) {
5538 					PF_RULES_WUNLOCK();
5539 					free(ioes, M_PF);
5540 					error = EBUSY;
5541 					goto fail;
5542 				}
5543 				break;
5544 			default:
5545 				if (ioe->rs_num < 0 || ioe->rs_num >=
5546 				    PF_RULESET_MAX) {
5547 					PF_RULES_WUNLOCK();
5548 					free(ioes, M_PF);
5549 					error = EINVAL;
5550 					goto fail;
5551 				}
5552 				rs = pf_find_kruleset(ioe->anchor);
5553 				if (rs == NULL ||
5554 				    !rs->rules[ioe->rs_num].inactive.open ||
5555 				    rs->rules[ioe->rs_num].inactive.ticket !=
5556 				    ioe->ticket) {
5557 					PF_RULES_WUNLOCK();
5558 					free(ioes, M_PF);
5559 					error = EBUSY;
5560 					goto fail;
5561 				}
5562 				break;
5563 			}
5564 		}
5565 		/* Now do the commit - no errors should happen here. */
5566 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5567 			switch (ioe->rs_num) {
5568 			case PF_RULESET_ETH:
5569 				if ((error = pf_commit_eth(ioe->ticket, ioe->anchor))) {
5570 					PF_RULES_WUNLOCK();
5571 					free(ioes, M_PF);
5572 					goto fail; /* really bad */
5573 				}
5574 				break;
5575 #ifdef ALTQ
5576 			case PF_RULESET_ALTQ:
5577 				if ((error = pf_commit_altq(ioe->ticket))) {
5578 					PF_RULES_WUNLOCK();
5579 					free(ioes, M_PF);
5580 					goto fail; /* really bad */
5581 				}
5582 				break;
5583 #endif /* ALTQ */
5584 			case PF_RULESET_TABLE:
5585 			    {
5586 				struct pfr_table table;
5587 
5588 				bzero(&table, sizeof(table));
5589 				(void)strlcpy(table.pfrt_anchor, ioe->anchor,
5590 				    sizeof(table.pfrt_anchor));
5591 				if ((error = pfr_ina_commit(&table,
5592 				    ioe->ticket, NULL, NULL, 0))) {
5593 					PF_RULES_WUNLOCK();
5594 					free(ioes, M_PF);
5595 					goto fail; /* really bad */
5596 				}
5597 				break;
5598 			    }
5599 			default:
5600 				if ((error = pf_commit_rules(ioe->ticket,
5601 				    ioe->rs_num, ioe->anchor))) {
5602 					PF_RULES_WUNLOCK();
5603 					free(ioes, M_PF);
5604 					goto fail; /* really bad */
5605 				}
5606 				break;
5607 			}
5608 		}
5609 		PF_RULES_WUNLOCK();
5610 
5611 		/* Only hook into Ethernet traffic if we've got rules for it. */
5612 		if (! TAILQ_EMPTY(V_pf_keth->active.rules))
5613 			hook_pf_eth();
5614 		else
5615 			dehook_pf_eth();
5616 
5617 		free(ioes, M_PF);
5618 		break;
5619 	}
5620 
5621 	case DIOCGETSRCNODES: {
5622 		struct pfioc_src_nodes	*psn = (struct pfioc_src_nodes *)addr;
5623 		struct pf_srchash	*sh;
5624 		struct pf_ksrc_node	*n;
5625 		struct pf_src_node	*p, *pstore;
5626 		uint32_t		 i, nr = 0;
5627 
5628 		for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask;
5629 				i++, sh++) {
5630 			PF_HASHROW_LOCK(sh);
5631 			LIST_FOREACH(n, &sh->nodes, entry)
5632 				nr++;
5633 			PF_HASHROW_UNLOCK(sh);
5634 		}
5635 
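		/*
		 * The loop above only counts source nodes so the reply length
		 * can be clamped; the export loop below re-checks the bound
		 * while copying in case the table changed in the meantime.
		 */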
5636 		psn->psn_len = min(psn->psn_len,
5637 		    sizeof(struct pf_src_node) * nr);
5638 
5639 		if (psn->psn_len == 0) {
5640 			psn->psn_len = sizeof(struct pf_src_node) * nr;
5641 			goto fail;
5642 		}
5643 
5644 		nr = 0;
5645 
5646 		p = pstore = malloc(psn->psn_len, M_PF, M_WAITOK | M_ZERO);
5647 		for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask;
5648 		    i++, sh++) {
5649 		    PF_HASHROW_LOCK(sh);
5650 		    LIST_FOREACH(n, &sh->nodes, entry) {
5651 
5652 			if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
5653 				break;
5654 
5655 			pf_src_node_copy(n, p);
5656 
5657 			p++;
5658 			nr++;
5659 		    }
5660 		    PF_HASHROW_UNLOCK(sh);
5661 		}
5662 		error = copyout(pstore, psn->psn_src_nodes,
5663 		    sizeof(struct pf_src_node) * nr);
5664 		if (error) {
5665 			free(pstore, M_PF);
5666 			goto fail;
5667 		}
5668 		psn->psn_len = sizeof(struct pf_src_node) * nr;
5669 		free(pstore, M_PF);
5670 		break;
5671 	}
5672 
5673 	case DIOCCLRSRCNODES: {
5674 		pf_kill_srcnodes(NULL);
5675 		break;
5676 	}
5677 
5678 	case DIOCKILLSRCNODES:
5679 		pf_kill_srcnodes((struct pfioc_src_node_kill *)addr);
5680 		break;
5681 
5682 #ifdef COMPAT_FREEBSD13
5683 	case DIOCKEEPCOUNTERS_FREEBSD13:
5684 #endif
5685 	case DIOCKEEPCOUNTERS:
5686 		error = pf_keepcounters((struct pfioc_nv *)addr);
5687 		break;
5688 
5689 	case DIOCGETSYNCOOKIES:
5690 		error = pf_get_syncookies((struct pfioc_nv *)addr);
5691 		break;
5692 
5693 	case DIOCSETSYNCOOKIES:
5694 		error = pf_set_syncookies((struct pfioc_nv *)addr);
5695 		break;
5696 
5697 	case DIOCSETHOSTID: {
5698 		u_int32_t	*hostid = (u_int32_t *)addr;
5699 
5700 		PF_RULES_WLOCK();
5701 		if (*hostid == 0)
5702 			V_pf_status.hostid = arc4random();
5703 		else
5704 			V_pf_status.hostid = *hostid;
5705 		PF_RULES_WUNLOCK();
5706 		break;
5707 	}
5708 
5709 	case DIOCOSFPFLUSH:
5710 		PF_RULES_WLOCK();
5711 		pf_osfp_flush();
5712 		PF_RULES_WUNLOCK();
5713 		break;
5714 
5715 	case DIOCIGETIFACES: {
5716 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
5717 		struct pfi_kif *ifstore;
5718 		size_t bufsiz;
5719 
5720 		if (io->pfiio_esize != sizeof(struct pfi_kif)) {
5721 			error = ENODEV;
5722 			goto fail;
5723 		}
5724 
5725 		if (io->pfiio_size < 0 ||
5726 		    io->pfiio_size > pf_ioctl_maxcount ||
5727 		    WOULD_OVERFLOW(io->pfiio_size, sizeof(struct pfi_kif))) {
5728 			error = EINVAL;
5729 			goto fail;
5730 		}
5731 
5732 		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
5733 
5734 		bufsiz = io->pfiio_size * sizeof(struct pfi_kif);
5735 		ifstore = mallocarray(io->pfiio_size, sizeof(struct pfi_kif),
5736 		    M_PF, M_WAITOK | M_ZERO);
5737 
5738 		PF_RULES_RLOCK();
5739 		pfi_get_ifaces(io->pfiio_name, ifstore, &io->pfiio_size);
5740 		PF_RULES_RUNLOCK();
5741 		error = copyout(ifstore, io->pfiio_buffer, bufsiz);
5742 		free(ifstore, M_PF);
5743 		break;
5744 	}
5745 
5746 	case DIOCSETIFFLAG: {
5747 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
5748 
5749 		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
5750 
5751 		PF_RULES_WLOCK();
5752 		error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
5753 		PF_RULES_WUNLOCK();
5754 		break;
5755 	}
5756 
5757 	case DIOCCLRIFFLAG: {
5758 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
5759 
5760 		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
5761 
5762 		PF_RULES_WLOCK();
5763 		error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
5764 		PF_RULES_WUNLOCK();
5765 		break;
5766 	}
5767 
5768 	case DIOCSETREASS: {
5769 		u_int32_t	*reass = (u_int32_t *)addr;
5770 
5771 		V_pf_status.reass = *reass & (PF_REASS_ENABLED|PF_REASS_NODF);
5772 		/* Clearing the DF flag without reassembly enabled is not a
5773 		 * valid combination; disable reassembly in that case. */
5774 		if (!(V_pf_status.reass & PF_REASS_ENABLED))
5775 			V_pf_status.reass = 0;
5776 		break;
5777 	}
5778 
5779 	default:
5780 		error = ENODEV;
5781 		break;
5782 	}
5783 fail:
5784 	CURVNET_RESTORE();
5785 
5786 #undef ERROUT_IOCTL
5787 
5788 	return (error);
5789 }
5790 
5791 static void
5792 pfsync_state_export(union pfsync_state_union *sp, struct pf_kstate *st, int msg_version)
5793 {
5794 	const char	*tagname;
5795 
5796 	/* copy from state key */
5797 	sp->pfs_1301.key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
5798 	sp->pfs_1301.key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
5799 	sp->pfs_1301.key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
5800 	sp->pfs_1301.key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
5801 	sp->pfs_1301.key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
5802 	sp->pfs_1301.key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
5803 	sp->pfs_1301.key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
5804 	sp->pfs_1301.key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
5805 
5806 	/* copy from state */
5807 	strlcpy(sp->pfs_1301.ifname, st->kif->pfik_name, sizeof(sp->pfs_1301.ifname));
5808 	bcopy(&st->act.rt_addr, &sp->pfs_1301.rt_addr, sizeof(sp->pfs_1301.rt_addr));
5809 	sp->pfs_1301.creation = htonl(time_uptime - (st->creation / 1000));
5810 	sp->pfs_1301.expire = pf_state_expires(st);
5811 	if (sp->pfs_1301.expire <= time_uptime)
5812 		sp->pfs_1301.expire = htonl(0);
5813 	else
5814 		sp->pfs_1301.expire = htonl(sp->pfs_1301.expire - time_uptime);
5815 
5816 	switch (msg_version) {
5817 		case PFSYNC_MSG_VERSION_1301:
5818 			sp->pfs_1301.state_flags = st->state_flags;
5819 			sp->pfs_1301.direction = st->direction;
5820 			sp->pfs_1301.log = st->act.log;
5821 			sp->pfs_1301.timeout = st->timeout;
5822 			sp->pfs_1301.proto = st->key[PF_SK_WIRE]->proto;
5823 			sp->pfs_1301.af = st->key[PF_SK_WIRE]->af;
5824 			/*
5825 			 * XXX Why do we bother pfsyncing source node information if source
5826 			 * nodes are not synced? Showing users that there is source tracking
5827 			 * when there is none seems useless.
5828 			 */
5829 			if (st->sns[PF_SN_LIMIT] != NULL)
5830 				sp->pfs_1301.sync_flags |= PFSYNC_FLAG_SRCNODE;
5831 			if (st->sns[PF_SN_NAT] != NULL || st->sns[PF_SN_ROUTE])
5832 				sp->pfs_1301.sync_flags |= PFSYNC_FLAG_NATSRCNODE;
5833 			break;
5834 		case PFSYNC_MSG_VERSION_1400:
5835 			sp->pfs_1400.state_flags = htons(st->state_flags);
5836 			sp->pfs_1400.direction = st->direction;
5837 			sp->pfs_1400.log = st->act.log;
5838 			sp->pfs_1400.timeout = st->timeout;
5839 			sp->pfs_1400.proto = st->key[PF_SK_WIRE]->proto;
5840 			sp->pfs_1400.af = st->key[PF_SK_WIRE]->af;
5841 			sp->pfs_1400.qid = htons(st->act.qid);
5842 			sp->pfs_1400.pqid = htons(st->act.pqid);
5843 			sp->pfs_1400.dnpipe = htons(st->act.dnpipe);
5844 			sp->pfs_1400.dnrpipe = htons(st->act.dnrpipe);
5845 			sp->pfs_1400.rtableid = htonl(st->act.rtableid);
5846 			sp->pfs_1400.min_ttl = st->act.min_ttl;
5847 			sp->pfs_1400.set_tos = st->act.set_tos;
5848 			sp->pfs_1400.max_mss = htons(st->act.max_mss);
5849 			sp->pfs_1400.set_prio[0] = st->act.set_prio[0];
5850 			sp->pfs_1400.set_prio[1] = st->act.set_prio[1];
5851 			sp->pfs_1400.rt = st->act.rt;
5852 			if (st->act.rt_kif)
5853 				strlcpy(sp->pfs_1400.rt_ifname,
5854 				    st->act.rt_kif->pfik_name,
5855 				    sizeof(sp->pfs_1400.rt_ifname));
5856 			/*
5857 			 * XXX Why do we bother pfsyncing source node information if source
5858 			 * nodes are not synced? Showing users that there is source tracking
5859 			 * when there is none seems useless.
5860 			 */
5861 			if (st->sns[PF_SN_LIMIT] != NULL)
5862 				sp->pfs_1400.sync_flags |= PFSYNC_FLAG_SRCNODE;
5863 			if (st->sns[PF_SN_NAT] != NULL || st->sns[PF_SN_ROUTE])
5864 				sp->pfs_1400.sync_flags |= PFSYNC_FLAG_NATSRCNODE;
5865 			break;
5866 		case PFSYNC_MSG_VERSION_1500:
5867 			sp->pfs_1500.state_flags = htons(st->state_flags);
5868 			sp->pfs_1500.direction = st->direction;
5869 			sp->pfs_1500.log = st->act.log;
5870 			sp->pfs_1500.timeout = st->timeout;
5871 			sp->pfs_1500.wire_proto = st->key[PF_SK_WIRE]->proto;
5872 			sp->pfs_1500.wire_af = st->key[PF_SK_WIRE]->af;
5873 			sp->pfs_1500.stack_proto = st->key[PF_SK_STACK]->proto;
5874 			sp->pfs_1500.stack_af = st->key[PF_SK_STACK]->af;
5875 			sp->pfs_1500.qid = htons(st->act.qid);
5876 			sp->pfs_1500.pqid = htons(st->act.pqid);
5877 			sp->pfs_1500.dnpipe = htons(st->act.dnpipe);
5878 			sp->pfs_1500.dnrpipe = htons(st->act.dnrpipe);
5879 			sp->pfs_1500.rtableid = htonl(st->act.rtableid);
5880 			sp->pfs_1500.min_ttl = st->act.min_ttl;
5881 			sp->pfs_1500.set_tos = st->act.set_tos;
5882 			sp->pfs_1500.max_mss = htons(st->act.max_mss);
5883 			sp->pfs_1500.set_prio[0] = st->act.set_prio[0];
5884 			sp->pfs_1500.set_prio[1] = st->act.set_prio[1];
5885 			sp->pfs_1500.rt = st->act.rt;
5886 			sp->pfs_1500.rt_af = st->act.rt_af;
5887 			if (st->act.rt_kif)
5888 				strlcpy(sp->pfs_1500.rt_ifname,
5889 				    st->act.rt_kif->pfik_name,
5890 				    sizeof(sp->pfs_1500.rt_ifname));
5891 			strlcpy(sp->pfs_1500.orig_ifname,
5892 			    st->orig_kif->pfik_name,
5893 			    sizeof(sp->pfs_1500.orig_ifname));
5894 			if ((tagname = pf_tag2tagname(st->tag)) != NULL)
5895 				strlcpy(sp->pfs_1500.tagname, tagname,
5896 				    sizeof(sp->pfs_1500.tagname));
5897 			break;
5898 		default:
5899 			panic("%s: Unsupported pfsync_msg_version %d",
5900 			    __func__, msg_version);
5901 	}
5902 
5903 	sp->pfs_1301.id = st->id;
5904 	sp->pfs_1301.creatorid = st->creatorid;
5905 	pf_state_peer_hton(&st->src, &sp->pfs_1301.src);
5906 	pf_state_peer_hton(&st->dst, &sp->pfs_1301.dst);
5907 
5908 	if (st->rule == NULL)
5909 		sp->pfs_1301.rule = htonl(-1);
5910 	else
5911 		sp->pfs_1301.rule = htonl(st->rule->nr);
5912 	if (st->anchor == NULL)
5913 		sp->pfs_1301.anchor = htonl(-1);
5914 	else
5915 		sp->pfs_1301.anchor = htonl(st->anchor->nr);
5916 	if (st->nat_rule == NULL)
5917 		sp->pfs_1301.nat_rule = htonl(-1);
5918 	else
5919 		sp->pfs_1301.nat_rule = htonl(st->nat_rule->nr);
5920 
5921 	pf_state_counter_hton(st->packets[0], sp->pfs_1301.packets[0]);
5922 	pf_state_counter_hton(st->packets[1], sp->pfs_1301.packets[1]);
5923 	pf_state_counter_hton(st->bytes[0], sp->pfs_1301.bytes[0]);
5924 	pf_state_counter_hton(st->bytes[1], sp->pfs_1301.bytes[1]);
5925 }
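/*
 * Note on the encoding above: "creation" and "expire" travel as relative
 * values in network byte order.  A state created 10 seconds ago that will
 * expire 90 seconds from now is exported as htonl(10) and htonl(90); an
 * already-expired state is exported with expire == htonl(0).
 */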
5926 
5927 void
5928 pfsync_state_export_1301(struct pfsync_state_1301 *sp, struct pf_kstate *st)
5929 {
5930 	bzero(sp, sizeof(*sp));
5931 	pfsync_state_export((union pfsync_state_union *)sp, st,
5932 	    PFSYNC_MSG_VERSION_1301);
5933 }
5934 
5935 void
5936 pfsync_state_export_1400(struct pfsync_state_1400 *sp, struct pf_kstate *st)
5937 {
5938 	bzero(sp, sizeof(*sp));
5939 	pfsync_state_export((union pfsync_state_union *)sp, st,
5940 	    PFSYNC_MSG_VERSION_1400);
5941 }
5942 
5943 void
5944 pfsync_state_export_1500(struct pfsync_state_1500 *sp, struct pf_kstate *st)
5945 {
5946 	bzero(sp, sizeof(*sp));
5947 	pfsync_state_export((union pfsync_state_union *)sp, st,
5948 	    PFSYNC_MSG_VERSION_1500);
5949 }
5950 
5951 void
5952 pf_state_export(struct pf_state_export *sp, struct pf_kstate *st)
5953 {
5954 	bzero(sp, sizeof(*sp));
5955 
5956 	sp->version = PF_STATE_VERSION;
5957 
5958 	/* copy from state key */
5959 	sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
5960 	sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
5961 	sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
5962 	sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
5963 	sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
5964 	sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
5965 	sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
5966 	sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
5967 	sp->proto = st->key[PF_SK_WIRE]->proto;
5968 	sp->af = st->key[PF_SK_WIRE]->af;
5969 
5970 	/* copy from state */
5971 	strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
5972 	strlcpy(sp->orig_ifname, st->orig_kif->pfik_name,
5973 	    sizeof(sp->orig_ifname));
5974 	memcpy(&sp->rt_addr, &st->act.rt_addr, sizeof(sp->rt_addr));
5975 	sp->creation = htonl(time_uptime - (st->creation / 1000));
5976 	sp->expire = pf_state_expires(st);
5977 	if (sp->expire <= time_uptime)
5978 		sp->expire = htonl(0);
5979 	else
5980 		sp->expire = htonl(sp->expire - time_uptime);
5981 
5982 	sp->direction = st->direction;
5983 	sp->log = st->act.log;
5984 	sp->timeout = st->timeout;
5985 	/* 8 bits for the old libpfctl, 16 bits for the new libpfctl */
5986 	sp->state_flags_compat = st->state_flags;
5987 	sp->state_flags = htons(st->state_flags);
5988 	if (st->sns[PF_SN_LIMIT] != NULL)
5989 		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
5990 	if (st->sns[PF_SN_NAT] != NULL || st->sns[PF_SN_ROUTE] != NULL)
5991 		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;
5992 	sp->id = st->id;
5993 	sp->creatorid = st->creatorid;
5994 	pf_state_peer_hton(&st->src, &sp->src);
5995 	pf_state_peer_hton(&st->dst, &sp->dst);
5996 
5997 	if (st->rule == NULL)
5998 		sp->rule = htonl(-1);
5999 	else
6000 		sp->rule = htonl(st->rule->nr);
6001 	if (st->anchor == NULL)
6002 		sp->anchor = htonl(-1);
6003 	else
6004 		sp->anchor = htonl(st->anchor->nr);
6005 	if (st->nat_rule == NULL)
6006 		sp->nat_rule = htonl(-1);
6007 	else
6008 		sp->nat_rule = htonl(st->nat_rule->nr);
6009 
6010 	sp->packets[0] = st->packets[0];
6011 	sp->packets[1] = st->packets[1];
6012 	sp->bytes[0] = st->bytes[0];
6013 	sp->bytes[1] = st->bytes[1];
6014 
6015 	sp->qid = htons(st->act.qid);
6016 	sp->pqid = htons(st->act.pqid);
6017 	sp->dnpipe = htons(st->act.dnpipe);
6018 	sp->dnrpipe = htons(st->act.dnrpipe);
6019 	sp->rtableid = htonl(st->act.rtableid);
6020 	sp->min_ttl = st->act.min_ttl;
6021 	sp->set_tos = st->act.set_tos;
6022 	sp->max_mss = htons(st->act.max_mss);
6023 	sp->rt = st->act.rt;
6024 	if (st->act.rt_kif)
6025 		strlcpy(sp->rt_ifname, st->act.rt_kif->pfik_name,
6026 		    sizeof(sp->rt_ifname));
6027 	sp->set_prio[0] = st->act.set_prio[0];
6028 	sp->set_prio[1] = st->act.set_prio[1];
6029 
6030 }
6031 
6032 static void
6033 pf_tbladdr_copyout(struct pf_addr_wrap *aw)
6034 {
6035 	struct pfr_ktable *kt;
6036 
6037 	KASSERT(aw->type == PF_ADDR_TABLE, ("%s: type %u", __func__, aw->type));
6038 
6039 	kt = aw->p.tbl;
6040 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
6041 		kt = kt->pfrkt_root;
6042 	aw->p.tbl = NULL;
6043 	aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
6044 		kt->pfrkt_cnt : -1;
6045 }
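/*
 * Note: after pf_tbladdr_copyout() the wrap no longer carries a kernel
 * table pointer; userland only sees the address count of the (possibly
 * root) table, or -1 if the table is not active.
 */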
6046 
6047 static int
6048 pf_add_status_counters(nvlist_t *nvl, const char *name, counter_u64_t *counters,
6049     size_t number, char **names)
6050 {
6051 	nvlist_t        *nvc;
6052 
6053 	nvc = nvlist_create(0);
6054 	if (nvc == NULL)
6055 		return (ENOMEM);
6056 
6057 	for (int i = 0; i < number; i++) {
6058 		nvlist_append_number_array(nvc, "counters",
6059 		    counter_u64_fetch(counters[i]));
6060 		nvlist_append_string_array(nvc, "names",
6061 		    names[i]);
6062 		nvlist_append_number_array(nvc, "ids",
6063 		    i);
6064 	}
6065 	nvlist_add_nvlist(nvl, name, nvc);
6066 	nvlist_destroy(nvc);
6067 
6068 	return (0);
6069 }
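/*
 * Illustrative sketch, not part of the build: a libnv consumer reads the
 * three parallel arrays built above back out of the nvlist, e.g.:
 *
 *	size_t n;
 *	const nvlist_t *cnt = nvlist_get_nvlist(nvl, "counters");
 *	const uint64_t *vals = nvlist_get_number_array(cnt, "counters", &n);
 *	const char * const *names = nvlist_get_string_array(cnt, "names", &n);
 *	for (size_t i = 0; i < n; i++)
 *		printf("%s: %ju\n", names[i], (uintmax_t)vals[i]);
 */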
6070 
6071 static int
6072 pf_getstatus(struct pfioc_nv *nv)
6073 {
6074 	nvlist_t        *nvl = NULL, *nvc = NULL;
6075 	void            *nvlpacked = NULL;
6076 	int              error;
6077 	struct pf_status s;
6078 	char *pf_reasons[PFRES_MAX+1] = PFRES_NAMES;
6079 	char *pf_lcounter[KLCNT_MAX+1] = KLCNT_NAMES;
6080 	char *pf_fcounter[FCNT_MAX+1] = FCNT_NAMES;
6081 	time_t since;
6082 
6083 	PF_RULES_RLOCK_TRACKER;
6084 
6085 #define ERROUT(x)      ERROUT_FUNCTION(errout, x)
6086 
6087 	PF_RULES_RLOCK();
6088 
6089 	nvl = nvlist_create(0);
6090 	if (nvl == NULL)
6091 		ERROUT(ENOMEM);
6092 
6093 	since = time_second - (time_uptime - V_pf_status.since);
6094 
6095 	nvlist_add_bool(nvl, "running", V_pf_status.running);
6096 	nvlist_add_number(nvl, "since", since);
6097 	nvlist_add_number(nvl, "debug", V_pf_status.debug);
6098 	nvlist_add_number(nvl, "hostid", V_pf_status.hostid);
6099 	nvlist_add_number(nvl, "states", V_pf_status.states);
6100 	nvlist_add_number(nvl, "src_nodes", V_pf_status.src_nodes);
6101 	nvlist_add_number(nvl, "reass", V_pf_status.reass);
6102 	nvlist_add_bool(nvl, "syncookies_active",
6103 	    V_pf_status.syncookies_active);
6104 	nvlist_add_number(nvl, "halfopen_states", V_pf_status.states_halfopen);
6105 
6106 	/* counters */
6107 	error = pf_add_status_counters(nvl, "counters", V_pf_status.counters,
6108 	    PFRES_MAX, pf_reasons);
6109 	if (error != 0)
6110 		ERROUT(error);
6111 
6112 	/* lcounters */
6113 	error = pf_add_status_counters(nvl, "lcounters", V_pf_status.lcounters,
6114 	    KLCNT_MAX, pf_lcounter);
6115 	if (error != 0)
6116 		ERROUT(error);
6117 
6118 	/* fcounters */
6119 	nvc = nvlist_create(0);
6120 	if (nvc == NULL)
6121 		ERROUT(ENOMEM);
6122 
6123 	for (int i = 0; i < FCNT_MAX; i++) {
6124 		nvlist_append_number_array(nvc, "counters",
6125 		    pf_counter_u64_fetch(&V_pf_status.fcounters[i]));
6126 		nvlist_append_string_array(nvc, "names",
6127 		    pf_fcounter[i]);
6128 		nvlist_append_number_array(nvc, "ids",
6129 		    i);
6130 	}
6131 	nvlist_add_nvlist(nvl, "fcounters", nvc);
6132 	nvlist_destroy(nvc);
6133 	nvc = NULL;
6134 
6135 	/* scounters */
6136 	error = pf_add_status_counters(nvl, "scounters", V_pf_status.scounters,
6137 	    SCNT_MAX, pf_fcounter);
6138 	if (error != 0)
6139 		ERROUT(error);
6140 
6141 	nvlist_add_string(nvl, "ifname", V_pf_status.ifname);
6142 	nvlist_add_binary(nvl, "chksum", V_pf_status.pf_chksum,
6143 	    PF_MD5_DIGEST_LENGTH);
6144 
6145 	pfi_update_status(V_pf_status.ifname, &s);
6146 
6147 	/* pcounters / bcounters */
6148 	for (int i = 0; i < 2; i++) {
6149 		for (int j = 0; j < 2; j++) {
6150 			for (int k = 0; k < 2; k++) {
6151 				nvlist_append_number_array(nvl, "pcounters",
6152 				    s.pcounters[i][j][k]);
6153 			}
6154 			nvlist_append_number_array(nvl, "bcounters",
6155 			    s.bcounters[i][j]);
6156 		}
6157 	}
6158 
6159 	nvlpacked = nvlist_pack(nvl, &nv->len);
6160 	if (nvlpacked == NULL)
6161 		ERROUT(ENOMEM);
6162 
6163 	if (nv->size == 0)
6164 		ERROUT(0);
6165 	else if (nv->size < nv->len)
6166 		ERROUT(ENOSPC);
6167 
6168 	PF_RULES_RUNLOCK();
6169 	error = copyout(nvlpacked, nv->data, nv->len);
6170 	goto done;
6171 
6172 #undef ERROUT
6173 errout:
6174 	PF_RULES_RUNLOCK();
6175 done:
6176 	free(nvlpacked, M_NVLIST);
6177 	nvlist_destroy(nvc);
6178 	nvlist_destroy(nvl);
6179 
6180 	return (error);
6181 }
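/*
 * Illustrative sketch, not part of the build: the nv->size/nv->len
 * handshake above lets userland probe the required buffer size first
 * (size == 0 returns only the length), then fetch for real:
 *
 *	struct pfioc_nv nv = { .data = NULL, .size = 0 };
 *	(void)ioctl(dev, DIOCGETSTATUSNV, &nv);  .. nv.len is now set
 *	nv.data = malloc(nv.len);
 *	nv.size = nv.len;
 *	(void)ioctl(dev, DIOCGETSTATUSNV, &nv);
 *	nvl = nvlist_unpack(nv.data, nv.len, 0);
 */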
6182 
6183 /*
6184  * XXX - Check for version mismatch!!!
6185  */
6186 static void
6187 pf_clear_all_states(void)
6188 {
6189 	struct epoch_tracker	 et;
6190 	struct pf_kstate	*s;
6191 	u_int i;
6192 
6193 	NET_EPOCH_ENTER(et);
6194 	for (i = 0; i <= V_pf_hashmask; i++) {
6195 		struct pf_idhash *ih = &V_pf_idhash[i];
6196 relock:
6197 		PF_HASHROW_LOCK(ih);
6198 		LIST_FOREACH(s, &ih->states, entry) {
6199 			s->timeout = PFTM_PURGE;
6200 			/* Don't send out individual delete messages. */
6201 			s->state_flags |= PFSTATE_NOSYNC;
6202 			pf_remove_state(s);
6203 			goto relock;
6204 		}
6205 		PF_HASHROW_UNLOCK(ih);
6206 	}
6207 	NET_EPOCH_EXIT(et);
6208 }
6209 
6210 static int
6211 pf_clear_tables(void)
6212 {
6213 	struct pfioc_table io;
6214 	int error;
6215 
6216 	bzero(&io, sizeof(io));
6217 	io.pfrio_flags |= PFR_FLAG_ALLRSETS;
6218 
6219 	error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel,
6220 	    io.pfrio_flags);
6221 
6222 	return (error);
6223 }
6224 
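/*
 * Remove source nodes: all of them when psnk is NULL, otherwise those
 * matching the given source/destination filter.  This runs in three
 * passes: unlink matching nodes and mark them (expire = 1), detach the
 * marked nodes from any states still pointing at them, then free the
 * collected list.
 */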
6225 static void
6226 pf_kill_srcnodes(struct pfioc_src_node_kill *psnk)
6227 {
6228 	struct pf_ksrc_node_list	 kill;
6229 	u_int 				 killed;
6230 
6231 	LIST_INIT(&kill);
6232 	for (int i = 0; i <= V_pf_srchashmask; i++) {
6233 		struct pf_srchash *sh = &V_pf_srchash[i];
6234 		struct pf_ksrc_node *sn, *tmp;
6235 
6236 		PF_HASHROW_LOCK(sh);
6237 		LIST_FOREACH_SAFE(sn, &sh->nodes, entry, tmp)
6238 			if (psnk == NULL ||
6239 			    (pf_match_addr(psnk->psnk_src.neg,
6240 			      &psnk->psnk_src.addr.v.a.addr,
6241 			      &psnk->psnk_src.addr.v.a.mask,
6242 			      &sn->addr, sn->af) &&
6243 			    pf_match_addr(psnk->psnk_dst.neg,
6244 			      &psnk->psnk_dst.addr.v.a.addr,
6245 			      &psnk->psnk_dst.addr.v.a.mask,
6246 			      &sn->raddr, sn->af))) {
6247 				pf_unlink_src_node(sn);
6248 				LIST_INSERT_HEAD(&kill, sn, entry);
6249 				sn->expire = 1;
6250 			}
6251 		PF_HASHROW_UNLOCK(sh);
6252 	}
6253 
6254 	for (int i = 0; i <= V_pf_hashmask; i++) {
6255 		struct pf_idhash *ih = &V_pf_idhash[i];
6256 		struct pf_kstate *s;
6257 
6258 		PF_HASHROW_LOCK(ih);
6259 		LIST_FOREACH(s, &ih->states, entry) {
6260 			for (pf_sn_types_t sn_type = 0; sn_type < PF_SN_MAX;
6261 			    sn_type++) {
6262 				if (s->sns[sn_type] &&
6263 				    s->sns[sn_type]->expire == 1) {
6264 					s->sns[sn_type] = NULL;
6265 				}
6266 			}
6267 		}
6268 		PF_HASHROW_UNLOCK(ih);
6269 	}
6270 
6271 	killed = pf_free_src_nodes(&kill);
6272 
6273 	if (psnk != NULL)
6274 		psnk->psnk_killed = killed;
6275 }
6276 
6277 static int
6278 pf_keepcounters(struct pfioc_nv *nv)
6279 {
6280 	nvlist_t	*nvl = NULL;
6281 	void		*nvlpacked = NULL;
6282 	int		 error = 0;
6283 
6284 #define	ERROUT(x)	ERROUT_FUNCTION(on_error, x)
6285 
6286 	if (nv->len > pf_ioctl_maxcount)
6287 		ERROUT(ENOMEM);
6288 
6289 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
6290 	error = copyin(nv->data, nvlpacked, nv->len);
6291 	if (error)
6292 		ERROUT(error);
6293 
6294 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
6295 	if (nvl == NULL)
6296 		ERROUT(EBADMSG);
6297 
6298 	if (! nvlist_exists_bool(nvl, "keep_counters"))
6299 		ERROUT(EBADMSG);
6300 
6301 	V_pf_status.keep_counters = nvlist_get_bool(nvl, "keep_counters");
6302 
6303 on_error:
6304 	nvlist_destroy(nvl);
6305 	free(nvlpacked, M_NVLIST);
6306 	return (error);
6307 }
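/*
 * Illustrative sketch, not part of the build: userland packs the boolean
 * consumed above into an nvlist and ships it via DIOCKEEPCOUNTERS:
 *
 *	nvlist_t *nvl = nvlist_create(0);
 *	nvlist_add_bool(nvl, "keep_counters", true);
 *	nv.data = nvlist_pack(nvl, &nv.len);
 *	nv.size = nv.len;
 *	(void)ioctl(dev, DIOCKEEPCOUNTERS, &nv);
 */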
6308 
6309 unsigned int
6310 pf_clear_states(const struct pf_kstate_kill *kill)
6311 {
6312 	struct pf_state_key_cmp	 match_key;
6313 	struct pf_kstate	*s;
6314 	struct pfi_kkif	*kif;
6315 	int		 idx;
6316 	unsigned int	 killed = 0, dir;
6317 
6318 	NET_EPOCH_ASSERT();
6319 
6320 	for (unsigned int i = 0; i <= V_pf_hashmask; i++) {
6321 		struct pf_idhash *ih = &V_pf_idhash[i];
6322 
6323 relock_DIOCCLRSTATES:
6324 		PF_HASHROW_LOCK(ih);
6325 		LIST_FOREACH(s, &ih->states, entry) {
6326 			/* For floating states look at the original kif. */
6327 			kif = s->kif == V_pfi_all ? s->orig_kif : s->kif;
6328 
6329 			if (kill->psk_ifname[0] &&
6330 			    strcmp(kill->psk_ifname,
6331 			    kif->pfik_name))
6332 				continue;
6333 
6334 			if (kill->psk_kill_match) {
6335 				bzero(&match_key, sizeof(match_key));
6336 
6337 				if (s->direction == PF_OUT) {
6338 					dir = PF_IN;
6339 					idx = PF_SK_STACK;
6340 				} else {
6341 					dir = PF_OUT;
6342 					idx = PF_SK_WIRE;
6343 				}
6344 
6345 				match_key.af = s->key[idx]->af;
6346 				match_key.proto = s->key[idx]->proto;
6347 				pf_addrcpy(&match_key.addr[0],
6348 				    &s->key[idx]->addr[1], match_key.af);
6349 				match_key.port[0] = s->key[idx]->port[1];
6350 				pf_addrcpy(&match_key.addr[1],
6351 				    &s->key[idx]->addr[0], match_key.af);
6352 				match_key.port[1] = s->key[idx]->port[0];
6353 			}
6354 
6355 			/*
6356 			 * Don't send out individual
6357 			 * delete messages.
6358 			 */
6359 			s->state_flags |= PFSTATE_NOSYNC;
6360 			pf_remove_state(s);
6361 			killed++;
6362 
6363 			if (kill->psk_kill_match)
6364 				killed += pf_kill_matching_state(&match_key,
6365 				    dir);
6366 
6367 			goto relock_DIOCCLRSTATES;
6368 		}
6369 		PF_HASHROW_UNLOCK(ih);
6370 	}
6371 
6372 	if (V_pfsync_clear_states_ptr != NULL)
6373 		V_pfsync_clear_states_ptr(V_pf_status.hostid, kill->psk_ifname);
6374 
6375 	return (killed);
6376 }
6377 
6378 void
6379 pf_killstates(struct pf_kstate_kill *kill, unsigned int *killed)
6380 {
6381 	struct pf_kstate	*s;
6382 
6383 	NET_EPOCH_ASSERT();
6384 	if (kill->psk_pfcmp.id) {
6385 		if (kill->psk_pfcmp.creatorid == 0)
6386 			kill->psk_pfcmp.creatorid = V_pf_status.hostid;
6387 		if ((s = pf_find_state_byid(kill->psk_pfcmp.id,
6388 		    kill->psk_pfcmp.creatorid))) {
6389 			pf_remove_state(s);
6390 			*killed = 1;
6391 		}
6392 		return;
6393 	}
6394 
6395 	for (unsigned int i = 0; i <= V_pf_hashmask; i++)
6396 		*killed += pf_killstates_row(kill, &V_pf_idhash[i]);
6397 }
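/*
 * Note: a non-zero psk_pfcmp.id selects exactly one state (the creatorid
 * defaults to our own hostid); otherwise every hash row is scanned and
 * pf_killstates_row() applies the full kill filter.
 */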
6398 
6399 static int
6400 pf_killstates_nv(struct pfioc_nv *nv)
6401 {
6402 	struct pf_kstate_kill	 kill;
6403 	struct epoch_tracker	 et;
6404 	nvlist_t		*nvl = NULL;
6405 	void			*nvlpacked = NULL;
6406 	int			 error = 0;
6407 	unsigned int		 killed = 0;
6408 
6409 #define ERROUT(x)	ERROUT_FUNCTION(on_error, x)
6410 
6411 	if (nv->len > pf_ioctl_maxcount)
6412 		ERROUT(ENOMEM);
6413 
6414 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
6415 	error = copyin(nv->data, nvlpacked, nv->len);
6416 	if (error)
6417 		ERROUT(error);
6418 
6419 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
6420 	if (nvl == NULL)
6421 		ERROUT(EBADMSG);
6422 
6423 	error = pf_nvstate_kill_to_kstate_kill(nvl, &kill);
6424 	if (error)
6425 		ERROUT(error);
6426 
6427 	NET_EPOCH_ENTER(et);
6428 	pf_killstates(&kill, &killed);
6429 	NET_EPOCH_EXIT(et);
6430 
6431 	free(nvlpacked, M_NVLIST);
6432 	nvlpacked = NULL;
6433 	nvlist_destroy(nvl);
6434 	nvl = nvlist_create(0);
6435 	if (nvl == NULL)
6436 		ERROUT(ENOMEM);
6437 
6438 	nvlist_add_number(nvl, "killed", killed);
6439 
6440 	nvlpacked = nvlist_pack(nvl, &nv->len);
6441 	if (nvlpacked == NULL)
6442 		ERROUT(ENOMEM);
6443 
6444 	if (nv->size == 0)
6445 		ERROUT(0);
6446 	else if (nv->size < nv->len)
6447 		ERROUT(ENOSPC);
6448 
6449 	error = copyout(nvlpacked, nv->data, nv->len);
6450 
6451 on_error:
6452 	nvlist_destroy(nvl);
6453 	free(nvlpacked, M_NVLIST);
6454 	return (error);
6455 }
6456 
6457 static int
6458 pf_clearstates_nv(struct pfioc_nv *nv)
6459 {
6460 	struct pf_kstate_kill	 kill;
6461 	struct epoch_tracker	 et;
6462 	nvlist_t		*nvl = NULL;
6463 	void			*nvlpacked = NULL;
6464 	int			 error = 0;
6465 	unsigned int		 killed;
6466 
6467 #define ERROUT(x)	ERROUT_FUNCTION(on_error, x)
6468 
6469 	if (nv->len > pf_ioctl_maxcount)
6470 		ERROUT(ENOMEM);
6471 
6472 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
6473 	error = copyin(nv->data, nvlpacked, nv->len);
6474 	if (error)
6475 		ERROUT(error);
6476 
6477 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
6478 	if (nvl == NULL)
6479 		ERROUT(EBADMSG);
6480 
6481 	error = pf_nvstate_kill_to_kstate_kill(nvl, &kill);
6482 	if (error)
6483 		ERROUT(error);
6484 
6485 	NET_EPOCH_ENTER(et);
6486 	killed = pf_clear_states(&kill);
6487 	NET_EPOCH_EXIT(et);
6488 
6489 	free(nvlpacked, M_NVLIST);
6490 	nvlpacked = NULL;
6491 	nvlist_destroy(nvl);
6492 	nvl = nvlist_create(0);
6493 	if (nvl == NULL)
6494 		ERROUT(ENOMEM);
6495 
6496 	nvlist_add_number(nvl, "killed", killed);
6497 
6498 	nvlpacked = nvlist_pack(nvl, &nv->len);
6499 	if (nvlpacked == NULL)
6500 		ERROUT(ENOMEM);
6501 
6502 	if (nv->size == 0)
6503 		ERROUT(0);
6504 	else if (nv->size < nv->len)
6505 		ERROUT(ENOSPC);
6506 
6507 	error = copyout(nvlpacked, nv->data, nv->len);
6508 
6509 #undef ERROUT
6510 on_error:
6511 	nvlist_destroy(nvl);
6512 	free(nvlpacked, M_NVLIST);
6513 	return (error);
6514 }
6515 
6516 static int
6517 pf_getstate(struct pfioc_nv *nv)
6518 {
6519 	nvlist_t		*nvl = NULL, *nvls;
6520 	void			*nvlpacked = NULL;
6521 	struct pf_kstate	*s = NULL;
6522 	int			 error = 0;
6523 	uint64_t		 id, creatorid;
6524 
6525 #define ERROUT(x)	ERROUT_FUNCTION(errout, x)
6526 
6527 	if (nv->len > pf_ioctl_maxcount)
6528 		ERROUT(ENOMEM);
6529 
6530 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
6531 	error = copyin(nv->data, nvlpacked, nv->len);
6532 	if (error)
6533 		ERROUT(error);
6534 
6535 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
6536 	if (nvl == NULL)
6537 		ERROUT(EBADMSG);
6538 
6539 	PFNV_CHK(pf_nvuint64(nvl, "id", &id));
6540 	PFNV_CHK(pf_nvuint64(nvl, "creatorid", &creatorid));
6541 
6542 	s = pf_find_state_byid(id, creatorid);
6543 	if (s == NULL)
6544 		ERROUT(ENOENT);
6545 
6546 	free(nvlpacked, M_NVLIST);
6547 	nvlpacked = NULL;
6548 	nvlist_destroy(nvl);
6549 	nvl = nvlist_create(0);
6550 	if (nvl == NULL)
6551 		ERROUT(ENOMEM);
6552 
6553 	nvls = pf_state_to_nvstate(s);
6554 	if (nvls == NULL)
6555 		ERROUT(ENOMEM);
6556 
6557 	nvlist_add_nvlist(nvl, "state", nvls);
6558 	nvlist_destroy(nvls);
6559 
6560 	nvlpacked = nvlist_pack(nvl, &nv->len);
6561 	if (nvlpacked == NULL)
6562 		ERROUT(ENOMEM);
6563 
6564 	if (nv->size == 0)
6565 		ERROUT(0);
6566 	else if (nv->size < nv->len)
6567 		ERROUT(ENOSPC);
6568 
6569 	error = copyout(nvlpacked, nv->data, nv->len);
6570 
6571 #undef ERROUT
6572 errout:
6573 	if (s != NULL)
6574 		PF_STATE_UNLOCK(s);
6575 	free(nvlpacked, M_NVLIST);
6576 	nvlist_destroy(nvl);
6577 	return (error);
6578 }
6579 
6580 /*
6581  * XXX - Check for version mismatch!!!
6582  */
6583 
6584 /*
6585  * Duplicate pfctl -Fa operation to get rid of as much as we can.
6586  */
6587 static int
6588 shutdown_pf(void)
6589 {
6590 	int error = 0;
6591 	u_int32_t t[5];
6592 	char nn = '\0';
6593 	struct pf_kanchor *anchor, *tmp_anchor;
6594 	struct pf_keth_anchor *eth_anchor, *tmp_eth_anchor;
6595 	int rs_num;
6596 
6597 	do {
6598 		/* Unlink rules of all user defined anchors */
6599 		RB_FOREACH_SAFE(anchor, pf_kanchor_global, &V_pf_anchors,
6600 		    tmp_anchor) {
6601 			for (rs_num = 0; rs_num < PF_RULESET_MAX; ++rs_num) {
6602 				if ((error = pf_begin_rules(&t[rs_num], rs_num,
6603 				    anchor->path)) != 0) {
6604 					DPFPRINTF(PF_DEBUG_MISC, "%s: "
6605 					    "anchor.path=%s rs_num=%d",
6606 					    __func__, anchor->path, rs_num);
6607 					goto error;	/* XXX: rollback? */
6608 				}
6609 			}
6610 			for (rs_num = 0; rs_num < PF_RULESET_MAX; ++rs_num) {
6611 				error = pf_commit_rules(t[rs_num], rs_num,
6612 				    anchor->path);
6613 				MPASS(error == 0);
6614 			}
6615 		}
6616 
6617 		/* Unlink rules of all user defined ether anchors */
6618 		RB_FOREACH_SAFE(eth_anchor, pf_keth_anchor_global,
6619 		    &V_pf_keth_anchors, tmp_eth_anchor) {
6620 			if ((error = pf_begin_eth(&t[0], eth_anchor->path))
6621 			    != 0) {
6622 				DPFPRINTF(PF_DEBUG_MISC, "%s: eth "
6623 				    "anchor.path=%s", __func__,
6624 				    eth_anchor->path);
6625 				goto error;
6626 			}
6627 			error = pf_commit_eth(t[0], eth_anchor->path);
6628 			MPASS(error == 0);
6629 		}
6630 
6631 		if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn))
6632 		    != 0) {
6633 			DPFPRINTF(PF_DEBUG_MISC, "%s: SCRUB", __func__);
6634 			break;
6635 		}
6636 		if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn))
6637 		    != 0) {
6638 			DPFPRINTF(PF_DEBUG_MISC, "%s: FILTER", __func__);
6639 			break;		/* XXX: rollback? */
6640 		}
6641 		if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn))
6642 		    != 0) {
6643 			DPFPRINTF(PF_DEBUG_MISC, "%s: NAT", __func__);
6644 			break;		/* XXX: rollback? */
6645 		}
6646 		if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn))
6647 		    != 0) {
6648 			DPFPRINTF(PF_DEBUG_MISC, "%s: BINAT", __func__);
6649 			break;		/* XXX: rollback? */
6650 		}
6651 		if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn))
6652 		    != 0) {
6653 			DPFPRINTF(PF_DEBUG_MISC, "%s: RDR", __func__);
6654 			break;		/* XXX: rollback? */
6655 		}
6656 
6657 		error = pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn);
6658 		MPASS(error == 0);
6659 		error = pf_commit_rules(t[1], PF_RULESET_FILTER, &nn);
6660 		MPASS(error == 0);
6661 		error = pf_commit_rules(t[2], PF_RULESET_NAT, &nn);
6662 		MPASS(error == 0);
6663 		error = pf_commit_rules(t[3], PF_RULESET_BINAT, &nn);
6664 		MPASS(error == 0);
6665 		error = pf_commit_rules(t[4], PF_RULESET_RDR, &nn);
6666 		MPASS(error == 0);
6667 
6668 		if ((error = pf_clear_tables()) != 0)
6669 			break;
6670 
6671 		if ((error = pf_begin_eth(&t[0], &nn)) != 0) {
6672 			DPFPRINTF(PF_DEBUG_MISC, "%s: eth", __func__);
6673 			break;
6674 		}
6675 		error = pf_commit_eth(t[0], &nn);
6676 		MPASS(error == 0);
6677 
6678 #ifdef ALTQ
6679 		if ((error = pf_begin_altq(&t[0])) != 0) {
6680 			DPFPRINTF(PF_DEBUG_MISC, "%s: ALTQ", __func__);
6681 			break;
6682 		}
6683 		pf_commit_altq(t[0]);
6684 #endif
6685 
6686 		pf_clear_all_states();
6687 
6688 		pf_kill_srcnodes(NULL);
6689 
6690 		for (int i = 0; i < PF_RULESET_MAX; i++) {
6691 			pf_rule_tree_free(pf_main_ruleset.rules[i].active.tree);
6692 			pf_rule_tree_free(pf_main_ruleset.rules[i].inactive.tree);
6693 		}
6694 
6695 		/* Status does not use malloc'ed memory, so no cleanup is needed. */
6696 		/* Fingerprints and interfaces have their own cleanup code. */
6697 	} while (0);
6698 
6699 error:
6700 	return (error);
6701 }
6702 
6703 static pfil_return_t
6704 pf_check_return(int chk, struct mbuf **m)
6705 {
6706 
6707 	switch (chk) {
6708 	case PF_PASS:
6709 		if (*m == NULL)
6710 			return (PFIL_CONSUMED);
6711 		else
6712 			return (PFIL_PASS);
6713 		break;
6714 	default:
6715 		if (*m != NULL) {
6716 			m_freem(*m);
6717 			*m = NULL;
6718 		}
6719 		return (PFIL_DROPPED);
6720 	}
6721 }
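/*
 * Note: PF_PASS with *m == NULL means pf took ownership of the packet
 * (e.g. it was rerouted or deferred), so pfil must treat it as consumed;
 * any other verdict frees the mbuf, if still present, and reports it as
 * dropped.
 */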
6722 
6723 static pfil_return_t
6724 pf_eth_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
6725     void *ruleset __unused, struct inpcb *inp)
6726 {
6727 	int chk;
6728 
6729 	CURVNET_ASSERT_SET();
6730 
6731 	chk = pf_test_eth(PF_IN, flags, ifp, m, inp);
6732 
6733 	return (pf_check_return(chk, m));
6734 }
6735 
6736 static pfil_return_t
6737 pf_eth_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
6738     void *ruleset __unused, struct inpcb *inp)
6739 {
6740 	int chk;
6741 
6742 	CURVNET_ASSERT_SET();
6743 
6744 	chk = pf_test_eth(PF_OUT, flags, ifp, m, inp);
6745 
6746 	return (pf_check_return(chk, m));
6747 }
6748 
6749 #ifdef INET
6750 static pfil_return_t
6751 pf_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
6752     void *ruleset __unused, struct inpcb *inp)
6753 {
6754 	int chk;
6755 
6756 	CURVNET_ASSERT_SET();
6757 
6758 	chk = pf_test(AF_INET, PF_IN, flags, ifp, m, inp, NULL);
6759 
6760 	return (pf_check_return(chk, m));
6761 }
6762 
6763 static pfil_return_t
6764 pf_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
6765     void *ruleset __unused,  struct inpcb *inp)
6766 {
6767 	int chk;
6768 
6769 	CURVNET_ASSERT_SET();
6770 
6771 	chk = pf_test(AF_INET, PF_OUT, flags, ifp, m, inp, NULL);
6772 
6773 	return (pf_check_return(chk, m));
6774 }
6775 #endif
6776 
6777 #ifdef INET6
6778 static pfil_return_t
6779 pf_check6_in(struct mbuf **m, struct ifnet *ifp, int flags,
6780     void *ruleset __unused,  struct inpcb *inp)
6781 {
6782 	int chk;
6783 
6784 	CURVNET_ASSERT_SET();
6785 
6786 	/*
6787 	 * For loopback traffic IPv6 uses the real interface in order to
6788 	 * support scoped addresses. To support stateful filtering we
6789 	 * change this to lo0, as is already the case for IPv4.
6790 	 */
6791 	chk = pf_test(AF_INET6, PF_IN, flags, (*m)->m_flags & M_LOOP ? V_loif : ifp,
6792 	    m, inp, NULL);
6793 
6794 	return (pf_check_return(chk, m));
6795 }
6796 
6797 static pfil_return_t
6798 pf_check6_out(struct mbuf **m, struct ifnet *ifp, int flags,
6799     void *ruleset __unused,  struct inpcb *inp)
6800 {
6801 	int chk;
6802 
6803 	CURVNET_ASSERT_SET();
6804 
6805 	chk = pf_test(AF_INET6, PF_OUT, flags, ifp, m, inp, NULL);
6806 
6807 	return (pf_check_return(chk, m));
6808 }
6809 #endif /* INET6 */
6810 
6811 VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_in_hook);
6812 VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_out_hook);
6813 #define	V_pf_eth_in_hook	VNET(pf_eth_in_hook)
6814 #define	V_pf_eth_out_hook	VNET(pf_eth_out_hook)
6815 
6816 #ifdef INET
6817 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_in_hook);
6818 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_out_hook);
6819 #define	V_pf_ip4_in_hook	VNET(pf_ip4_in_hook)
6820 #define	V_pf_ip4_out_hook	VNET(pf_ip4_out_hook)
6821 #endif
6822 #ifdef INET6
6823 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_in_hook);
6824 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_out_hook);
6825 #define	V_pf_ip6_in_hook	VNET(pf_ip6_in_hook)
6826 #define	V_pf_ip6_out_hook	VNET(pf_ip6_out_hook)
6827 #endif
6828 
6829 static void
6830 hook_pf_eth(void)
6831 {
6832 	struct pfil_hook_args pha = {
6833 		.pa_version = PFIL_VERSION,
6834 		.pa_modname = "pf",
6835 		.pa_type = PFIL_TYPE_ETHERNET,
6836 	};
6837 	struct pfil_link_args pla = {
6838 		.pa_version = PFIL_VERSION,
6839 	};
6840 	int ret __diagused;
6841 
6842 	if (atomic_load_bool(&V_pf_pfil_eth_hooked))
6843 		return;
6844 
6845 	pha.pa_mbuf_chk = pf_eth_check_in;
6846 	pha.pa_flags = PFIL_IN;
6847 	pha.pa_rulname = "eth-in";
6848 	V_pf_eth_in_hook = pfil_add_hook(&pha);
6849 	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
6850 	pla.pa_head = V_link_pfil_head;
6851 	pla.pa_hook = V_pf_eth_in_hook;
6852 	ret = pfil_link(&pla);
6853 	MPASS(ret == 0);
6854 	pha.pa_mbuf_chk = pf_eth_check_out;
6855 	pha.pa_flags = PFIL_OUT;
6856 	pha.pa_rulname = "eth-out";
6857 	V_pf_eth_out_hook = pfil_add_hook(&pha);
6858 	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6859 	pla.pa_head = V_link_pfil_head;
6860 	pla.pa_hook = V_pf_eth_out_hook;
6861 	ret = pfil_link(&pla);
6862 	MPASS(ret == 0);
6863 
6864 	atomic_store_bool(&V_pf_pfil_eth_hooked, true);
6865 }
6866 
6867 static void
6868 hook_pf(void)
6869 {
6870 	struct pfil_hook_args pha = {
6871 		.pa_version = PFIL_VERSION,
6872 		.pa_modname = "pf",
6873 	};
6874 	struct pfil_link_args pla = {
6875 		.pa_version = PFIL_VERSION,
6876 	};
6877 	int ret __diagused;
6878 
6879 	if (atomic_load_bool(&V_pf_pfil_hooked))
6880 		return;
6881 
6882 #ifdef INET
6883 	pha.pa_type = PFIL_TYPE_IP4;
6884 	pha.pa_mbuf_chk = pf_check_in;
6885 	pha.pa_flags = PFIL_IN;
6886 	pha.pa_rulname = "default-in";
6887 	V_pf_ip4_in_hook = pfil_add_hook(&pha);
6888 	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
6889 	pla.pa_head = V_inet_pfil_head;
6890 	pla.pa_hook = V_pf_ip4_in_hook;
6891 	ret = pfil_link(&pla);
6892 	MPASS(ret == 0);
6893 	pha.pa_mbuf_chk = pf_check_out;
6894 	pha.pa_flags = PFIL_OUT;
6895 	pha.pa_rulname = "default-out";
6896 	V_pf_ip4_out_hook = pfil_add_hook(&pha);
6897 	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6898 	pla.pa_head = V_inet_pfil_head;
6899 	pla.pa_hook = V_pf_ip4_out_hook;
6900 	ret = pfil_link(&pla);
6901 	MPASS(ret == 0);
6902 	if (V_pf_filter_local) {
6903 		pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6904 		pla.pa_head = V_inet_local_pfil_head;
6905 		pla.pa_hook = V_pf_ip4_out_hook;
6906 		ret = pfil_link(&pla);
6907 		MPASS(ret == 0);
6908 	}
6909 #endif
6910 #ifdef INET6
6911 	pha.pa_type = PFIL_TYPE_IP6;
6912 	pha.pa_mbuf_chk = pf_check6_in;
6913 	pha.pa_flags = PFIL_IN;
6914 	pha.pa_rulname = "default-in6";
6915 	V_pf_ip6_in_hook = pfil_add_hook(&pha);
6916 	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
6917 	pla.pa_head = V_inet6_pfil_head;
6918 	pla.pa_hook = V_pf_ip6_in_hook;
6919 	ret = pfil_link(&pla);
6920 	MPASS(ret == 0);
6921 	pha.pa_mbuf_chk = pf_check6_out;
6922 	pha.pa_rulname = "default-out6";
6923 	pha.pa_flags = PFIL_OUT;
6924 	V_pf_ip6_out_hook = pfil_add_hook(&pha);
6925 	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6926 	pla.pa_head = V_inet6_pfil_head;
6927 	pla.pa_hook = V_pf_ip6_out_hook;
6928 	ret = pfil_link(&pla);
6929 	MPASS(ret == 0);
6930 	if (V_pf_filter_local) {
6931 		pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6932 		pla.pa_head = V_inet6_local_pfil_head;
6933 		pla.pa_hook = V_pf_ip6_out_hook;
6934 		ret = pfil_link(&pla);
6935 		MPASS(ret == 0);
6936 	}
6937 #endif
6938 
6939 	atomic_store_bool(&V_pf_pfil_hooked, true);
6940 }
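/*
 * Note: when V_pf_filter_local is set, the same output hooks are linked
 * to the inet/inet6 "local" pfil heads as well, so locally originated
 * traffic is run through the out rules too.
 */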
6941 
6942 static void
6943 dehook_pf_eth(void)
6944 {
6945 
6946 	if (!atomic_load_bool(&V_pf_pfil_eth_hooked))
6947 		return;
6948 
6949 	pfil_remove_hook(V_pf_eth_in_hook);
6950 	pfil_remove_hook(V_pf_eth_out_hook);
6951 
6952 	atomic_store_bool(&V_pf_pfil_eth_hooked, false);
6953 }
6954 
6955 static void
6956 dehook_pf(void)
6957 {
6958 
6959 	if (!atomic_load_bool(&V_pf_pfil_hooked))
6960 		return;
6961 
6962 #ifdef INET
6963 	pfil_remove_hook(V_pf_ip4_in_hook);
6964 	pfil_remove_hook(V_pf_ip4_out_hook);
6965 #endif
6966 #ifdef INET6
6967 	pfil_remove_hook(V_pf_ip6_in_hook);
6968 	pfil_remove_hook(V_pf_ip6_out_hook);
6969 #endif
6970 
6971 	atomic_store_bool(&V_pf_pfil_hooked, false);
6972 }
6973 
6974 static void
6975 pf_load_vnet(void)
6976 {
6977 	V_pf_tag_z = uma_zcreate("pf tags", sizeof(struct pf_tagname),
6978 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
6979 
6980 	rm_init_flags(&V_pf_rules_lock, "pf rulesets", RM_RECURSE);
6981 	rm_init_flags(&V_pf_tags_lock, "pf tags and queues", RM_RECURSE);
6982 	sx_init(&V_pf_ioctl_lock, "pf ioctl");
6983 
6984 	pf_init_tagset(&V_pf_tags, &pf_rule_tag_hashsize,
6985 	    PF_RULE_TAG_HASH_SIZE_DEFAULT);
6986 #ifdef ALTQ
6987 	pf_init_tagset(&V_pf_qids, &pf_queue_tag_hashsize,
6988 	    PF_QUEUE_TAG_HASH_SIZE_DEFAULT);
6989 #endif
6990 
6991 	V_pf_keth = &V_pf_main_keth_anchor.ruleset;
6992 
6993 	pfattach_vnet();
6994 	V_pf_vnet_active = 1;
6995 }
6996 
6997 static int
6998 pf_load(void)
6999 {
7000 	int error;
7001 
7002 	sx_init(&pf_end_lock, "pf end thread");
7003 
7004 	pf_mtag_initialize();
7005 
7006 	pf_dev = make_dev(&pf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, PF_NAME);
7007 	if (pf_dev == NULL)
7008 		return (ENOMEM);
7009 
7010 	pf_end_threads = 0;
7011 	error = kproc_create(pf_purge_thread, NULL, &pf_purge_proc, 0, 0, "pf purge");
7012 	if (error != 0)
7013 		return (error);
7014 
7015 	pfi_initialize();
7016 
7017 	return (0);
7018 }
7019 
7020 static void
7021 pf_unload_vnet(void)
7022 {
7023 	int ret __diagused;
7024 
7025 	V_pf_vnet_active = 0;
7026 	V_pf_status.running = 0;
7027 	dehook_pf();
7028 	dehook_pf_eth();
7029 
7030 	PF_RULES_WLOCK();
7031 	pf_syncookies_cleanup();
7032 	shutdown_pf();
7033 	PF_RULES_WUNLOCK();
7034 
7035 	ret = swi_remove(V_pf_swi_cookie);
7036 	MPASS(ret == 0);
7037 	ret = intr_event_destroy(V_pf_swi_ie);
7038 	MPASS(ret == 0);
7039 
7040 	pf_unload_vnet_purge();
7041 
7042 	pf_normalize_cleanup();
7043 	PF_RULES_WLOCK();
7044 	pfi_cleanup_vnet();
7045 	PF_RULES_WUNLOCK();
7046 	pfr_cleanup();
7047 	pf_osfp_flush();
7048 	pf_cleanup();
7049 	if (IS_DEFAULT_VNET(curvnet))
7050 		pf_mtag_cleanup();
7051 
7052 	pf_cleanup_tagset(&V_pf_tags);
7053 #ifdef ALTQ
7054 	pf_cleanup_tagset(&V_pf_qids);
7055 #endif
7056 	uma_zdestroy(V_pf_tag_z);
7057 
7058 #ifdef PF_WANT_32_TO_64_COUNTER
7059 	PF_RULES_WLOCK();
7060 	LIST_REMOVE(V_pf_kifmarker, pfik_allkiflist);
7061 
7062 	MPASS(LIST_EMPTY(&V_pf_allkiflist));
7063 	MPASS(V_pf_allkifcount == 0);
7064 
7065 	LIST_REMOVE(&V_pf_default_rule, allrulelist);
7066 	V_pf_allrulecount--;
7067 	LIST_REMOVE(V_pf_rulemarker, allrulelist);
7068 
7069 	MPASS(LIST_EMPTY(&V_pf_allrulelist));
7070 	MPASS(V_pf_allrulecount == 0);
7071 
7072 	PF_RULES_WUNLOCK();
7073 
7074 	free(V_pf_kifmarker, PFI_MTYPE);
7075 	free(V_pf_rulemarker, M_PFRULE);
7076 #endif
7077 
7078 	/* Free counters last as we updated them during shutdown. */
7079 	pf_counter_u64_deinit(&V_pf_default_rule.evaluations);
7080 	for (int i = 0; i < 2; i++) {
7081 		pf_counter_u64_deinit(&V_pf_default_rule.packets[i]);
7082 		pf_counter_u64_deinit(&V_pf_default_rule.bytes[i]);
7083 	}
7084 	counter_u64_free(V_pf_default_rule.states_cur);
7085 	counter_u64_free(V_pf_default_rule.states_tot);
7086 	for (pf_sn_types_t sn_type = 0; sn_type < PF_SN_MAX; sn_type++)
7087 		counter_u64_free(V_pf_default_rule.src_nodes[sn_type]);
7088 	uma_zfree_pcpu(pf_timestamp_pcpu_zone, V_pf_default_rule.timestamp);
7089 
7090 	for (int i = 0; i < PFRES_MAX; i++)
7091 		counter_u64_free(V_pf_status.counters[i]);
7092 	for (int i = 0; i < KLCNT_MAX; i++)
7093 		counter_u64_free(V_pf_status.lcounters[i]);
7094 	for (int i = 0; i < FCNT_MAX; i++)
7095 		pf_counter_u64_deinit(&V_pf_status.fcounters[i]);
7096 	for (int i = 0; i < SCNT_MAX; i++)
7097 		counter_u64_free(V_pf_status.scounters[i]);
7098 	for (int i = 0; i < NCNT_MAX; i++)
7099 		counter_u64_free(V_pf_status.ncounters[i]);
7100 
7101 	rm_destroy(&V_pf_rules_lock);
7102 	sx_destroy(&V_pf_ioctl_lock);
7103 }
7104 
7105 static void
7106 pf_unload(void *dummy __unused)
7107 {
7108 
7109 	sx_xlock(&pf_end_lock);
7110 	pf_end_threads = 1;
7111 	while (pf_end_threads < 2) {
7112 		wakeup_one(pf_purge_thread);
7113 		sx_sleep(pf_purge_proc, &pf_end_lock, 0, "pftmo", 0);
7114 	}
7115 	sx_xunlock(&pf_end_lock);
7116 
7117 	pf_nl_unregister();
7118 
7119 	if (pf_dev != NULL)
7120 		destroy_dev(pf_dev);
7121 
7122 	pfi_cleanup();
7123 
7124 	sx_destroy(&pf_end_lock);
7125 }
7126 
7127 static void
7128 vnet_pf_init(void *unused __unused)
7129 {
7130 
7131 	pf_load_vnet();
7132 }
7133 VNET_SYSINIT(vnet_pf_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
7134     vnet_pf_init, NULL);
7135 
7136 static void
7137 vnet_pf_uninit(const void *unused __unused)
7138 {
7139 
7140 	pf_unload_vnet();
7141 }
7142 SYSUNINIT(pf_unload, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND, pf_unload, NULL);
7143 VNET_SYSUNINIT(vnet_pf_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
7144     vnet_pf_uninit, NULL);
7145 
7146 static int
7147 pf_modevent(module_t mod, int type, void *data)
7148 {
7149 	int error = 0;
7150 
7151 	switch (type) {
7152 	case MOD_LOAD:
7153 		error = pf_load();
7154 		pf_nl_register();
7155 		break;
7156 	case MOD_UNLOAD:
7157 		/* Handled in SYSUNINIT(pf_unload) to ensure it's done after
7158 		 * the vnet_pf_uninit()s */
7159 		break;
7160 	default:
7161 		error = EINVAL;
7162 		break;
7163 	}
7164 
7165 	return (error);
7166 }
7167 
7168 static moduledata_t pf_mod = {
7169 	"pf",
7170 	pf_modevent,
7171 	0
7172 };
7173 
7174 DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND);
7175 MODULE_DEPEND(pf, netlink, 1, 1, 1);
7176 MODULE_DEPEND(pf, crypto, 1, 1, 1);
7177 MODULE_VERSION(pf, PF_MODVER);
7178