1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2001 Daniel Hartmeier
5  * Copyright (c) 2002,2003 Henning Brauer
6  * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  *
13  *    - Redistributions of source code must retain the above copyright
14  *      notice, this list of conditions and the following disclaimer.
15  *    - Redistributions in binary form must reproduce the above
16  *      copyright notice, this list of conditions and the following
17  *      disclaimer in the documentation and/or other materials provided
18  *      with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
30  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  *
33  * Effort sponsored in part by the Defense Advanced Research Projects
34  * Agency (DARPA) and Air Force Research Laboratory, Air Force
35  * Materiel Command, USAF, under agreement number F30602-01-2-0537.
36  *
37  *	$OpenBSD: pf_ioctl.c,v 1.213 2009/02/15 21:46:12 mbalmer Exp $
38  */
39 
40 #include <sys/cdefs.h>
41 #include "opt_inet.h"
42 #include "opt_inet6.h"
43 #include "opt_bpf.h"
44 #include "opt_pf.h"
45 
46 #include <sys/param.h>
47 #include <sys/_bitset.h>
48 #include <sys/bitset.h>
49 #include <sys/bus.h>
50 #include <sys/conf.h>
51 #include <sys/endian.h>
52 #include <sys/fcntl.h>
53 #include <sys/filio.h>
54 #include <sys/hash.h>
55 #include <sys/interrupt.h>
56 #include <sys/jail.h>
57 #include <sys/kernel.h>
58 #include <sys/kthread.h>
59 #include <sys/lock.h>
60 #include <sys/mbuf.h>
61 #include <sys/module.h>
62 #include <sys/nv.h>
63 #include <sys/proc.h>
64 #include <sys/sdt.h>
65 #include <sys/smp.h>
66 #include <sys/socket.h>
67 #include <sys/sysctl.h>
68 #include <sys/md5.h>
69 #include <sys/ucred.h>
70 
71 #include <net/if.h>
72 #include <net/if_var.h>
73 #include <net/if_private.h>
74 #include <net/vnet.h>
75 #include <net/route.h>
76 #include <net/pfil.h>
77 #include <net/pfvar.h>
78 #include <net/if_pfsync.h>
79 #include <net/if_pflog.h>
80 
81 #include <netinet/in.h>
82 #include <netinet/ip.h>
83 #include <netinet/ip_var.h>
84 #include <netinet6/ip6_var.h>
85 #include <netinet/ip_icmp.h>
86 #include <netpfil/pf/pf_nl.h>
87 #include <netpfil/pf/pf_nv.h>
88 
89 #ifdef INET6
90 #include <netinet/ip6.h>
91 #endif /* INET6 */
92 
93 #ifdef ALTQ
94 #include <net/altq/altq.h>
95 #endif
96 
97 SDT_PROBE_DEFINE3(pf, ioctl, ioctl, error, "int", "int", "int");
98 SDT_PROBE_DEFINE3(pf, ioctl, function, error, "char *", "int", "int");
99 SDT_PROBE_DEFINE2(pf, ioctl, addrule, error, "int", "int");
100 SDT_PROBE_DEFINE2(pf, ioctl, nvchk, error, "int", "int");
101 
102 static struct pf_kpool	*pf_get_kpool(const char *, u_int32_t, u_int8_t,
103 			    u_int32_t, u_int8_t, u_int8_t, u_int8_t, int);
104 
105 static void		 pf_mv_kpool(struct pf_kpalist *, struct pf_kpalist *);
106 static void		 pf_empty_kpool(struct pf_kpalist *);
107 static int		 pfioctl(struct cdev *, u_long, caddr_t, int,
108 			    struct thread *);
109 static int		 pf_begin_eth(uint32_t *, const char *);
110 static int		 pf_rollback_eth(uint32_t, const char *);
111 static int		 pf_commit_eth(uint32_t, const char *);
112 static void		 pf_free_eth_rule(struct pf_keth_rule *);
113 #ifdef ALTQ
114 static int		 pf_begin_altq(u_int32_t *);
115 static int		 pf_rollback_altq(u_int32_t);
116 static int		 pf_commit_altq(u_int32_t);
117 static int		 pf_enable_altq(struct pf_altq *);
118 static int		 pf_disable_altq(struct pf_altq *);
119 static uint16_t		 pf_qname2qid(const char *);
120 static void		 pf_qid_unref(uint16_t);
121 #endif /* ALTQ */
122 static int		 pf_begin_rules(u_int32_t *, int, const char *);
123 static int		 pf_rollback_rules(u_int32_t, int, char *);
124 static int		 pf_setup_pfsync_matching(struct pf_kruleset *);
125 static void		 pf_hash_rule_rolling(MD5_CTX *, struct pf_krule *);
126 static void		 pf_hash_rule(struct pf_krule *);
127 static void		 pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
128 static int		 pf_commit_rules(u_int32_t, int, char *);
129 static int		 pf_addr_setup(struct pf_kruleset *,
130 			    struct pf_addr_wrap *, sa_family_t);
131 static void		 pf_src_node_copy(const struct pf_ksrc_node *,
132 			    struct pf_src_node *);
133 #ifdef ALTQ
134 static int		 pf_export_kaltq(struct pf_altq *,
135 			    struct pfioc_altq_v1 *, size_t);
136 static int		 pf_import_kaltq(struct pfioc_altq_v1 *,
137 			    struct pf_altq *, size_t);
138 #endif /* ALTQ */
139 
140 VNET_DEFINE(struct pf_krule,	pf_default_rule);
141 
142 static __inline int             pf_krule_compare(struct pf_krule *,
143 				    struct pf_krule *);
144 
145 RB_GENERATE(pf_krule_global, pf_krule, entry_global, pf_krule_compare);
146 
147 #ifdef ALTQ
148 VNET_DEFINE_STATIC(int,		pf_altq_running);
149 #define	V_pf_altq_running	VNET(pf_altq_running)
150 #endif
151 
152 #define	TAGID_MAX	 50000
153 struct pf_tagname {
154 	TAILQ_ENTRY(pf_tagname)	namehash_entries;
155 	TAILQ_ENTRY(pf_tagname)	taghash_entries;
156 	char			name[PF_TAG_NAME_SIZE];
157 	uint16_t		tag;
158 	int			ref;
159 };
160 
161 struct pf_tagset {
162 	TAILQ_HEAD(, pf_tagname)	*namehash;
163 	TAILQ_HEAD(, pf_tagname)	*taghash;
164 	unsigned int			 mask;
165 	uint32_t			 seed;
166 	BITSET_DEFINE(, TAGID_MAX)	 avail;
167 };
168 
169 VNET_DEFINE(struct pf_tagset, pf_tags);
170 #define	V_pf_tags	VNET(pf_tags)
171 static unsigned int	pf_rule_tag_hashsize;
172 #define	PF_RULE_TAG_HASH_SIZE_DEFAULT	128
173 SYSCTL_UINT(_net_pf, OID_AUTO, rule_tag_hashsize, CTLFLAG_RDTUN,
174     &pf_rule_tag_hashsize, PF_RULE_TAG_HASH_SIZE_DEFAULT,
175     "Size of pf(4) rule tag hashtable");
176 
177 #ifdef ALTQ
178 VNET_DEFINE(struct pf_tagset, pf_qids);
179 #define	V_pf_qids	VNET(pf_qids)
180 static unsigned int	pf_queue_tag_hashsize;
181 #define	PF_QUEUE_TAG_HASH_SIZE_DEFAULT	128
182 SYSCTL_UINT(_net_pf, OID_AUTO, queue_tag_hashsize, CTLFLAG_RDTUN,
183     &pf_queue_tag_hashsize, PF_QUEUE_TAG_HASH_SIZE_DEFAULT,
184     "Size of pf(4) queue tag hashtable");
185 #endif
186 VNET_DEFINE(uma_zone_t,	 pf_tag_z);
187 #define	V_pf_tag_z		 VNET(pf_tag_z)
188 static MALLOC_DEFINE(M_PFALTQ, "pf_altq", "pf(4) altq configuration db");
189 static MALLOC_DEFINE(M_PFRULE, "pf_rule", "pf(4) rules");
190 
191 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
192 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
193 #endif
194 
195 VNET_DEFINE_STATIC(bool, pf_filter_local) = false;
196 #define V_pf_filter_local	VNET(pf_filter_local)
197 SYSCTL_BOOL(_net_pf, OID_AUTO, filter_local, CTLFLAG_VNET | CTLFLAG_RW,
198     &VNET_NAME(pf_filter_local), false,
199     "Enable filtering for packets delivered to local network stack");
200 
201 #ifdef PF_DEFAULT_TO_DROP
202 VNET_DEFINE_STATIC(bool, default_to_drop) = true;
203 #else
204 VNET_DEFINE_STATIC(bool, default_to_drop);
205 #endif
206 #define	V_default_to_drop VNET(default_to_drop)
207 SYSCTL_BOOL(_net_pf, OID_AUTO, default_to_drop, CTLFLAG_RDTUN | CTLFLAG_VNET,
208     &VNET_NAME(default_to_drop), false,
209     "Make the default rule drop all packets.");
210 
211 static void		 pf_init_tagset(struct pf_tagset *, unsigned int *,
212 			    unsigned int);
213 static void		 pf_cleanup_tagset(struct pf_tagset *);
214 static uint16_t		 tagname2hashindex(const struct pf_tagset *, const char *);
215 static uint16_t		 tag2hashindex(const struct pf_tagset *, uint16_t);
216 static u_int16_t	 tagname2tag(struct pf_tagset *, const char *);
217 static u_int16_t	 pf_tagname2tag(const char *);
218 static void		 tag_unref(struct pf_tagset *, u_int16_t);
219 
220 struct cdev *pf_dev;
221 
222 /*
223  * XXX - These are new and need to be checked when moving to a new version
224  */
225 static void		 pf_clear_all_states(void);
226 static int		 pf_killstates_row(struct pf_kstate_kill *,
227 			    struct pf_idhash *);
228 static int		 pf_killstates_nv(struct pfioc_nv *);
229 static int		 pf_clearstates_nv(struct pfioc_nv *);
230 static int		 pf_getstate(struct pfioc_nv *);
231 static int		 pf_getstatus(struct pfioc_nv *);
232 static int		 pf_clear_tables(void);
233 static void		 pf_kill_srcnodes(struct pfioc_src_node_kill *);
234 static int		 pf_keepcounters(struct pfioc_nv *);
235 static void		 pf_tbladdr_copyout(struct pf_addr_wrap *);
236 
237 /*
238  * Wrapper functions for pfil(9) hooks
239  */
240 static pfil_return_t pf_eth_check_in(struct mbuf **m, struct ifnet *ifp,
241     int flags, void *ruleset __unused, struct inpcb *inp);
242 static pfil_return_t pf_eth_check_out(struct mbuf **m, struct ifnet *ifp,
243     int flags, void *ruleset __unused, struct inpcb *inp);
244 #ifdef INET
245 static pfil_return_t pf_check_in(struct mbuf **m, struct ifnet *ifp,
246     int flags, void *ruleset __unused, struct inpcb *inp);
247 static pfil_return_t pf_check_out(struct mbuf **m, struct ifnet *ifp,
248     int flags, void *ruleset __unused, struct inpcb *inp);
249 #endif
250 #ifdef INET6
251 static pfil_return_t pf_check6_in(struct mbuf **m, struct ifnet *ifp,
252     int flags, void *ruleset __unused, struct inpcb *inp);
253 static pfil_return_t pf_check6_out(struct mbuf **m, struct ifnet *ifp,
254     int flags, void *ruleset __unused, struct inpcb *inp);
255 #endif
256 
257 static void		hook_pf_eth(void);
258 static void		hook_pf(void);
259 static void		dehook_pf_eth(void);
260 static void		dehook_pf(void);
261 static int		shutdown_pf(void);
262 static int		pf_load(void);
263 static void		pf_unload(void);
264 
265 static struct cdevsw pf_cdevsw = {
266 	.d_ioctl =	pfioctl,
267 	.d_name =	PF_NAME,
268 	.d_version =	D_VERSION,
269 };
270 
271 VNET_DEFINE_STATIC(bool, pf_pfil_hooked);
272 #define V_pf_pfil_hooked	VNET(pf_pfil_hooked)
273 VNET_DEFINE_STATIC(bool, pf_pfil_eth_hooked);
274 #define V_pf_pfil_eth_hooked	VNET(pf_pfil_eth_hooked)
275 
276 /*
277  * We need a flag that is neither hooked nor running to know when
278  * the VNET is "valid".  We primarily need this to control (global)
279  * external events, e.g., eventhandlers.
280  */
281 VNET_DEFINE(int, pf_vnet_active);
282 #define V_pf_vnet_active	VNET(pf_vnet_active)
283 
284 int pf_end_threads;
285 struct proc *pf_purge_proc;
286 
287 VNET_DEFINE(struct rmlock, pf_rules_lock);
288 VNET_DEFINE_STATIC(struct sx, pf_ioctl_lock);
289 #define	V_pf_ioctl_lock		VNET(pf_ioctl_lock)
290 struct sx			pf_end_lock;
291 
292 /* pfsync */
293 VNET_DEFINE(pfsync_state_import_t *, pfsync_state_import_ptr);
294 VNET_DEFINE(pfsync_insert_state_t *, pfsync_insert_state_ptr);
295 VNET_DEFINE(pfsync_update_state_t *, pfsync_update_state_ptr);
296 VNET_DEFINE(pfsync_delete_state_t *, pfsync_delete_state_ptr);
297 VNET_DEFINE(pfsync_clear_states_t *, pfsync_clear_states_ptr);
298 VNET_DEFINE(pfsync_defer_t *, pfsync_defer_ptr);
299 VNET_DEFINE(pflow_export_state_t *, pflow_export_state_ptr);
300 pfsync_detach_ifnet_t *pfsync_detach_ifnet_ptr;
301 
302 /* pflog */
303 pflog_packet_t			*pflog_packet_ptr = NULL;
304 
305 /*
306  * Copy a user-provided string, returning an error if truncation would occur.
307  * Avoid scanning past "sz" bytes in the source string since there's no
308  * guarantee that it's nul-terminated.
309  */
310 static int
311 pf_user_strcpy(char *dst, const char *src, size_t sz)
312 {
313 	if (strnlen(src, sz) == sz)
314 		return (EINVAL);
315 	(void)strlcpy(dst, src, sz);
316 	return (0);
317 }
318 
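/*
 * Illustrative sketch (hypothetical caller, not part of this file): unlike
 * a bare strlcpy(), pf_user_strcpy() above rejects input that would be
 * silently truncated, which matters for names that must round-trip back
 * to userspace unchanged:
 *
 *	char qname[PF_QNAME_SIZE];
 *
 *	if (pf_user_strcpy(qname, user_supplied, sizeof(qname)) != 0)
 *		return (EINVAL);
 */
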
319 static void
320 pfattach_vnet(void)
321 {
322 	u_int32_t *my_timeout = V_pf_default_rule.timeout;
323 
324 	bzero(&V_pf_status, sizeof(V_pf_status));
325 
326 	pf_initialize();
327 	pfr_initialize();
328 	pfi_initialize_vnet();
329 	pf_normalize_init();
330 	pf_syncookies_init();
331 
332 	V_pf_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
333 	V_pf_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT;
334 
335 	RB_INIT(&V_pf_anchors);
336 	pf_init_kruleset(&pf_main_ruleset);
337 
338 	pf_init_keth(V_pf_keth);
339 
340 	/* default rule should never be garbage collected */
341 	V_pf_default_rule.entries.tqe_prev = &V_pf_default_rule.entries.tqe_next;
342 	V_pf_default_rule.action = V_default_to_drop ? PF_DROP : PF_PASS;
343 	V_pf_default_rule.nr = (uint32_t)-1;
344 	V_pf_default_rule.rtableid = -1;
345 
346 	pf_counter_u64_init(&V_pf_default_rule.evaluations, M_WAITOK);
347 	for (int i = 0; i < 2; i++) {
348 		pf_counter_u64_init(&V_pf_default_rule.packets[i], M_WAITOK);
349 		pf_counter_u64_init(&V_pf_default_rule.bytes[i], M_WAITOK);
350 	}
351 	V_pf_default_rule.states_cur = counter_u64_alloc(M_WAITOK);
352 	V_pf_default_rule.states_tot = counter_u64_alloc(M_WAITOK);
353 	for (pf_sn_types_t sn_type = 0; sn_type < PF_SN_MAX; sn_type++)
354 		V_pf_default_rule.src_nodes[sn_type] = counter_u64_alloc(M_WAITOK);
355 
356 	V_pf_default_rule.timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
357 	    M_WAITOK | M_ZERO);
358 
359 #ifdef PF_WANT_32_TO_64_COUNTER
360 	V_pf_kifmarker = malloc(sizeof(*V_pf_kifmarker), PFI_MTYPE, M_WAITOK | M_ZERO);
361 	V_pf_rulemarker = malloc(sizeof(*V_pf_rulemarker), M_PFRULE, M_WAITOK | M_ZERO);
362 	PF_RULES_WLOCK();
363 	LIST_INSERT_HEAD(&V_pf_allkiflist, V_pf_kifmarker, pfik_allkiflist);
364 	LIST_INSERT_HEAD(&V_pf_allrulelist, &V_pf_default_rule, allrulelist);
365 	V_pf_allrulecount++;
366 	LIST_INSERT_HEAD(&V_pf_allrulelist, V_pf_rulemarker, allrulelist);
367 	PF_RULES_WUNLOCK();
368 #endif
369 
370 	/* initialize default timeouts */
371 	my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
372 	my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
373 	my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
374 	my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
375 	my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
376 	my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
377 	my_timeout[PFTM_SCTP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
378 	my_timeout[PFTM_SCTP_OPENING] = PFTM_TCP_OPENING_VAL;
379 	my_timeout[PFTM_SCTP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
380 	my_timeout[PFTM_SCTP_CLOSING] = PFTM_TCP_CLOSING_VAL;
381 	my_timeout[PFTM_SCTP_CLOSED] = PFTM_TCP_CLOSED_VAL;
382 	my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
383 	my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
384 	my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
385 	my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
386 	my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
387 	my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
388 	my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
389 	my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
390 	my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
391 	my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
392 	my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
393 	my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
394 	my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
395 	my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;
396 
397 	V_pf_status.debug = PF_DEBUG_URGENT;
398 	/*
399 	 * XXX This is different from OpenBSD, where reassembly is enabled by
400 	 * default. In FreeBSD we expect people to still use scrub rules and
401 	 * switch to the new syntax later. Only when they switch must they
402 	 * explicitly enable reassembly. We could change the default once the
403 	 * scrub rule functionality is hopefully removed some day in the future.
404 	 */
405 	V_pf_status.reass = 0;
406 
407 	V_pf_pfil_hooked = false;
408 	V_pf_pfil_eth_hooked = false;
409 
410 	/* XXX do our best to avoid a conflict */
411 	V_pf_status.hostid = arc4random();
412 
413 	for (int i = 0; i < PFRES_MAX; i++)
414 		V_pf_status.counters[i] = counter_u64_alloc(M_WAITOK);
415 	for (int i = 0; i < KLCNT_MAX; i++)
416 		V_pf_status.lcounters[i] = counter_u64_alloc(M_WAITOK);
417 	for (int i = 0; i < FCNT_MAX; i++)
418 		pf_counter_u64_init(&V_pf_status.fcounters[i], M_WAITOK);
419 	for (int i = 0; i < SCNT_MAX; i++)
420 		V_pf_status.scounters[i] = counter_u64_alloc(M_WAITOK);
421 
422 	if (swi_add(&V_pf_swi_ie, "pf send", pf_intr, curvnet, SWI_NET,
423 	    INTR_MPSAFE, &V_pf_swi_cookie) != 0)
424 		/* XXXGL: leaked all above. */
425 		return;
426 }
427 
428 static struct pf_kpool *
429 pf_get_kpool(const char *anchor, u_int32_t ticket, u_int8_t rule_action,
430     u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
431     u_int8_t check_ticket, int which)
432 {
433 	struct pf_kruleset	*ruleset;
434 	struct pf_krule		*rule;
435 	int			 rs_num;
436 
437 	MPASS(which == PF_RDR || which == PF_NAT || which == PF_RT);
438 
439 	ruleset = pf_find_kruleset(anchor);
440 	if (ruleset == NULL)
441 		return (NULL);
442 	rs_num = pf_get_ruleset_number(rule_action);
443 	if (rs_num >= PF_RULESET_MAX)
444 		return (NULL);
445 	if (active) {
446 		if (check_ticket && ticket !=
447 		    ruleset->rules[rs_num].active.ticket)
448 			return (NULL);
449 		if (r_last)
450 			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
451 			    pf_krulequeue);
452 		else
453 			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
454 	} else {
455 		if (check_ticket && ticket !=
456 		    ruleset->rules[rs_num].inactive.ticket)
457 			return (NULL);
458 		if (r_last)
459 			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
460 			    pf_krulequeue);
461 		else
462 			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
463 	}
464 	if (!r_last) {
465 		while ((rule != NULL) && (rule->nr != rule_number))
466 			rule = TAILQ_NEXT(rule, entries);
467 	}
468 	if (rule == NULL)
469 		return (NULL);
470 
471 	switch (which) {
472 	case PF_RDR:
473 		return (&rule->rdr);
474 	case PF_NAT:
475 		return (&rule->nat);
476 	case PF_RT:
477 		return (&rule->route);
478 	default:
479 		panic("Unknow pool type %d", which);
480 	}
481 }
482 
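/*
 * Sketch of a typical pf_get_kpool() lookup (hypothetical arguments):
 * fetch the redirect pool of active rule number "nr" in an anchor while
 * validating the caller's ticket:
 *
 *	pool = pf_get_kpool(anchor, ticket, PF_PASS, nr,
 *	    0, 1, 1, PF_RDR);
 *
 * where the last four arguments mean: not r_last, search the active
 * list, verify "ticket" against that list, and return the rule's rdr
 * pool rather than its nat or route pool.
 */
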
483 static void
484 pf_mv_kpool(struct pf_kpalist *poola, struct pf_kpalist *poolb)
485 {
486 	struct pf_kpooladdr	*mv_pool_pa;
487 
488 	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
489 		TAILQ_REMOVE(poola, mv_pool_pa, entries);
490 		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
491 	}
492 }
493 
494 static void
495 pf_empty_kpool(struct pf_kpalist *poola)
496 {
497 	struct pf_kpooladdr *pa;
498 
499 	while ((pa = TAILQ_FIRST(poola)) != NULL) {
500 		switch (pa->addr.type) {
501 		case PF_ADDR_DYNIFTL:
502 			pfi_dynaddr_remove(pa->addr.p.dyn);
503 			break;
504 		case PF_ADDR_TABLE:
505 			/* XXX: this could be unfinished pooladdr on pabuf */
506 			if (pa->addr.p.tbl != NULL)
507 				pfr_detach_table(pa->addr.p.tbl);
508 			break;
509 		}
510 		if (pa->kif)
511 			pfi_kkif_unref(pa->kif);
512 		TAILQ_REMOVE(poola, pa, entries);
513 		free(pa, M_PFRULE);
514 	}
515 }
516 
517 static void
518 pf_unlink_rule_locked(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
519 {
520 
521 	PF_RULES_WASSERT();
522 	PF_UNLNKDRULES_ASSERT();
523 
524 	TAILQ_REMOVE(rulequeue, rule, entries);
525 
526 	rule->rule_ref |= PFRULE_REFS;
527 	TAILQ_INSERT_TAIL(&V_pf_unlinked_rules, rule, entries);
528 }
529 
530 static void
531 pf_unlink_rule(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
532 {
533 
534 	PF_RULES_WASSERT();
535 
536 	PF_UNLNKDRULES_LOCK();
537 	pf_unlink_rule_locked(rulequeue, rule);
538 	PF_UNLNKDRULES_UNLOCK();
539 }
540 
541 static void
542 pf_free_eth_rule(struct pf_keth_rule *rule)
543 {
544 	PF_RULES_WASSERT();
545 
546 	if (rule == NULL)
547 		return;
548 
549 	if (rule->tag)
550 		tag_unref(&V_pf_tags, rule->tag);
551 	if (rule->match_tag)
552 		tag_unref(&V_pf_tags, rule->match_tag);
553 #ifdef ALTQ
554 	pf_qid_unref(rule->qid);
555 #endif
556 
557 	if (rule->bridge_to)
558 		pfi_kkif_unref(rule->bridge_to);
559 	if (rule->kif)
560 		pfi_kkif_unref(rule->kif);
561 
562 	if (rule->ipsrc.addr.type == PF_ADDR_TABLE)
563 		pfr_detach_table(rule->ipsrc.addr.p.tbl);
564 	if (rule->ipdst.addr.type == PF_ADDR_TABLE)
565 		pfr_detach_table(rule->ipdst.addr.p.tbl);
566 
567 	counter_u64_free(rule->evaluations);
568 	for (int i = 0; i < 2; i++) {
569 		counter_u64_free(rule->packets[i]);
570 		counter_u64_free(rule->bytes[i]);
571 	}
572 	uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp);
573 	pf_keth_anchor_remove(rule);
574 
575 	free(rule, M_PFRULE);
576 }
577 
578 void
579 pf_free_rule(struct pf_krule *rule)
580 {
581 
582 	PF_RULES_WASSERT();
583 	PF_CONFIG_ASSERT();
584 
585 	if (rule->tag)
586 		tag_unref(&V_pf_tags, rule->tag);
587 	if (rule->match_tag)
588 		tag_unref(&V_pf_tags, rule->match_tag);
589 #ifdef ALTQ
590 	if (rule->pqid != rule->qid)
591 		pf_qid_unref(rule->pqid);
592 	pf_qid_unref(rule->qid);
593 #endif
594 	switch (rule->src.addr.type) {
595 	case PF_ADDR_DYNIFTL:
596 		pfi_dynaddr_remove(rule->src.addr.p.dyn);
597 		break;
598 	case PF_ADDR_TABLE:
599 		pfr_detach_table(rule->src.addr.p.tbl);
600 		break;
601 	}
602 	switch (rule->dst.addr.type) {
603 	case PF_ADDR_DYNIFTL:
604 		pfi_dynaddr_remove(rule->dst.addr.p.dyn);
605 		break;
606 	case PF_ADDR_TABLE:
607 		pfr_detach_table(rule->dst.addr.p.tbl);
608 		break;
609 	}
610 	if (rule->overload_tbl)
611 		pfr_detach_table(rule->overload_tbl);
612 	if (rule->kif)
613 		pfi_kkif_unref(rule->kif);
614 	if (rule->rcv_kif)
615 		pfi_kkif_unref(rule->rcv_kif);
616 	pf_remove_kanchor(rule);
617 	pf_empty_kpool(&rule->rdr.list);
618 	pf_empty_kpool(&rule->nat.list);
619 	pf_empty_kpool(&rule->route.list);
620 
621 	pf_krule_free(rule);
622 }
623 
624 static void
625 pf_init_tagset(struct pf_tagset *ts, unsigned int *tunable_size,
626     unsigned int default_size)
627 {
628 	unsigned int i;
629 	unsigned int hashsize;
630 
631 	if (*tunable_size == 0 || !powerof2(*tunable_size))
632 		*tunable_size = default_size;
633 
634 	hashsize = *tunable_size;
635 	ts->namehash = mallocarray(hashsize, sizeof(*ts->namehash), M_PFHASH,
636 	    M_WAITOK);
637 	ts->taghash = mallocarray(hashsize, sizeof(*ts->taghash), M_PFHASH,
638 	    M_WAITOK);
639 	ts->mask = hashsize - 1;
640 	ts->seed = arc4random();
641 	for (i = 0; i < hashsize; i++) {
642 		TAILQ_INIT(&ts->namehash[i]);
643 		TAILQ_INIT(&ts->taghash[i]);
644 	}
645 	BIT_FILL(TAGID_MAX, &ts->avail);
646 }
647 
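/*
 * Because pf_init_tagset() forces the table size to a power of two, the
 * hash index functions below can reduce a hash value with a cheap
 * bitwise AND instead of a modulo, e.g. (illustrative):
 *
 *	index = murmur3_32_hash(name, len, seed) & (hashsize - 1);
 */
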
648 static void
649 pf_cleanup_tagset(struct pf_tagset *ts)
650 {
651 	unsigned int i;
652 	unsigned int hashsize;
653 	struct pf_tagname *t, *tmp;
654 
655 	/*
656 	 * Only need to clean up one of the hashes as each tag is hashed
657 	 * into each table.
658 	 */
659 	hashsize = ts->mask + 1;
660 	for (i = 0; i < hashsize; i++)
661 		TAILQ_FOREACH_SAFE(t, &ts->namehash[i], namehash_entries, tmp)
662 			uma_zfree(V_pf_tag_z, t);
663 
664 	free(ts->namehash, M_PFHASH);
665 	free(ts->taghash, M_PFHASH);
666 }
667 
668 static uint16_t
669 tagname2hashindex(const struct pf_tagset *ts, const char *tagname)
670 {
671 	size_t len;
672 
673 	len = strnlen(tagname, PF_TAG_NAME_SIZE - 1);
674 	return (murmur3_32_hash(tagname, len, ts->seed) & ts->mask);
675 }
676 
677 static uint16_t
678 tag2hashindex(const struct pf_tagset *ts, uint16_t tag)
679 {
680 
681 	return (tag & ts->mask);
682 }
683 
684 static u_int16_t
685 tagname2tag(struct pf_tagset *ts, const char *tagname)
686 {
687 	struct pf_tagname	*tag;
688 	u_int32_t		 index;
689 	u_int16_t		 new_tagid;
690 
691 	PF_RULES_WASSERT();
692 
693 	index = tagname2hashindex(ts, tagname);
694 	TAILQ_FOREACH(tag, &ts->namehash[index], namehash_entries)
695 		if (strcmp(tagname, tag->name) == 0) {
696 			tag->ref++;
697 			return (tag->tag);
698 		}
699 
700 	/*
701 	 * new entry
702 	 *
703 	 * to avoid fragmentation, we always take the lowest-numbered free
704 	 * slot, found by scanning for the first set bit in the avail bitmap.
705 	 */
706 	new_tagid = BIT_FFS(TAGID_MAX, &ts->avail);
707 	/*
708 	 * Tags are 1-based, with valid tags in the range [1..TAGID_MAX].
709 	 * BIT_FFS() returns a 1-based bit number, with 0 indicating no bits
710 	 * set.  It may also return a bit number greater than TAGID_MAX due
711 	 * to rounding of the number of bits in the vector up to a multiple
712 	 * of the vector word size at declaration/allocation time.
713 	 */
714 	if ((new_tagid == 0) || (new_tagid > TAGID_MAX))
715 		return (0);
716 
717 	/* Mark the tag as in use.  Bits are 0-based for BIT_CLR() */
718 	BIT_CLR(TAGID_MAX, new_tagid - 1, &ts->avail);
719 
720 	/* allocate and fill new struct pf_tagname */
721 	tag = uma_zalloc(V_pf_tag_z, M_NOWAIT);
722 	if (tag == NULL)
723 		return (0);
724 	strlcpy(tag->name, tagname, sizeof(tag->name));
725 	tag->tag = new_tagid;
726 	tag->ref = 1;
727 
728 	/* Insert into namehash */
729 	TAILQ_INSERT_TAIL(&ts->namehash[index], tag, namehash_entries);
730 
731 	/* Insert into taghash */
732 	index = tag2hashindex(ts, new_tagid);
733 	TAILQ_INSERT_TAIL(&ts->taghash[index], tag, taghash_entries);
734 
735 	return (tag->tag);
736 }
737 
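/*
 * Illustrative sketch of the tag reference-counting contract
 * (hypothetical callers): requesting the same name twice yields the
 * same tag with its refcount bumped, so each user must drop its
 * reference independently:
 *
 *	tag = pf_tagname2tag("dmz");	first use: allocates, ref = 1
 *	tag = pf_tagname2tag("dmz");	same tag again, ref = 2
 *	tag_unref(&V_pf_tags, tag);	ref = 1, tag survives
 *	tag_unref(&V_pf_tags, tag);	ref = 0, slot returned to bitmap
 */
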
738 static void
739 tag_unref(struct pf_tagset *ts, u_int16_t tag)
740 {
741 	struct pf_tagname	*t;
742 	uint16_t		 index;
743 
744 	PF_RULES_WASSERT();
745 
746 	index = tag2hashindex(ts, tag);
747 	TAILQ_FOREACH(t, &ts->taghash[index], taghash_entries)
748 		if (tag == t->tag) {
749 			if (--t->ref == 0) {
750 				TAILQ_REMOVE(&ts->taghash[index], t,
751 				    taghash_entries);
752 				index = tagname2hashindex(ts, t->name);
753 				TAILQ_REMOVE(&ts->namehash[index], t,
754 				    namehash_entries);
755 				/* Bits are 0-based for BIT_SET() */
756 				BIT_SET(TAGID_MAX, tag - 1, &ts->avail);
757 				uma_zfree(V_pf_tag_z, t);
758 			}
759 			break;
760 		}
761 }
762 
763 static uint16_t
764 pf_tagname2tag(const char *tagname)
765 {
766 	return (tagname2tag(&V_pf_tags, tagname));
767 }
768 
769 static int
770 pf_begin_eth(uint32_t *ticket, const char *anchor)
771 {
772 	struct pf_keth_rule *rule, *tmp;
773 	struct pf_keth_ruleset *rs;
774 
775 	PF_RULES_WASSERT();
776 
777 	rs = pf_find_or_create_keth_ruleset(anchor);
778 	if (rs == NULL)
779 		return (EINVAL);
780 
781 	/* Purge old inactive rules. */
782 	TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries,
783 	    tmp) {
784 		TAILQ_REMOVE(rs->inactive.rules, rule,
785 		    entries);
786 		pf_free_eth_rule(rule);
787 	}
788 
789 	*ticket = ++rs->inactive.ticket;
790 	rs->inactive.open = 1;
791 
792 	return (0);
793 }
794 
795 static int
796 pf_rollback_eth(uint32_t ticket, const char *anchor)
797 {
798 	struct pf_keth_rule *rule, *tmp;
799 	struct pf_keth_ruleset *rs;
800 
801 	PF_RULES_WASSERT();
802 
803 	rs = pf_find_keth_ruleset(anchor);
804 	if (rs == NULL)
805 		return (EINVAL);
806 
807 	if (!rs->inactive.open ||
808 	    ticket != rs->inactive.ticket)
809 		return (0);
810 
811 	/* Purge old inactive rules. */
812 	TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries,
813 	    tmp) {
814 		TAILQ_REMOVE(rs->inactive.rules, rule, entries);
815 		pf_free_eth_rule(rule);
816 	}
817 
818 	rs->inactive.open = 0;
819 
820 	pf_remove_if_empty_keth_ruleset(rs);
821 
822 	return (0);
823 }
824 
825 #define	PF_SET_SKIP_STEPS(i)					\
826 	do {							\
827 		while (head[i] != cur) {			\
828 			head[i]->skip[i].ptr = cur;		\
829 			head[i] = TAILQ_NEXT(head[i], entries);	\
830 		}						\
831 	} while (0)
832 
833 static void
834 pf_eth_calc_skip_steps(struct pf_keth_ruleq *rules)
835 {
836 	struct pf_keth_rule *cur, *prev, *head[PFE_SKIP_COUNT];
837 	int i;
838 
839 	cur = TAILQ_FIRST(rules);
840 	prev = cur;
841 	for (i = 0; i < PFE_SKIP_COUNT; ++i)
842 		head[i] = cur;
843 	while (cur != NULL) {
844 		if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
845 			PF_SET_SKIP_STEPS(PFE_SKIP_IFP);
846 		if (cur->direction != prev->direction)
847 			PF_SET_SKIP_STEPS(PFE_SKIP_DIR);
848 		if (cur->proto != prev->proto)
849 			PF_SET_SKIP_STEPS(PFE_SKIP_PROTO);
850 		if (memcmp(&cur->src, &prev->src, sizeof(cur->src)) != 0)
851 			PF_SET_SKIP_STEPS(PFE_SKIP_SRC_ADDR);
852 		if (memcmp(&cur->dst, &prev->dst, sizeof(cur->dst)) != 0)
853 			PF_SET_SKIP_STEPS(PFE_SKIP_DST_ADDR);
854 		if (cur->ipsrc.neg != prev->ipsrc.neg ||
855 		    pf_addr_wrap_neq(&cur->ipsrc.addr, &prev->ipsrc.addr))
856 			PF_SET_SKIP_STEPS(PFE_SKIP_SRC_IP_ADDR);
857 		if (cur->ipdst.neg != prev->ipdst.neg ||
858 		    pf_addr_wrap_neq(&cur->ipdst.addr, &prev->ipdst.addr))
859 			PF_SET_SKIP_STEPS(PFE_SKIP_DST_IP_ADDR);
860 
861 		prev = cur;
862 		cur = TAILQ_NEXT(cur, entries);
863 	}
864 	for (i = 0; i < PFE_SKIP_COUNT; ++i)
865 		PF_SET_SKIP_STEPS(i);
866 }
867 
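/*
 * Worked example (illustrative): given three Ethernet rules where only
 * rule 3 changes the interface,
 *
 *	1: on em0 proto 0x0800 ...
 *	2: on em0 proto 0x86dd ...
 *	3: on em1 ...
 *
 * pf_eth_calc_skip_steps() points skip[PFE_SKIP_IFP] of rules 1 and 2
 * at rule 3, so a packet arriving on em1 fails rule 1's interface match
 * and jumps straight to rule 3 without evaluating rule 2 at all.
 */
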
868 static int
869 pf_commit_eth(uint32_t ticket, const char *anchor)
870 {
871 	struct pf_keth_ruleq *rules;
872 	struct pf_keth_ruleset *rs;
873 
874 	rs = pf_find_keth_ruleset(anchor);
875 	if (rs == NULL) {
876 		return (EINVAL);
877 	}
878 
879 	if (!rs->inactive.open ||
880 	    ticket != rs->inactive.ticket)
881 		return (EBUSY);
882 
883 	PF_RULES_WASSERT();
884 
885 	pf_eth_calc_skip_steps(rs->inactive.rules);
886 
887 	rules = rs->active.rules;
888 	atomic_store_ptr(&rs->active.rules, rs->inactive.rules);
889 	rs->inactive.rules = rules;
890 	rs->inactive.ticket = rs->active.ticket;
891 
892 	return (pf_rollback_eth(rs->inactive.ticket,
893 	    rs->anchor ? rs->anchor->path : ""));
894 }
895 
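/*
 * pf_begin_eth()/pf_commit_eth() implement pf's usual two-phase ruleset
 * load. A consumer looks roughly like this (sketch, error handling
 * elided):
 *
 *	uint32_t ticket;
 *
 *	error = pf_begin_eth(&ticket, anchor);	fresh inactive set
 *	... install rules against "ticket" ...
 *	error = pf_commit_eth(ticket, anchor);	atomic swap to active
 *
 * A stale or concurrent ticket makes pf_commit_eth() fail with EBUSY,
 * so a partially loaded ruleset can never replace the active one;
 * pf_rollback_eth() discards the inactive set instead.
 */
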
896 #ifdef ALTQ
897 static uint16_t
898 pf_qname2qid(const char *qname)
899 {
900 	return (tagname2tag(&V_pf_qids, qname));
901 }
902 
903 static void
904 pf_qid_unref(uint16_t qid)
905 {
906 	tag_unref(&V_pf_qids, qid);
907 }
908 
909 static int
910 pf_begin_altq(u_int32_t *ticket)
911 {
912 	struct pf_altq	*altq, *tmp;
913 	int		 error = 0;
914 
915 	PF_RULES_WASSERT();
916 
917 	/* Purge the old altq lists */
918 	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
919 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
920 			/* detach and destroy the discipline */
921 			error = altq_remove(altq);
922 		}
923 		free(altq, M_PFALTQ);
924 	}
925 	TAILQ_INIT(V_pf_altq_ifs_inactive);
926 	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
927 		pf_qid_unref(altq->qid);
928 		free(altq, M_PFALTQ);
929 	}
930 	TAILQ_INIT(V_pf_altqs_inactive);
931 	if (error)
932 		return (error);
933 	*ticket = ++V_ticket_altqs_inactive;
934 	V_altqs_inactive_open = 1;
935 	return (0);
936 }
937 
938 static int
939 pf_rollback_altq(u_int32_t ticket)
940 {
941 	struct pf_altq	*altq, *tmp;
942 	int		 error = 0;
943 
944 	PF_RULES_WASSERT();
945 
946 	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
947 		return (0);
948 	/* Purge the old altq lists */
949 	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
950 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
951 			/* detach and destroy the discipline */
952 			error = altq_remove(altq);
953 		}
954 		free(altq, M_PFALTQ);
955 	}
956 	TAILQ_INIT(V_pf_altq_ifs_inactive);
957 	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
958 		pf_qid_unref(altq->qid);
959 		free(altq, M_PFALTQ);
960 	}
961 	TAILQ_INIT(V_pf_altqs_inactive);
962 	V_altqs_inactive_open = 0;
963 	return (error);
964 }
965 
966 static int
967 pf_commit_altq(u_int32_t ticket)
968 {
969 	struct pf_altqqueue	*old_altqs, *old_altq_ifs;
970 	struct pf_altq		*altq, *tmp;
971 	int			 err, error = 0;
972 
973 	PF_RULES_WASSERT();
974 
975 	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
976 		return (EBUSY);
977 
978 	/* swap altqs, keep the old. */
979 	old_altqs = V_pf_altqs_active;
980 	old_altq_ifs = V_pf_altq_ifs_active;
981 	V_pf_altqs_active = V_pf_altqs_inactive;
982 	V_pf_altq_ifs_active = V_pf_altq_ifs_inactive;
983 	V_pf_altqs_inactive = old_altqs;
984 	V_pf_altq_ifs_inactive = old_altq_ifs;
985 	V_ticket_altqs_active = V_ticket_altqs_inactive;
986 
987 	/* Attach new disciplines */
988 	TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
989 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
990 			/* attach the discipline */
991 			error = altq_pfattach(altq);
992 			if (error == 0 && V_pf_altq_running)
993 				error = pf_enable_altq(altq);
994 			if (error != 0)
995 				return (error);
996 		}
997 	}
998 
999 	/* Purge the old altq lists */
1000 	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
1001 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
1002 			/* detach and destroy the discipline */
1003 			if (V_pf_altq_running)
1004 				error = pf_disable_altq(altq);
1005 			err = altq_pfdetach(altq);
1006 			if (err != 0 && error == 0)
1007 				error = err;
1008 			err = altq_remove(altq);
1009 			if (err != 0 && error == 0)
1010 				error = err;
1011 		}
1012 		free(altq, M_PFALTQ);
1013 	}
1014 	TAILQ_INIT(V_pf_altq_ifs_inactive);
1015 	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
1016 		pf_qid_unref(altq->qid);
1017 		free(altq, M_PFALTQ);
1018 	}
1019 	TAILQ_INIT(V_pf_altqs_inactive);
1020 
1021 	V_altqs_inactive_open = 0;
1022 	return (error);
1023 }
1024 
1025 static int
1026 pf_enable_altq(struct pf_altq *altq)
1027 {
1028 	struct ifnet		*ifp;
1029 	struct tb_profile	 tb;
1030 	int			 error = 0;
1031 
1032 	if ((ifp = ifunit(altq->ifname)) == NULL)
1033 		return (EINVAL);
1034 
1035 	if (ifp->if_snd.altq_type != ALTQT_NONE)
1036 		error = altq_enable(&ifp->if_snd);
1037 
1038 	/* set tokenbucket regulator */
1039 	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
1040 		tb.rate = altq->ifbandwidth;
1041 		tb.depth = altq->tbrsize;
1042 		error = tbr_set(&ifp->if_snd, &tb);
1043 	}
1044 
1045 	return (error);
1046 }
1047 
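/*
 * Note on pf_enable_altq(): altq_enable() only activates the queueing
 * discipline; it is the token bucket regulator installed via tbr_set()
 * that actually limits the interface to altq->ifbandwidth. Both steps
 * are needed before a freshly committed queue shapes traffic.
 */
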
1048 static int
1049 pf_disable_altq(struct pf_altq *altq)
1050 {
1051 	struct ifnet		*ifp;
1052 	struct tb_profile	 tb;
1053 	int			 error;
1054 
1055 	if ((ifp = ifunit(altq->ifname)) == NULL)
1056 		return (EINVAL);
1057 
1058 	/*
1059 	 * When the discipline is no longer referenced, it has been
1060 	 * overridden by a new one; if so, just return.
1061 	 */
1062 	if (altq->altq_disc != ifp->if_snd.altq_disc)
1063 		return (0);
1064 
1065 	error = altq_disable(&ifp->if_snd);
1066 
1067 	if (error == 0) {
1068 		/* clear tokenbucket regulator */
1069 		tb.rate = 0;
1070 		error = tbr_set(&ifp->if_snd, &tb);
1071 	}
1072 
1073 	return (error);
1074 }
1075 
1076 static int
1077 pf_altq_ifnet_event_add(struct ifnet *ifp, int remove, u_int32_t ticket,
1078     struct pf_altq *altq)
1079 {
1080 	struct ifnet	*ifp1;
1081 	int		 error = 0;
1082 
1083 	/* Deactivate the interface in question */
1084 	altq->local_flags &= ~PFALTQ_FLAG_IF_REMOVED;
1085 	if ((ifp1 = ifunit(altq->ifname)) == NULL ||
1086 	    (remove && ifp1 == ifp)) {
1087 		altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
1088 	} else {
1089 		error = altq_add(ifp1, altq);
1090 
1091 		if (ticket != V_ticket_altqs_inactive)
1092 			error = EBUSY;
1093 
1094 		if (error)
1095 			free(altq, M_PFALTQ);
1096 	}
1097 
1098 	return (error);
1099 }
1100 
1101 void
1102 pf_altq_ifnet_event(struct ifnet *ifp, int remove)
1103 {
1104 	struct pf_altq	*a1, *a2, *a3;
1105 	u_int32_t	 ticket;
1106 	int		 error = 0;
1107 
1108 	/*
1109 	 * No need to re-evaluate the configuration for events on interfaces
1110 	 * that do not support ALTQ, as it's not possible for such
1111 	 * interfaces to be part of the configuration.
1112 	 */
1113 	if (!ALTQ_IS_READY(&ifp->if_snd))
1114 		return;
1115 
1116 	/* Interrupt userland queue modifications */
1117 	if (V_altqs_inactive_open)
1118 		pf_rollback_altq(V_ticket_altqs_inactive);
1119 
1120 	/* Start new altq ruleset */
1121 	if (pf_begin_altq(&ticket))
1122 		return;
1123 
1124 	/* Copy the current active set */
1125 	TAILQ_FOREACH(a1, V_pf_altq_ifs_active, entries) {
1126 		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
1127 		if (a2 == NULL) {
1128 			error = ENOMEM;
1129 			break;
1130 		}
1131 		bcopy(a1, a2, sizeof(struct pf_altq));
1132 
1133 		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
1134 		if (error)
1135 			break;
1136 
1137 		TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, a2, entries);
1138 	}
1139 	if (error)
1140 		goto out;
1141 	TAILQ_FOREACH(a1, V_pf_altqs_active, entries) {
1142 		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
1143 		if (a2 == NULL) {
1144 			error = ENOMEM;
1145 			break;
1146 		}
1147 		bcopy(a1, a2, sizeof(struct pf_altq));
1148 
1149 		if ((a2->qid = pf_qname2qid(a2->qname)) == 0) {
1150 			error = EBUSY;
1151 			free(a2, M_PFALTQ);
1152 			break;
1153 		}
1154 		a2->altq_disc = NULL;
1155 		TAILQ_FOREACH(a3, V_pf_altq_ifs_inactive, entries) {
1156 			if (strncmp(a3->ifname, a2->ifname,
1157 				IFNAMSIZ) == 0) {
1158 				a2->altq_disc = a3->altq_disc;
1159 				break;
1160 			}
1161 		}
1162 		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
1163 		if (error)
1164 			break;
1165 
1166 		TAILQ_INSERT_TAIL(V_pf_altqs_inactive, a2, entries);
1167 	}
1168 
1169 out:
1170 	if (error != 0)
1171 		pf_rollback_altq(ticket);
1172 	else
1173 		pf_commit_altq(ticket);
1174 }
1175 #endif /* ALTQ */
1176 
1177 static struct pf_krule_global *
1178 pf_rule_tree_alloc(int flags)
1179 {
1180 	struct pf_krule_global *tree;
1181 
1182 	tree = malloc(sizeof(struct pf_krule_global), M_TEMP, flags);
1183 	if (tree == NULL)
1184 		return (NULL);
1185 	RB_INIT(tree);
1186 	return (tree);
1187 }
1188 
1189 static void
1190 pf_rule_tree_free(struct pf_krule_global *tree)
1191 {
1192 
1193 	free(tree, M_TEMP);
1194 }
1195 
1196 static int
1197 pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
1198 {
1199 	struct pf_krule_global *tree;
1200 	struct pf_kruleset	*rs;
1201 	struct pf_krule		*rule;
1202 
1203 	PF_RULES_WASSERT();
1204 
1205 	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1206 		return (EINVAL);
1207 	tree = pf_rule_tree_alloc(M_NOWAIT);
1208 	if (tree == NULL)
1209 		return (ENOMEM);
1210 	rs = pf_find_or_create_kruleset(anchor);
1211 	if (rs == NULL) {
1212 		free(tree, M_TEMP);
1213 		return (EINVAL);
1214 	}
1215 	pf_rule_tree_free(rs->rules[rs_num].inactive.tree);
1216 	rs->rules[rs_num].inactive.tree = tree;
1217 
1218 	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
1219 		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
1220 		rs->rules[rs_num].inactive.rcount--;
1221 	}
1222 	*ticket = ++rs->rules[rs_num].inactive.ticket;
1223 	rs->rules[rs_num].inactive.open = 1;
1224 	return (0);
1225 }
1226 
1227 static int
1228 pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
1229 {
1230 	struct pf_kruleset	*rs;
1231 	struct pf_krule		*rule;
1232 
1233 	PF_RULES_WASSERT();
1234 
1235 	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1236 		return (EINVAL);
1237 	rs = pf_find_kruleset(anchor);
1238 	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
1239 	    rs->rules[rs_num].inactive.ticket != ticket)
1240 		return (0);
1241 	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
1242 		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
1243 		rs->rules[rs_num].inactive.rcount--;
1244 	}
1245 	rs->rules[rs_num].inactive.open = 0;
1246 	return (0);
1247 }
1248 
1249 #define PF_MD5_UPD(st, elm)						\
1250 		MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))
1251 
1252 #define PF_MD5_UPD_STR(st, elm)						\
1253 		MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))
1254 
1255 #define PF_MD5_UPD_HTONL(st, elm, stor) do {				\
1256 		(stor) = htonl((st)->elm);				\
1257 		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
1258 } while (0)
1259 
1260 #define PF_MD5_UPD_HTONS(st, elm, stor) do {				\
1261 		(stor) = htons((st)->elm);				\
1262 		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
1263 } while (0)
1264 
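/*
 * The HTONL/HTONS variants above hash multi-byte fields in network byte
 * order so the resulting ruleset checksum is identical on little- and
 * big-endian hosts, which lets pfsync peers compare checksums to detect
 * ruleset mismatches. Expanded, PF_MD5_UPD_HTONL(rule, prob, y) is
 * roughly:
 *
 *	y = htonl(rule->prob);
 *	MD5Update(ctx, (u_int8_t *)&y, sizeof(u_int32_t));
 */
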
1265 static void
1266 pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
1267 {
1268 	PF_MD5_UPD(pfr, addr.type);
1269 	switch (pfr->addr.type) {
1270 		case PF_ADDR_DYNIFTL:
1271 			PF_MD5_UPD(pfr, addr.v.ifname);
1272 			PF_MD5_UPD(pfr, addr.iflags);
1273 			break;
1274 		case PF_ADDR_TABLE:
1275 			if (strncmp(pfr->addr.v.tblname, PF_OPTIMIZER_TABLE_PFX,
1276 			    strlen(PF_OPTIMIZER_TABLE_PFX)))
1277 				PF_MD5_UPD(pfr, addr.v.tblname);
1278 			break;
1279 		case PF_ADDR_ADDRMASK:
1280 			/* XXX ignore af? */
1281 			PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
1282 			PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
1283 			break;
1284 	}
1285 
1286 	PF_MD5_UPD(pfr, port[0]);
1287 	PF_MD5_UPD(pfr, port[1]);
1288 	PF_MD5_UPD(pfr, neg);
1289 	PF_MD5_UPD(pfr, port_op);
1290 }
1291 
1292 static void
1293 pf_hash_rule_rolling(MD5_CTX *ctx, struct pf_krule *rule)
1294 {
1295 	u_int16_t x;
1296 	u_int32_t y;
1297 
1298 	pf_hash_rule_addr(ctx, &rule->src);
1299 	pf_hash_rule_addr(ctx, &rule->dst);
1300 	for (int i = 0; i < PF_RULE_MAX_LABEL_COUNT; i++)
1301 		PF_MD5_UPD_STR(rule, label[i]);
1302 	PF_MD5_UPD_STR(rule, ifname);
1303 	PF_MD5_UPD_STR(rule, rcv_ifname);
1304 	PF_MD5_UPD_STR(rule, match_tagname);
1305 	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
1306 	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
1307 	PF_MD5_UPD_HTONL(rule, prob, y);
1308 	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
1309 	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
1310 	PF_MD5_UPD(rule, uid.op);
1311 	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
1312 	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
1313 	PF_MD5_UPD(rule, gid.op);
1314 	PF_MD5_UPD_HTONL(rule, rule_flag, y);
1315 	PF_MD5_UPD(rule, action);
1316 	PF_MD5_UPD(rule, direction);
1317 	PF_MD5_UPD(rule, af);
1318 	PF_MD5_UPD(rule, quick);
1319 	PF_MD5_UPD(rule, ifnot);
1320 	PF_MD5_UPD(rule, rcvifnot);
1321 	PF_MD5_UPD(rule, match_tag_not);
1322 	PF_MD5_UPD(rule, natpass);
1323 	PF_MD5_UPD(rule, keep_state);
1324 	PF_MD5_UPD(rule, proto);
1325 	PF_MD5_UPD(rule, type);
1326 	PF_MD5_UPD(rule, code);
1327 	PF_MD5_UPD(rule, flags);
1328 	PF_MD5_UPD(rule, flagset);
1329 	PF_MD5_UPD(rule, allow_opts);
1330 	PF_MD5_UPD(rule, rt);
1331 	PF_MD5_UPD(rule, tos);
1332 	PF_MD5_UPD(rule, scrub_flags);
1333 	PF_MD5_UPD(rule, min_ttl);
1334 	PF_MD5_UPD(rule, set_tos);
1335 	if (rule->anchor != NULL)
1336 		PF_MD5_UPD_STR(rule, anchor->path);
1337 }
1338 
1339 static void
1340 pf_hash_rule(struct pf_krule *rule)
1341 {
1342 	MD5_CTX		ctx;
1343 
1344 	MD5Init(&ctx);
1345 	pf_hash_rule_rolling(&ctx, rule);
1346 	MD5Final(rule->md5sum, &ctx);
1347 }
1348 
1349 static int
1350 pf_krule_compare(struct pf_krule *a, struct pf_krule *b)
1351 {
1352 
1353 	return (memcmp(a->md5sum, b->md5sum, PF_MD5_DIGEST_LENGTH));
1354 }
1355 
1356 static int
1357 pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
1358 {
1359 	struct pf_kruleset	*rs;
1360 	struct pf_krule		*rule, *old_rule;
1361 	struct pf_krulequeue	*old_rules;
1362 	struct pf_krule_global  *old_tree;
1363 	int			 error;
1364 	u_int32_t		 old_rcount;
1365 
1366 	PF_RULES_WASSERT();
1367 
1368 	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1369 		return (EINVAL);
1370 	rs = pf_find_kruleset(anchor);
1371 	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
1372 	    ticket != rs->rules[rs_num].inactive.ticket)
1373 		return (EBUSY);
1374 
1375 	/* Calculate checksum for the main ruleset */
1376 	if (rs == &pf_main_ruleset) {
1377 		error = pf_setup_pfsync_matching(rs);
1378 		if (error != 0)
1379 			return (error);
1380 	}
1381 
1382 	/* Swap rules, keep the old. */
1383 	old_rules = rs->rules[rs_num].active.ptr;
1384 	old_rcount = rs->rules[rs_num].active.rcount;
1385 	old_tree = rs->rules[rs_num].active.tree;
1386 
1387 	rs->rules[rs_num].active.ptr =
1388 	    rs->rules[rs_num].inactive.ptr;
1389 	rs->rules[rs_num].active.tree =
1390 	    rs->rules[rs_num].inactive.tree;
1391 	rs->rules[rs_num].active.rcount =
1392 	    rs->rules[rs_num].inactive.rcount;
1393 
1394 	/* Attempt to preserve counter information. */
1395 	if (V_pf_status.keep_counters && old_tree != NULL) {
1396 		TAILQ_FOREACH(rule, rs->rules[rs_num].active.ptr,
1397 		    entries) {
1398 			old_rule = RB_FIND(pf_krule_global, old_tree, rule);
1399 			if (old_rule == NULL) {
1400 				continue;
1401 			}
1402 			pf_counter_u64_critical_enter();
1403 			pf_counter_u64_rollup_protected(&rule->evaluations,
1404 			    pf_counter_u64_fetch(&old_rule->evaluations));
1405 			pf_counter_u64_rollup_protected(&rule->packets[0],
1406 			    pf_counter_u64_fetch(&old_rule->packets[0]));
1407 			pf_counter_u64_rollup_protected(&rule->packets[1],
1408 			    pf_counter_u64_fetch(&old_rule->packets[1]));
1409 			pf_counter_u64_rollup_protected(&rule->bytes[0],
1410 			    pf_counter_u64_fetch(&old_rule->bytes[0]));
1411 			pf_counter_u64_rollup_protected(&rule->bytes[1],
1412 			    pf_counter_u64_fetch(&old_rule->bytes[1]));
1413 			pf_counter_u64_critical_exit();
1414 		}
1415 	}
1416 
1417 	rs->rules[rs_num].inactive.ptr = old_rules;
1418 	rs->rules[rs_num].inactive.tree = NULL; /* important for pf_ioctl_addrule */
1419 	rs->rules[rs_num].inactive.rcount = old_rcount;
1420 
1421 	rs->rules[rs_num].active.ticket =
1422 	    rs->rules[rs_num].inactive.ticket;
1423 	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);
1424 
1425 	/* Purge the old rule list. */
1426 	PF_UNLNKDRULES_LOCK();
1427 	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
1428 		pf_unlink_rule_locked(old_rules, rule);
1429 	PF_UNLNKDRULES_UNLOCK();
1430 	rs->rules[rs_num].inactive.rcount = 0;
1431 	rs->rules[rs_num].inactive.open = 0;
1432 	pf_remove_if_empty_kruleset(rs);
1433 	free(old_tree, M_TEMP);
1434 
1435 	return (0);
1436 }
1437 
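/*
 * Counter preservation in pf_commit_rules() hinges on pf_hash_rule():
 * two rules are considered the same across a reload exactly when their
 * MD5 digests match (pf_krule_compare() orders the tree by digest), so
 * RB_FIND() against the old tree locates the previous incarnation of
 * each surviving rule and its evaluation/packet/byte counters are
 * rolled into the new rule before the old list is purged.
 */
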
1438 static int
1439 pf_setup_pfsync_matching(struct pf_kruleset *rs)
1440 {
1441 	MD5_CTX			 ctx;
1442 	struct pf_krule		*rule;
1443 	int			 rs_cnt;
1444 	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];
1445 
1446 	MD5Init(&ctx);
1447 	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
1448 		/* XXX PF_RULESET_SCRUB as well? */
1449 		if (rs_cnt == PF_RULESET_SCRUB)
1450 			continue;
1451 
1452 		if (rs->rules[rs_cnt].inactive.rcount) {
1453 			TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
1454 			    entries) {
1455 				pf_hash_rule_rolling(&ctx, rule);
1456 			}
1457 		}
1458 	}
1459 
1460 	MD5Final(digest, &ctx);
1461 	memcpy(V_pf_status.pf_chksum, digest, sizeof(V_pf_status.pf_chksum));
1462 	return (0);
1463 }
1464 
1465 static int
1466 pf_eth_addr_setup(struct pf_keth_ruleset *ruleset, struct pf_addr_wrap *addr)
1467 {
1468 	int error = 0;
1469 
1470 	switch (addr->type) {
1471 	case PF_ADDR_TABLE:
1472 		addr->p.tbl = pfr_eth_attach_table(ruleset, addr->v.tblname);
1473 		if (addr->p.tbl == NULL)
1474 			error = ENOMEM;
1475 		break;
1476 	default:
1477 		error = EINVAL;
1478 	}
1479 
1480 	return (error);
1481 }
1482 
1483 static int
1484 pf_addr_setup(struct pf_kruleset *ruleset, struct pf_addr_wrap *addr,
1485     sa_family_t af)
1486 {
1487 	int error = 0;
1488 
1489 	switch (addr->type) {
1490 	case PF_ADDR_TABLE:
1491 		addr->p.tbl = pfr_attach_table(ruleset, addr->v.tblname);
1492 		if (addr->p.tbl == NULL)
1493 			error = ENOMEM;
1494 		break;
1495 	case PF_ADDR_DYNIFTL:
1496 		error = pfi_dynaddr_setup(addr, af);
1497 		break;
1498 	}
1499 
1500 	return (error);
1501 }
1502 
1503 void
1504 pf_addr_copyout(struct pf_addr_wrap *addr)
1505 {
1506 
1507 	switch (addr->type) {
1508 	case PF_ADDR_DYNIFTL:
1509 		pfi_dynaddr_copyout(addr);
1510 		break;
1511 	case PF_ADDR_TABLE:
1512 		pf_tbladdr_copyout(addr);
1513 		break;
1514 	}
1515 }
1516 
1517 static void
1518 pf_src_node_copy(const struct pf_ksrc_node *in, struct pf_src_node *out)
1519 {
1520 	int	secs = time_uptime;
1521 
1522 	bzero(out, sizeof(struct pf_src_node));
1523 
1524 	bcopy(&in->addr, &out->addr, sizeof(struct pf_addr));
1525 	bcopy(&in->raddr, &out->raddr, sizeof(struct pf_addr));
1526 
1527 	if (in->rule != NULL)
1528 		out->rule.nr = in->rule->nr;
1529 
1530 	for (int i = 0; i < 2; i++) {
1531 		out->bytes[i] = counter_u64_fetch(in->bytes[i]);
1532 		out->packets[i] = counter_u64_fetch(in->packets[i]);
1533 	}
1534 
1535 	out->states = in->states;
1536 	out->conn = in->conn;
1537 	out->af = in->af;
1538 	out->ruletype = in->ruletype;
1539 
1540 	out->creation = secs - in->creation;
1541 	if (out->expire > secs)
1542 		out->expire -= secs;
1543 	else
1544 		out->expire = 0;
1545 
1546 	/* Adjust the connection rate estimate. */
1547 	out->conn_rate.limit = in->conn_rate.limit;
1548 	out->conn_rate.seconds = in->conn_rate.seconds;
1549 	/* If there's no limit, there's no counter_rate. */
1550 	if (in->conn_rate.cr != NULL)
1551 		out->conn_rate.count = counter_rate_get(in->conn_rate.cr);
1552 }
1553 
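/*
 * Note on pf_src_node_copy(): the kernel keeps creation/expire as
 * absolute time_uptime values, but they are exported relative to "now".
 * For example (illustrative), a node created 30 seconds ago with 60
 * seconds of lifetime left reads back in userspace as creation = 30,
 * expire = 60.
 */
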
1554 #ifdef ALTQ
1555 /*
1556  * Handle export of struct pf_kaltq to user binaries that may be using any
1557  * version of struct pf_altq.
1558  */
1559 static int
1560 pf_export_kaltq(struct pf_altq *q, struct pfioc_altq_v1 *pa, size_t ioc_size)
1561 {
1562 	u_int32_t version;
1563 
1564 	if (ioc_size == sizeof(struct pfioc_altq_v0))
1565 		version = 0;
1566 	else
1567 		version = pa->version;
1568 
1569 	if (version > PFIOC_ALTQ_VERSION)
1570 		return (EINVAL);
1571 
1572 #define ASSIGN(x) exported_q->x = q->x
1573 #define COPY(x) \
1574 	bcopy(&q->x, &exported_q->x, min(sizeof(q->x), sizeof(exported_q->x)))
1575 #define SATU16(x) (u_int32_t)uqmin((x), USHRT_MAX)
1576 #define SATU32(x) (u_int32_t)uqmin((x), UINT_MAX)
1577 
1578 	switch (version) {
1579 	case 0: {
1580 		struct pf_altq_v0 *exported_q =
1581 		    &((struct pfioc_altq_v0 *)pa)->altq;
1582 
1583 		COPY(ifname);
1584 
1585 		ASSIGN(scheduler);
1586 		ASSIGN(tbrsize);
1587 		exported_q->tbrsize = SATU16(q->tbrsize);
1588 		exported_q->ifbandwidth = SATU32(q->ifbandwidth);
1589 
1590 		COPY(qname);
1591 		COPY(parent);
1592 		ASSIGN(parent_qid);
1593 		exported_q->bandwidth = SATU32(q->bandwidth);
1594 		ASSIGN(priority);
1595 		ASSIGN(local_flags);
1596 
1597 		ASSIGN(qlimit);
1598 		ASSIGN(flags);
1599 
1600 		if (q->scheduler == ALTQT_HFSC) {
1601 #define ASSIGN_OPT(x) exported_q->pq_u.hfsc_opts.x = q->pq_u.hfsc_opts.x
1602 #define ASSIGN_OPT_SATU32(x) exported_q->pq_u.hfsc_opts.x = \
1603 			    SATU32(q->pq_u.hfsc_opts.x)
1604 
1605 			ASSIGN_OPT_SATU32(rtsc_m1);
1606 			ASSIGN_OPT(rtsc_d);
1607 			ASSIGN_OPT_SATU32(rtsc_m2);
1608 
1609 			ASSIGN_OPT_SATU32(lssc_m1);
1610 			ASSIGN_OPT(lssc_d);
1611 			ASSIGN_OPT_SATU32(lssc_m2);
1612 
1613 			ASSIGN_OPT_SATU32(ulsc_m1);
1614 			ASSIGN_OPT(ulsc_d);
1615 			ASSIGN_OPT_SATU32(ulsc_m2);
1616 
1617 			ASSIGN_OPT(flags);
1618 
1619 #undef ASSIGN_OPT
1620 #undef ASSIGN_OPT_SATU32
1621 		} else
1622 			COPY(pq_u);
1623 
1624 		ASSIGN(qid);
1625 		break;
1626 	}
1627 	case 1:	{
1628 		struct pf_altq_v1 *exported_q =
1629 		    &((struct pfioc_altq_v1 *)pa)->altq;
1630 
1631 		COPY(ifname);
1632 
1633 		ASSIGN(scheduler);
1634 		ASSIGN(tbrsize);
1635 		ASSIGN(ifbandwidth);
1636 
1637 		COPY(qname);
1638 		COPY(parent);
1639 		ASSIGN(parent_qid);
1640 		ASSIGN(bandwidth);
1641 		ASSIGN(priority);
1642 		ASSIGN(local_flags);
1643 
1644 		ASSIGN(qlimit);
1645 		ASSIGN(flags);
1646 		COPY(pq_u);
1647 
1648 		ASSIGN(qid);
1649 		break;
1650 	}
1651 	default:
1652 		panic("%s: unhandled struct pfioc_altq version", __func__);
1653 		break;
1654 	}
1655 
1656 #undef ASSIGN
1657 #undef COPY
1658 #undef SATU16
1659 #undef SATU32
1660 
1661 	return (0);
1662 }
1663 
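/*
 * Illustrative sketch of the version negotiation in pf_export_kaltq()
 * (hypothetical userspace caller): an old binary that only knows struct
 * pfioc_altq_v0 issues the v0 ioctl, so ioc_size matches the v0 struct
 * and version 0 is selected; newer binaries state their version
 * explicitly:
 *
 *	struct pfioc_altq_v1 pa = { .version = PFIOC_ALTQ_VERSION };
 *
 *	ioctl(dev, DIOCGETALTQV1, &pa);
 *
 * Fields that shrank between versions are saturated via SATU16() and
 * SATU32() rather than silently truncated on export.
 */
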
1664 /*
1665  * Handle import to struct pf_kaltq of struct pf_altq from user binaries
1666  * that may be using any version of it.
1667  */
1668 static int
1669 pf_import_kaltq(struct pfioc_altq_v1 *pa, struct pf_altq *q, size_t ioc_size)
1670 {
1671 	u_int32_t version;
1672 
1673 	if (ioc_size == sizeof(struct pfioc_altq_v0))
1674 		version = 0;
1675 	else
1676 		version = pa->version;
1677 
1678 	if (version > PFIOC_ALTQ_VERSION)
1679 		return (EINVAL);
1680 
1681 #define ASSIGN(x) q->x = imported_q->x
1682 #define COPY(x) \
1683 	bcopy(&imported_q->x, &q->x, min(sizeof(imported_q->x), sizeof(q->x)))
1684 
1685 	switch (version) {
1686 	case 0: {
1687 		struct pf_altq_v0 *imported_q =
1688 		    &((struct pfioc_altq_v0 *)pa)->altq;
1689 
1690 		COPY(ifname);
1691 
1692 		ASSIGN(scheduler);
1693 		ASSIGN(tbrsize); /* 16-bit -> 32-bit */
1694 		ASSIGN(ifbandwidth); /* 32-bit -> 64-bit */
1695 
1696 		COPY(qname);
1697 		COPY(parent);
1698 		ASSIGN(parent_qid);
1699 		ASSIGN(bandwidth); /* 32-bit -> 64-bit */
1700 		ASSIGN(priority);
1701 		ASSIGN(local_flags);
1702 
1703 		ASSIGN(qlimit);
1704 		ASSIGN(flags);
1705 
1706 		if (imported_q->scheduler == ALTQT_HFSC) {
1707 #define ASSIGN_OPT(x) q->pq_u.hfsc_opts.x = imported_q->pq_u.hfsc_opts.x
1708 
1709 			/*
1710 			 * The m1 and m2 parameters are being copied from
1711 			 * 32-bit to 64-bit.
1712 			 */
1713 			ASSIGN_OPT(rtsc_m1);
1714 			ASSIGN_OPT(rtsc_d);
1715 			ASSIGN_OPT(rtsc_m2);
1716 
1717 			ASSIGN_OPT(lssc_m1);
1718 			ASSIGN_OPT(lssc_d);
1719 			ASSIGN_OPT(lssc_m2);
1720 
1721 			ASSIGN_OPT(ulsc_m1);
1722 			ASSIGN_OPT(ulsc_d);
1723 			ASSIGN_OPT(ulsc_m2);
1724 
1725 			ASSIGN_OPT(flags);
1726 
1727 #undef ASSIGN_OPT
1728 		} else
1729 			COPY(pq_u);
1730 
1731 		ASSIGN(qid);
1732 		break;
1733 	}
1734 	case 1: {
1735 		struct pf_altq_v1 *imported_q =
1736 		    &((struct pfioc_altq_v1 *)pa)->altq;
1737 
1738 		COPY(ifname);
1739 
1740 		ASSIGN(scheduler);
1741 		ASSIGN(tbrsize);
1742 		ASSIGN(ifbandwidth);
1743 
1744 		COPY(qname);
1745 		COPY(parent);
1746 		ASSIGN(parent_qid);
1747 		ASSIGN(bandwidth);
1748 		ASSIGN(priority);
1749 		ASSIGN(local_flags);
1750 
1751 		ASSIGN(qlimit);
1752 		ASSIGN(flags);
1753 		COPY(pq_u);
1754 
1755 		ASSIGN(qid);
1756 		break;
1757 	}
1758 	default:
1759 		panic("%s: unhandled struct pfioc_altq version", __func__);
1760 		break;
1761 	}
1762 
1763 #undef ASSIGN
1764 #undef COPY
1765 
1766 	return (0);
1767 }
1768 
1769 static struct pf_altq *
1770 pf_altq_get_nth_active(u_int32_t n)
1771 {
1772 	struct pf_altq		*altq;
1773 	u_int32_t		 nr;
1774 
1775 	nr = 0;
1776 	TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
1777 		if (nr == n)
1778 			return (altq);
1779 		nr++;
1780 	}
1781 
1782 	TAILQ_FOREACH(altq, V_pf_altqs_active, entries) {
1783 		if (nr == n)
1784 			return (altq);
1785 		nr++;
1786 	}
1787 
1788 	return (NULL);
1789 }
1790 #endif /* ALTQ */
1791 
1792 struct pf_krule *
1793 pf_krule_alloc(void)
1794 {
1795 	struct pf_krule *rule;
1796 
1797 	rule = malloc(sizeof(struct pf_krule), M_PFRULE, M_WAITOK | M_ZERO);
1798 	mtx_init(&rule->nat.mtx, "pf_krule_nat_pool", NULL, MTX_DEF);
1799 	mtx_init(&rule->rdr.mtx, "pf_krule_rdr_pool", NULL, MTX_DEF);
1800 	mtx_init(&rule->route.mtx, "pf_krule_route_pool", NULL, MTX_DEF);
1801 	rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
1802 	    M_WAITOK | M_ZERO);
1803 	return (rule);
1804 }
1805 
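/*
 * pf_krule_alloc() and pf_krule_free() (below) form a strict pair: the
 * free side destroys the three pool mutexes and releases the per-CPU
 * timestamp allocation made here. Since M_WAITOK allocations cannot
 * fail, callers need no NULL check (sketch):
 *
 *	struct pf_krule *rule = pf_krule_alloc();
 *	...
 *	if (error != 0)
 *		pf_krule_free(rule);
 */
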
1806 void
1807 pf_krule_free(struct pf_krule *rule)
1808 {
1809 #ifdef PF_WANT_32_TO_64_COUNTER
1810 	bool wowned;
1811 #endif
1812 
1813 	if (rule == NULL)
1814 		return;
1815 
1816 #ifdef PF_WANT_32_TO_64_COUNTER
1817 	if (rule->allrulelinked) {
1818 		wowned = PF_RULES_WOWNED();
1819 		if (!wowned)
1820 			PF_RULES_WLOCK();
1821 		LIST_REMOVE(rule, allrulelist);
1822 		V_pf_allrulecount--;
1823 		if (!wowned)
1824 			PF_RULES_WUNLOCK();
1825 	}
1826 #endif
1827 
1828 	pf_counter_u64_deinit(&rule->evaluations);
1829 	for (int i = 0; i < 2; i++) {
1830 		pf_counter_u64_deinit(&rule->packets[i]);
1831 		pf_counter_u64_deinit(&rule->bytes[i]);
1832 	}
1833 	counter_u64_free(rule->states_cur);
1834 	counter_u64_free(rule->states_tot);
1835 	for (pf_sn_types_t sn_type = 0; sn_type < PF_SN_MAX; sn_type++)
1836 		counter_u64_free(rule->src_nodes[sn_type]);
1837 	uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp);
1838 
1839 	mtx_destroy(&rule->nat.mtx);
1840 	mtx_destroy(&rule->rdr.mtx);
1841 	mtx_destroy(&rule->route.mtx);
1842 	free(rule, M_PFRULE);
1843 }
1844 
1845 void
1846 pf_krule_clear_counters(struct pf_krule *rule)
1847 {
1848 	pf_counter_u64_zero(&rule->evaluations);
1849 	for (int i = 0; i < 2; i++) {
1850 		pf_counter_u64_zero(&rule->packets[i]);
1851 		pf_counter_u64_zero(&rule->bytes[i]);
1852 	}
1853 	counter_u64_zero(rule->states_tot);
1854 }
1855 
1856 static void
1857 pf_kpooladdr_to_pooladdr(const struct pf_kpooladdr *kpool,
1858     struct pf_pooladdr *pool)
1859 {
1860 
1861 	bzero(pool, sizeof(*pool));
1862 	bcopy(&kpool->addr, &pool->addr, sizeof(pool->addr));
1863 	strlcpy(pool->ifname, kpool->ifname, sizeof(pool->ifname));
1864 }
1865 
1866 static int
1867 pf_pooladdr_to_kpooladdr(const struct pf_pooladdr *pool,
1868     struct pf_kpooladdr *kpool)
1869 {
1870 	int ret;
1871 
1872 	bzero(kpool, sizeof(*kpool));
1873 	bcopy(&pool->addr, &kpool->addr, sizeof(kpool->addr));
1874 	ret = pf_user_strcpy(kpool->ifname, pool->ifname,
1875 	    sizeof(kpool->ifname));
1876 	return (ret);
1877 }
1878 
1879 static void
1880 pf_pool_to_kpool(const struct pf_pool *pool, struct pf_kpool *kpool)
1881 {
1882 	_Static_assert(sizeof(pool->key) == sizeof(kpool->key), "");
1883 	_Static_assert(sizeof(pool->counter) == sizeof(kpool->counter), "");
1884 
1885 	bcopy(&pool->key, &kpool->key, sizeof(kpool->key));
1886 	bcopy(&pool->counter, &kpool->counter, sizeof(kpool->counter));
1887 
1888 	kpool->tblidx = pool->tblidx;
1889 	kpool->proxy_port[0] = pool->proxy_port[0];
1890 	kpool->proxy_port[1] = pool->proxy_port[1];
1891 	kpool->opts = pool->opts;
1892 }
1893 
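/*
 * Translate the ABI-stable struct pf_rule from userspace into the
 * kernel's struct pf_krule.  Rule addresses are validated first and
 * every string is copied with pf_user_strcpy(), which fails rather
 * than truncating unterminated input.
 */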
1894 static int
1895 pf_rule_to_krule(const struct pf_rule *rule, struct pf_krule *krule)
1896 {
1897 	int ret;
1898 
1899 #ifndef INET
1900 	if (rule->af == AF_INET) {
1901 		return (EAFNOSUPPORT);
1902 	}
1903 #endif /* INET */
1904 #ifndef INET6
1905 	if (rule->af == AF_INET6) {
1906 		return (EAFNOSUPPORT);
1907 	}
1908 #endif /* INET6 */
1909 
1910 	ret = pf_check_rule_addr(&rule->src);
1911 	if (ret != 0)
1912 		return (ret);
1913 	ret = pf_check_rule_addr(&rule->dst);
1914 	if (ret != 0)
1915 		return (ret);
1916 
1917 	bcopy(&rule->src, &krule->src, sizeof(rule->src));
1918 	bcopy(&rule->dst, &krule->dst, sizeof(rule->dst));
1919 
1920 	ret = pf_user_strcpy(krule->label[0], rule->label, sizeof(rule->label));
1921 	if (ret != 0)
1922 		return (ret);
1923 	ret = pf_user_strcpy(krule->ifname, rule->ifname, sizeof(rule->ifname));
1924 	if (ret != 0)
1925 		return (ret);
1926 	ret = pf_user_strcpy(krule->qname, rule->qname, sizeof(rule->qname));
1927 	if (ret != 0)
1928 		return (ret);
1929 	ret = pf_user_strcpy(krule->pqname, rule->pqname, sizeof(rule->pqname));
1930 	if (ret != 0)
1931 		return (ret);
1932 	ret = pf_user_strcpy(krule->tagname, rule->tagname,
1933 	    sizeof(rule->tagname));
1934 	if (ret != 0)
1935 		return (ret);
1936 	ret = pf_user_strcpy(krule->match_tagname, rule->match_tagname,
1937 	    sizeof(rule->match_tagname));
1938 	if (ret != 0)
1939 		return (ret);
1940 	ret = pf_user_strcpy(krule->overload_tblname, rule->overload_tblname,
1941 	    sizeof(rule->overload_tblname));
1942 	if (ret != 0)
1943 		return (ret);
1944 
1945 	pf_pool_to_kpool(&rule->rpool, &krule->rdr);
1946 
1947 	/* Don't allow userspace to set evaluations, packets or bytes. */
1948 	/* kif, anchor, overload_tbl are not copied over. */
1949 
1950 	krule->os_fingerprint = rule->os_fingerprint;
1951 
1952 	krule->rtableid = rule->rtableid;
1953 	/* pf_rule->timeout is smaller than pf_krule->timeout */
1954 	bcopy(rule->timeout, krule->timeout, sizeof(rule->timeout));
1955 	krule->max_states = rule->max_states;
1956 	krule->max_src_nodes = rule->max_src_nodes;
1957 	krule->max_src_states = rule->max_src_states;
1958 	krule->max_src_conn = rule->max_src_conn;
1959 	krule->max_src_conn_rate.limit = rule->max_src_conn_rate.limit;
1960 	krule->max_src_conn_rate.seconds = rule->max_src_conn_rate.seconds;
1961 	krule->qid = rule->qid;
1962 	krule->pqid = rule->pqid;
1963 	krule->nr = rule->nr;
1964 	krule->prob = rule->prob;
1965 	krule->cuid = rule->cuid;
1966 	krule->cpid = rule->cpid;
1967 
1968 	krule->return_icmp = rule->return_icmp;
1969 	krule->return_icmp6 = rule->return_icmp6;
1970 	krule->max_mss = rule->max_mss;
1971 	krule->tag = rule->tag;
1972 	krule->match_tag = rule->match_tag;
1973 	krule->scrub_flags = rule->scrub_flags;
1974 
1975 	bcopy(&rule->uid, &krule->uid, sizeof(krule->uid));
1976 	bcopy(&rule->gid, &krule->gid, sizeof(krule->gid));
1977 
1978 	krule->rule_flag = rule->rule_flag;
1979 	krule->action = rule->action;
1980 	krule->direction = rule->direction;
1981 	krule->log = rule->log;
1982 	krule->logif = rule->logif;
1983 	krule->quick = rule->quick;
1984 	krule->ifnot = rule->ifnot;
1985 	krule->match_tag_not = rule->match_tag_not;
1986 	krule->natpass = rule->natpass;
1987 
1988 	krule->keep_state = rule->keep_state;
1989 	krule->af = rule->af;
1990 	krule->proto = rule->proto;
1991 	krule->type = rule->type;
1992 	krule->code = rule->code;
1993 	krule->flags = rule->flags;
1994 	krule->flagset = rule->flagset;
1995 	krule->min_ttl = rule->min_ttl;
1996 	krule->allow_opts = rule->allow_opts;
1997 	krule->rt = rule->rt;
1998 	krule->return_ttl = rule->return_ttl;
1999 	krule->tos = rule->tos;
2000 	krule->set_tos = rule->set_tos;
2001 
2002 	krule->flush = rule->flush;
2003 	krule->prio = rule->prio;
2004 	krule->set_prio[0] = rule->set_prio[0];
2005 	krule->set_prio[1] = rule->set_prio[1];
2006 
2007 	bcopy(&rule->divert, &krule->divert, sizeof(krule->divert));
2008 
2009 	return (0);
2010 }
2011 
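/*
 * Implement DIOCGETRULES: report one past the highest rule number in
 * the anchor's active ruleset together with the active ticket that
 * subsequent DIOCGETRULE/DIOCGETRULENV calls must present.
 */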
2012 int
2013 pf_ioctl_getrules(struct pfioc_rule *pr)
2014 {
2015 	struct pf_kruleset	*ruleset;
2016 	struct pf_krule		*tail;
2017 	int			 rs_num;
2018 
2019 	PF_RULES_WLOCK();
2020 	ruleset = pf_find_kruleset(pr->anchor);
2021 	if (ruleset == NULL) {
2022 		PF_RULES_WUNLOCK();
2023 		return (EINVAL);
2024 	}
2025 	rs_num = pf_get_ruleset_number(pr->rule.action);
2026 	if (rs_num >= PF_RULESET_MAX) {
2027 		PF_RULES_WUNLOCK();
2028 		return (EINVAL);
2029 	}
2030 	tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
2031 	    pf_krulequeue);
2032 	if (tail)
2033 		pr->nr = tail->nr + 1;
2034 	else
2035 		pr->nr = 0;
2036 	pr->ticket = ruleset->rules[rs_num].active.ticket;
2037 	PF_RULES_WUNLOCK();
2038 
2039 	return (0);
2040 }
2041 
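/*
 * Validate the rule's address family against its af-to translation
 * target: naf may only be set when PFRULE_AFTO is, and an af-to rule
 * must translate to the opposite family.
 */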
2042 static int
2043 pf_rule_checkaf(struct pf_krule *r)
2044 {
2045 	switch (r->af) {
2046 	case 0:
2047 		if (r->rule_flag & PFRULE_AFTO)
2048 			return (EPFNOSUPPORT);
2049 		break;
2050 	case AF_INET:
2051 		if ((r->rule_flag & PFRULE_AFTO) && r->naf != AF_INET6)
2052 			return (EPFNOSUPPORT);
2053 		break;
2054 #ifdef INET6
2055 	case AF_INET6:
2056 		if ((r->rule_flag & PFRULE_AFTO) && r->naf != AF_INET)
2057 			return (EPFNOSUPPORT);
2058 		break;
2059 #endif /* INET6 */
2060 	default:
2061 		return (EPFNOSUPPORT);
2062 	}
2063 
2064 	if ((r->rule_flag & PFRULE_AFTO) == 0 && r->naf != 0)
2065 		return (EPFNOSUPPORT);
2066 
2067 	return (0);
2068 }
2069 
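/*
 * Return nonzero when the port range is degenerate for the given
 * operator: a range that can never match (e.g. "2000:1000" or
 * "2000 >< 1000") or an exclusive range that matches every port.
 */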
2070 static int
2071 pf_validate_range(uint8_t op, uint16_t port[2])
2072 {
2073 	uint16_t a = ntohs(port[0]);
2074 	uint16_t b = ntohs(port[1]);
2075 
2076 	if ((op == PF_OP_RRG && a > b) ||  /* 34:12,  i.e. none */
2077 	    (op == PF_OP_IRG && a >= b) || /* 34><12, i.e. none */
2078 	    (op == PF_OP_XRG && a > b))	   /* 34<>22, i.e. all */
2079 		return (1);
2080 	return (0);
2081 }
2082 
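/*
 * Insert a fully constructed rule into the inactive ruleset selected
 * by the (anchor, ticket) pair, consuming the staged pool addresses.
 * On success the rule is also hashed and entered into the ruleset's
 * duplicate-detection RB tree; on failure the rule and any staged
 * kifs are freed here, so callers must not reuse the pointer.
 */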
2083 int
2084 pf_ioctl_addrule(struct pf_krule *rule, uint32_t ticket,
2085     uint32_t pool_ticket, const char *anchor, const char *anchor_call,
2086     uid_t uid, pid_t pid)
2087 {
2088 	struct pf_kruleset	*ruleset;
2089 	struct pf_krule		*tail;
2090 	struct pf_kpooladdr	*pa;
2091 	struct pfi_kkif		*kif = NULL, *rcv_kif = NULL;
2092 	int			 rs_num;
2093 	int			 error = 0;
2094 
2095 #define	ERROUT(x)		ERROUT_FUNCTION(errout, x)
2096 #define	ERROUT_UNLOCKED(x)	ERROUT_FUNCTION(errout_unlocked, x)
2097 
2098 	if ((rule->return_icmp >> 8) > ICMP_MAXTYPE)
2099 		ERROUT_UNLOCKED(EINVAL);
2100 
2101 	if ((error = pf_rule_checkaf(rule)))
2102 		ERROUT_UNLOCKED(error);
2103 	if (pf_validate_range(rule->src.port_op, rule->src.port))
2104 		ERROUT_UNLOCKED(EINVAL);
2105 	if (pf_validate_range(rule->dst.port_op, rule->dst.port))
2106 		ERROUT_UNLOCKED(EINVAL);
2107 
2108 	if (rule->ifname[0])
2109 		kif = pf_kkif_create(M_WAITOK);
2110 	if (rule->rcv_ifname[0])
2111 		rcv_kif = pf_kkif_create(M_WAITOK);
2112 	pf_counter_u64_init(&rule->evaluations, M_WAITOK);
2113 	for (int i = 0; i < 2; i++) {
2114 		pf_counter_u64_init(&rule->packets[i], M_WAITOK);
2115 		pf_counter_u64_init(&rule->bytes[i], M_WAITOK);
2116 	}
2117 	rule->states_cur = counter_u64_alloc(M_WAITOK);
2118 	rule->states_tot = counter_u64_alloc(M_WAITOK);
2119 	for (pf_sn_types_t sn_type = 0; sn_type < PF_SN_MAX; sn_type++)
2120 		rule->src_nodes[sn_type] = counter_u64_alloc(M_WAITOK);
2121 	rule->cuid = uid;
2122 	rule->cpid = pid;
2123 	TAILQ_INIT(&rule->rdr.list);
2124 	TAILQ_INIT(&rule->nat.list);
2125 	TAILQ_INIT(&rule->route.list);
2126 
2127 	PF_CONFIG_LOCK();
2128 	PF_RULES_WLOCK();
2129 #ifdef PF_WANT_32_TO_64_COUNTER
2130 	LIST_INSERT_HEAD(&V_pf_allrulelist, rule, allrulelist);
2131 	MPASS(!rule->allrulelinked);
2132 	rule->allrulelinked = true;
2133 	V_pf_allrulecount++;
2134 #endif
2135 	ruleset = pf_find_kruleset(anchor);
2136 	if (ruleset == NULL)
2137 		ERROUT(EINVAL);
2138 	rs_num = pf_get_ruleset_number(rule->action);
2139 	if (rs_num >= PF_RULESET_MAX)
2140 		ERROUT(EINVAL);
2141 	if (ticket != ruleset->rules[rs_num].inactive.ticket) {
2142 		DPFPRINTF(PF_DEBUG_MISC,
2143 		    "ticket: %d != [%d]%d", ticket, rs_num,
2144 		    ruleset->rules[rs_num].inactive.ticket);
2145 		ERROUT(EBUSY);
2146 	}
2147 	if (pool_ticket != V_ticket_pabuf) {
2148 		DPFPRINTF(PF_DEBUG_MISC,
2149 		    "pool_ticket: %d != %d", pool_ticket,
2150 		    V_ticket_pabuf);
2151 		ERROUT(EBUSY);
2152 	}
2153 	/*
2154 	 * XXXMJG hack: there is no mechanism to ensure the caller started
2155 	 * the transaction. The ticket checked above may happen to match by
2156 	 * accident, even if nobody called DIOCXBEGIN, let alone this process.
2157 	 * Partially work around this by checking whether the RB tree has
2158 	 * been allocated; see pf_begin_rules().
2159 	 */
2160 	if (ruleset->rules[rs_num].inactive.tree == NULL) {
2161 		ERROUT(EINVAL);
2162 	}
2163 
2164 	tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
2165 	    pf_krulequeue);
2166 	if (tail)
2167 		rule->nr = tail->nr + 1;
2168 	else
2169 		rule->nr = 0;
2170 	if (rule->ifname[0]) {
2171 		rule->kif = pfi_kkif_attach(kif, rule->ifname);
2172 		kif = NULL;
2173 		pfi_kkif_ref(rule->kif);
2174 	} else
2175 		rule->kif = NULL;
2176 
2177 	if (rule->rcv_ifname[0]) {
2178 		rule->rcv_kif = pfi_kkif_attach(rcv_kif, rule->rcv_ifname);
2179 		rcv_kif = NULL;
2180 		pfi_kkif_ref(rule->rcv_kif);
2181 	} else
2182 		rule->rcv_kif = NULL;
2183 
2184 	if (rule->rtableid > 0 && rule->rtableid >= rt_numfibs)
2185 		ERROUT(EBUSY);
2186 #ifdef ALTQ
2187 	/* set queue IDs */
2188 	if (rule->qname[0] != 0) {
2189 		if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
2190 			ERROUT(EBUSY);
2191 		else if (rule->pqname[0] != 0) {
2192 			if ((rule->pqid =
2193 			    pf_qname2qid(rule->pqname)) == 0)
2194 				ERROUT(EBUSY);
2195 		} else
2196 			rule->pqid = rule->qid;
2197 	}
2198 #endif
2199 	if (rule->tagname[0])
2200 		if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
2201 			ERROUT(EBUSY);
2202 	if (rule->match_tagname[0])
2203 		if ((rule->match_tag =
2204 		    pf_tagname2tag(rule->match_tagname)) == 0)
2205 			ERROUT(EBUSY);
2206 	if (rule->rt && !rule->direction)
2207 		ERROUT(EINVAL);
2208 	if (!rule->log)
2209 		rule->logif = 0;
2210 	if (! pf_init_threshold(&rule->pktrate, rule->pktrate.limit,
2211 	   rule->pktrate.seconds))
2212 		ERROUT(ENOMEM);
2213 	if (pf_addr_setup(ruleset, &rule->src.addr, rule->af))
2214 		ERROUT(ENOMEM);
2215 	if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af))
2216 		ERROUT(ENOMEM);
2217 	if (pf_kanchor_setup(rule, ruleset, anchor_call))
2218 		ERROUT(EINVAL);
2219 	if (rule->scrub_flags & PFSTATE_SETPRIO &&
2220 	    (rule->set_prio[0] > PF_PRIO_MAX ||
2221 	    rule->set_prio[1] > PF_PRIO_MAX))
2222 		ERROUT(EINVAL);
2223 	for (int i = 0; i < 3; i++) {
2224 		TAILQ_FOREACH(pa, &V_pf_pabuf[i], entries)
2225 			if (pa->addr.type == PF_ADDR_TABLE) {
2226 				pa->addr.p.tbl = pfr_attach_table(ruleset,
2227 				    pa->addr.v.tblname);
2228 				if (pa->addr.p.tbl == NULL)
2229 					ERROUT(ENOMEM);
2230 			}
2231 	}
2232 
2233 	rule->overload_tbl = NULL;
2234 	if (rule->overload_tblname[0]) {
2235 		if ((rule->overload_tbl = pfr_attach_table(ruleset,
2236 		    rule->overload_tblname)) == NULL)
2237 			ERROUT(EINVAL);
2238 		else
2239 			rule->overload_tbl->pfrkt_flags |=
2240 			    PFR_TFLAG_ACTIVE;
2241 	}
2242 
2243 	pf_mv_kpool(&V_pf_pabuf[0], &rule->nat.list);
2244 
2245 	/*
2246 	 * Old versions of pfctl provided route redirection pools in the
2247 	 * single common redirection pool rdr. New versions use rdr only
2248 	 * for rdr-to rules.
2249 	 */
2250 	if (rule->rt > PF_NOPFROUTE && TAILQ_EMPTY(&V_pf_pabuf[2])) {
2251 		pf_mv_kpool(&V_pf_pabuf[1], &rule->route.list);
2252 	} else {
2253 		pf_mv_kpool(&V_pf_pabuf[1], &rule->rdr.list);
2254 		pf_mv_kpool(&V_pf_pabuf[2], &rule->route.list);
2255 	}
2256 
2257 	if (((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
2258 	    (rule->action == PF_BINAT))	&& rule->anchor == NULL &&
2259 	    TAILQ_FIRST(&rule->rdr.list) == NULL) {
2260 		ERROUT(EINVAL);
2261 	}
2262 
2263 	if (rule->rt > PF_NOPFROUTE && (TAILQ_FIRST(&rule->route.list) == NULL)) {
2264 		ERROUT(EINVAL);
2265 	}
2266 
2267 	if (rule->action == PF_PASS && (rule->rdr.opts & PF_POOL_STICKYADDR ||
2268 	    rule->nat.opts & PF_POOL_STICKYADDR) && !rule->keep_state) {
2269 		ERROUT(EINVAL);
2270 	}
2271 
2272 	MPASS(error == 0);
2273 
2274 	rule->nat.cur = TAILQ_FIRST(&rule->nat.list);
2275 	rule->rdr.cur = TAILQ_FIRST(&rule->rdr.list);
2276 	rule->route.cur = TAILQ_FIRST(&rule->route.list);
2277 	TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
2278 	    rule, entries);
2279 	ruleset->rules[rs_num].inactive.rcount++;
2280 
2281 	PF_RULES_WUNLOCK();
2282 	pf_hash_rule(rule);
2283 	if (RB_INSERT(pf_krule_global, ruleset->rules[rs_num].inactive.tree, rule) != NULL) {
2284 		PF_RULES_WLOCK();
2285 		TAILQ_REMOVE(ruleset->rules[rs_num].inactive.ptr, rule, entries);
2286 		ruleset->rules[rs_num].inactive.rcount--;
2287 		pf_free_rule(rule);
2288 		rule = NULL;
2289 		ERROUT(EEXIST);
2290 	}
2291 	PF_CONFIG_UNLOCK();
2292 
2293 	return (0);
2294 
2295 #undef ERROUT
2296 #undef ERROUT_UNLOCKED
2297 errout:
2298 	PF_RULES_WUNLOCK();
2299 	PF_CONFIG_UNLOCK();
2300 errout_unlocked:
2301 	pf_kkif_free(rcv_kif);
2302 	pf_kkif_free(kif);
2303 	pf_krule_free(rule);
2304 	return (error);
2305 }
2306 
2307 static bool
2308 pf_label_match(const struct pf_krule *rule, const char *label)
2309 {
2310 	int i = 0;
2311 
2312 	while (i < PF_RULE_MAX_LABEL_COUNT && *rule->label[i]) {
2313 		if (strcmp(rule->label[i], label) == 0)
2314 			return (true);
2315 		i++;
2316 	}
2317 
2318 	return (false);
2319 }
2320 
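/*
 * Remove the state matching the given key, but only when the match
 * is unambiguous; if pf_find_state_all() reports additional matches,
 * nothing is removed.  Returns the number of states killed (0 or 1).
 */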
2321 static unsigned int
2322 pf_kill_matching_state(struct pf_state_key_cmp *key, int dir)
2323 {
2324 	struct pf_kstate *s;
2325 	int more = 0;
2326 
2327 	s = pf_find_state_all(key, dir, &more);
2328 	if (s == NULL)
2329 		return (0);
2330 
2331 	if (more) {
2332 		PF_STATE_UNLOCK(s);
2333 		return (0);
2334 	}
2335 
2336 	pf_remove_state(s);
2337 	return (1);
2338 }
2339 
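/*
 * Walk one state hash row and remove every state matching the filter
 * in psk.  pf_remove_state() drops the row lock, so the walk restarts
 * from the head of the row after each removal.  Returns the number of
 * states killed, including reverse-direction states matched via
 * psk_kill_match.
 */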
2340 static int
2341 pf_killstates_row(struct pf_kstate_kill *psk, struct pf_idhash *ih)
2342 {
2343 	struct pf_kstate	*s;
2344 	struct pf_state_key	*sk;
2345 	struct pf_addr		*srcaddr, *dstaddr;
2346 	struct pf_state_key_cmp	 match_key;
2347 	int			 idx, killed = 0;
2348 	unsigned int		 dir;
2349 	u_int16_t		 srcport, dstport;
2350 	struct pfi_kkif		*kif;
2351 
2352 relock_DIOCKILLSTATES:
2353 	PF_HASHROW_LOCK(ih);
2354 	LIST_FOREACH(s, &ih->states, entry) {
2355 		/* For floating states look at the original kif. */
2356 		kif = s->kif == V_pfi_all ? s->orig_kif : s->kif;
2357 
2358 		sk = s->key[psk->psk_nat ? PF_SK_STACK : PF_SK_WIRE];
2359 		if (s->direction == PF_OUT) {
2360 			srcaddr = &sk->addr[1];
2361 			dstaddr = &sk->addr[0];
2362 			srcport = sk->port[1];
2363 			dstport = sk->port[0];
2364 		} else {
2365 			srcaddr = &sk->addr[0];
2366 			dstaddr = &sk->addr[1];
2367 			srcport = sk->port[0];
2368 			dstport = sk->port[1];
2369 		}
2370 
2371 		if (psk->psk_af && sk->af != psk->psk_af)
2372 			continue;
2373 
2374 		if (psk->psk_proto && psk->psk_proto != sk->proto)
2375 			continue;
2376 
2377 		if (! pf_match_addr(psk->psk_src.neg,
2378 		    &psk->psk_src.addr.v.a.addr,
2379 		    &psk->psk_src.addr.v.a.mask, srcaddr, sk->af))
2380 			continue;
2381 
2382 		if (! pf_match_addr(psk->psk_dst.neg,
2383 		    &psk->psk_dst.addr.v.a.addr,
2384 		    &psk->psk_dst.addr.v.a.mask, dstaddr, sk->af))
2385 			continue;
2386 
2387 		if (! pf_match_addr(psk->psk_rt_addr.neg,
2388 		    &psk->psk_rt_addr.addr.v.a.addr,
2389 		    &psk->psk_rt_addr.addr.v.a.mask,
2390 		    &s->act.rt_addr, sk->af))
2391 			continue;
2392 
2393 		if (psk->psk_src.port_op != 0 &&
2394 		    ! pf_match_port(psk->psk_src.port_op,
2395 		    psk->psk_src.port[0], psk->psk_src.port[1], srcport))
2396 			continue;
2397 
2398 		if (psk->psk_dst.port_op != 0 &&
2399 		    ! pf_match_port(psk->psk_dst.port_op,
2400 		    psk->psk_dst.port[0], psk->psk_dst.port[1], dstport))
2401 			continue;
2402 
2403 		if (psk->psk_label[0] &&
2404 		    ! pf_label_match(s->rule, psk->psk_label))
2405 			continue;
2406 
2407 		if (psk->psk_ifname[0] && strcmp(psk->psk_ifname,
2408 		    kif->pfik_name))
2409 			continue;
2410 
2411 		if (psk->psk_kill_match) {
2412 			/* Create the key to find matching states, with lock
2413 			 * held. */
2414 
2415 			bzero(&match_key, sizeof(match_key));
2416 
2417 			if (s->direction == PF_OUT) {
2418 				dir = PF_IN;
2419 				idx = psk->psk_nat ? PF_SK_WIRE : PF_SK_STACK;
2420 			} else {
2421 				dir = PF_OUT;
2422 				idx = psk->psk_nat ? PF_SK_STACK : PF_SK_WIRE;
2423 			}
2424 
2425 			match_key.af = s->key[idx]->af;
2426 			match_key.proto = s->key[idx]->proto;
2427 			pf_addrcpy(&match_key.addr[0],
2428 			    &s->key[idx]->addr[1], match_key.af);
2429 			match_key.port[0] = s->key[idx]->port[1];
2430 			pf_addrcpy(&match_key.addr[1],
2431 			    &s->key[idx]->addr[0], match_key.af);
2432 			match_key.port[1] = s->key[idx]->port[0];
2433 		}
2434 
2435 		pf_remove_state(s);
2436 		killed++;
2437 
2438 		if (psk->psk_kill_match)
2439 			killed += pf_kill_matching_state(&match_key, dir);
2440 
2441 		goto relock_DIOCKILLSTATES;
2442 	}
2443 	PF_HASHROW_UNLOCK(ih);
2444 
2445 	return (killed);
2446 }
2447 
2448 void
2449 unhandled_af(int af)
2450 {
2451 	panic("unhandled af %d", af);
2452 }
2453 
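/*
 * Bring the filter online: attach the pfil hooks (plus the Ethernet
 * hooks when Ethernet rules are present) and reseed the state-ID
 * generator.  EEXIST means pf was already running.
 */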
2454 int
2455 pf_start(void)
2456 {
2457 	int error = 0;
2458 
2459 	sx_xlock(&V_pf_ioctl_lock);
2460 	if (V_pf_status.running)
2461 		error = EEXIST;
2462 	else {
2463 		hook_pf();
2464 		if (! TAILQ_EMPTY(V_pf_keth->active.rules))
2465 			hook_pf_eth();
2466 		V_pf_status.running = 1;
2467 		V_pf_status.since = time_uptime;
2468 		new_unrhdr64(&V_pf_stateid, time_second);
2469 
2470 		DPFPRINTF(PF_DEBUG_MISC, "pf: started");
2471 	}
2472 	sx_xunlock(&V_pf_ioctl_lock);
2473 
2474 	return (error);
2475 }
2476 
2477 int
2478 pf_stop(void)
2479 {
2480 	int error = 0;
2481 
2482 	sx_xlock(&V_pf_ioctl_lock);
2483 	if (!V_pf_status.running)
2484 		error = ENOENT;
2485 	else {
2486 		V_pf_status.running = 0;
2487 		dehook_pf();
2488 		dehook_pf_eth();
2489 		V_pf_status.since = time_uptime;
2490 		DPFPRINTF(PF_DEBUG_MISC, "pf: stopped");
2491 	}
2492 	sx_xunlock(&V_pf_ioctl_lock);
2493 
2494 	return (error);
2495 }
2496 
2497 void
2498 pf_ioctl_clear_status(void)
2499 {
2500 	PF_RULES_WLOCK();
2501 	for (int i = 0; i < PFRES_MAX; i++)
2502 		counter_u64_zero(V_pf_status.counters[i]);
2503 	for (int i = 0; i < FCNT_MAX; i++)
2504 		pf_counter_u64_zero(&V_pf_status.fcounters[i]);
2505 	for (int i = 0; i < SCNT_MAX; i++)
2506 		counter_u64_zero(V_pf_status.scounters[i]);
2507 	for (int i = 0; i < KLCNT_MAX; i++)
2508 		counter_u64_zero(V_pf_status.lcounters[i]);
2509 	V_pf_status.since = time_uptime;
2510 	if (*V_pf_status.ifname)
2511 		pfi_update_status(V_pf_status.ifname, NULL);
2512 	PF_RULES_WUNLOCK();
2513 }
2514 
2515 int
2516 pf_ioctl_set_timeout(int timeout, int seconds, int *prev_seconds)
2517 {
2518 	uint32_t old;
2519 
2520 	if (timeout < 0 || timeout >= PFTM_MAX ||
2521 	    seconds < 0)
2522 		return (EINVAL);
2523 
2524 	PF_RULES_WLOCK();
2525 	old = V_pf_default_rule.timeout[timeout];
2526 	if (timeout == PFTM_INTERVAL && seconds == 0)
2527 		seconds = 1;
2528 	V_pf_default_rule.timeout[timeout] = seconds;
2529 	if (timeout == PFTM_INTERVAL && seconds < old)
2530 		wakeup(pf_purge_thread);
2531 
2532 	if (prev_seconds != NULL)
2533 		*prev_seconds = old;
2534 
2535 	PF_RULES_WUNLOCK();
2536 
2537 	return (0);
2538 }
2539 
2540 int
2541 pf_ioctl_get_timeout(int timeout, int *seconds)
2542 {
2543 	PF_RULES_RLOCK_TRACKER;
2544 
2545 	if (timeout < 0 || timeout >= PFTM_MAX)
2546 		return (EINVAL);
2547 
2548 	PF_RULES_RLOCK();
2549 	*seconds = V_pf_default_rule.timeout[timeout];
2550 	PF_RULES_RUNLOCK();
2551 
2552 	return (0);
2553 }
2554 
2555 int
2556 pf_ioctl_set_limit(int index, unsigned int limit, unsigned int *old_limit)
2557 {
2558 
2559 	PF_RULES_WLOCK();
2560 	if (index < 0 || index >= PF_LIMIT_MAX ||
2561 	    V_pf_limits[index].zone == NULL) {
2562 		PF_RULES_WUNLOCK();
2563 		return (EINVAL);
2564 	}
2565 	uma_zone_set_max(V_pf_limits[index].zone,
2566 	    limit == 0 ? INT_MAX : limit);
2567 	if (old_limit != NULL)
2568 		*old_limit = V_pf_limits[index].limit;
2569 	V_pf_limits[index].limit = limit;
2570 	PF_RULES_WUNLOCK();
2571 
2572 	return (0);
2573 }
2574 
2575 int
2576 pf_ioctl_get_limit(int index, unsigned int *limit)
2577 {
2578 	PF_RULES_RLOCK_TRACKER;
2579 
2580 	if (index < 0 || index >= PF_LIMIT_MAX)
2581 		return (EINVAL);
2582 
2583 	PF_RULES_RLOCK();
2584 	*limit = V_pf_limits[index].limit;
2585 	PF_RULES_RUNLOCK();
2586 
2587 	return (0);
2588 }
2589 
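/*
 * Open a pool-address transaction: flush the three staging pools
 * (NAT, redirect, route) and hand out a fresh ticket that must
 * accompany every address added afterwards.  A hypothetical
 * userspace sketch of the flow:
 *
 *	ioctl(dev, DIOCBEGINADDRS, &pca);	// pca.ticket = T
 *	pp.ticket = T;
 *	ioctl(dev, DIOCADDADDR, &pp);		// repeat per address
 *	pr.pool_ticket = T;
 *	ioctl(dev, DIOCADDRULE, &pr);		// consumes the pools
 */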
2590 int
2591 pf_ioctl_begin_addrs(uint32_t *ticket)
2592 {
2593 	PF_RULES_WLOCK();
2594 	pf_empty_kpool(&V_pf_pabuf[0]);
2595 	pf_empty_kpool(&V_pf_pabuf[1]);
2596 	pf_empty_kpool(&V_pf_pabuf[2]);
2597 	*ticket = ++V_ticket_pabuf;
2598 	PF_RULES_WUNLOCK();
2599 
2600 	return (0);
2601 }
2602 
2603 int
2604 pf_ioctl_add_addr(struct pf_nl_pooladdr *pp)
2605 {
2606 	struct pf_kpooladdr	*pa = NULL;
2607 	struct pfi_kkif		*kif = NULL;
2608 	int error;
2609 
2610 	if (pp->which != PF_RDR && pp->which != PF_NAT &&
2611 	    pp->which != PF_RT)
2612 		return (EINVAL);
2613 
2614 	switch (pp->af) {
2615 #ifdef INET
2616 	case AF_INET:
2617 		/* FALLTHROUGH */
2618 #endif /* INET */
2619 #ifdef INET6
2620 	case AF_INET6:
2621 		/* FALLTHROUGH */
2622 #endif /* INET6 */
2623 	case AF_UNSPEC:
2624 		break;
2625 	default:
2626 		return (EAFNOSUPPORT);
2627 	}
2628 
2629 	if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
2630 	    pp->addr.addr.type != PF_ADDR_DYNIFTL &&
2631 	    pp->addr.addr.type != PF_ADDR_TABLE)
2632 		return (EINVAL);
2633 
2634 	if (pp->addr.addr.p.dyn != NULL)
2635 		return (EINVAL);
2636 
2637 	pa = malloc(sizeof(*pa), M_PFRULE, M_WAITOK);
2638 	error = pf_pooladdr_to_kpooladdr(&pp->addr, pa);
2639 	if (error != 0)
2640 		goto out;
2641 	if (pa->ifname[0])
2642 		kif = pf_kkif_create(M_WAITOK);
2643 	PF_RULES_WLOCK();
2644 	if (pp->ticket != V_ticket_pabuf) {
2645 		PF_RULES_WUNLOCK();
2646 		if (pa->ifname[0])
2647 			pf_kkif_free(kif);
2648 		error = EBUSY;
2649 		goto out;
2650 	}
2651 	if (pa->ifname[0]) {
2652 		pa->kif = pfi_kkif_attach(kif, pa->ifname);
2653 		kif = NULL;
2654 		pfi_kkif_ref(pa->kif);
2655 	} else
2656 		pa->kif = NULL;
2657 	if (pa->addr.type == PF_ADDR_DYNIFTL && ((error =
2658 	    pfi_dynaddr_setup(&pa->addr, pp->af)) != 0)) {
2659 		if (pa->ifname[0])
2660 			pfi_kkif_unref(pa->kif);
2661 		PF_RULES_WUNLOCK();
2662 		goto out;
2663 	}
2664 	switch (pp->which) {
2665 	case PF_NAT:
2666 		TAILQ_INSERT_TAIL(&V_pf_pabuf[0], pa, entries);
2667 		break;
2668 	case PF_RDR:
2669 		TAILQ_INSERT_TAIL(&V_pf_pabuf[1], pa, entries);
2670 		break;
2671 	case PF_RT:
2672 		TAILQ_INSERT_TAIL(&V_pf_pabuf[2], pa, entries);
2673 		break;
2674 	}
2675 	PF_RULES_WUNLOCK();
2676 
2677 	return (0);
2678 
2679 out:
2680 	free(pa, M_PFRULE);
2681 	return (error);
2682 }
2683 
2684 int
2685 pf_ioctl_get_addrs(struct pf_nl_pooladdr *pp)
2686 {
2687 	struct pf_kpool		*pool;
2688 	struct pf_kpooladdr	*pa;
2689 
2690 	PF_RULES_RLOCK_TRACKER;
2691 
2692 	if (pp->which != PF_RDR && pp->which != PF_NAT &&
2693 	    pp->which != PF_RT)
2694 		return (EINVAL);
2695 
2696 	pp->anchor[sizeof(pp->anchor) - 1] = '\0';
2697 	pp->nr = 0;
2698 
2699 	PF_RULES_RLOCK();
2700 	pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
2701 	    pp->r_num, 0, 1, 0, pp->which);
2702 	if (pool == NULL) {
2703 		PF_RULES_RUNLOCK();
2704 		return (EBUSY);
2705 	}
2706 	TAILQ_FOREACH(pa, &pool->list, entries)
2707 		pp->nr++;
2708 	PF_RULES_RUNLOCK();
2709 
2710 	return (0);
2711 }
2712 
2713 int
2714 pf_ioctl_get_addr(struct pf_nl_pooladdr *pp)
2715 {
2716 	struct pf_kpool		*pool;
2717 	struct pf_kpooladdr	*pa;
2718 	u_int32_t		 nr = 0;
2719 	PF_RULES_RLOCK_TRACKER;
2720 
2721 	if (pp->which != PF_RDR && pp->which != PF_NAT &&
2722 	    pp->which != PF_RT)
2723 		return (EINVAL);
2724 	PF_RULES_RLOCK_TRACKER;
2725 
2726 	pp->anchor[sizeof(pp->anchor) - 1] = '\0';
2727 
2728 	PF_RULES_RLOCK();
2729 	pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
2730 	    pp->r_num, 0, 1, 1, pp->which);
2731 	if (pool == NULL) {
2732 		PF_RULES_RUNLOCK();
2733 		return (EBUSY);
2734 	}
2735 	pa = TAILQ_FIRST(&pool->list);
2736 	while ((pa != NULL) && (nr < pp->nr)) {
2737 		pa = TAILQ_NEXT(pa, entries);
2738 		nr++;
2739 	}
2740 	if (pa == NULL) {
2741 		PF_RULES_RUNLOCK();
2742 		return (EBUSY);
2743 	}
2744 	pf_kpooladdr_to_pooladdr(pa, &pp->addr);
2745 	pf_addr_copyout(&pp->addr.addr);
2746 	PF_RULES_RUNLOCK();
2747 
2748 	return (0);
2749 }
2750 
2751 int
2752 pf_ioctl_get_rulesets(struct pfioc_ruleset *pr)
2753 {
2754 	struct pf_kruleset	*ruleset;
2755 	struct pf_kanchor	*anchor;
2756 
2757 	PF_RULES_RLOCK_TRACKER;
2758 
2759 	pr->path[sizeof(pr->path) - 1] = '\0';
2760 
2761 	PF_RULES_RLOCK();
2762 	if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
2763 		PF_RULES_RUNLOCK();
2764 		return (ENOENT);
2765 	}
2766 	pr->nr = 0;
2767 	if (ruleset == &pf_main_ruleset) {
2768 		/* XXX kludge for pf_main_ruleset */
2769 		RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
2770 			if (anchor->parent == NULL)
2771 				pr->nr++;
2772 	} else {
2773 		RB_FOREACH(anchor, pf_kanchor_node,
2774 		    &ruleset->anchor->children)
2775 			pr->nr++;
2776 	}
2777 	PF_RULES_RUNLOCK();
2778 
2779 	return (0);
2780 }
2781 
2782 int
2783 pf_ioctl_get_ruleset(struct pfioc_ruleset *pr)
2784 {
2785 	struct pf_kruleset	*ruleset;
2786 	struct pf_kanchor	*anchor;
2787 	u_int32_t		 nr = 0;
2788 	int			 error = 0;
2789 
2790 	PF_RULES_RLOCK_TRACKER;
2791 
2792 	PF_RULES_RLOCK();
2793 	if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
2794 		PF_RULES_RUNLOCK();
2795 		return (ENOENT);
2796 	}
2797 
2798 	pr->name[0] = '\0';
2799 	if (ruleset == &pf_main_ruleset) {
2800 		/* XXX kludge for pf_main_ruleset */
2801 		RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
2802 			if (anchor->parent == NULL && nr++ == pr->nr) {
2803 				strlcpy(pr->name, anchor->name,
2804 				    sizeof(pr->name));
2805 				break;
2806 			}
2807 	} else {
2808 		RB_FOREACH(anchor, pf_kanchor_node,
2809 		    &ruleset->anchor->children)
2810 			if (nr++ == pr->nr) {
2811 				strlcpy(pr->name, anchor->name,
2812 				    sizeof(pr->name));
2813 				break;
2814 			}
2815 	}
2816 	if (!pr->name[0])
2817 		error = EBUSY;
2818 	PF_RULES_RUNLOCK();
2819 
2820 	return (error);
2821 }
2822 
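/*
 * Implement the lookup behind DIOCNATLOOK: build a state key from the
 * caller's view of the flow (src/dst deliberately reversed, see
 * below), find the unique matching state and copy out the addresses
 * and ports of the corresponding state key, i.e. the other side of
 * the translation.  A hypothetical userspace sketch:
 *
 *	struct pfioc_natlook nl = { .af = AF_INET,
 *	    .proto = IPPROTO_TCP, .direction = PF_OUT };
 *	// fill nl.saddr/nl.daddr/nl.sport/nl.dport, then:
 *	if (ioctl(pffd, DIOCNATLOOK, &nl) == 0)
 *		use(nl.rdaddr, nl.rdport);	// pre-translation peer
 */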
2823 int
2824 pf_ioctl_natlook(struct pfioc_natlook *pnl)
2825 {
2826 	struct pf_state_key	*sk;
2827 	struct pf_kstate	*state;
2828 	struct pf_state_key_cmp	 key;
2829 	int			 m = 0, direction = pnl->direction;
2830 	int			 sidx, didx;
2831 
2832 	/* NATLOOK src and dst are reversed, so reverse sidx/didx */
2833 	sidx = (direction == PF_IN) ? 1 : 0;
2834 	didx = (direction == PF_IN) ? 0 : 1;
2835 
2836 	if (!pnl->proto ||
2837 	    PF_AZERO(&pnl->saddr, pnl->af) ||
2838 	    PF_AZERO(&pnl->daddr, pnl->af) ||
2839 	    ((pnl->proto == IPPROTO_TCP ||
2840 	    pnl->proto == IPPROTO_UDP) &&
2841 	    (!pnl->dport || !pnl->sport)))
2842 		return (EINVAL);
2843 
2844 	switch (pnl->direction) {
2845 	case PF_IN:
2846 	case PF_OUT:
2847 	case PF_INOUT:
2848 		break;
2849 	default:
2850 		return (EINVAL);
2851 	}
2852 
2853 	switch (pnl->af) {
2854 #ifdef INET
2855 	case AF_INET:
2856 		break;
2857 #endif /* INET */
2858 #ifdef INET6
2859 	case AF_INET6:
2860 		break;
2861 #endif /* INET6 */
2862 	default:
2863 		return (EAFNOSUPPORT);
2864 	}
2865 
2866 	bzero(&key, sizeof(key));
2867 	key.af = pnl->af;
2868 	key.proto = pnl->proto;
2869 	pf_addrcpy(&key.addr[sidx], &pnl->saddr, pnl->af);
2870 	key.port[sidx] = pnl->sport;
2871 	pf_addrcpy(&key.addr[didx], &pnl->daddr, pnl->af);
2872 	key.port[didx] = pnl->dport;
2873 
2874 	state = pf_find_state_all(&key, direction, &m);
2875 	if (state == NULL)
2876 		return (ENOENT);
2877 
2878 	if (m > 1) {
2879 		PF_STATE_UNLOCK(state);
2880 		return (E2BIG);	/* more than one state */
2881 	}
2882 
2883 	sk = state->key[sidx];
2884 	pf_addrcpy(&pnl->rsaddr,
2885 	    &sk->addr[sidx], sk->af);
2886 	pnl->rsport = sk->port[sidx];
2887 	pf_addrcpy(&pnl->rdaddr,
2888 	    &sk->addr[didx], sk->af);
2889 	pnl->rdport = sk->port[didx];
2890 	PF_STATE_UNLOCK(state);
2891 
2892 	return (0);
2893 }
2894 
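/*
 * Main ioctl dispatcher for /dev/pf.  Commands are gated twice before
 * dispatch: by securelevel, which only admits the read-mostly subset
 * below once raised above 2, and by FWRITE on the descriptor, with
 * dummy (PFR_FLAG_DUMMY) table operations allowed in both cases.
 */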
2895 static int
2896 pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
2897 {
2898 	int			 error = 0;
2899 	PF_RULES_RLOCK_TRACKER;
2900 
2901 #define	ERROUT_IOCTL(target, x)					\
2902     do {								\
2903 	    error = (x);						\
2904 	    SDT_PROBE3(pf, ioctl, ioctl, error, cmd, error, __LINE__);	\
2905 	    goto target;						\
2906     } while (0)
2907 
2908 
2909 	/* XXX keep in sync with switch() below */
2910 	if (securelevel_gt(td->td_ucred, 2))
2911 		switch (cmd) {
2912 		case DIOCGETRULES:
2913 		case DIOCGETRULENV:
2914 		case DIOCGETADDRS:
2915 		case DIOCGETADDR:
2916 		case DIOCGETSTATE:
2917 		case DIOCGETSTATENV:
2918 		case DIOCSETSTATUSIF:
2919 		case DIOCGETSTATUSNV:
2920 		case DIOCCLRSTATUS:
2921 		case DIOCNATLOOK:
2922 		case DIOCSETDEBUG:
2923 #ifdef COMPAT_FREEBSD14
2924 		case DIOCGETSTATES:
2925 		case DIOCGETSTATESV2:
2926 #endif
2927 		case DIOCGETTIMEOUT:
2928 		case DIOCCLRRULECTRS:
2929 		case DIOCGETLIMIT:
2930 		case DIOCGETALTQSV0:
2931 		case DIOCGETALTQSV1:
2932 		case DIOCGETALTQV0:
2933 		case DIOCGETALTQV1:
2934 		case DIOCGETQSTATSV0:
2935 		case DIOCGETQSTATSV1:
2936 		case DIOCGETRULESETS:
2937 		case DIOCGETRULESET:
2938 		case DIOCRGETTABLES:
2939 		case DIOCRGETTSTATS:
2940 		case DIOCRCLRTSTATS:
2941 		case DIOCRCLRADDRS:
2942 		case DIOCRADDADDRS:
2943 		case DIOCRDELADDRS:
2944 		case DIOCRSETADDRS:
2945 		case DIOCRGETADDRS:
2946 		case DIOCRGETASTATS:
2947 		case DIOCRCLRASTATS:
2948 		case DIOCRTSTADDRS:
2949 		case DIOCOSFPGET:
2950 		case DIOCGETSRCNODES:
2951 		case DIOCCLRSRCNODES:
2952 		case DIOCGETSYNCOOKIES:
2953 		case DIOCIGETIFACES:
2954 		case DIOCGIFSPEEDV0:
2955 		case DIOCGIFSPEEDV1:
2956 		case DIOCSETIFFLAG:
2957 		case DIOCCLRIFFLAG:
2958 		case DIOCGETETHRULES:
2959 		case DIOCGETETHRULE:
2960 		case DIOCGETETHRULESETS:
2961 		case DIOCGETETHRULESET:
2962 			break;
2963 		case DIOCRCLRTABLES:
2964 		case DIOCRADDTABLES:
2965 		case DIOCRDELTABLES:
2966 		case DIOCRSETTFLAGS:
2967 			if (((struct pfioc_table *)addr)->pfrio_flags &
2968 			    PFR_FLAG_DUMMY)
2969 				break; /* dummy operation ok */
2970 			return (EPERM);
2971 		default:
2972 			return (EPERM);
2973 		}
2974 
2975 	if (!(flags & FWRITE))
2976 		switch (cmd) {
2977 		case DIOCGETRULES:
2978 		case DIOCGETADDRS:
2979 		case DIOCGETADDR:
2980 		case DIOCGETSTATE:
2981 		case DIOCGETSTATENV:
2982 		case DIOCGETSTATUSNV:
2983 #ifdef COMPAT_FREEBSD14
2984 		case DIOCGETSTATES:
2985 		case DIOCGETSTATESV2:
2986 #endif
2987 		case DIOCGETTIMEOUT:
2988 		case DIOCGETLIMIT:
2989 		case DIOCGETALTQSV0:
2990 		case DIOCGETALTQSV1:
2991 		case DIOCGETALTQV0:
2992 		case DIOCGETALTQV1:
2993 		case DIOCGETQSTATSV0:
2994 		case DIOCGETQSTATSV1:
2995 		case DIOCGETRULESETS:
2996 		case DIOCGETRULESET:
2997 		case DIOCNATLOOK:
2998 		case DIOCRGETTABLES:
2999 		case DIOCRGETTSTATS:
3000 		case DIOCRGETADDRS:
3001 		case DIOCRGETASTATS:
3002 		case DIOCRTSTADDRS:
3003 		case DIOCOSFPGET:
3004 		case DIOCGETSRCNODES:
3005 		case DIOCGETSYNCOOKIES:
3006 		case DIOCIGETIFACES:
3007 		case DIOCGIFSPEEDV1:
3008 		case DIOCGIFSPEEDV0:
3009 		case DIOCGETRULENV:
3010 		case DIOCGETETHRULES:
3011 		case DIOCGETETHRULE:
3012 		case DIOCGETETHRULESETS:
3013 		case DIOCGETETHRULESET:
3014 			break;
3015 		case DIOCRCLRTABLES:
3016 		case DIOCRADDTABLES:
3017 		case DIOCRDELTABLES:
3018 		case DIOCRCLRTSTATS:
3019 		case DIOCRCLRADDRS:
3020 		case DIOCRADDADDRS:
3021 		case DIOCRDELADDRS:
3022 		case DIOCRSETADDRS:
3023 		case DIOCRSETTFLAGS:
3024 			if (((struct pfioc_table *)addr)->pfrio_flags &
3025 			    PFR_FLAG_DUMMY) {
3026 				flags |= FWRITE; /* need write lock for dummy */
3027 				break; /* dummy operation ok */
3028 			}
3029 			return (EACCES);
3030 		default:
3031 			return (EACCES);
3032 		}
3033 
3034 	CURVNET_SET(TD_TO_VNET(td));
3035 
3036 	switch (cmd) {
3037 #ifdef COMPAT_FREEBSD14
3038 	case DIOCSTART:
3039 		error = pf_start();
3040 		break;
3041 
3042 	case DIOCSTOP:
3043 		error = pf_stop();
3044 		break;
3045 #endif
3046 
3047 	case DIOCGETETHRULES: {
3048 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3049 		nvlist_t		*nvl;
3050 		void			*packed;
3051 		struct pf_keth_rule	*tail;
3052 		struct pf_keth_ruleset	*rs;
3053 		u_int32_t		 ticket, nr;
3054 		const char		*anchor = "";
3055 
3056 		nvl = NULL;
3057 		packed = NULL;
3058 
3059 #define	ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULES_error, x)
3060 
3061 		if (nv->len > pf_ioctl_maxcount)
3062 			ERROUT(ENOMEM);
3063 
3064 		/* Copy the request in */
3065 		packed = malloc(nv->len, M_NVLIST, M_WAITOK);
3066 		error = copyin(nv->data, packed, nv->len);
3067 		if (error)
3068 			ERROUT(error);
3069 
3070 		nvl = nvlist_unpack(packed, nv->len, 0);
3071 		if (nvl == NULL)
3072 			ERROUT(EBADMSG);
3073 
3074 		if (! nvlist_exists_string(nvl, "anchor"))
3075 			ERROUT(EBADMSG);
3076 
3077 		anchor = nvlist_get_string(nvl, "anchor");
3078 
3079 		rs = pf_find_keth_ruleset(anchor);
3080 
3081 		nvlist_destroy(nvl);
3082 		nvl = NULL;
3083 		free(packed, M_NVLIST);
3084 		packed = NULL;
3085 
3086 		if (rs == NULL)
3087 			ERROUT(ENOENT);
3088 
3089 		/* Reply */
3090 		nvl = nvlist_create(0);
3091 		if (nvl == NULL)
3092 			ERROUT(ENOMEM);
3093 
3094 		PF_RULES_RLOCK();
3095 
3096 		ticket = rs->active.ticket;
3097 		tail = TAILQ_LAST(rs->active.rules, pf_keth_ruleq);
3098 		if (tail)
3099 			nr = tail->nr + 1;
3100 		else
3101 			nr = 0;
3102 
3103 		PF_RULES_RUNLOCK();
3104 
3105 		nvlist_add_number(nvl, "ticket", ticket);
3106 		nvlist_add_number(nvl, "nr", nr);
3107 
3108 		packed = nvlist_pack(nvl, &nv->len);
3109 		if (packed == NULL)
3110 			ERROUT(ENOMEM);
3111 
3112 		if (nv->size == 0)
3113 			ERROUT(0);
3114 		else if (nv->size < nv->len)
3115 			ERROUT(ENOSPC);
3116 
3117 		error = copyout(packed, nv->data, nv->len);
3118 
3119 #undef ERROUT
3120 DIOCGETETHRULES_error:
3121 		free(packed, M_NVLIST);
3122 		nvlist_destroy(nvl);
3123 		break;
3124 	}
3125 
3126 	case DIOCGETETHRULE: {
3127 		struct epoch_tracker	 et;
3128 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3129 		nvlist_t		*nvl = NULL;
3130 		void			*nvlpacked = NULL;
3131 		struct pf_keth_rule	*rule = NULL;
3132 		struct pf_keth_ruleset	*rs;
3133 		u_int32_t		 ticket, nr;
3134 		bool			 clear = false;
3135 		const char		*anchor;
3136 
3137 #define ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULE_error, x)
3138 
3139 		if (nv->len > pf_ioctl_maxcount)
3140 			ERROUT(ENOMEM);
3141 
3142 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3143 		error = copyin(nv->data, nvlpacked, nv->len);
3144 		if (error)
3145 			ERROUT(error);
3146 
3147 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3148 		if (nvl == NULL)
3149 			ERROUT(EBADMSG);
3150 		if (! nvlist_exists_number(nvl, "ticket"))
3151 			ERROUT(EBADMSG);
3152 		ticket = nvlist_get_number(nvl, "ticket");
3153 		if (! nvlist_exists_string(nvl, "anchor"))
3154 			ERROUT(EBADMSG);
3155 		anchor = nvlist_get_string(nvl, "anchor");
3156 
3157 		if (nvlist_exists_bool(nvl, "clear"))
3158 			clear = nvlist_get_bool(nvl, "clear");
3159 
3160 		if (clear && !(flags & FWRITE))
3161 			ERROUT(EACCES);
3162 
3163 		if (! nvlist_exists_number(nvl, "nr"))
3164 			ERROUT(EBADMSG);
3165 		nr = nvlist_get_number(nvl, "nr");
3166 
3167 		PF_RULES_RLOCK();
3168 		rs = pf_find_keth_ruleset(anchor);
3169 		if (rs == NULL) {
3170 			PF_RULES_RUNLOCK();
3171 			ERROUT(ENOENT);
3172 		}
3173 		if (ticket != rs->active.ticket) {
3174 			PF_RULES_RUNLOCK();
3175 			ERROUT(EBUSY);
3176 		}
3177 
3178 		nvlist_destroy(nvl);
3179 		nvl = NULL;
3180 		free(nvlpacked, M_NVLIST);
3181 		nvlpacked = NULL;
3182 
3183 		rule = TAILQ_FIRST(rs->active.rules);
3184 		while ((rule != NULL) && (rule->nr != nr))
3185 			rule = TAILQ_NEXT(rule, entries);
3186 		if (rule == NULL) {
3187 			PF_RULES_RUNLOCK();
3188 			ERROUT(ENOENT);
3189 		}
3190 		/* Make sure rule can't go away. */
3191 		NET_EPOCH_ENTER(et);
3192 		PF_RULES_RUNLOCK();
3193 		nvl = pf_keth_rule_to_nveth_rule(rule);
3194 		if (pf_keth_anchor_nvcopyout(rs, rule, nvl)) {
3195 			NET_EPOCH_EXIT(et);
3196 			ERROUT(EBUSY);
3197 		}
3198 		NET_EPOCH_EXIT(et);
3199 		if (nvl == NULL)
3200 			ERROUT(ENOMEM);
3201 
3202 		nvlpacked = nvlist_pack(nvl, &nv->len);
3203 		if (nvlpacked == NULL)
3204 			ERROUT(ENOMEM);
3205 
3206 		if (nv->size == 0)
3207 			ERROUT(0);
3208 		else if (nv->size < nv->len)
3209 			ERROUT(ENOSPC);
3210 
3211 		error = copyout(nvlpacked, nv->data, nv->len);
3212 		if (error == 0 && clear) {
3213 			counter_u64_zero(rule->evaluations);
3214 			for (int i = 0; i < 2; i++) {
3215 				counter_u64_zero(rule->packets[i]);
3216 				counter_u64_zero(rule->bytes[i]);
3217 			}
3218 		}
3219 
3220 #undef ERROUT
3221 DIOCGETETHRULE_error:
3222 		free(nvlpacked, M_NVLIST);
3223 		nvlist_destroy(nvl);
3224 		break;
3225 	}
3226 
3227 	case DIOCADDETHRULE: {
3228 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3229 		nvlist_t		*nvl = NULL;
3230 		void			*nvlpacked = NULL;
3231 		struct pf_keth_rule	*rule = NULL, *tail = NULL;
3232 		struct pf_keth_ruleset	*ruleset = NULL;
3233 		struct pfi_kkif		*kif = NULL, *bridge_to_kif = NULL;
3234 		const char		*anchor = "", *anchor_call = "";
3235 
3236 #define ERROUT(x)	ERROUT_IOCTL(DIOCADDETHRULE_error, x)
3237 
3238 		if (nv->len > pf_ioctl_maxcount)
3239 			ERROUT(ENOMEM);
3240 
3241 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3242 		error = copyin(nv->data, nvlpacked, nv->len);
3243 		if (error)
3244 			ERROUT(error);
3245 
3246 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3247 		if (nvl == NULL)
3248 			ERROUT(EBADMSG);
3249 
3250 		if (! nvlist_exists_number(nvl, "ticket"))
3251 			ERROUT(EBADMSG);
3252 
3253 		if (nvlist_exists_string(nvl, "anchor"))
3254 			anchor = nvlist_get_string(nvl, "anchor");
3255 		if (nvlist_exists_string(nvl, "anchor_call"))
3256 			anchor_call = nvlist_get_string(nvl, "anchor_call");
3257 
3258 		ruleset = pf_find_keth_ruleset(anchor);
3259 		if (ruleset == NULL)
3260 			ERROUT(EINVAL);
3261 
3262 		if (nvlist_get_number(nvl, "ticket") !=
3263 		    ruleset->inactive.ticket) {
3264 			DPFPRINTF(PF_DEBUG_MISC,
3265 			    "ticket: %d != %d",
3266 			    (u_int32_t)nvlist_get_number(nvl, "ticket"),
3267 			    ruleset->inactive.ticket);
3268 			ERROUT(EBUSY);
3269 		}
3270 
3271 		rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK);
3272 		rule->timestamp = NULL;
3273 
3274 		error = pf_nveth_rule_to_keth_rule(nvl, rule);
3275 		if (error != 0)
3276 			ERROUT(error);
3277 
3278 		if (rule->ifname[0])
3279 			kif = pf_kkif_create(M_WAITOK);
3280 		if (rule->bridge_to_name[0])
3281 			bridge_to_kif = pf_kkif_create(M_WAITOK);
3282 		rule->evaluations = counter_u64_alloc(M_WAITOK);
3283 		for (int i = 0; i < 2; i++) {
3284 			rule->packets[i] = counter_u64_alloc(M_WAITOK);
3285 			rule->bytes[i] = counter_u64_alloc(M_WAITOK);
3286 		}
3287 		rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
3288 		    M_WAITOK | M_ZERO);
3289 
3290 		PF_RULES_WLOCK();
3291 
3292 		if (rule->ifname[0]) {
3293 			rule->kif = pfi_kkif_attach(kif, rule->ifname);
3294 			pfi_kkif_ref(rule->kif);
3295 		} else
3296 			rule->kif = NULL;
3297 		if (rule->bridge_to_name[0]) {
3298 			rule->bridge_to = pfi_kkif_attach(bridge_to_kif,
3299 			    rule->bridge_to_name);
3300 			pfi_kkif_ref(rule->bridge_to);
3301 		} else
3302 			rule->bridge_to = NULL;
3303 
3304 #ifdef ALTQ
3305 		/* set queue IDs */
3306 		if (rule->qname[0] != 0) {
3307 			if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
3308 				error = EBUSY;
3311 		}
3312 #endif
3313 		if (rule->tagname[0])
3314 			if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
3315 				error = EBUSY;
3316 		if (rule->match_tagname[0])
3317 			if ((rule->match_tag = pf_tagname2tag(
3318 			    rule->match_tagname)) == 0)
3319 				error = EBUSY;
3320 
3321 		if (error == 0 && rule->ipdst.addr.type == PF_ADDR_TABLE)
3322 			error = pf_eth_addr_setup(ruleset, &rule->ipdst.addr);
3323 		if (error == 0 && rule->ipsrc.addr.type == PF_ADDR_TABLE)
3324 			error = pf_eth_addr_setup(ruleset, &rule->ipsrc.addr);
3325 
3326 		if (error) {
3327 			pf_free_eth_rule(rule);
3328 			PF_RULES_WUNLOCK();
3329 			ERROUT(error);
3330 		}
3331 
3332 		if (pf_keth_anchor_setup(rule, ruleset, anchor_call)) {
3333 			pf_free_eth_rule(rule);
3334 			PF_RULES_WUNLOCK();
3335 			ERROUT(EINVAL);
3336 		}
3337 
3338 		tail = TAILQ_LAST(ruleset->inactive.rules, pf_keth_ruleq);
3339 		if (tail)
3340 			rule->nr = tail->nr + 1;
3341 		else
3342 			rule->nr = 0;
3343 
3344 		TAILQ_INSERT_TAIL(ruleset->inactive.rules, rule, entries);
3345 
3346 		PF_RULES_WUNLOCK();
3347 
3348 #undef ERROUT
3349 DIOCADDETHRULE_error:
3350 		nvlist_destroy(nvl);
3351 		free(nvlpacked, M_NVLIST);
3352 		break;
3353 	}
3354 
3355 	case DIOCGETETHRULESETS: {
3356 		struct epoch_tracker	 et;
3357 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3358 		nvlist_t		*nvl = NULL;
3359 		void			*nvlpacked = NULL;
3360 		struct pf_keth_ruleset	*ruleset;
3361 		struct pf_keth_anchor	*anchor;
3362 		int			 nr = 0;
3363 
3364 #define ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULESETS_error, x)
3365 
3366 		if (nv->len > pf_ioctl_maxcount)
3367 			ERROUT(ENOMEM);
3368 
3369 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3370 		error = copyin(nv->data, nvlpacked, nv->len);
3371 		if (error)
3372 			ERROUT(error);
3373 
3374 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3375 		if (nvl == NULL)
3376 			ERROUT(EBADMSG);
3377 		if (! nvlist_exists_string(nvl, "path"))
3378 			ERROUT(EBADMSG);
3379 
3380 		NET_EPOCH_ENTER(et);
3381 
3382 		if ((ruleset = pf_find_keth_ruleset(
3383 		    nvlist_get_string(nvl, "path"))) == NULL) {
3384 			NET_EPOCH_EXIT(et);
3385 			ERROUT(ENOENT);
3386 		}
3387 
3388 		if (ruleset->anchor == NULL) {
3389 			RB_FOREACH(anchor, pf_keth_anchor_global, &V_pf_keth_anchors)
3390 				if (anchor->parent == NULL)
3391 					nr++;
3392 		} else {
3393 			RB_FOREACH(anchor, pf_keth_anchor_node,
3394 			    &ruleset->anchor->children)
3395 				nr++;
3396 		}
3397 
3398 		NET_EPOCH_EXIT(et);
3399 
3400 		nvlist_destroy(nvl);
3401 		nvl = NULL;
3402 		free(nvlpacked, M_NVLIST);
3403 		nvlpacked = NULL;
3404 
3405 		nvl = nvlist_create(0);
3406 		if (nvl == NULL)
3407 			ERROUT(ENOMEM);
3408 
3409 		nvlist_add_number(nvl, "nr", nr);
3410 
3411 		nvlpacked = nvlist_pack(nvl, &nv->len);
3412 		if (nvlpacked == NULL)
3413 			ERROUT(ENOMEM);
3414 
3415 		if (nv->size == 0)
3416 			ERROUT(0);
3417 		else if (nv->size < nv->len)
3418 			ERROUT(ENOSPC);
3419 
3420 		error = copyout(nvlpacked, nv->data, nv->len);
3421 
3422 #undef ERROUT
3423 DIOCGETETHRULESETS_error:
3424 		free(nvlpacked, M_NVLIST);
3425 		nvlist_destroy(nvl);
3426 		break;
3427 	}
3428 
3429 	case DIOCGETETHRULESET: {
3430 		struct epoch_tracker	 et;
3431 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3432 		nvlist_t		*nvl = NULL;
3433 		void			*nvlpacked = NULL;
3434 		struct pf_keth_ruleset	*ruleset;
3435 		struct pf_keth_anchor	*anchor;
3436 		int			 nr = 0, req_nr = 0;
3437 		bool			 found = false;
3438 
3439 #define ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULESET_error, x)
3440 
3441 		if (nv->len > pf_ioctl_maxcount)
3442 			ERROUT(ENOMEM);
3443 
3444 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3445 		error = copyin(nv->data, nvlpacked, nv->len);
3446 		if (error)
3447 			ERROUT(error);
3448 
3449 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3450 		if (nvl == NULL)
3451 			ERROUT(EBADMSG);
3452 		if (! nvlist_exists_string(nvl, "path"))
3453 			ERROUT(EBADMSG);
3454 		if (! nvlist_exists_number(nvl, "nr"))
3455 			ERROUT(EBADMSG);
3456 
3457 		req_nr = nvlist_get_number(nvl, "nr");
3458 
3459 		NET_EPOCH_ENTER(et);
3460 
3461 		if ((ruleset = pf_find_keth_ruleset(
3462 		    nvlist_get_string(nvl, "path"))) == NULL) {
3463 			NET_EPOCH_EXIT(et);
3464 			ERROUT(ENOENT);
3465 		}
3466 
3467 		nvlist_destroy(nvl);
3468 		nvl = NULL;
3469 		free(nvlpacked, M_NVLIST);
3470 		nvlpacked = NULL;
3471 
3472 		nvl = nvlist_create(0);
3473 		if (nvl == NULL) {
3474 			NET_EPOCH_EXIT(et);
3475 			ERROUT(ENOMEM);
3476 		}
3477 
3478 		if (ruleset->anchor == NULL) {
3479 			RB_FOREACH(anchor, pf_keth_anchor_global,
3480 			    &V_pf_keth_anchors) {
3481 				if (anchor->parent == NULL && nr++ == req_nr) {
3482 					found = true;
3483 					break;
3484 				}
3485 			}
3486 		} else {
3487 			RB_FOREACH(anchor, pf_keth_anchor_node,
3488 			     &ruleset->anchor->children) {
3489 				if (nr++ == req_nr) {
3490 					found = true;
3491 					break;
3492 				}
3493 			}
3494 		}
3495 
3496 		NET_EPOCH_EXIT(et);
3497 		if (found) {
3498 			nvlist_add_number(nvl, "nr", nr);
3499 			nvlist_add_string(nvl, "name", anchor->name);
3500 			if (ruleset->anchor)
3501 				nvlist_add_string(nvl, "path",
3502 				    ruleset->anchor->path);
3503 			else
3504 				nvlist_add_string(nvl, "path", "");
3505 		} else {
3506 			ERROUT(EBUSY);
3507 		}
3508 
3509 		nvlpacked = nvlist_pack(nvl, &nv->len);
3510 		if (nvlpacked == NULL)
3511 			ERROUT(ENOMEM);
3512 
3513 		if (nv->size == 0)
3514 			ERROUT(0);
3515 		else if (nv->size < nv->len)
3516 			ERROUT(ENOSPC);
3517 
3518 		error = copyout(nvlpacked, nv->data, nv->len);
3519 
3520 #undef ERROUT
3521 DIOCGETETHRULESET_error:
3522 		free(nvlpacked, M_NVLIST);
3523 		nvlist_destroy(nvl);
3524 		break;
3525 	}
3526 
3527 	case DIOCADDRULENV: {
3528 		struct pfioc_nv	*nv = (struct pfioc_nv *)addr;
3529 		nvlist_t	*nvl = NULL;
3530 		void		*nvlpacked = NULL;
3531 		struct pf_krule	*rule = NULL;
3532 		const char	*anchor = "", *anchor_call = "";
3533 		uint32_t	 ticket = 0, pool_ticket = 0;
3534 
3535 #define	ERROUT(x)	ERROUT_IOCTL(DIOCADDRULENV_error, x)
3536 
3537 		if (nv->len > pf_ioctl_maxcount)
3538 			ERROUT(ENOMEM);
3539 
3540 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3541 		error = copyin(nv->data, nvlpacked, nv->len);
3542 		if (error)
3543 			ERROUT(error);
3544 
3545 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3546 		if (nvl == NULL)
3547 			ERROUT(EBADMSG);
3548 
3549 		if (! nvlist_exists_number(nvl, "ticket"))
3550 			ERROUT(EINVAL);
3551 		ticket = nvlist_get_number(nvl, "ticket");
3552 
3553 		if (! nvlist_exists_number(nvl, "pool_ticket"))
3554 			ERROUT(EINVAL);
3555 		pool_ticket = nvlist_get_number(nvl, "pool_ticket");
3556 
3557 		if (! nvlist_exists_nvlist(nvl, "rule"))
3558 			ERROUT(EINVAL);
3559 
3560 		rule = pf_krule_alloc();
3561 		error = pf_nvrule_to_krule(nvlist_get_nvlist(nvl, "rule"),
3562 		    rule);
3563 		if (error)
3564 			ERROUT(error);
3565 
3566 		if (nvlist_exists_string(nvl, "anchor"))
3567 			anchor = nvlist_get_string(nvl, "anchor");
3568 		if (nvlist_exists_string(nvl, "anchor_call"))
3569 			anchor_call = nvlist_get_string(nvl, "anchor_call");
3570 
3571 		if ((error = nvlist_error(nvl)))
3572 			ERROUT(error);
3573 
3574 		/* Frees rule on error */
3575 		error = pf_ioctl_addrule(rule, ticket, pool_ticket, anchor,
3576 		    anchor_call, td->td_ucred->cr_ruid,
3577 		    td->td_proc ? td->td_proc->p_pid : 0);
3578 
3579 		nvlist_destroy(nvl);
3580 		free(nvlpacked, M_NVLIST);
3581 		break;
3582 #undef ERROUT
3583 DIOCADDRULENV_error:
3584 		pf_krule_free(rule);
3585 		nvlist_destroy(nvl);
3586 		free(nvlpacked, M_NVLIST);
3587 
3588 		break;
3589 	}
3590 	case DIOCADDRULE: {
3591 		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
3592 		struct pf_krule		*rule;
3593 
3594 		rule = pf_krule_alloc();
3595 		error = pf_rule_to_krule(&pr->rule, rule);
3596 		if (error != 0) {
3597 			pf_krule_free(rule);
3598 			goto fail;
3599 		}
3600 
3601 		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
3602 
3603 		/* Frees rule on error */
3604 		error = pf_ioctl_addrule(rule, pr->ticket, pr->pool_ticket,
3605 		    pr->anchor, pr->anchor_call, td->td_ucred->cr_ruid,
3606 		    td->td_proc ? td->td_proc->p_pid : 0);
3607 		break;
3608 	}
3609 
3610 	case DIOCGETRULES: {
3611 		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
3612 
3613 		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
3614 
3615 		error = pf_ioctl_getrules(pr);
3616 
3617 		break;
3618 	}
3619 
3620 	case DIOCGETRULENV: {
3621 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3622 		nvlist_t		*nvrule = NULL;
3623 		nvlist_t		*nvl = NULL;
3624 		struct pf_kruleset	*ruleset;
3625 		struct pf_krule		*rule;
3626 		void			*nvlpacked = NULL;
3627 		int			 rs_num, nr;
3628 		bool			 clear_counter = false;
3629 
3630 #define	ERROUT(x)	ERROUT_IOCTL(DIOCGETRULENV_error, x)
3631 
3632 		if (nv->len > pf_ioctl_maxcount)
3633 			ERROUT(ENOMEM);
3634 
3635 		/* Copy the request in */
3636 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3637 		error = copyin(nv->data, nvlpacked, nv->len);
3638 		if (error)
3639 			ERROUT(error);
3640 
3641 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3642 		if (nvl == NULL)
3643 			ERROUT(EBADMSG);
3644 
3645 		if (! nvlist_exists_string(nvl, "anchor"))
3646 			ERROUT(EBADMSG);
3647 		if (! nvlist_exists_number(nvl, "ruleset"))
3648 			ERROUT(EBADMSG);
3649 		if (! nvlist_exists_number(nvl, "ticket"))
3650 			ERROUT(EBADMSG);
3651 		if (! nvlist_exists_number(nvl, "nr"))
3652 			ERROUT(EBADMSG);
3653 
3654 		if (nvlist_exists_bool(nvl, "clear_counter"))
3655 			clear_counter = nvlist_get_bool(nvl, "clear_counter");
3656 
3657 		if (clear_counter && !(flags & FWRITE))
3658 			ERROUT(EACCES);
3659 
3660 		nr = nvlist_get_number(nvl, "nr");
3661 
3662 		PF_RULES_WLOCK();
3663 		ruleset = pf_find_kruleset(nvlist_get_string(nvl, "anchor"));
3664 		if (ruleset == NULL) {
3665 			PF_RULES_WUNLOCK();
3666 			ERROUT(ENOENT);
3667 		}
3668 
3669 		rs_num = pf_get_ruleset_number(nvlist_get_number(nvl, "ruleset"));
3670 		if (rs_num >= PF_RULESET_MAX) {
3671 			PF_RULES_WUNLOCK();
3672 			ERROUT(EINVAL);
3673 		}
3674 
3675 		if (nvlist_get_number(nvl, "ticket") !=
3676 		    ruleset->rules[rs_num].active.ticket) {
3677 			PF_RULES_WUNLOCK();
3678 			ERROUT(EBUSY);
3679 		}
3680 
3681 		if ((error = nvlist_error(nvl))) {
3682 			PF_RULES_WUNLOCK();
3683 			ERROUT(error);
3684 		}
3685 
3686 		rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
3687 		while ((rule != NULL) && (rule->nr != nr))
3688 			rule = TAILQ_NEXT(rule, entries);
3689 		if (rule == NULL) {
3690 			PF_RULES_WUNLOCK();
3691 			ERROUT(EBUSY);
3692 		}
3693 
3694 		nvrule = pf_krule_to_nvrule(rule);
3695 
3696 		nvlist_destroy(nvl);
3697 		nvl = nvlist_create(0);
3698 		if (nvl == NULL) {
3699 			PF_RULES_WUNLOCK();
3700 			ERROUT(ENOMEM);
3701 		}
3702 		nvlist_add_number(nvl, "nr", nr);
3703 		nvlist_add_nvlist(nvl, "rule", nvrule);
3704 		nvlist_destroy(nvrule);
3705 		nvrule = NULL;
3706 		if (pf_kanchor_nvcopyout(ruleset, rule, nvl)) {
3707 			PF_RULES_WUNLOCK();
3708 			ERROUT(EBUSY);
3709 		}
3710 
3711 		free(nvlpacked, M_NVLIST);
3712 		nvlpacked = nvlist_pack(nvl, &nv->len);
3713 		if (nvlpacked == NULL) {
3714 			PF_RULES_WUNLOCK();
3715 			ERROUT(ENOMEM);
3716 		}
3717 
3718 		if (nv->size == 0) {
3719 			PF_RULES_WUNLOCK();
3720 			ERROUT(0);
3721 		} else if (nv->size < nv->len) {
3723 			PF_RULES_WUNLOCK();
3724 			ERROUT(ENOSPC);
3725 		}
3726 
3727 		if (clear_counter)
3728 			pf_krule_clear_counters(rule);
3729 
3730 		PF_RULES_WUNLOCK();
3731 
3732 		error = copyout(nvlpacked, nv->data, nv->len);
3733 
3734 #undef ERROUT
3735 DIOCGETRULENV_error:
3736 		free(nvlpacked, M_NVLIST);
3737 		nvlist_destroy(nvrule);
3738 		nvlist_destroy(nvl);
3739 
3740 		break;
3741 	}
3742 
3743 	case DIOCCHANGERULE: {
3744 		struct pfioc_rule	*pcr = (struct pfioc_rule *)addr;
3745 		struct pf_kruleset	*ruleset;
3746 		struct pf_krule		*oldrule = NULL, *newrule = NULL;
3747 		struct pfi_kkif		*kif = NULL;
3748 		struct pf_kpooladdr	*pa;
3749 		u_int32_t		 nr = 0;
3750 		int			 rs_num;
3751 
3752 		pcr->anchor[sizeof(pcr->anchor) - 1] = '\0';
3753 
3754 		if (pcr->action < PF_CHANGE_ADD_HEAD ||
3755 		    pcr->action > PF_CHANGE_GET_TICKET) {
3756 			error = EINVAL;
3757 			goto fail;
3758 		}
3759 		if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
3760 			error = EINVAL;
3761 			goto fail;
3762 		}
3763 
3764 		if (pcr->action != PF_CHANGE_REMOVE) {
3765 			newrule = pf_krule_alloc();
3766 			error = pf_rule_to_krule(&pcr->rule, newrule);
3767 			if (error != 0) {
3768 				pf_krule_free(newrule);
3769 				goto fail;
3770 			}
3771 
3772 			if ((error = pf_rule_checkaf(newrule))) {
3773 				pf_krule_free(newrule);
3774 				goto fail;
3775 			}
3776 			if (newrule->ifname[0])
3777 				kif = pf_kkif_create(M_WAITOK);
3778 			pf_counter_u64_init(&newrule->evaluations, M_WAITOK);
3779 			for (int i = 0; i < 2; i++) {
3780 				pf_counter_u64_init(&newrule->packets[i], M_WAITOK);
3781 				pf_counter_u64_init(&newrule->bytes[i], M_WAITOK);
3782 			}
3783 			newrule->states_cur = counter_u64_alloc(M_WAITOK);
3784 			newrule->states_tot = counter_u64_alloc(M_WAITOK);
3785 			for (pf_sn_types_t sn_type = 0; sn_type < PF_SN_MAX; sn_type++)
3786 				newrule->src_nodes[sn_type] = counter_u64_alloc(M_WAITOK);
3787 			newrule->cuid = td->td_ucred->cr_ruid;
3788 			newrule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
3789 			TAILQ_INIT(&newrule->nat.list);
3790 			TAILQ_INIT(&newrule->rdr.list);
3791 			TAILQ_INIT(&newrule->route.list);
3792 		}
3793 #define	ERROUT(x)	ERROUT_IOCTL(DIOCCHANGERULE_error, x)
3794 
3795 		PF_CONFIG_LOCK();
3796 		PF_RULES_WLOCK();
3797 #ifdef PF_WANT_32_TO_64_COUNTER
3798 		if (newrule != NULL) {
3799 			LIST_INSERT_HEAD(&V_pf_allrulelist, newrule, allrulelist);
3800 			newrule->allrulelinked = true;
3801 			V_pf_allrulecount++;
3802 		}
3803 #endif
3804 
3805 		if (!(pcr->action == PF_CHANGE_REMOVE ||
3806 		    pcr->action == PF_CHANGE_GET_TICKET) &&
3807 		    pcr->pool_ticket != V_ticket_pabuf)
3808 			ERROUT(EBUSY);
3809 
3810 		ruleset = pf_find_kruleset(pcr->anchor);
3811 		if (ruleset == NULL)
3812 			ERROUT(EINVAL);
3813 
3814 		rs_num = pf_get_ruleset_number(pcr->rule.action);
3815 		if (rs_num >= PF_RULESET_MAX)
3816 			ERROUT(EINVAL);
3817 
3818 		/*
3819 		 * XXXMJG: there is no guarantee that the ruleset was
3820 		 * created by the usual route of calling DIOCXBEGIN.
3821 		 * As a result it is possible the rule tree will not
3822 		 * be allocated yet. Hack around it by doing it here.
3823 		 * Note it is fine to let the tree persist in case of
3824 		 * error as it will be freed down the road on future
3825 		 * updates (if need be).
3826 		 */
3827 		if (ruleset->rules[rs_num].active.tree == NULL) {
3828 			ruleset->rules[rs_num].active.tree = pf_rule_tree_alloc(M_NOWAIT);
3829 			if (ruleset->rules[rs_num].active.tree == NULL) {
3830 				ERROUT(ENOMEM);
3831 			}
3832 		}
3833 
3834 		if (pcr->action == PF_CHANGE_GET_TICKET) {
3835 			pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
3836 			ERROUT(0);
3837 		} else if (pcr->ticket !=
3838 		    ruleset->rules[rs_num].active.ticket)
3839 			ERROUT(EINVAL);
3840 
3841 		if (pcr->action != PF_CHANGE_REMOVE) {
3842 			if (newrule->ifname[0]) {
3843 				newrule->kif = pfi_kkif_attach(kif,
3844 				    newrule->ifname);
3845 				kif = NULL;
3846 				pfi_kkif_ref(newrule->kif);
3847 			} else
3848 				newrule->kif = NULL;
3849 
3850 			if (newrule->rtableid > 0 &&
3851 			    newrule->rtableid >= rt_numfibs)
3852 				error = EBUSY;
3853 
3854 #ifdef ALTQ
3855 			/* set queue IDs */
3856 			if (newrule->qname[0] != 0) {
3857 				if ((newrule->qid =
3858 				    pf_qname2qid(newrule->qname)) == 0)
3859 					error = EBUSY;
3860 				else if (newrule->pqname[0] != 0) {
3861 					if ((newrule->pqid =
3862 					    pf_qname2qid(newrule->pqname)) == 0)
3863 						error = EBUSY;
3864 				} else
3865 					newrule->pqid = newrule->qid;
3866 			}
3867 #endif /* ALTQ */
3868 			if (newrule->tagname[0])
3869 				if ((newrule->tag =
3870 				    pf_tagname2tag(newrule->tagname)) == 0)
3871 					error = EBUSY;
3872 			if (newrule->match_tagname[0])
3873 				if ((newrule->match_tag = pf_tagname2tag(
3874 				    newrule->match_tagname)) == 0)
3875 					error = EBUSY;
3876 			if (newrule->rt && !newrule->direction)
3877 				error = EINVAL;
3878 			if (!newrule->log)
3879 				newrule->logif = 0;
3880 			if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af))
3881 				error = ENOMEM;
3882 			if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af))
3883 				error = ENOMEM;
3884 			if (pf_kanchor_setup(newrule, ruleset, pcr->anchor_call))
3885 				error = EINVAL;
3886 			for (int i = 0; i < 3; i++) {
3887 				TAILQ_FOREACH(pa, &V_pf_pabuf[i], entries)
3888 					if (pa->addr.type == PF_ADDR_TABLE) {
3889 						pa->addr.p.tbl =
3890 						    pfr_attach_table(ruleset,
3891 						    pa->addr.v.tblname);
3892 						if (pa->addr.p.tbl == NULL)
3893 							error = ENOMEM;
3894 					}
3895 			}
3896 
3897 			newrule->overload_tbl = NULL;
3898 			if (newrule->overload_tblname[0]) {
3899 				if ((newrule->overload_tbl = pfr_attach_table(
3900 				    ruleset, newrule->overload_tblname)) ==
3901 				    NULL)
3902 					error = EINVAL;
3903 				else
3904 					newrule->overload_tbl->pfrkt_flags |=
3905 					    PFR_TFLAG_ACTIVE;
3906 			}
3907 
3908 			pf_mv_kpool(&V_pf_pabuf[0], &newrule->nat.list);
3909 			pf_mv_kpool(&V_pf_pabuf[1], &newrule->rdr.list);
3910 			pf_mv_kpool(&V_pf_pabuf[2], &newrule->route.list);
3911 			if ((newrule->action == PF_NAT ||
3912 			    newrule->action == PF_RDR ||
3913 			    newrule->action == PF_BINAT ||
3914 			    newrule->rt > PF_NOPFROUTE) &&
3915 			    !newrule->anchor &&
3916 			    TAILQ_FIRST(&newrule->rdr.list) == NULL)
3917 				error = EINVAL;
3918 
3919 			if (error) {
3920 				pf_free_rule(newrule);
3921 				PF_RULES_WUNLOCK();
3922 				PF_CONFIG_UNLOCK();
3923 				goto fail;
3924 			}
3925 
3926 			newrule->nat.cur = TAILQ_FIRST(&newrule->nat.list);
3927 			newrule->rdr.cur = TAILQ_FIRST(&newrule->rdr.list);
3928 		}
3929 		pf_empty_kpool(&V_pf_pabuf[0]);
3930 		pf_empty_kpool(&V_pf_pabuf[1]);
3931 		pf_empty_kpool(&V_pf_pabuf[2]);
3932 
3933 		if (pcr->action == PF_CHANGE_ADD_HEAD)
3934 			oldrule = TAILQ_FIRST(
3935 			    ruleset->rules[rs_num].active.ptr);
3936 		else if (pcr->action == PF_CHANGE_ADD_TAIL)
3937 			oldrule = TAILQ_LAST(
3938 			    ruleset->rules[rs_num].active.ptr, pf_krulequeue);
3939 		else {
3940 			oldrule = TAILQ_FIRST(
3941 			    ruleset->rules[rs_num].active.ptr);
3942 			while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
3943 				oldrule = TAILQ_NEXT(oldrule, entries);
3944 			if (oldrule == NULL) {
3945 				if (newrule != NULL)
3946 					pf_free_rule(newrule);
3947 				PF_RULES_WUNLOCK();
3948 				PF_CONFIG_UNLOCK();
3949 				error = EINVAL;
3950 				goto fail;
3951 			}
3952 		}
3953 
3954 		if (pcr->action == PF_CHANGE_REMOVE) {
3955 			pf_unlink_rule(ruleset->rules[rs_num].active.ptr,
3956 			    oldrule);
3957 			RB_REMOVE(pf_krule_global,
3958 			    ruleset->rules[rs_num].active.tree, oldrule);
3959 			ruleset->rules[rs_num].active.rcount--;
3960 		} else {
3961 			pf_hash_rule(newrule);
3962 			if (RB_INSERT(pf_krule_global,
3963 			    ruleset->rules[rs_num].active.tree, newrule) != NULL) {
3964 				pf_free_rule(newrule);
3965 				PF_RULES_WUNLOCK();
3966 				PF_CONFIG_UNLOCK();
3967 				error = EEXIST;
3968 				goto fail;
3969 			}
3970 
3971 			if (oldrule == NULL)
3972 				TAILQ_INSERT_TAIL(
3973 				    ruleset->rules[rs_num].active.ptr,
3974 				    newrule, entries);
3975 			else if (pcr->action == PF_CHANGE_ADD_HEAD ||
3976 			    pcr->action == PF_CHANGE_ADD_BEFORE)
3977 				TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
3978 			else
3979 				TAILQ_INSERT_AFTER(
3980 				    ruleset->rules[rs_num].active.ptr,
3981 				    oldrule, newrule, entries);
3982 			ruleset->rules[rs_num].active.rcount++;
3983 		}
3984 
3985 		nr = 0;
3986 		TAILQ_FOREACH(oldrule,
3987 		    ruleset->rules[rs_num].active.ptr, entries)
3988 			oldrule->nr = nr++;
3989 
3990 		ruleset->rules[rs_num].active.ticket++;
3991 
3992 		pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
3993 		pf_remove_if_empty_kruleset(ruleset);
3994 
3995 		PF_RULES_WUNLOCK();
3996 		PF_CONFIG_UNLOCK();
3997 		break;
3998 
3999 #undef ERROUT
4000 DIOCCHANGERULE_error:
4001 		PF_RULES_WUNLOCK();
4002 		PF_CONFIG_UNLOCK();
4003 		pf_krule_free(newrule);
4004 		pf_kkif_free(kif);
4005 		break;
4006 	}
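
	/*
	 * Illustrative userland sketch (an assumption for clarity, not
	 * part of this file): DIOCCHANGERULE is a two-step protocol.
	 * The caller first fetches the ruleset ticket with
	 * PF_CHANGE_GET_TICKET, then resubmits with the same ticket
	 * (plus the pool ticket from DIOCBEGINADDRS) to perform the
	 * actual change.  'dev' stands for an open /dev/pf descriptor:
	 *
	 *	struct pfioc_pooladdr pp = { 0 };
	 *	struct pfioc_rule pcr = { 0 };
	 *
	 *	if (ioctl(dev, DIOCBEGINADDRS, &pp) == -1)
	 *		err(1, "DIOCBEGINADDRS");
	 *	pcr.rule.action = PF_PASS;
	 *	pcr.action = PF_CHANGE_GET_TICKET;
	 *	if (ioctl(dev, DIOCCHANGERULE, &pcr) == -1)
	 *		err(1, "DIOCCHANGERULE");
	 *	pcr.action = PF_CHANGE_ADD_TAIL;
	 *	pcr.pool_ticket = pp.ticket;
	 *	if (ioctl(dev, DIOCCHANGERULE, &pcr) == -1)
	 *		err(1, "DIOCCHANGERULE");
	 *
	 * Real callers (e.g. pfctl) fill in many more rule fields; the
	 * point here is only the ticket handshake.
	 */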
4007 
4008 	case DIOCCLRSTATESNV: {
4009 		error = pf_clearstates_nv((struct pfioc_nv *)addr);
4010 		break;
4011 	}
4012 
4013 	case DIOCKILLSTATESNV: {
4014 		error = pf_killstates_nv((struct pfioc_nv *)addr);
4015 		break;
4016 	}
4017 
4018 	case DIOCADDSTATE: {
4019 		struct pfioc_state		*ps = (struct pfioc_state *)addr;
4020 		struct pfsync_state_1301	*sp = &ps->state;
4021 
4022 		if (sp->timeout >= PFTM_MAX) {
4023 			error = EINVAL;
4024 			goto fail;
4025 		}
4026 		if (V_pfsync_state_import_ptr != NULL) {
4027 			PF_RULES_RLOCK();
4028 			error = V_pfsync_state_import_ptr(
4029 			    (union pfsync_state_union *)sp, PFSYNC_SI_IOCTL,
4030 			    PFSYNC_MSG_VERSION_1301);
4031 			PF_RULES_RUNLOCK();
4032 		} else
4033 			error = EOPNOTSUPP;
4034 		break;
4035 	}
4036 
4037 	case DIOCGETSTATE: {
4038 		struct pfioc_state	*ps = (struct pfioc_state *)addr;
4039 		struct pf_kstate	*s;
4040 
4041 		s = pf_find_state_byid(ps->state.id, ps->state.creatorid);
4042 		if (s == NULL) {
4043 			error = ENOENT;
4044 			goto fail;
4045 		}
4046 
4047 		pfsync_state_export((union pfsync_state_union *)&ps->state,
4048 		    s, PFSYNC_MSG_VERSION_1301);
4049 		PF_STATE_UNLOCK(s);
4050 		break;
4051 	}
4052 
4053 	case DIOCGETSTATENV: {
4054 		error = pf_getstate((struct pfioc_nv *)addr);
4055 		break;
4056 	}
4057 
4058 #ifdef COMPAT_FREEBSD14
4059 	case DIOCGETSTATES: {
4060 		struct pfioc_states	*ps = (struct pfioc_states *)addr;
4061 		struct pf_kstate	*s;
4062 		struct pfsync_state_1301	*pstore, *p;
4063 		int			 i, nr;
4064 		size_t			 slice_count = 16, count;
4065 		void			*out;
4066 
4067 		if (ps->ps_len <= 0) {
4068 			nr = uma_zone_get_cur(V_pf_state_z);
4069 			ps->ps_len = sizeof(struct pfsync_state_1301) * nr;
4070 			break;
4071 		}
4072 
4073 		out = ps->ps_states;
4074 		pstore = mallocarray(slice_count,
4075 		    sizeof(struct pfsync_state_1301), M_TEMP, M_WAITOK | M_ZERO);
4076 		nr = 0;
4077 
4078 		for (i = 0; i <= V_pf_hashmask; i++) {
4079 			struct pf_idhash *ih = &V_pf_idhash[i];
4080 
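			/*
			 * Count the states in this row under its lock; if
			 * the row outgrew the slice buffer, drop the lock,
			 * grow the buffer and retry this row from here.
			 */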
4081 DIOCGETSTATES_retry:
4082 			p = pstore;
4083 
4084 			if (LIST_EMPTY(&ih->states))
4085 				continue;
4086 
4087 			PF_HASHROW_LOCK(ih);
4088 			count = 0;
4089 			LIST_FOREACH(s, &ih->states, entry) {
4090 				if (s->timeout == PFTM_UNLINKED)
4091 					continue;
4092 				count++;
4093 			}
4094 
4095 			if (count > slice_count) {
4096 				PF_HASHROW_UNLOCK(ih);
4097 				free(pstore, M_TEMP);
4098 				slice_count = count * 2;
4099 				pstore = mallocarray(slice_count,
4100 				    sizeof(struct pfsync_state_1301), M_TEMP,
4101 				    M_WAITOK | M_ZERO);
4102 				goto DIOCGETSTATES_retry;
4103 			}
4104 
4105 			if ((nr + count) * sizeof(*p) > ps->ps_len) {
4106 				PF_HASHROW_UNLOCK(ih);
4107 				goto DIOCGETSTATES_full;
4108 			}
4109 
4110 			LIST_FOREACH(s, &ih->states, entry) {
4111 				if (s->timeout == PFTM_UNLINKED)
4112 					continue;
4113 
4114 				pfsync_state_export((union pfsync_state_union *)p,
4115 				    s, PFSYNC_MSG_VERSION_1301);
4116 				p++;
4117 				nr++;
4118 			}
4119 			PF_HASHROW_UNLOCK(ih);
4120 			error = copyout(pstore, out,
4121 			    sizeof(struct pfsync_state_1301) * count);
4122 			if (error)
4123 				goto fail;
4124 			out = ps->ps_states + nr;
4125 		}
4126 DIOCGETSTATES_full:
4127 		ps->ps_len = sizeof(struct pfsync_state_1301) * nr;
4128 		free(pstore, M_TEMP);
4129 
4130 		break;
4131 	}
4132 
4133 	case DIOCGETSTATESV2: {
4134 		struct pfioc_states_v2	*ps = (struct pfioc_states_v2 *)addr;
4135 		struct pf_kstate	*s;
4136 		struct pf_state_export	*pstore, *p;
4137 		int i, nr;
4138 		size_t slice_count = 16, count;
4139 		void *out;
4140 
4141 		if (ps->ps_req_version > PF_STATE_VERSION) {
4142 			error = ENOTSUP;
4143 			goto fail;
4144 		}
4145 
4146 		if (ps->ps_len <= 0) {
4147 			nr = uma_zone_get_cur(V_pf_state_z);
4148 			ps->ps_len = sizeof(struct pf_state_export) * nr;
4149 			break;
4150 		}
4151 
4152 		out = ps->ps_states;
4153 		pstore = mallocarray(slice_count,
4154 		    sizeof(struct pf_state_export), M_TEMP, M_WAITOK | M_ZERO);
4155 		nr = 0;
4156 
4157 		for (i = 0; i <= V_pf_hashmask; i++) {
4158 			struct pf_idhash *ih = &V_pf_idhash[i];
4159 
4160 DIOCGETSTATESV2_retry:
4161 			p = pstore;
4162 
4163 			if (LIST_EMPTY(&ih->states))
4164 				continue;
4165 
4166 			PF_HASHROW_LOCK(ih);
4167 			count = 0;
4168 			LIST_FOREACH(s, &ih->states, entry) {
4169 				if (s->timeout == PFTM_UNLINKED)
4170 					continue;
4171 				count++;
4172 			}
4173 
4174 			if (count > slice_count) {
4175 				PF_HASHROW_UNLOCK(ih);
4176 				free(pstore, M_TEMP);
4177 				slice_count = count * 2;
4178 				pstore = mallocarray(slice_count,
4179 				    sizeof(struct pf_state_export), M_TEMP,
4180 				    M_WAITOK | M_ZERO);
4181 				goto DIOCGETSTATESV2_retry;
4182 			}
4183 
4184 			if ((nr + count) * sizeof(*p) > ps->ps_len) {
4185 				PF_HASHROW_UNLOCK(ih);
4186 				goto DIOCGETSTATESV2_full;
4187 			}
4188 
4189 			LIST_FOREACH(s, &ih->states, entry) {
4190 				if (s->timeout == PFTM_UNLINKED)
4191 					continue;
4192 
4193 				pf_state_export(p, s);
4194 				p++;
4195 				nr++;
4196 			}
4197 			PF_HASHROW_UNLOCK(ih);
4198 			error = copyout(pstore, out,
4199 			    sizeof(struct pf_state_export) * count);
4200 			if (error)
4201 				goto fail;
4202 			out = ps->ps_states + nr;
4203 		}
4204 DIOCGETSTATESV2_full:
4205 		ps->ps_len = nr * sizeof(struct pf_state_export);
4206 		free(pstore, M_TEMP);
4207 
4208 		break;
4209 	}
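
	/*
	 * Illustrative userland sketch (an assumption, not part of this
	 * file): both state dumps use the same two-call sizing
	 * convention.  A first call with ps_len == 0 reports the space
	 * required; a second call fills the caller's buffer and writes
	 * back the number of bytes actually used.  States may be created
	 * between the two calls, so robust callers over-allocate or
	 * retry:
	 *
	 *	struct pfioc_states_v2 ps = { 0 };
	 *
	 *	ps.ps_req_version = PF_STATE_VERSION;
	 *	if (ioctl(dev, DIOCGETSTATESV2, &ps) == -1)
	 *		err(1, "DIOCGETSTATESV2");
	 *	ps.ps_states = malloc(ps.ps_len);
	 *	if (ps.ps_states == NULL)
	 *		err(1, "malloc");
	 *	if (ioctl(dev, DIOCGETSTATESV2, &ps) == -1)
	 *		err(1, "DIOCGETSTATESV2");
	 *	... iterate ps.ps_len / sizeof(struct pf_state_export)
	 *	    entries ...
	 */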
4210 #endif
4211 	case DIOCGETSTATUSNV: {
4212 		error = pf_getstatus((struct pfioc_nv *)addr);
4213 		break;
4214 	}
4215 
4216 	case DIOCSETSTATUSIF: {
4217 		struct pfioc_if	*pi = (struct pfioc_if *)addr;
4218 
4219 		if (pi->ifname[0] == 0) {
4220 			bzero(V_pf_status.ifname, IFNAMSIZ);
4221 			break;
4222 		}
4223 		PF_RULES_WLOCK();
4224 		error = pf_user_strcpy(V_pf_status.ifname, pi->ifname, IFNAMSIZ);
4225 		PF_RULES_WUNLOCK();
4226 		break;
4227 	}
4228 
4229 	case DIOCCLRSTATUS: {
4230 		pf_ioctl_clear_status();
4231 		break;
4232 	}
4233 
4234 	case DIOCNATLOOK: {
4235 		struct pfioc_natlook	*pnl = (struct pfioc_natlook *)addr;
4236 
4237 		error = pf_ioctl_natlook(pnl);
4238 		break;
4239 	}
4240 
4241 	case DIOCSETTIMEOUT: {
4242 		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;
4243 
4244 		error = pf_ioctl_set_timeout(pt->timeout, pt->seconds,
4245 		    &pt->seconds);
4246 		break;
4247 	}
4248 
4249 	case DIOCGETTIMEOUT: {
4250 		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;
4251 
4252 		error = pf_ioctl_get_timeout(pt->timeout, &pt->seconds);
4253 		break;
4254 	}
4255 
4256 	case DIOCGETLIMIT: {
4257 		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;
4258 
4259 		error = pf_ioctl_get_limit(pl->index, &pl->limit);
4260 		break;
4261 	}
4262 
4263 	case DIOCSETLIMIT: {
4264 		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;
4265 		unsigned int old_limit;
4266 
4267 		error = pf_ioctl_set_limit(pl->index, pl->limit, &old_limit);
4268 		pl->limit = old_limit;
4269 		break;
4270 	}
4271 
4272 	case DIOCSETDEBUG: {
4273 		u_int32_t	*level = (u_int32_t *)addr;
4274 
4275 		PF_RULES_WLOCK();
4276 		V_pf_status.debug = *level;
4277 		PF_RULES_WUNLOCK();
4278 		break;
4279 	}
4280 
4281 	case DIOCCLRRULECTRS: {
4282 		/* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
4283 		struct pf_kruleset	*ruleset = &pf_main_ruleset;
4284 		struct pf_krule		*rule;
4285 
4286 		PF_RULES_WLOCK();
4287 		TAILQ_FOREACH(rule,
4288 		    ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
4289 			pf_counter_u64_zero(&rule->evaluations);
4290 			for (int i = 0; i < 2; i++) {
4291 				pf_counter_u64_zero(&rule->packets[i]);
4292 				pf_counter_u64_zero(&rule->bytes[i]);
4293 			}
4294 		}
4295 		PF_RULES_WUNLOCK();
4296 		break;
4297 	}
4298 
4299 	case DIOCGIFSPEEDV0:
4300 	case DIOCGIFSPEEDV1: {
4301 		struct pf_ifspeed_v1	*psp = (struct pf_ifspeed_v1 *)addr;
4302 		struct pf_ifspeed_v1	ps;
4303 		struct ifnet		*ifp;
4304 
4305 		if (psp->ifname[0] == '\0') {
4306 			error = EINVAL;
4307 			goto fail;
4308 		}
4309 
4310 		error = pf_user_strcpy(ps.ifname, psp->ifname, IFNAMSIZ);
4311 		if (error != 0)
4312 			goto fail;
4313 		ifp = ifunit(ps.ifname);
4314 		if (ifp != NULL) {
4315 			psp->baudrate32 =
4316 			    (u_int32_t)uqmin(ifp->if_baudrate, UINT_MAX);
4317 			if (cmd == DIOCGIFSPEEDV1)
4318 				psp->baudrate = ifp->if_baudrate;
4319 		} else {
4320 			error = EINVAL;
4321 		}
4322 		break;
4323 	}
4324 
4325 #ifdef ALTQ
4326 	case DIOCSTARTALTQ: {
4327 		struct pf_altq		*altq;
4328 
4329 		PF_RULES_WLOCK();
4330 		/* enable all altq interfaces on active list */
4331 		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
4332 			if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
4333 				error = pf_enable_altq(altq);
4334 				if (error != 0)
4335 					break;
4336 			}
4337 		}
4338 		if (error == 0)
4339 			V_pf_altq_running = 1;
4340 		PF_RULES_WUNLOCK();
4341 		DPFPRINTF(PF_DEBUG_MISC, "altq: started");
4342 		break;
4343 	}
4344 
4345 	case DIOCSTOPALTQ: {
4346 		struct pf_altq		*altq;
4347 
4348 		PF_RULES_WLOCK();
4349 		/* disable all altq interfaces on active list */
4350 		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
4351 			if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
4352 				error = pf_disable_altq(altq);
4353 				if (error != 0)
4354 					break;
4355 			}
4356 		}
4357 		if (error == 0)
4358 			V_pf_altq_running = 0;
4359 		PF_RULES_WUNLOCK();
4360 		DPFPRINTF(PF_DEBUG_MISC, "altq: stopped");
4361 		break;
4362 	}
4363 
4364 	case DIOCADDALTQV0:
4365 	case DIOCADDALTQV1: {
4366 		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
4367 		struct pf_altq		*altq, *a;
4368 		struct ifnet		*ifp;
4369 
4370 		altq = malloc(sizeof(*altq), M_PFALTQ, M_WAITOK | M_ZERO);
4371 		error = pf_import_kaltq(pa, altq, IOCPARM_LEN(cmd));
4372 		if (error)
4373 			goto fail;
4374 		altq->local_flags = 0;
4375 
4376 		PF_RULES_WLOCK();
4377 		if (pa->ticket != V_ticket_altqs_inactive) {
4378 			PF_RULES_WUNLOCK();
4379 			free(altq, M_PFALTQ);
4380 			error = EBUSY;
4381 			goto fail;
4382 		}
4383 
4384 		/*
4385 		 * If this is for a queue, find the discipline and
4386 		 * copy the necessary fields.
4387 		 */
4388 		if (altq->qname[0] != 0) {
4389 			if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
4390 				PF_RULES_WUNLOCK();
4391 				error = EBUSY;
4392 				free(altq, M_PFALTQ);
4393 				goto fail;
4394 			}
4395 			altq->altq_disc = NULL;
4396 			TAILQ_FOREACH(a, V_pf_altq_ifs_inactive, entries) {
4397 				if (strncmp(a->ifname, altq->ifname,
4398 				    IFNAMSIZ) == 0) {
4399 					altq->altq_disc = a->altq_disc;
4400 					break;
4401 				}
4402 			}
4403 		}
4404 
4405 		if ((ifp = ifunit(altq->ifname)) == NULL)
4406 			altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
4407 		else
4408 			error = altq_add(ifp, altq);
4409 
4410 		if (error) {
4411 			PF_RULES_WUNLOCK();
4412 			free(altq, M_PFALTQ);
4413 			goto fail;
4414 		}
4415 
4416 		if (altq->qname[0] != 0)
4417 			TAILQ_INSERT_TAIL(V_pf_altqs_inactive, altq, entries);
4418 		else
4419 			TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, altq, entries);
4420 		/* version error check done on import above */
4421 		pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
4422 		PF_RULES_WUNLOCK();
4423 		break;
4424 	}
4425 
4426 	case DIOCGETALTQSV0:
4427 	case DIOCGETALTQSV1: {
4428 		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
4429 		struct pf_altq		*altq;
4430 
4431 		PF_RULES_RLOCK();
4432 		pa->nr = 0;
4433 		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries)
4434 			pa->nr++;
4435 		TAILQ_FOREACH(altq, V_pf_altqs_active, entries)
4436 			pa->nr++;
4437 		pa->ticket = V_ticket_altqs_active;
4438 		PF_RULES_RUNLOCK();
4439 		break;
4440 	}
4441 
4442 	case DIOCGETALTQV0:
4443 	case DIOCGETALTQV1: {
4444 		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
4445 		struct pf_altq		*altq;
4446 
4447 		PF_RULES_RLOCK();
4448 		if (pa->ticket != V_ticket_altqs_active) {
4449 			PF_RULES_RUNLOCK();
4450 			error = EBUSY;
4451 			goto fail;
4452 		}
4453 		altq = pf_altq_get_nth_active(pa->nr);
4454 		if (altq == NULL) {
4455 			PF_RULES_RUNLOCK();
4456 			error = EBUSY;
4457 			goto fail;
4458 		}
4459 		pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
4460 		PF_RULES_RUNLOCK();
4461 		break;
4462 	}
4463 
4464 	case DIOCCHANGEALTQV0:
4465 	case DIOCCHANGEALTQV1:
4466 		/* CHANGEALTQ not supported yet! */
4467 		error = ENODEV;
4468 		break;
4469 
4470 	case DIOCGETQSTATSV0:
4471 	case DIOCGETQSTATSV1: {
4472 		struct pfioc_qstats_v1	*pq = (struct pfioc_qstats_v1 *)addr;
4473 		struct pf_altq		*altq;
4474 		int			 nbytes;
4475 		u_int32_t		 version;
4476 
4477 		PF_RULES_RLOCK();
4478 		if (pq->ticket != V_ticket_altqs_active) {
4479 			PF_RULES_RUNLOCK();
4480 			error = EBUSY;
4481 			goto fail;
4482 		}
4483 		nbytes = pq->nbytes;
4484 		altq = pf_altq_get_nth_active(pq->nr);
4485 		if (altq == NULL) {
4486 			PF_RULES_RUNLOCK();
4487 			error = EBUSY;
4488 			goto fail;
4489 		}
4490 
4491 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) != 0) {
4492 			PF_RULES_RUNLOCK();
4493 			error = ENXIO;
4494 			goto fail;
4495 		}
4496 		PF_RULES_RUNLOCK();
4497 		if (cmd == DIOCGETQSTATSV0)
4498 			version = 0;  /* DIOCGETQSTATSV0 means stats struct v0 */
4499 		else
4500 			version = pq->version;
4501 		error = altq_getqstats(altq, pq->buf, &nbytes, version);
4502 		if (error == 0) {
4503 			pq->scheduler = altq->scheduler;
4504 			pq->nbytes = nbytes;
4505 		}
4506 		break;
4507 	}
4508 #endif /* ALTQ */
4509 
4510 	case DIOCBEGINADDRS: {
4511 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4512 
4513 		error = pf_ioctl_begin_addrs(&pp->ticket);
4514 		break;
4515 	}
4516 
4517 	case DIOCADDADDR: {
4518 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4519 		struct pf_nl_pooladdr npp = {};
4520 
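		/*
		 * The leading members of struct pf_nl_pooladdr are laid
		 * out identically to struct pfioc_pooladdr, and the
		 * memcpy() below copies only that common prefix, so the
		 * trailing 'which' selector assigned first is not
		 * clobbered by the copy.
		 */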
4521 		npp.which = PF_RDR;
4522 		memcpy(&npp, pp, sizeof(*pp));
4523 		error = pf_ioctl_add_addr(&npp);
4524 		break;
4525 	}
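
	/*
	 * Illustrative userland sketch (an assumption, not part of this
	 * file): pool addresses are staged into the shared pabuf between
	 * DIOCBEGINADDRS and the ioctl that adds the rule consuming
	 * them; the returned ticket becomes that rule's pool_ticket.
	 * Netmask setup is elided for brevity:
	 *
	 *	struct pfioc_pooladdr pp = { 0 };
	 *
	 *	if (ioctl(dev, DIOCBEGINADDRS, &pp) == -1)
	 *		err(1, "DIOCBEGINADDRS");
	 *	pp.af = AF_INET;
	 *	pp.addr.addr.type = PF_ADDR_ADDRMASK;
	 *	inet_pton(AF_INET, "192.0.2.1", &pp.addr.addr.v.a.addr.v4);
	 *	if (ioctl(dev, DIOCADDADDR, &pp) == -1)
	 *		err(1, "DIOCADDADDR");
	 *	... add the rule with pool_ticket = pp.ticket ...
	 */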
4526 
4527 	case DIOCGETADDRS: {
4528 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4529 		struct pf_nl_pooladdr npp = {};
4530 
4531 		npp.which = PF_RDR;
4532 		memcpy(&npp, pp, sizeof(*pp));
4533 		error = pf_ioctl_get_addrs(&npp);
4534 		memcpy(pp, &npp, sizeof(*pp));
4535 
4536 		break;
4537 	}
4538 
4539 	case DIOCGETADDR: {
4540 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4541 		struct pf_nl_pooladdr npp = {};
4542 
4543 		npp.which = PF_RDR;
4544 		memcpy(&npp, pp, sizeof(*pp));
4545 		error = pf_ioctl_get_addr(&npp);
4546 		memcpy(pp, &npp, sizeof(*pp));
4547 
4548 		break;
4549 	}
4550 
4551 	case DIOCCHANGEADDR: {
4552 		struct pfioc_pooladdr	*pca = (struct pfioc_pooladdr *)addr;
4553 		struct pf_kpool		*pool;
4554 		struct pf_kpooladdr	*oldpa = NULL, *newpa = NULL;
4555 		struct pf_kruleset	*ruleset;
4556 		struct pfi_kkif		*kif = NULL;
4557 
4558 		pca->anchor[sizeof(pca->anchor) - 1] = '\0';
4559 
4560 		if (pca->action < PF_CHANGE_ADD_HEAD ||
4561 		    pca->action > PF_CHANGE_REMOVE) {
4562 			error = EINVAL;
4563 			goto fail;
4564 		}
4565 		if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
4566 		    pca->addr.addr.type != PF_ADDR_DYNIFTL &&
4567 		    pca->addr.addr.type != PF_ADDR_TABLE) {
4568 			error = EINVAL;
4569 			goto fail;
4570 		}
4571 		if (pca->addr.addr.p.dyn != NULL) {
4572 			error = EINVAL;
4573 			goto fail;
4574 		}
4575 
4576 		if (pca->action != PF_CHANGE_REMOVE) {
4577 #ifndef INET
4578 			if (pca->af == AF_INET) {
4579 				error = EAFNOSUPPORT;
4580 				goto fail;
4581 			}
4582 #endif /* INET */
4583 #ifndef INET6
4584 			if (pca->af == AF_INET6) {
4585 				error = EAFNOSUPPORT;
4586 				goto fail;
4587 			}
4588 #endif /* INET6 */
4589 			newpa = malloc(sizeof(*newpa), M_PFRULE, M_WAITOK);
4590 			bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
4591 			if (newpa->ifname[0])
4592 				kif = pf_kkif_create(M_WAITOK);
4593 			newpa->kif = NULL;
4594 		}
4595 #define	ERROUT(x)	ERROUT_IOCTL(DIOCCHANGEADDR_error, x)
4596 		PF_RULES_WLOCK();
4597 		ruleset = pf_find_kruleset(pca->anchor);
4598 		if (ruleset == NULL)
4599 			ERROUT(EBUSY);
4600 
4601 		pool = pf_get_kpool(pca->anchor, pca->ticket, pca->r_action,
4602 		    pca->r_num, pca->r_last, 1, 1, PF_RDR);
4603 		if (pool == NULL)
4604 			ERROUT(EBUSY);
4605 
4606 		if (pca->action != PF_CHANGE_REMOVE) {
4607 			if (newpa->ifname[0]) {
4608 				newpa->kif = pfi_kkif_attach(kif, newpa->ifname);
4609 				pfi_kkif_ref(newpa->kif);
4610 				kif = NULL;
4611 			}
4612 
4613 			switch (newpa->addr.type) {
4614 			case PF_ADDR_DYNIFTL:
4615 				error = pfi_dynaddr_setup(&newpa->addr,
4616 				    pca->af);
4617 				break;
4618 			case PF_ADDR_TABLE:
4619 				newpa->addr.p.tbl = pfr_attach_table(ruleset,
4620 				    newpa->addr.v.tblname);
4621 				if (newpa->addr.p.tbl == NULL)
4622 					error = ENOMEM;
4623 				break;
4624 			}
4625 			if (error)
4626 				goto DIOCCHANGEADDR_error;
4627 		}
4628 
4629 		switch (pca->action) {
4630 		case PF_CHANGE_ADD_HEAD:
4631 			oldpa = TAILQ_FIRST(&pool->list);
4632 			break;
4633 		case PF_CHANGE_ADD_TAIL:
4634 			oldpa = TAILQ_LAST(&pool->list, pf_kpalist);
4635 			break;
4636 		default:
4637 			oldpa = TAILQ_FIRST(&pool->list);
4638 			for (int i = 0; oldpa && i < pca->nr; i++)
4639 				oldpa = TAILQ_NEXT(oldpa, entries);
4640 
4641 			if (oldpa == NULL)
4642 				ERROUT(EINVAL);
4643 		}
4644 
4645 		if (pca->action == PF_CHANGE_REMOVE) {
4646 			TAILQ_REMOVE(&pool->list, oldpa, entries);
4647 			switch (oldpa->addr.type) {
4648 			case PF_ADDR_DYNIFTL:
4649 				pfi_dynaddr_remove(oldpa->addr.p.dyn);
4650 				break;
4651 			case PF_ADDR_TABLE:
4652 				pfr_detach_table(oldpa->addr.p.tbl);
4653 				break;
4654 			}
4655 			if (oldpa->kif)
4656 				pfi_kkif_unref(oldpa->kif);
4657 			free(oldpa, M_PFRULE);
4658 		} else {
4659 			if (oldpa == NULL)
4660 				TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
4661 			else if (pca->action == PF_CHANGE_ADD_HEAD ||
4662 			    pca->action == PF_CHANGE_ADD_BEFORE)
4663 				TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
4664 			else
4665 				TAILQ_INSERT_AFTER(&pool->list, oldpa,
4666 				    newpa, entries);
4667 		}
4668 
4669 		pool->cur = TAILQ_FIRST(&pool->list);
4670 		pf_addrcpy(&pool->counter, &pool->cur->addr.v.a.addr, pca->af);
4671 		PF_RULES_WUNLOCK();
4672 		break;
4673 
4674 #undef ERROUT
4675 DIOCCHANGEADDR_error:
4676 		if (newpa != NULL) {
4677 			if (newpa->kif)
4678 				pfi_kkif_unref(newpa->kif);
4679 			free(newpa, M_PFRULE);
4680 		}
4681 		PF_RULES_WUNLOCK();
4682 		pf_kkif_free(kif);
4683 		break;
4684 	}
4685 
4686 	case DIOCGETRULESETS: {
4687 		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
4688 
4689 		pr->path[sizeof(pr->path) - 1] = '\0';
4690 
4691 		error = pf_ioctl_get_rulesets(pr);
4692 		break;
4693 	}
4694 
4695 	case DIOCGETRULESET: {
4696 		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
4697 
4698 		pr->path[sizeof(pr->path) - 1] = '\0';
4699 
4700 		error = pf_ioctl_get_ruleset(pr);
4701 		break;
4702 	}
4703 
4704 	case DIOCRCLRTABLES: {
4705 		struct pfioc_table *io = (struct pfioc_table *)addr;
4706 
4707 		if (io->pfrio_esize != 0) {
4708 			error = ENODEV;
4709 			goto fail;
4710 		}
4711 		PF_RULES_WLOCK();
4712 		error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
4713 		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
4714 		PF_RULES_WUNLOCK();
4715 		break;
4716 	}
4717 
4718 	case DIOCRADDTABLES: {
4719 		struct pfioc_table *io = (struct pfioc_table *)addr;
4720 		struct pfr_table *pfrts;
4721 		size_t totlen;
4722 
4723 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4724 			error = ENODEV;
4725 			goto fail;
4726 		}
4727 
4728 		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4729 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4730 			error = ENOMEM;
4731 			goto fail;
4732 		}
4733 
4734 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4735 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4736 		    M_TEMP, M_WAITOK);
4737 		error = copyin(io->pfrio_buffer, pfrts, totlen);
4738 		if (error) {
4739 			free(pfrts, M_TEMP);
4740 			goto fail;
4741 		}
4742 		PF_RULES_WLOCK();
4743 		error = pfr_add_tables(pfrts, io->pfrio_size,
4744 		    &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4745 		PF_RULES_WUNLOCK();
4746 		free(pfrts, M_TEMP);
4747 		break;
4748 	}
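
	/*
	 * Illustrative userland sketch (an assumption, not part of this
	 * file): the DIOCR* table ioctls all pass a counted array of
	 * fixed-size records; pfrio_esize lets the kernel reject callers
	 * built against a mismatched struct layout:
	 *
	 *	struct pfioc_table io = { 0 };
	 *	struct pfr_table tbl = { 0 };
	 *
	 *	strlcpy(tbl.pfrt_name, "badhosts", sizeof(tbl.pfrt_name));
	 *	tbl.pfrt_flags = PFR_TFLAG_PERSIST;
	 *	io.pfrio_buffer = &tbl;
	 *	io.pfrio_esize = sizeof(tbl);
	 *	io.pfrio_size = 1;
	 *	if (ioctl(dev, DIOCRADDTABLES, &io) == -1)
	 *		err(1, "DIOCRADDTABLES");
	 */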
4749 
4750 	case DIOCRDELTABLES: {
4751 		struct pfioc_table *io = (struct pfioc_table *)addr;
4752 		struct pfr_table *pfrts;
4753 		size_t totlen;
4754 
4755 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4756 			error = ENODEV;
4757 			goto fail;
4758 		}
4759 
4760 		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4761 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4762 			error = ENOMEM;
4763 			goto fail;
4764 		}
4765 
4766 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4767 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4768 		    M_TEMP, M_WAITOK);
4769 		error = copyin(io->pfrio_buffer, pfrts, totlen);
4770 		if (error) {
4771 			free(pfrts, M_TEMP);
4772 			goto fail;
4773 		}
4774 		PF_RULES_WLOCK();
4775 		error = pfr_del_tables(pfrts, io->pfrio_size,
4776 		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4777 		PF_RULES_WUNLOCK();
4778 		free(pfrts, M_TEMP);
4779 		break;
4780 	}
4781 
4782 	case DIOCRGETTABLES: {
4783 		struct pfioc_table *io = (struct pfioc_table *)addr;
4784 		struct pfr_table *pfrts;
4785 		size_t totlen;
4786 		int n;
4787 
4788 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4789 			error = ENODEV;
4790 			goto fail;
4791 		}
4792 		PF_RULES_RLOCK();
4793 		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4794 		if (n < 0) {
4795 			PF_RULES_RUNLOCK();
4796 			error = EINVAL;
4797 			goto fail;
4798 		}
4799 		io->pfrio_size = min(io->pfrio_size, n);
4800 
4801 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4802 
4803 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4804 		    M_TEMP, M_NOWAIT | M_ZERO);
4805 		if (pfrts == NULL) {
4806 			error = ENOMEM;
4807 			PF_RULES_RUNLOCK();
4808 			goto fail;
4809 		}
4810 		error = pfr_get_tables(&io->pfrio_table, pfrts,
4811 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4812 		PF_RULES_RUNLOCK();
4813 		if (error == 0)
4814 			error = copyout(pfrts, io->pfrio_buffer, totlen);
4815 		free(pfrts, M_TEMP);
4816 		break;
4817 	}
4818 
4819 	case DIOCRGETTSTATS: {
4820 		struct pfioc_table *io = (struct pfioc_table *)addr;
4821 		struct pfr_tstats *pfrtstats;
4822 		size_t totlen;
4823 		int n;
4824 
4825 		if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
4826 			error = ENODEV;
4827 			goto fail;
4828 		}
4829 		PF_TABLE_STATS_LOCK();
4830 		PF_RULES_RLOCK();
4831 		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4832 		if (n < 0) {
4833 			PF_RULES_RUNLOCK();
4834 			PF_TABLE_STATS_UNLOCK();
4835 			error = EINVAL;
4836 			goto fail;
4837 		}
4838 		io->pfrio_size = min(io->pfrio_size, n);
4839 
4840 		totlen = io->pfrio_size * sizeof(struct pfr_tstats);
4841 		pfrtstats = mallocarray(io->pfrio_size,
4842 		    sizeof(struct pfr_tstats), M_TEMP, M_NOWAIT | M_ZERO);
4843 		if (pfrtstats == NULL) {
4844 			error = ENOMEM;
4845 			PF_RULES_RUNLOCK();
4846 			PF_TABLE_STATS_UNLOCK();
4847 			goto fail;
4848 		}
4849 		error = pfr_get_tstats(&io->pfrio_table, pfrtstats,
4850 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4851 		PF_RULES_RUNLOCK();
4852 		PF_TABLE_STATS_UNLOCK();
4853 		if (error == 0)
4854 			error = copyout(pfrtstats, io->pfrio_buffer, totlen);
4855 		free(pfrtstats, M_TEMP);
4856 		break;
4857 	}
4858 
4859 	case DIOCRCLRTSTATS: {
4860 		struct pfioc_table *io = (struct pfioc_table *)addr;
4861 		struct pfr_table *pfrts;
4862 		size_t totlen;
4863 
4864 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4865 			error = ENODEV;
4866 			goto fail;
4867 		}
4868 
4869 		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4870 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4871 			/* We used to count tables and use the minimum required
4872 			 * size, so we didn't fail on overly large requests.
4873 			 * Keep doing so. */
4874 			io->pfrio_size = pf_ioctl_maxcount;
4875 			goto fail;
4876 		}
4877 
4878 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4879 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4880 		    M_TEMP, M_WAITOK);
4881 		error = copyin(io->pfrio_buffer, pfrts, totlen);
4882 		if (error) {
4883 			free(pfrts, M_TEMP);
4884 			goto fail;
4885 		}
4886 
4887 		PF_TABLE_STATS_LOCK();
4888 		PF_RULES_RLOCK();
4889 		error = pfr_clr_tstats(pfrts, io->pfrio_size,
4890 		    &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4891 		PF_RULES_RUNLOCK();
4892 		PF_TABLE_STATS_UNLOCK();
4893 		free(pfrts, M_TEMP);
4894 		break;
4895 	}
4896 
4897 	case DIOCRSETTFLAGS: {
4898 		struct pfioc_table *io = (struct pfioc_table *)addr;
4899 		struct pfr_table *pfrts;
4900 		size_t totlen;
4901 		int n;
4902 
4903 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4904 			error = ENODEV;
4905 			goto fail;
4906 		}
4907 
4908 		PF_RULES_RLOCK();
4909 		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4910 		if (n < 0) {
4911 			PF_RULES_RUNLOCK();
4912 			error = EINVAL;
4913 			goto fail;
4914 		}
4915 
4916 		io->pfrio_size = min(io->pfrio_size, n);
4917 		PF_RULES_RUNLOCK();
4918 
4919 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4920 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4921 		    M_TEMP, M_WAITOK);
4922 		error = copyin(io->pfrio_buffer, pfrts, totlen);
4923 		if (error) {
4924 			free(pfrts, M_TEMP);
4925 			goto fail;
4926 		}
4927 		PF_RULES_WLOCK();
4928 		error = pfr_set_tflags(pfrts, io->pfrio_size,
4929 		    io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
4930 		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4931 		PF_RULES_WUNLOCK();
4932 		free(pfrts, M_TEMP);
4933 		break;
4934 	}
4935 
4936 	case DIOCRCLRADDRS: {
4937 		struct pfioc_table *io = (struct pfioc_table *)addr;
4938 
4939 		if (io->pfrio_esize != 0) {
4940 			error = ENODEV;
4941 			goto fail;
4942 		}
4943 		PF_RULES_WLOCK();
4944 		error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
4945 		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
4946 		PF_RULES_WUNLOCK();
4947 		break;
4948 	}
4949 
4950 	case DIOCRADDADDRS: {
4951 		struct pfioc_table *io = (struct pfioc_table *)addr;
4952 		struct pfr_addr *pfras;
4953 		size_t totlen;
4954 
4955 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4956 			error = ENODEV;
4957 			goto fail;
4958 		}
4959 		if (io->pfrio_size < 0 ||
4960 		    io->pfrio_size > pf_ioctl_maxcount ||
4961 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4962 			error = EINVAL;
4963 			goto fail;
4964 		}
4965 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
4966 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4967 		    M_TEMP, M_WAITOK);
4968 		error = copyin(io->pfrio_buffer, pfras, totlen);
4969 		if (error) {
4970 			free(pfras, M_TEMP);
4971 			goto fail;
4972 		}
4973 		PF_RULES_WLOCK();
4974 		error = pfr_add_addrs(&io->pfrio_table, pfras,
4975 		    io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
4976 		    PFR_FLAG_USERIOCTL);
4977 		PF_RULES_WUNLOCK();
4978 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
4979 			error = copyout(pfras, io->pfrio_buffer, totlen);
4980 		free(pfras, M_TEMP);
4981 		break;
4982 	}
4983 
4984 	case DIOCRDELADDRS: {
4985 		struct pfioc_table *io = (struct pfioc_table *)addr;
4986 		struct pfr_addr *pfras;
4987 		size_t totlen;
4988 
4989 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4990 			error = ENODEV;
4991 			goto fail;
4992 		}
4993 		if (io->pfrio_size < 0 ||
4994 		    io->pfrio_size > pf_ioctl_maxcount ||
4995 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4996 			error = EINVAL;
4997 			goto fail;
4998 		}
4999 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
5000 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
5001 		    M_TEMP, M_WAITOK);
5002 		error = copyin(io->pfrio_buffer, pfras, totlen);
5003 		if (error) {
5004 			free(pfras, M_TEMP);
5005 			goto fail;
5006 		}
5007 		PF_RULES_WLOCK();
5008 		error = pfr_del_addrs(&io->pfrio_table, pfras,
5009 		    io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
5010 		    PFR_FLAG_USERIOCTL);
5011 		PF_RULES_WUNLOCK();
5012 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
5013 			error = copyout(pfras, io->pfrio_buffer, totlen);
5014 		free(pfras, M_TEMP);
5015 		break;
5016 	}
5017 
5018 	case DIOCRSETADDRS: {
5019 		struct pfioc_table *io = (struct pfioc_table *)addr;
5020 		struct pfr_addr *pfras;
5021 		size_t totlen, count;
5022 
5023 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
5024 			error = ENODEV;
5025 			goto fail;
5026 		}
5027 		if (io->pfrio_size < 0 || io->pfrio_size2 < 0) {
5028 			error = EINVAL;
5029 			goto fail;
5030 		}
5031 		count = max(io->pfrio_size, io->pfrio_size2);
5032 		if (count > pf_ioctl_maxcount ||
5033 		    WOULD_OVERFLOW(count, sizeof(struct pfr_addr))) {
5034 			error = EINVAL;
5035 			goto fail;
5036 		}
5037 		totlen = count * sizeof(struct pfr_addr);
5038 		pfras = mallocarray(count, sizeof(struct pfr_addr), M_TEMP,
5039 		    M_WAITOK);
5040 		error = copyin(io->pfrio_buffer, pfras, totlen);
5041 		if (error) {
5042 			free(pfras, M_TEMP);
5043 			goto fail;
5044 		}
5045 		PF_RULES_WLOCK();
5046 		error = pfr_set_addrs(&io->pfrio_table, pfras,
5047 		    io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
5048 		    &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
5049 		    PFR_FLAG_USERIOCTL, 0);
5050 		PF_RULES_WUNLOCK();
5051 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
5052 			error = copyout(pfras, io->pfrio_buffer, totlen);
5053 		free(pfras, M_TEMP);
5054 		break;
5055 	}
5056 
5057 	case DIOCRGETADDRS: {
5058 		struct pfioc_table *io = (struct pfioc_table *)addr;
5059 		struct pfr_addr *pfras;
5060 		size_t totlen;
5061 
5062 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
5063 			error = ENODEV;
5064 			goto fail;
5065 		}
5066 		if (io->pfrio_size < 0 ||
5067 		    io->pfrio_size > pf_ioctl_maxcount ||
5068 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
5069 			error = EINVAL;
5070 			goto fail;
5071 		}
5072 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
5073 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
5074 		    M_TEMP, M_WAITOK | M_ZERO);
5075 		PF_RULES_RLOCK();
5076 		error = pfr_get_addrs(&io->pfrio_table, pfras,
5077 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
5078 		PF_RULES_RUNLOCK();
5079 		if (error == 0)
5080 			error = copyout(pfras, io->pfrio_buffer, totlen);
5081 		free(pfras, M_TEMP);
5082 		break;
5083 	}
5084 
5085 	case DIOCRGETASTATS: {
5086 		struct pfioc_table *io = (struct pfioc_table *)addr;
5087 		struct pfr_astats *pfrastats;
5088 		size_t totlen;
5089 
5090 		if (io->pfrio_esize != sizeof(struct pfr_astats)) {
5091 			error = ENODEV;
5092 			goto fail;
5093 		}
5094 		if (io->pfrio_size < 0 ||
5095 		    io->pfrio_size > pf_ioctl_maxcount ||
5096 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_astats))) {
5097 			error = EINVAL;
5098 			goto fail;
5099 		}
5100 		totlen = io->pfrio_size * sizeof(struct pfr_astats);
5101 		pfrastats = mallocarray(io->pfrio_size,
5102 		    sizeof(struct pfr_astats), M_TEMP, M_WAITOK | M_ZERO);
5103 		PF_RULES_RLOCK();
5104 		error = pfr_get_astats(&io->pfrio_table, pfrastats,
5105 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
5106 		PF_RULES_RUNLOCK();
5107 		if (error == 0)
5108 			error = copyout(pfrastats, io->pfrio_buffer, totlen);
5109 		free(pfrastats, M_TEMP);
5110 		break;
5111 	}
5112 
5113 	case DIOCRCLRASTATS: {
5114 		struct pfioc_table *io = (struct pfioc_table *)addr;
5115 		struct pfr_addr *pfras;
5116 		size_t totlen;
5117 
5118 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
5119 			error = ENODEV;
5120 			goto fail;
5121 		}
5122 		if (io->pfrio_size < 0 ||
5123 		    io->pfrio_size > pf_ioctl_maxcount ||
5124 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
5125 			error = EINVAL;
5126 			goto fail;
5127 		}
5128 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
5129 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
5130 		    M_TEMP, M_WAITOK);
5131 		error = copyin(io->pfrio_buffer, pfras, totlen);
5132 		if (error) {
5133 			free(pfras, M_TEMP);
5134 			goto fail;
5135 		}
5136 		PF_RULES_WLOCK();
5137 		error = pfr_clr_astats(&io->pfrio_table, pfras,
5138 		    io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
5139 		    PFR_FLAG_USERIOCTL);
5140 		PF_RULES_WUNLOCK();
5141 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
5142 			error = copyout(pfras, io->pfrio_buffer, totlen);
5143 		free(pfras, M_TEMP);
5144 		break;
5145 	}
5146 
5147 	case DIOCRTSTADDRS: {
5148 		struct pfioc_table *io = (struct pfioc_table *)addr;
5149 		struct pfr_addr *pfras;
5150 		size_t totlen;
5151 
5152 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
5153 			error = ENODEV;
5154 			goto fail;
5155 		}
5156 		if (io->pfrio_size < 0 ||
5157 		    io->pfrio_size > pf_ioctl_maxcount ||
5158 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
5159 			error = EINVAL;
5160 			goto fail;
5161 		}
5162 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
5163 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
5164 		    M_TEMP, M_WAITOK);
5165 		error = copyin(io->pfrio_buffer, pfras, totlen);
5166 		if (error) {
5167 			free(pfras, M_TEMP);
5168 			goto fail;
5169 		}
5170 		PF_RULES_RLOCK();
5171 		error = pfr_tst_addrs(&io->pfrio_table, pfras,
5172 		    io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
5173 		    PFR_FLAG_USERIOCTL);
5174 		PF_RULES_RUNLOCK();
5175 		if (error == 0)
5176 			error = copyout(pfras, io->pfrio_buffer, totlen);
5177 		free(pfras, M_TEMP);
5178 		break;
5179 	}
5180 
5181 	case DIOCRINADEFINE: {
5182 		struct pfioc_table *io = (struct pfioc_table *)addr;
5183 		struct pfr_addr *pfras;
5184 		size_t totlen;
5185 
5186 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
5187 			error = ENODEV;
5188 			goto fail;
5189 		}
5190 		if (io->pfrio_size < 0 ||
5191 		    io->pfrio_size > pf_ioctl_maxcount ||
5192 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
5193 			error = EINVAL;
5194 			goto fail;
5195 		}
5196 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
5197 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
5198 		    M_TEMP, M_WAITOK);
5199 		error = copyin(io->pfrio_buffer, pfras, totlen);
5200 		if (error) {
5201 			free(pfras, M_TEMP);
5202 			goto fail;
5203 		}
5204 		PF_RULES_WLOCK();
5205 		error = pfr_ina_define(&io->pfrio_table, pfras,
5206 		    io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
5207 		    io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
5208 		PF_RULES_WUNLOCK();
5209 		free(pfras, M_TEMP);
5210 		break;
5211 	}
5212 
5213 	case DIOCOSFPADD: {
5214 		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
5215 		PF_RULES_WLOCK();
5216 		error = pf_osfp_add(io);
5217 		PF_RULES_WUNLOCK();
5218 		break;
5219 	}
5220 
5221 	case DIOCOSFPGET: {
5222 		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
5223 		PF_RULES_RLOCK();
5224 		error = pf_osfp_get(io);
5225 		PF_RULES_RUNLOCK();
5226 		break;
5227 	}
5228 
5229 	case DIOCXBEGIN: {
5230 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
5231 		struct pfioc_trans_e	*ioes, *ioe;
5232 		size_t			 totlen;
5233 		int			 i;
5234 
5235 		if (io->esize != sizeof(*ioe)) {
5236 			error = ENODEV;
5237 			goto fail;
5238 		}
5239 		if (io->size < 0 ||
5240 		    io->size > pf_ioctl_maxcount ||
5241 		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
5242 			error = EINVAL;
5243 			goto fail;
5244 		}
5245 		totlen = sizeof(struct pfioc_trans_e) * io->size;
5246 		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
5247 		    M_TEMP, M_WAITOK);
5248 		error = copyin(io->array, ioes, totlen);
5249 		if (error) {
5250 			free(ioes, M_TEMP);
5251 			goto fail;
5252 		}
5253 		PF_RULES_WLOCK();
5254 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5255 			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
5256 			switch (ioe->rs_num) {
5257 			case PF_RULESET_ETH:
5258 				if ((error = pf_begin_eth(&ioe->ticket, ioe->anchor))) {
5259 					PF_RULES_WUNLOCK();
5260 					free(ioes, M_TEMP);
5261 					goto fail;
5262 				}
5263 				break;
5264 #ifdef ALTQ
5265 			case PF_RULESET_ALTQ:
5266 				if (ioe->anchor[0]) {
5267 					PF_RULES_WUNLOCK();
5268 					free(ioes, M_TEMP);
5269 					error = EINVAL;
5270 					goto fail;
5271 				}
5272 				if ((error = pf_begin_altq(&ioe->ticket))) {
5273 					PF_RULES_WUNLOCK();
5274 					free(ioes, M_TEMP);
5275 					goto fail;
5276 				}
5277 				break;
5278 #endif /* ALTQ */
5279 			case PF_RULESET_TABLE:
5280 			    {
5281 				struct pfr_table table;
5282 
5283 				bzero(&table, sizeof(table));
5284 				strlcpy(table.pfrt_anchor, ioe->anchor,
5285 				    sizeof(table.pfrt_anchor));
5286 				if ((error = pfr_ina_begin(&table,
5287 				    &ioe->ticket, NULL, 0))) {
5288 					PF_RULES_WUNLOCK();
5289 					free(ioes, M_TEMP);
5290 					goto fail;
5291 				}
5292 				break;
5293 			    }
5294 			default:
5295 				if ((error = pf_begin_rules(&ioe->ticket,
5296 				    ioe->rs_num, ioe->anchor))) {
5297 					PF_RULES_WUNLOCK();
5298 					free(ioes, M_TEMP);
5299 					goto fail;
5300 				}
5301 				break;
5302 			}
5303 		}
5304 		PF_RULES_WUNLOCK();
5305 		error = copyout(ioes, io->array, totlen);
5306 		free(ioes, M_TEMP);
5307 		break;
5308 	}
5309 
5310 	case DIOCXROLLBACK: {
5311 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
5312 		struct pfioc_trans_e	*ioe, *ioes;
5313 		size_t			 totlen;
5314 		int			 i;
5315 
5316 		if (io->esize != sizeof(*ioe)) {
5317 			error = ENODEV;
5318 			goto fail;
5319 		}
5320 		if (io->size < 0 ||
5321 		    io->size > pf_ioctl_maxcount ||
5322 		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
5323 			error = EINVAL;
5324 			goto fail;
5325 		}
5326 		totlen = sizeof(struct pfioc_trans_e) * io->size;
5327 		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
5328 		    M_TEMP, M_WAITOK);
5329 		error = copyin(io->array, ioes, totlen);
5330 		if (error) {
5331 			free(ioes, M_TEMP);
5332 			goto fail;
5333 		}
5334 		PF_RULES_WLOCK();
5335 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5336 			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
5337 			switch (ioe->rs_num) {
5338 			case PF_RULESET_ETH:
5339 				if ((error = pf_rollback_eth(ioe->ticket,
5340 				    ioe->anchor))) {
5341 					PF_RULES_WUNLOCK();
5342 					free(ioes, M_TEMP);
5343 					goto fail; /* really bad */
5344 				}
5345 				break;
5346 #ifdef ALTQ
5347 			case PF_RULESET_ALTQ:
5348 				if (ioe->anchor[0]) {
5349 					PF_RULES_WUNLOCK();
5350 					free(ioes, M_TEMP);
5351 					error = EINVAL;
5352 					goto fail;
5353 				}
5354 				if ((error = pf_rollback_altq(ioe->ticket))) {
5355 					PF_RULES_WUNLOCK();
5356 					free(ioes, M_TEMP);
5357 					goto fail; /* really bad */
5358 				}
5359 				break;
5360 #endif /* ALTQ */
5361 			case PF_RULESET_TABLE:
5362 			    {
5363 				struct pfr_table table;
5364 
5365 				bzero(&table, sizeof(table));
5366 				strlcpy(table.pfrt_anchor, ioe->anchor,
5367 				    sizeof(table.pfrt_anchor));
5368 				if ((error = pfr_ina_rollback(&table,
5369 				    ioe->ticket, NULL, 0))) {
5370 					PF_RULES_WUNLOCK();
5371 					free(ioes, M_TEMP);
5372 					goto fail; /* really bad */
5373 				}
5374 				break;
5375 			    }
5376 			default:
5377 				if ((error = pf_rollback_rules(ioe->ticket,
5378 				    ioe->rs_num, ioe->anchor))) {
5379 					PF_RULES_WUNLOCK();
5380 					free(ioes, M_TEMP);
5381 					goto fail; /* really bad */
5382 				}
5383 				break;
5384 			}
5385 		}
5386 		PF_RULES_WUNLOCK();
5387 		free(ioes, M_TEMP);
5388 		break;
5389 	}
5390 
5391 	case DIOCXCOMMIT: {
5392 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
5393 		struct pfioc_trans_e	*ioe, *ioes;
5394 		struct pf_kruleset	*rs;
5395 		struct pf_keth_ruleset	*ers;
5396 		size_t			 totlen;
5397 		int			 i;
5398 
5399 		if (io->esize != sizeof(*ioe)) {
5400 			error = ENODEV;
5401 			goto fail;
5402 		}
5403 
5404 		if (io->size < 0 ||
5405 		    io->size > pf_ioctl_maxcount ||
5406 		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
5407 			error = EINVAL;
5408 			goto fail;
5409 		}
5410 
5411 		totlen = sizeof(struct pfioc_trans_e) * io->size;
5412 		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
5413 		    M_TEMP, M_WAITOK);
5414 		error = copyin(io->array, ioes, totlen);
5415 		if (error) {
5416 			free(ioes, M_TEMP);
5417 			goto fail;
5418 		}
5419 		PF_RULES_WLOCK();
5420 		/* First make sure everything will succeed. */
5421 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5422 			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
5423 			switch (ioe->rs_num) {
5424 			case PF_RULESET_ETH:
5425 				ers = pf_find_keth_ruleset(ioe->anchor);
5426 				if (ers == NULL || ioe->ticket == 0 ||
5427 				    ioe->ticket != ers->inactive.ticket) {
5428 					PF_RULES_WUNLOCK();
5429 					free(ioes, M_TEMP);
5430 					error = EINVAL;
5431 					goto fail;
5432 				}
5433 				break;
5434 #ifdef ALTQ
5435 			case PF_RULESET_ALTQ:
5436 				if (ioe->anchor[0]) {
5437 					PF_RULES_WUNLOCK();
5438 					free(ioes, M_TEMP);
5439 					error = EINVAL;
5440 					goto fail;
5441 				}
5442 				if (!V_altqs_inactive_open || ioe->ticket !=
5443 				    V_ticket_altqs_inactive) {
5444 					PF_RULES_WUNLOCK();
5445 					free(ioes, M_TEMP);
5446 					error = EBUSY;
5447 					goto fail;
5448 				}
5449 				break;
5450 #endif /* ALTQ */
5451 			case PF_RULESET_TABLE:
5452 				rs = pf_find_kruleset(ioe->anchor);
5453 				if (rs == NULL || !rs->topen || ioe->ticket !=
5454 				    rs->tticket) {
5455 					PF_RULES_WUNLOCK();
5456 					free(ioes, M_TEMP);
5457 					error = EBUSY;
5458 					goto fail;
5459 				}
5460 				break;
5461 			default:
5462 				if (ioe->rs_num < 0 || ioe->rs_num >=
5463 				    PF_RULESET_MAX) {
5464 					PF_RULES_WUNLOCK();
5465 					free(ioes, M_TEMP);
5466 					error = EINVAL;
5467 					goto fail;
5468 				}
5469 				rs = pf_find_kruleset(ioe->anchor);
5470 				if (rs == NULL ||
5471 				    !rs->rules[ioe->rs_num].inactive.open ||
5472 				    rs->rules[ioe->rs_num].inactive.ticket !=
5473 				    ioe->ticket) {
5474 					PF_RULES_WUNLOCK();
5475 					free(ioes, M_TEMP);
5476 					error = EBUSY;
5477 					goto fail;
5478 				}
5479 				break;
5480 			}
5481 		}
5482 		/* Now do the commit - no errors should happen here. */
5483 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5484 			switch (ioe->rs_num) {
5485 			case PF_RULESET_ETH:
5486 				if ((error = pf_commit_eth(ioe->ticket, ioe->anchor))) {
5487 					PF_RULES_WUNLOCK();
5488 					free(ioes, M_TEMP);
5489 					goto fail; /* really bad */
5490 				}
5491 				break;
5492 #ifdef ALTQ
5493 			case PF_RULESET_ALTQ:
5494 				if ((error = pf_commit_altq(ioe->ticket))) {
5495 					PF_RULES_WUNLOCK();
5496 					free(ioes, M_TEMP);
5497 					goto fail; /* really bad */
5498 				}
5499 				break;
5500 #endif /* ALTQ */
5501 			case PF_RULESET_TABLE:
5502 			    {
5503 				struct pfr_table table;
5504 
5505 				bzero(&table, sizeof(table));
5506 				(void)strlcpy(table.pfrt_anchor, ioe->anchor,
5507 				    sizeof(table.pfrt_anchor));
5508 				if ((error = pfr_ina_commit(&table,
5509 				    ioe->ticket, NULL, NULL, 0))) {
5510 					PF_RULES_WUNLOCK();
5511 					free(ioes, M_TEMP);
5512 					goto fail; /* really bad */
5513 				}
5514 				break;
5515 			    }
5516 			default:
5517 				if ((error = pf_commit_rules(ioe->ticket,
5518 				    ioe->rs_num, ioe->anchor))) {
5519 					PF_RULES_WUNLOCK();
5520 					free(ioes, M_TEMP);
5521 					goto fail; /* really bad */
5522 				}
5523 				break;
5524 			}
5525 		}
5526 		PF_RULES_WUNLOCK();
5527 
5528 		/* Only hook into Ethernet traffic if we've got rules for it. */
5529 		if (!TAILQ_EMPTY(V_pf_keth->active.rules))
5530 			hook_pf_eth();
5531 		else
5532 			dehook_pf_eth();
5533 
5534 		free(ioes, M_TEMP);
5535 		break;
5536 	}
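
	/*
	 * Illustrative userland sketch (an assumption, not part of this
	 * file): a ruleset reload brackets the load with
	 * DIOCXBEGIN/DIOCXCOMMIT so the swap is atomic, and rolls back
	 * if any intermediate step fails:
	 *
	 *	struct pfioc_trans_e ioe[1] = { 0 };
	 *	struct pfioc_trans io = { 0 };
	 *
	 *	ioe[0].rs_num = PF_RULESET_FILTER;
	 *	io.size = 1;
	 *	io.esize = sizeof(ioe[0]);
	 *	io.array = ioe;
	 *	if (ioctl(dev, DIOCXBEGIN, &io) == -1)
	 *		err(1, "DIOCXBEGIN");
	 *	if (load_rules(dev, ioe[0].ticket) == -1) {
	 *		(void)ioctl(dev, DIOCXROLLBACK, &io);
	 *		err(1, "load_rules");
	 *	}
	 *	if (ioctl(dev, DIOCXCOMMIT, &io) == -1)
	 *		err(1, "DIOCXCOMMIT");
	 *
	 * load_rules() is a hypothetical helper standing in for issuing
	 * DIOCADDRULE against the returned ticket.
	 */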
5537 
5538 	case DIOCGETSRCNODES: {
5539 		struct pfioc_src_nodes	*psn = (struct pfioc_src_nodes *)addr;
5540 		struct pf_srchash	*sh;
5541 		struct pf_ksrc_node	*n;
5542 		struct pf_src_node	*p, *pstore;
5543 		uint32_t		 i, nr = 0;
5544 
5545 		for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask;
5546 		    i++, sh++) {
5547 			PF_HASHROW_LOCK(sh);
5548 			LIST_FOREACH(n, &sh->nodes, entry)
5549 				nr++;
5550 			PF_HASHROW_UNLOCK(sh);
5551 		}
5552 
5553 		psn->psn_len = min(psn->psn_len,
5554 		    sizeof(struct pf_src_node) * nr);
5555 
5556 		if (psn->psn_len == 0) {
5557 			psn->psn_len = sizeof(struct pf_src_node) * nr;
5558 			goto fail;
5559 		}
5560 
5561 		nr = 0;
5562 
5563 		p = pstore = malloc(psn->psn_len, M_TEMP, M_WAITOK | M_ZERO);
5564 		for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask;
5565 		    i++, sh++) {
5566 			PF_HASHROW_LOCK(sh);
5567 			LIST_FOREACH(n, &sh->nodes, entry) {
5568 				if ((nr + 1) * sizeof(*p) >
5569 				    (unsigned)psn->psn_len)
5570 					break;
5571 
5572 				pf_src_node_copy(n, p);
5573 
5574 				p++;
5575 				nr++;
5576 			}
5577 			PF_HASHROW_UNLOCK(sh);
5578 		}
5579 		error = copyout(pstore, psn->psn_src_nodes,
5580 		    sizeof(struct pf_src_node) * nr);
5581 		if (error) {
5582 			free(pstore, M_TEMP);
5583 			goto fail;
5584 		}
5585 		psn->psn_len = sizeof(struct pf_src_node) * nr;
5586 		free(pstore, M_TEMP);
5587 		break;
5588 	}
5589 
5590 	case DIOCCLRSRCNODES: {
5591 		pf_kill_srcnodes(NULL);
5592 		break;
5593 	}
5594 
5595 	case DIOCKILLSRCNODES:
5596 		pf_kill_srcnodes((struct pfioc_src_node_kill *)addr);
5597 		break;
5598 
5599 #ifdef COMPAT_FREEBSD13
5600 	case DIOCKEEPCOUNTERS_FREEBSD13:
5601 #endif
5602 	case DIOCKEEPCOUNTERS:
5603 		error = pf_keepcounters((struct pfioc_nv *)addr);
5604 		break;
5605 
5606 	case DIOCGETSYNCOOKIES:
5607 		error = pf_get_syncookies((struct pfioc_nv *)addr);
5608 		break;
5609 
5610 	case DIOCSETSYNCOOKIES:
5611 		error = pf_set_syncookies((struct pfioc_nv *)addr);
5612 		break;
5613 
5614 	case DIOCSETHOSTID: {
5615 		u_int32_t	*hostid = (u_int32_t *)addr;
5616 
5617 		PF_RULES_WLOCK();
5618 		if (*hostid == 0)
5619 			V_pf_status.hostid = arc4random();
5620 		else
5621 			V_pf_status.hostid = *hostid;
5622 		PF_RULES_WUNLOCK();
5623 		break;
5624 	}
5625 
5626 	case DIOCOSFPFLUSH:
5627 		PF_RULES_WLOCK();
5628 		pf_osfp_flush();
5629 		PF_RULES_WUNLOCK();
5630 		break;
5631 
5632 	case DIOCIGETIFACES: {
5633 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
5634 		struct pfi_kif *ifstore;
5635 		size_t bufsiz;
5636 
5637 		if (io->pfiio_esize != sizeof(struct pfi_kif)) {
5638 			error = ENODEV;
5639 			goto fail;
5640 		}
5641 
5642 		if (io->pfiio_size < 0 ||
5643 		    io->pfiio_size > pf_ioctl_maxcount ||
5644 		    WOULD_OVERFLOW(io->pfiio_size, sizeof(struct pfi_kif))) {
5645 			error = EINVAL;
5646 			goto fail;
5647 		}
5648 
5649 		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
5650 
5651 		bufsiz = io->pfiio_size * sizeof(struct pfi_kif);
5652 		ifstore = mallocarray(io->pfiio_size, sizeof(struct pfi_kif),
5653 		    M_TEMP, M_WAITOK | M_ZERO);
5654 
5655 		PF_RULES_RLOCK();
5656 		pfi_get_ifaces(io->pfiio_name, ifstore, &io->pfiio_size);
5657 		PF_RULES_RUNLOCK();
5658 		error = copyout(ifstore, io->pfiio_buffer, bufsiz);
5659 		free(ifstore, M_TEMP);
5660 		break;
5661 	}
5662 
5663 	case DIOCSETIFFLAG: {
5664 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
5665 
5666 		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
5667 
5668 		PF_RULES_WLOCK();
5669 		error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
5670 		PF_RULES_WUNLOCK();
5671 		break;
5672 	}
5673 
5674 	case DIOCCLRIFFLAG: {
5675 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
5676 
5677 		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
5678 
5679 		PF_RULES_WLOCK();
5680 		error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
5681 		PF_RULES_WUNLOCK();
5682 		break;
5683 	}
5684 
5685 	case DIOCSETREASS: {
5686 		u_int32_t	*reass = (u_int32_t *)addr;
5687 
5688 		V_pf_status.reass = *reass & (PF_REASS_ENABLED|PF_REASS_NODF);
5689 		/* Clearing the DF flag without reassembly enabled is not a
5690 		 * valid combination; disable reassembly in that case. */
5691 		if (!(V_pf_status.reass & PF_REASS_ENABLED))
5692 			V_pf_status.reass = 0;
5693 		break;
5694 	}
5695 
5696 	default:
5697 		error = ENODEV;
5698 		break;
5699 	}
5700 fail:
5701 	CURVNET_RESTORE();
5702 
5703 #undef ERROUT_IOCTL
5704 
5705 	return (error);
5706 }
5707 
5708 void
5709 pfsync_state_export(union pfsync_state_union *sp, struct pf_kstate *st, int msg_version)
5710 {
5711 	bzero(sp, sizeof(union pfsync_state_union));
5712 
5713 	/* copy from state key */
5714 	sp->pfs_1301.key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
5715 	sp->pfs_1301.key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
5716 	sp->pfs_1301.key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
5717 	sp->pfs_1301.key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
5718 	sp->pfs_1301.key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
5719 	sp->pfs_1301.key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
5720 	sp->pfs_1301.key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
5721 	sp->pfs_1301.key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
5722 	sp->pfs_1301.proto = st->key[PF_SK_WIRE]->proto;
5723 	sp->pfs_1301.af = st->key[PF_SK_WIRE]->af;
5724 
5725 	/* copy from state */
5726 	strlcpy(sp->pfs_1301.ifname, st->kif->pfik_name, sizeof(sp->pfs_1301.ifname));
5727 	bcopy(&st->act.rt_addr, &sp->pfs_1301.rt_addr, sizeof(sp->pfs_1301.rt_addr));
5728 	sp->pfs_1301.creation = htonl(time_uptime - (st->creation / 1000));
5729 	sp->pfs_1301.expire = pf_state_expires(st);
5730 	if (sp->pfs_1301.expire <= time_uptime)
5731 		sp->pfs_1301.expire = htonl(0);
5732 	else
5733 		sp->pfs_1301.expire = htonl(sp->pfs_1301.expire - time_uptime);
5734 
5735 	sp->pfs_1301.direction = st->direction;
5736 	sp->pfs_1301.log = st->act.log;
5737 	sp->pfs_1301.timeout = st->timeout;
5738 
5739 	switch (msg_version) {
5740 		case PFSYNC_MSG_VERSION_1301:
5741 			sp->pfs_1301.state_flags = st->state_flags;
5742 			break;
5743 		case PFSYNC_MSG_VERSION_1400:
5744 			sp->pfs_1400.state_flags = htons(st->state_flags);
5745 			sp->pfs_1400.qid = htons(st->act.qid);
5746 			sp->pfs_1400.pqid = htons(st->act.pqid);
5747 			sp->pfs_1400.dnpipe = htons(st->act.dnpipe);
5748 			sp->pfs_1400.dnrpipe = htons(st->act.dnrpipe);
5749 			sp->pfs_1400.rtableid = htonl(st->act.rtableid);
5750 			sp->pfs_1400.min_ttl = st->act.min_ttl;
5751 			sp->pfs_1400.set_tos = st->act.set_tos;
5752 			sp->pfs_1400.max_mss = htons(st->act.max_mss);
5753 			sp->pfs_1400.set_prio[0] = st->act.set_prio[0];
5754 			sp->pfs_1400.set_prio[1] = st->act.set_prio[1];
5755 			sp->pfs_1400.rt = st->act.rt;
5756 			if (st->act.rt_kif)
5757 				strlcpy(sp->pfs_1400.rt_ifname,
5758 				    st->act.rt_kif->pfik_name,
5759 				    sizeof(sp->pfs_1400.rt_ifname));
5760 			break;
5761 		default:
5762 			panic("%s: Unsupported pfsync_msg_version %d",
5763 			    __func__, msg_version);
5764 	}
5765 
5766 	/*
5767 	 * XXX Why do we bother pfsyncing source node information if source
5768 	 * nodes are not synced? Showing users that there is source tracking
5769 	 * when there is none seems useless.
5770 	 */
5771 	if (st->sns[PF_SN_LIMIT] != NULL)
5772 		sp->pfs_1301.sync_flags |= PFSYNC_FLAG_SRCNODE;
5773 	if (st->sns[PF_SN_NAT] != NULL || st->sns[PF_SN_ROUTE])
5774 		sp->pfs_1301.sync_flags |= PFSYNC_FLAG_NATSRCNODE;
5775 
5776 	sp->pfs_1301.id = st->id;
5777 	sp->pfs_1301.creatorid = st->creatorid;
5778 	pf_state_peer_hton(&st->src, &sp->pfs_1301.src);
5779 	pf_state_peer_hton(&st->dst, &sp->pfs_1301.dst);
5780 
5781 	if (st->rule == NULL)
5782 		sp->pfs_1301.rule = htonl(-1);
5783 	else
5784 		sp->pfs_1301.rule = htonl(st->rule->nr);
5785 	if (st->anchor == NULL)
5786 		sp->pfs_1301.anchor = htonl(-1);
5787 	else
5788 		sp->pfs_1301.anchor = htonl(st->anchor->nr);
5789 	if (st->nat_rule == NULL)
5790 		sp->pfs_1301.nat_rule = htonl(-1);
5791 	else
5792 		sp->pfs_1301.nat_rule = htonl(st->nat_rule->nr);
5793 
5794 	pf_state_counter_hton(st->packets[0], sp->pfs_1301.packets[0]);
5795 	pf_state_counter_hton(st->packets[1], sp->pfs_1301.packets[1]);
5796 	pf_state_counter_hton(st->bytes[0], sp->pfs_1301.bytes[0]);
5797 	pf_state_counter_hton(st->bytes[1], sp->pfs_1301.bytes[1]);
5798 }
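
/*
 * A hedged usage sketch (not part of this file): a pfsync sender picks
 * the negotiated message version and lets pfsync_state_export() fill
 * the matching arm of the union, with multi-byte fields already in
 * network byte order:
 *
 *	union pfsync_state_union sp;
 *
 *	pfsync_state_export(&sp, st, PFSYNC_MSG_VERSION_1400);
 *
 * after which sp.pfs_1400 holds the wire-format state. The state `st`
 * must remain valid for the duration of the copy.
 */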
5799 
5800 void
5801 pf_state_export(struct pf_state_export *sp, struct pf_kstate *st)
5802 {
5803 	bzero(sp, sizeof(*sp));
5804 
5805 	sp->version = PF_STATE_VERSION;
5806 
5807 	/* copy from state key */
5808 	sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
5809 	sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
5810 	sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
5811 	sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
5812 	sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
5813 	sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
5814 	sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
5815 	sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
5816 	sp->proto = st->key[PF_SK_WIRE]->proto;
5817 	sp->af = st->key[PF_SK_WIRE]->af;
5818 
5819 	/* copy from state */
5820 	strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
5821 	strlcpy(sp->orig_ifname, st->orig_kif->pfik_name,
5822 	    sizeof(sp->orig_ifname));
5823 	memcpy(&sp->rt_addr, &st->act.rt_addr, sizeof(sp->rt_addr));
5824 	sp->creation = htonl(time_uptime - (st->creation / 1000));
5825 	sp->expire = pf_state_expires(st);
5826 	if (sp->expire <= time_uptime)
5827 		sp->expire = htonl(0);
5828 	else
5829 		sp->expire = htonl(sp->expire - time_uptime);
5830 
5831 	sp->direction = st->direction;
5832 	sp->log = st->act.log;
5833 	sp->timeout = st->timeout;
5834 	/* 8 bits for the old libpfctl, 16 bits for the new libpfctl */
5835 	sp->state_flags_compat = st->state_flags;
5836 	sp->state_flags = htons(st->state_flags);
5837 	if (st->sns[PF_SN_LIMIT] != NULL)
5838 		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
5839 	if (st->sns[PF_SN_NAT] != NULL || st->sns[PF_SN_ROUTE] != NULL)
5840 		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;
5841 	sp->id = st->id;
5842 	sp->creatorid = st->creatorid;
5843 	pf_state_peer_hton(&st->src, &sp->src);
5844 	pf_state_peer_hton(&st->dst, &sp->dst);
5845 
5846 	if (st->rule == NULL)
5847 		sp->rule = htonl(-1);
5848 	else
5849 		sp->rule = htonl(st->rule->nr);
5850 	if (st->anchor == NULL)
5851 		sp->anchor = htonl(-1);
5852 	else
5853 		sp->anchor = htonl(st->anchor->nr);
5854 	if (st->nat_rule == NULL)
5855 		sp->nat_rule = htonl(-1);
5856 	else
5857 		sp->nat_rule = htonl(st->nat_rule->nr);
5858 
5859 	sp->packets[0] = st->packets[0];
5860 	sp->packets[1] = st->packets[1];
5861 	sp->bytes[0] = st->bytes[0];
5862 	sp->bytes[1] = st->bytes[1];
5863 
5864 	sp->qid = htons(st->act.qid);
5865 	sp->pqid = htons(st->act.pqid);
5866 	sp->dnpipe = htons(st->act.dnpipe);
5867 	sp->dnrpipe = htons(st->act.dnrpipe);
5868 	sp->rtableid = htonl(st->act.rtableid);
5869 	sp->min_ttl = st->act.min_ttl;
5870 	sp->set_tos = st->act.set_tos;
5871 	sp->max_mss = htons(st->act.max_mss);
5872 	sp->rt = st->act.rt;
5873 	if (st->act.rt_kif)
5874 		strlcpy(sp->rt_ifname, st->act.rt_kif->pfik_name,
5875 		    sizeof(sp->rt_ifname));
5876 	sp->set_prio[0] = st->act.set_prio[0];
5877 	sp->set_prio[1] = st->act.set_prio[1];
5878 
5879 }
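
/*
 * Hedged userland sketch: pf_state_export() fills the records returned
 * by the DIOCGETSTATESV2 ioctl. Assuming the usual two-pass sizing
 * pattern and the pfioc_states_v2 layout from pfvar.h, a consumer
 * might do:
 *
 *	struct pfioc_states_v2 ps = { .ps_req_version = PF_STATE_VERSION };
 *
 *	ioctl(dev, DIOCGETSTATESV2, &ps);	(first pass sets ps_len)
 *	ps.ps_states = malloc(ps.ps_len);
 *	ioctl(dev, DIOCGETSTATESV2, &ps);	(fills the export array)
 */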
5880 
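/*
 * Replace the kernel-only table pointer in the address wrapper with the
 * table's address count (or -1 if the table is not active) so the rule
 * can safely be copied out to userland.
 */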
5881 static void
5882 pf_tbladdr_copyout(struct pf_addr_wrap *aw)
5883 {
5884 	struct pfr_ktable *kt;
5885 
5886 	KASSERT(aw->type == PF_ADDR_TABLE, ("%s: type %u", __func__, aw->type));
5887 
5888 	kt = aw->p.tbl;
5889 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
5890 		kt = kt->pfrkt_root;
5891 	aw->p.tbl = NULL;
5892 	aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
5893 		kt->pfrkt_cnt : -1;
5894 }
5895 
5896 static int
5897 pf_add_status_counters(nvlist_t *nvl, const char *name, counter_u64_t *counters,
5898     size_t number, char **names)
5899 {
5900 	nvlist_t        *nvc;
5901 
5902 	nvc = nvlist_create(0);
5903 	if (nvc == NULL)
5904 		return (ENOMEM);
5905 
5906 	for (int i = 0; i < number; i++) {
5907 		nvlist_append_number_array(nvc, "counters",
5908 		    counter_u64_fetch(counters[i]));
5909 		nvlist_append_string_array(nvc, "names",
5910 		    names[i]);
5911 		nvlist_append_number_array(nvc, "ids",
5912 		    i);
5913 	}
5914 	nvlist_add_nvlist(nvl, name, nvc);
5915 	nvlist_destroy(nvc);
5916 
5917 	return (0);
5918 }
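
/*
 * Hedged consumer sketch: the child nvlist built above carries three
 * parallel arrays, so a reader would walk them in lockstep ("ids"
 * simply repeats the index):
 *
 *	const nvlist_t *nvc = nvlist_get_nvlist(nvl, "counters");
 *	size_t n;
 *	const uint64_t *vals = nvlist_get_number_array(nvc, "counters", &n);
 *	const char * const *nms = nvlist_get_string_array(nvc, "names", &n);
 *
 * where vals[i] is the current value of the counter named nms[i].
 */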
5919 
5920 static int
5921 pf_getstatus(struct pfioc_nv *nv)
5922 {
5923 	nvlist_t        *nvl = NULL, *nvc = NULL;
5924 	void            *nvlpacked = NULL;
5925 	int              error;
5926 	struct pf_status s;
5927 	char *pf_reasons[PFRES_MAX+1] = PFRES_NAMES;
5928 	char *pf_lcounter[KLCNT_MAX+1] = KLCNT_NAMES;
5929 	char *pf_fcounter[FCNT_MAX+1] = FCNT_NAMES;
5930 	time_t since;
5931 
5932 	PF_RULES_RLOCK_TRACKER;
5933 
5934 #define ERROUT(x)      ERROUT_FUNCTION(errout, x)
5935 
5936 	PF_RULES_RLOCK();
5937 
5938 	nvl = nvlist_create(0);
5939 	if (nvl == NULL)
5940 		ERROUT(ENOMEM);
5941 
5942 	since = time_second - (time_uptime - V_pf_status.since);
5943 
5944 	nvlist_add_bool(nvl, "running", V_pf_status.running);
5945 	nvlist_add_number(nvl, "since", since);
5946 	nvlist_add_number(nvl, "debug", V_pf_status.debug);
5947 	nvlist_add_number(nvl, "hostid", V_pf_status.hostid);
5948 	nvlist_add_number(nvl, "states", V_pf_status.states);
5949 	nvlist_add_number(nvl, "src_nodes", V_pf_status.src_nodes);
5950 	nvlist_add_number(nvl, "reass", V_pf_status.reass);
5951 	nvlist_add_bool(nvl, "syncookies_active",
5952 	    V_pf_status.syncookies_active);
5953 	nvlist_add_number(nvl, "halfopen_states", V_pf_status.states_halfopen);
5954 
5955 	/* counters */
5956 	error = pf_add_status_counters(nvl, "counters", V_pf_status.counters,
5957 	    PFRES_MAX, pf_reasons);
5958 	if (error != 0)
5959 		ERROUT(error);
5960 
5961 	/* lcounters */
5962 	error = pf_add_status_counters(nvl, "lcounters", V_pf_status.lcounters,
5963 	    KLCNT_MAX, pf_lcounter);
5964 	if (error != 0)
5965 		ERROUT(error);
5966 
5967 	/* fcounters */
5968 	nvc = nvlist_create(0);
5969 	if (nvc == NULL)
5970 		ERROUT(ENOMEM);
5971 
5972 	for (int i = 0; i < FCNT_MAX; i++) {
5973 		nvlist_append_number_array(nvc, "counters",
5974 		    pf_counter_u64_fetch(&V_pf_status.fcounters[i]));
5975 		nvlist_append_string_array(nvc, "names",
5976 		    pf_fcounter[i]);
5977 		nvlist_append_number_array(nvc, "ids",
5978 		    i);
5979 	}
5980 	nvlist_add_nvlist(nvl, "fcounters", nvc);
5981 	nvlist_destroy(nvc);
5982 	nvc = NULL;
5983 
5984 	/* scounters */
5985 	error = pf_add_status_counters(nvl, "scounters", V_pf_status.scounters,
5986 	    SCNT_MAX, pf_fcounter);
5987 	if (error != 0)
5988 		ERROUT(error);
5989 
5990 	nvlist_add_string(nvl, "ifname", V_pf_status.ifname);
5991 	nvlist_add_binary(nvl, "chksum", V_pf_status.pf_chksum,
5992 	    PF_MD5_DIGEST_LENGTH);
5993 
5994 	pfi_update_status(V_pf_status.ifname, &s);
5995 
5996 	/* pcounters / bcounters */
5997 	for (int i = 0; i < 2; i++) {
5998 		for (int j = 0; j < 2; j++) {
5999 			for (int k = 0; k < 2; k++) {
6000 				nvlist_append_number_array(nvl, "pcounters",
6001 				    s.pcounters[i][j][k]);
6002 			}
6003 			nvlist_append_number_array(nvl, "bcounters",
6004 			    s.bcounters[i][j]);
6005 		}
6006 	}
6007 
6008 	nvlpacked = nvlist_pack(nvl, &nv->len);
6009 	if (nvlpacked == NULL)
6010 		ERROUT(ENOMEM);
6011 
6012 	if (nv->size == 0)
6013 		ERROUT(0);
6014 	else if (nv->size < nv->len)
6015 		ERROUT(ENOSPC);
6016 
6017 	PF_RULES_RUNLOCK();
6018 	error = copyout(nvlpacked, nv->data, nv->len);
6019 	goto done;
6020 
6021 #undef ERROUT
6022 errout:
6023 	PF_RULES_RUNLOCK();
6024 done:
6025 	free(nvlpacked, M_NVLIST);
6026 	nvlist_destroy(nvc);
6027 	nvlist_destroy(nvl);
6028 
6029 	return (error);
6030 }
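
/*
 * Hedged userland sketch for the status call above: a request with
 * size == 0 only reports the required length (see the ERROUT(0) path),
 * so callers make two ioctl(2) passes:
 *
 *	struct pfioc_nv nv = { .data = NULL, .len = 0, .size = 0 };
 *
 *	ioctl(dev, DIOCGETSTATUSNV, &nv);	(sets nv.len)
 *	nv.data = malloc(nv.len);
 *	nv.size = nv.len;
 *	ioctl(dev, DIOCGETSTATUSNV, &nv);
 *	nvl = nvlist_unpack(nv.data, nv.len, 0);
 */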
6031 
6032 /*
6033  * XXX - Check for version mismatch!!!
6034  */
6035 static void
6036 pf_clear_all_states(void)
6037 {
6038 	struct epoch_tracker	 et;
6039 	struct pf_kstate	*s;
6040 	u_int i;
6041 
6042 	NET_EPOCH_ENTER(et);
6043 	for (i = 0; i <= V_pf_hashmask; i++) {
6044 		struct pf_idhash *ih = &V_pf_idhash[i];
6045 relock:
6046 		PF_HASHROW_LOCK(ih);
6047 		LIST_FOREACH(s, &ih->states, entry) {
6048 			s->timeout = PFTM_PURGE;
6049 			/* Don't send out individual delete messages. */
6050 			s->state_flags |= PFSTATE_NOSYNC;
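			/*
			 * pf_remove_state() returns with the hash row
			 * unlocked, so restart the bucket scan from the top.
			 */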
6051 			pf_remove_state(s);
6052 			goto relock;
6053 		}
6054 		PF_HASHROW_UNLOCK(ih);
6055 	}
6056 	NET_EPOCH_EXIT(et);
6057 }
6058 
6059 static int
6060 pf_clear_tables(void)
6061 {
6062 	struct pfioc_table io;
6063 	int error;
6064 
6065 	bzero(&io, sizeof(io));
6066 	io.pfrio_flags |= PFR_FLAG_ALLRSETS;
6067 
6068 	error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel,
6069 	    io.pfrio_flags);
6070 
6071 	return (error);
6072 }
6073 
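/*
 * Unlink the source nodes matching psnk (all of them when psnk is
 * NULL), clear the pointers any states still hold to them (expire == 1
 * marks a node already on the kill list), then free the collected list.
 */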
6074 static void
6075 pf_kill_srcnodes(struct pfioc_src_node_kill *psnk)
6076 {
6077 	struct pf_ksrc_node_list	 kill;
6078 	u_int 				 killed;
6079 
6080 	LIST_INIT(&kill);
6081 	for (int i = 0; i <= V_pf_srchashmask; i++) {
6082 		struct pf_srchash *sh = &V_pf_srchash[i];
6083 		struct pf_ksrc_node *sn, *tmp;
6084 
6085 		PF_HASHROW_LOCK(sh);
6086 		LIST_FOREACH_SAFE(sn, &sh->nodes, entry, tmp)
6087 			if (psnk == NULL ||
6088 			    (pf_match_addr(psnk->psnk_src.neg,
6089 			      &psnk->psnk_src.addr.v.a.addr,
6090 			      &psnk->psnk_src.addr.v.a.mask,
6091 			      &sn->addr, sn->af) &&
6092 			    pf_match_addr(psnk->psnk_dst.neg,
6093 			      &psnk->psnk_dst.addr.v.a.addr,
6094 			      &psnk->psnk_dst.addr.v.a.mask,
6095 			      &sn->raddr, sn->af))) {
6096 				pf_unlink_src_node(sn);
6097 				LIST_INSERT_HEAD(&kill, sn, entry);
6098 				sn->expire = 1;
6099 			}
6100 		PF_HASHROW_UNLOCK(sh);
6101 	}
6102 
6103 	for (int i = 0; i <= V_pf_hashmask; i++) {
6104 		struct pf_idhash *ih = &V_pf_idhash[i];
6105 		struct pf_kstate *s;
6106 
6107 		PF_HASHROW_LOCK(ih);
6108 		LIST_FOREACH(s, &ih->states, entry) {
6109 			for (pf_sn_types_t sn_type = 0; sn_type < PF_SN_MAX;
6110 			    sn_type++) {
6111 				if (s->sns[sn_type] &&
6112 				    s->sns[sn_type]->expire == 1) {
6113 					s->sns[sn_type] = NULL;
6114 				}
6115 			}
6116 		}
6117 		PF_HASHROW_UNLOCK(ih);
6118 	}
6119 
6120 	killed = pf_free_src_nodes(&kill);
6121 
6122 	if (psnk != NULL)
6123 		psnk->psnk_killed = killed;
6124 }
6125 
6126 static int
6127 pf_keepcounters(struct pfioc_nv *nv)
6128 {
6129 	nvlist_t	*nvl = NULL;
6130 	void		*nvlpacked = NULL;
6131 	int		 error = 0;
6132 
6133 #define	ERROUT(x)	ERROUT_FUNCTION(on_error, x)
6134 
6135 	if (nv->len > pf_ioctl_maxcount)
6136 		ERROUT(ENOMEM);
6137 
6138 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
6139 	error = copyin(nv->data, nvlpacked, nv->len);
6140 	if (error)
6141 		ERROUT(error);
6142 
6143 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
6144 	if (nvl == NULL)
6145 		ERROUT(EBADMSG);
6146 
6147 	if (!nvlist_exists_bool(nvl, "keep_counters"))
6148 		ERROUT(EBADMSG);
6149 
6150 	V_pf_status.keep_counters = nvlist_get_bool(nvl, "keep_counters");
6151 
6152 on_error:
6153 	nvlist_destroy(nvl);
6154 	free(nvlpacked, M_NVLIST);
6155 	return (error);
6156 }
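
/*
 * Hedged caller sketch: userland packs a single boolean and sends it
 * through DIOCKEEPCOUNTERS; only the "keep_counters" key is consulted:
 *
 *	nvlist_t *nvl = nvlist_create(0);
 *	struct pfioc_nv nv;
 *
 *	nvlist_add_bool(nvl, "keep_counters", true);
 *	nv.data = nvlist_pack(nvl, &nv.len);
 *	nv.size = nv.len;
 *	ioctl(dev, DIOCKEEPCOUNTERS, &nv);
 */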
6157 
6158 unsigned int
6159 pf_clear_states(const struct pf_kstate_kill *kill)
6160 {
6161 	struct pf_state_key_cmp	 match_key;
6162 	struct pf_kstate	*s;
6163 	struct pfi_kkif	*kif;
6164 	int		 idx;
6165 	unsigned int	 killed = 0, dir;
6166 
6167 	NET_EPOCH_ASSERT();
6168 
6169 	for (unsigned int i = 0; i <= V_pf_hashmask; i++) {
6170 		struct pf_idhash *ih = &V_pf_idhash[i];
6171 
6172 relock_DIOCCLRSTATES:
6173 		PF_HASHROW_LOCK(ih);
6174 		LIST_FOREACH(s, &ih->states, entry) {
6175 			/* For floating states look at the original kif. */
6176 			kif = s->kif == V_pfi_all ? s->orig_kif : s->kif;
6177 
6178 			if (kill->psk_ifname[0] &&
6179 			    strcmp(kill->psk_ifname,
6180 			    kif->pfik_name))
6181 				continue;
6182 
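			/*
			 * Build the key with the endpoints swapped so
			 * pf_kill_matching_state() below can also remove the
			 * state created from the opposite direction (e.g. the
			 * NAT counterpart of this one).
			 */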
6183 			if (kill->psk_kill_match) {
6184 				bzero(&match_key, sizeof(match_key));
6185 
6186 				if (s->direction == PF_OUT) {
6187 					dir = PF_IN;
6188 					idx = PF_SK_STACK;
6189 				} else {
6190 					dir = PF_OUT;
6191 					idx = PF_SK_WIRE;
6192 				}
6193 
6194 				match_key.af = s->key[idx]->af;
6195 				match_key.proto = s->key[idx]->proto;
6196 				pf_addrcpy(&match_key.addr[0],
6197 				    &s->key[idx]->addr[1], match_key.af);
6198 				match_key.port[0] = s->key[idx]->port[1];
6199 				pf_addrcpy(&match_key.addr[1],
6200 				    &s->key[idx]->addr[0], match_key.af);
6201 				match_key.port[1] = s->key[idx]->port[0];
6202 			}
6203 
6204 			/*
6205 			 * Don't send out individual
6206 			 * delete messages.
6207 			 */
6208 			s->state_flags |= PFSTATE_NOSYNC;
6209 			pf_remove_state(s);
6210 			killed++;
6211 
6212 			if (kill->psk_kill_match)
6213 				killed += pf_kill_matching_state(&match_key,
6214 				    dir);
6215 
6216 			goto relock_DIOCCLRSTATES;
6217 		}
6218 		PF_HASHROW_UNLOCK(ih);
6219 	}
6220 
6221 	if (V_pfsync_clear_states_ptr != NULL)
6222 		V_pfsync_clear_states_ptr(V_pf_status.hostid, kill->psk_ifname);
6223 
6224 	return (killed);
6225 }
6226 
6227 void
6228 pf_killstates(struct pf_kstate_kill *kill, unsigned int *killed)
6229 {
6230 	struct pf_kstate	*s;
6231 
6232 	NET_EPOCH_ASSERT();
6233 	if (kill->psk_pfcmp.id) {
6234 		if (kill->psk_pfcmp.creatorid == 0)
6235 			kill->psk_pfcmp.creatorid = V_pf_status.hostid;
6236 		if ((s = pf_find_state_byid(kill->psk_pfcmp.id,
6237 		    kill->psk_pfcmp.creatorid))) {
6238 			pf_remove_state(s);
6239 			*killed = 1;
6240 		}
6241 		return;
6242 	}
6243 
6244 	for (unsigned int i = 0; i <= V_pf_hashmask; i++)
6245 		*killed += pf_killstates_row(kill, &V_pf_idhash[i]);
6246 }
6247 
6248 static int
6249 pf_killstates_nv(struct pfioc_nv *nv)
6250 {
6251 	struct pf_kstate_kill	 kill;
6252 	struct epoch_tracker	 et;
6253 	nvlist_t		*nvl = NULL;
6254 	void			*nvlpacked = NULL;
6255 	int			 error = 0;
6256 	unsigned int		 killed = 0;
6257 
6258 #define ERROUT(x)	ERROUT_FUNCTION(on_error, x)
6259 
6260 	if (nv->len > pf_ioctl_maxcount)
6261 		ERROUT(ENOMEM);
6262 
6263 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
6264 	error = copyin(nv->data, nvlpacked, nv->len);
6265 	if (error)
6266 		ERROUT(error);
6267 
6268 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
6269 	if (nvl == NULL)
6270 		ERROUT(EBADMSG);
6271 
6272 	error = pf_nvstate_kill_to_kstate_kill(nvl, &kill);
6273 	if (error)
6274 		ERROUT(error);
6275 
6276 	NET_EPOCH_ENTER(et);
6277 	pf_killstates(&kill, &killed);
6278 	NET_EPOCH_EXIT(et);
6279 
6280 	free(nvlpacked, M_NVLIST);
6281 	nvlpacked = NULL;
6282 	nvlist_destroy(nvl);
6283 	nvl = nvlist_create(0);
6284 	if (nvl == NULL)
6285 		ERROUT(ENOMEM);
6286 
6287 	nvlist_add_number(nvl, "killed", killed);
6288 
6289 	nvlpacked = nvlist_pack(nvl, &nv->len);
6290 	if (nvlpacked == NULL)
6291 		ERROUT(ENOMEM);
6292 
6293 	if (nv->size == 0)
6294 		ERROUT(0);
6295 	else if (nv->size < nv->len)
6296 		ERROUT(ENOSPC);
6297 
6298 	error = copyout(nvlpacked, nv->data, nv->len);
6299 
6300 on_error:
6301 	nvlist_destroy(nvl);
6302 	free(nvlpacked, M_NVLIST);
6303 	return (error);
6304 }
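
/*
 * Hedged note: this function and pf_clearstates_nv() below both reply
 * with a single-key nvlist, so a caller recovers the count with
 * nvlist_get_number(reply, "killed").
 */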
6305 
6306 static int
6307 pf_clearstates_nv(struct pfioc_nv *nv)
6308 {
6309 	struct pf_kstate_kill	 kill;
6310 	struct epoch_tracker	 et;
6311 	nvlist_t		*nvl = NULL;
6312 	void			*nvlpacked = NULL;
6313 	int			 error = 0;
6314 	unsigned int		 killed;
6315 
6316 #define ERROUT(x)	ERROUT_FUNCTION(on_error, x)
6317 
6318 	if (nv->len > pf_ioctl_maxcount)
6319 		ERROUT(ENOMEM);
6320 
6321 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
6322 	error = copyin(nv->data, nvlpacked, nv->len);
6323 	if (error)
6324 		ERROUT(error);
6325 
6326 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
6327 	if (nvl == NULL)
6328 		ERROUT(EBADMSG);
6329 
6330 	error = pf_nvstate_kill_to_kstate_kill(nvl, &kill);
6331 	if (error)
6332 		ERROUT(error);
6333 
6334 	NET_EPOCH_ENTER(et);
6335 	killed = pf_clear_states(&kill);
6336 	NET_EPOCH_EXIT(et);
6337 
6338 	free(nvlpacked, M_NVLIST);
6339 	nvlpacked = NULL;
6340 	nvlist_destroy(nvl);
6341 	nvl = nvlist_create(0);
6342 	if (nvl == NULL)
6343 		ERROUT(ENOMEM);
6344 
6345 	nvlist_add_number(nvl, "killed", killed);
6346 
6347 	nvlpacked = nvlist_pack(nvl, &nv->len);
6348 	if (nvlpacked == NULL)
6349 		ERROUT(ENOMEM);
6350 
6351 	if (nv->size == 0)
6352 		ERROUT(0);
6353 	else if (nv->size < nv->len)
6354 		ERROUT(ENOSPC);
6355 
6356 	error = copyout(nvlpacked, nv->data, nv->len);
6357 
6358 #undef ERROUT
6359 on_error:
6360 	nvlist_destroy(nvl);
6361 	free(nvlpacked, M_NVLIST);
6362 	return (error);
6363 }
6364 
6365 static int
6366 pf_getstate(struct pfioc_nv *nv)
6367 {
6368 	nvlist_t		*nvl = NULL, *nvls;
6369 	void			*nvlpacked = NULL;
6370 	struct pf_kstate	*s = NULL;
6371 	int			 error = 0;
6372 	uint64_t		 id, creatorid;
6373 
6374 #define ERROUT(x)	ERROUT_FUNCTION(errout, x)
6375 
6376 	if (nv->len > pf_ioctl_maxcount)
6377 		ERROUT(ENOMEM);
6378 
6379 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
6380 	error = copyin(nv->data, nvlpacked, nv->len);
6381 	if (error)
6382 		ERROUT(error);
6383 
6384 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
6385 	if (nvl == NULL)
6386 		ERROUT(EBADMSG);
6387 
6388 	PFNV_CHK(pf_nvuint64(nvl, "id", &id));
6389 	PFNV_CHK(pf_nvuint64(nvl, "creatorid", &creatorid));
6390 
6391 	s = pf_find_state_byid(id, creatorid);
6392 	if (s == NULL)
6393 		ERROUT(ENOENT);
6394 
6395 	free(nvlpacked, M_NVLIST);
6396 	nvlpacked = NULL;
6397 	nvlist_destroy(nvl);
6398 	nvl = nvlist_create(0);
6399 	if (nvl == NULL)
6400 		ERROUT(ENOMEM);
6401 
6402 	nvls = pf_state_to_nvstate(s);
6403 	if (nvls == NULL)
6404 		ERROUT(ENOMEM);
6405 
6406 	nvlist_add_nvlist(nvl, "state", nvls);
6407 	nvlist_destroy(nvls);
6408 
6409 	nvlpacked = nvlist_pack(nvl, &nv->len);
6410 	if (nvlpacked == NULL)
6411 		ERROUT(ENOMEM);
6412 
6413 	if (nv->size == 0)
6414 		ERROUT(0);
6415 	else if (nv->size < nv->len)
6416 		ERROUT(ENOSPC);
6417 
6418 	error = copyout(nvlpacked, nv->data, nv->len);
6419 
6420 #undef ERROUT
6421 errout:
6422 	if (s != NULL)
6423 		PF_STATE_UNLOCK(s);
6424 	free(nvlpacked, M_NVLIST);
6425 	nvlist_destroy(nvl);
6426 	return (error);
6427 }
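
/*
 * Hedged request/reply sketch for the lookup above: the request nvlist
 * carries the two keys passed to pf_find_state_byid(), and the reply
 * nests the exported state under "state":
 *
 *	nvlist_add_number(req, "id", id);
 *	nvlist_add_number(req, "creatorid", creatorid);
 *	...
 *	const nvlist_t *state = nvlist_get_nvlist(reply, "state");
 */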
6428 
6429 /*
6430  * XXX - Check for version mismatch!!!
6431  */
6432 
6433 /*
6434  * Duplicate pfctl -Fa operation to get rid of as much as we can.
6435  */
6436 static int
6437 shutdown_pf(void)
6438 {
6439 	int error = 0;
6440 	u_int32_t t[5];
6441 	char nn = '\0';
6442 	struct pf_kanchor *anchor;
6443 	struct pf_keth_anchor *eth_anchor;
6444 	int rs_num;
6445 
6446 	do {
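		/*
		 * Flushing works by committing empty transactions:
		 * pf_begin_rules() opens a fresh, empty inactive ruleset and
		 * pf_commit_rules() swaps it in, discarding the active rules.
		 */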
6447 		/* Unlink rules of all user defined anchors */
6448 		RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors) {
6449 			/* Wildcard-based anchors may lack an explicit anchor
6450 			 * rule, or may have been left without any rules; either
6451 			 * way anchor.refcnt ends up 0, which the rest of the
6452 			 * logic does not expect. */
6453 			if (anchor->refcnt == 0)
6454 				anchor->refcnt = 1;
6455 			for (rs_num = 0; rs_num < PF_RULESET_MAX; ++rs_num) {
6456 				if ((error = pf_begin_rules(&t[rs_num], rs_num,
6457 				    anchor->path)) != 0) {
6458 					DPFPRINTF(PF_DEBUG_MISC, "%s: "
6459 					    "anchor.path=%s rs_num=%d",
6460 					    __func__, anchor->path, rs_num);
6461 					goto error;	/* XXX: rollback? */
6462 				}
6463 			}
6464 			for (rs_num = 0; rs_num < PF_RULESET_MAX; ++rs_num) {
6465 				error = pf_commit_rules(t[rs_num], rs_num,
6466 				    anchor->path);
6467 				MPASS(error == 0);
6468 			}
6469 		}
6470 
6471 		/* Unlink rules of all user defined ether anchors */
6472 		RB_FOREACH(eth_anchor, pf_keth_anchor_global,
6473 		    &V_pf_keth_anchors) {
6474 			/* Wildcard-based anchors may lack an explicit anchor
6475 			 * rule, or may have been left without any rules; either
6476 			 * way anchor.refcnt ends up 0, which the rest of the
6477 			 * logic does not expect. */
6478 			if (eth_anchor->refcnt == 0)
6479 				eth_anchor->refcnt = 1;
6480 			if ((error = pf_begin_eth(&t[0], eth_anchor->path))
6481 			    != 0) {
6482 				DPFPRINTF(PF_DEBUG_MISC, "%s: eth "
6483 				    "anchor.path=%s", __func__,
6484 				    eth_anchor->path);
6485 				goto error;
6486 			}
6487 			error = pf_commit_eth(t[0], eth_anchor->path);
6488 			MPASS(error == 0);
6489 		}
6490 
6491 		if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn))
6492 		    != 0) {
6493 			DPFPRINTF(PF_DEBUG_MISC, "%s: SCRUB", __func__);
6494 			break;
6495 		}
6496 		if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn))
6497 		    != 0) {
6498 			DPFPRINTF(PF_DEBUG_MISC, "%s: FILTER", __func__);
6499 			break;		/* XXX: rollback? */
6500 		}
6501 		if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn))
6502 		    != 0) {
6503 			DPFPRINTF(PF_DEBUG_MISC, "%s: NAT", __func__);
6504 			break;		/* XXX: rollback? */
6505 		}
6506 		if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn))
6507 		    != 0) {
6508 			DPFPRINTF(PF_DEBUG_MISC, "%s: BINAT", __func__);
6509 			break;		/* XXX: rollback? */
6510 		}
6511 		if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn))
6512 		    != 0) {
6513 			DPFPRINTF(PF_DEBUG_MISC, "%s: RDR", __func__);
6514 			break;		/* XXX: rollback? */
6515 		}
6516 
6517 		error = pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn);
6518 		MPASS(error == 0);
6519 		error = pf_commit_rules(t[1], PF_RULESET_FILTER, &nn);
6520 		MPASS(error == 0);
6521 		error = pf_commit_rules(t[2], PF_RULESET_NAT, &nn);
6522 		MPASS(error == 0);
6523 		error = pf_commit_rules(t[3], PF_RULESET_BINAT, &nn);
6524 		MPASS(error == 0);
6525 		error = pf_commit_rules(t[4], PF_RULESET_RDR, &nn);
6526 		MPASS(error == 0);
6527 
6528 		if ((error = pf_clear_tables()) != 0)
6529 			break;
6530 
6531 		if ((error = pf_begin_eth(&t[0], &nn)) != 0) {
6532 			DPFPRINTF(PF_DEBUG_MISC, "%s: eth", __func__);
6533 			break;
6534 		}
6535 		error = pf_commit_eth(t[0], &nn);
6536 		MPASS(error == 0);
6537 
6538 #ifdef ALTQ
6539 		if ((error = pf_begin_altq(&t[0])) != 0) {
6540 			DPFPRINTF(PF_DEBUG_MISC, "%s: ALTQ", __func__);
6541 			break;
6542 		}
6543 		pf_commit_altq(t[0]);
6544 #endif
6545 
6546 		pf_clear_all_states();
6547 
6548 		pf_kill_srcnodes(NULL);
6549 
6550 		/* Status uses no malloc'ed memory, so no cleanup is needed. */
6551 		/* Fingerprints and interfaces have their own cleanup code. */
6552 	} while (0);
6553 
6554 error:
6555 	return (error);
6556 }
6557 
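/*
 * Map a pf verdict onto a pfil return: PF_PASS keeps the packet
 * (PFIL_PASS) unless pf already took ownership of the mbuf
 * (PFIL_CONSUMED); every other verdict frees the mbuf, if one is still
 * present, and reports PFIL_DROPPED.
 */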
6558 static pfil_return_t
6559 pf_check_return(int chk, struct mbuf **m)
6560 {
6561 
6562 	switch (chk) {
6563 	case PF_PASS:
6564 		if (*m == NULL)
6565 			return (PFIL_CONSUMED);
6566 		else
6567 			return (PFIL_PASS);
6568 		break;
6569 	default:
6570 		if (*m != NULL) {
6571 			m_freem(*m);
6572 			*m = NULL;
6573 		}
6574 		return (PFIL_DROPPED);
6575 	}
6576 }
6577 
6578 static pfil_return_t
6579 pf_eth_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
6580     void *ruleset __unused, struct inpcb *inp)
6581 {
6582 	int chk;
6583 
6584 	CURVNET_ASSERT_SET();
6585 
6586 	chk = pf_test_eth(PF_IN, flags, ifp, m, inp);
6587 
6588 	return (pf_check_return(chk, m));
6589 }
6590 
6591 static pfil_return_t
6592 pf_eth_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
6593     void *ruleset __unused, struct inpcb *inp)
6594 {
6595 	int chk;
6596 
6597 	CURVNET_ASSERT_SET();
6598 
6599 	chk = pf_test_eth(PF_OUT, flags, ifp, m, inp);
6600 
6601 	return (pf_check_return(chk, m));
6602 }
6603 
6604 #ifdef INET
6605 static pfil_return_t
6606 pf_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
6607     void *ruleset __unused, struct inpcb *inp)
6608 {
6609 	int chk;
6610 
6611 	CURVNET_ASSERT_SET();
6612 
6613 	chk = pf_test(AF_INET, PF_IN, flags, ifp, m, inp, NULL);
6614 
6615 	return (pf_check_return(chk, m));
6616 }
6617 
6618 static pfil_return_t
6619 pf_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
6620     void *ruleset __unused,  struct inpcb *inp)
6621 {
6622 	int chk;
6623 
6624 	CURVNET_ASSERT_SET();
6625 
6626 	chk = pf_test(AF_INET, PF_OUT, flags, ifp, m, inp, NULL);
6627 
6628 	return (pf_check_return(chk, m));
6629 }
6630 #endif
6631 
6632 #ifdef INET6
6633 static pfil_return_t
6634 pf_check6_in(struct mbuf **m, struct ifnet *ifp, int flags,
6635     void *ruleset __unused,  struct inpcb *inp)
6636 {
6637 	int chk;
6638 
6639 	CURVNET_ASSERT_SET();
6640 
6641 	/*
6642 	 * For loopback traffic, IPv6 uses the real interface so that
6643 	 * scoped addresses keep working. To support stateful filtering
6644 	 * we change this to lo0, as is already the case for IPv4.
6645 	 */
6646 	chk = pf_test(AF_INET6, PF_IN, flags, (*m)->m_flags & M_LOOP ? V_loif : ifp,
6647 	    m, inp, NULL);
6648 
6649 	return (pf_check_return(chk, m));
6650 }
6651 
6652 static pfil_return_t
6653 pf_check6_out(struct mbuf **m, struct ifnet *ifp, int flags,
6654     void *ruleset __unused,  struct inpcb *inp)
6655 {
6656 	int chk;
6657 
6658 	CURVNET_ASSERT_SET();
6659 
6660 	chk = pf_test(AF_INET6, PF_OUT, flags, ifp, m, inp, NULL);
6661 
6662 	return (pf_check_return(chk, m));
6663 }
6664 #endif /* INET6 */
6665 
6666 VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_in_hook);
6667 VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_out_hook);
6668 #define	V_pf_eth_in_hook	VNET(pf_eth_in_hook)
6669 #define	V_pf_eth_out_hook	VNET(pf_eth_out_hook)
6670 
6671 #ifdef INET
6672 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_in_hook);
6673 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_out_hook);
6674 #define	V_pf_ip4_in_hook	VNET(pf_ip4_in_hook)
6675 #define	V_pf_ip4_out_hook	VNET(pf_ip4_out_hook)
6676 #endif
6677 #ifdef INET6
6678 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_in_hook);
6679 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_out_hook);
6680 #define	V_pf_ip6_in_hook	VNET(pf_ip6_in_hook)
6681 #define	V_pf_ip6_out_hook	VNET(pf_ip6_out_hook)
6682 #endif
6683 
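/*
 * pfil attachment is a two-step dance: pfil_add_hook() registers the
 * filter itself, and pfil_link() wires the resulting hook into a
 * specific pfil head (here the link-layer head; the inet and inet6
 * heads follow in hook_pf() below).
 */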
6684 static void
6685 hook_pf_eth(void)
6686 {
6687 	struct pfil_hook_args pha = {
6688 		.pa_version = PFIL_VERSION,
6689 		.pa_modname = "pf",
6690 		.pa_type = PFIL_TYPE_ETHERNET,
6691 	};
6692 	struct pfil_link_args pla = {
6693 		.pa_version = PFIL_VERSION,
6694 	};
6695 	int ret __diagused;
6696 
6697 	if (atomic_load_bool(&V_pf_pfil_eth_hooked))
6698 		return;
6699 
6700 	pha.pa_mbuf_chk = pf_eth_check_in;
6701 	pha.pa_flags = PFIL_IN;
6702 	pha.pa_rulname = "eth-in";
6703 	V_pf_eth_in_hook = pfil_add_hook(&pha);
6704 	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
6705 	pla.pa_head = V_link_pfil_head;
6706 	pla.pa_hook = V_pf_eth_in_hook;
6707 	ret = pfil_link(&pla);
6708 	MPASS(ret == 0);
6709 	pha.pa_mbuf_chk = pf_eth_check_out;
6710 	pha.pa_flags = PFIL_OUT;
6711 	pha.pa_rulname = "eth-out";
6712 	V_pf_eth_out_hook = pfil_add_hook(&pha);
6713 	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6714 	pla.pa_head = V_link_pfil_head;
6715 	pla.pa_hook = V_pf_eth_out_hook;
6716 	ret = pfil_link(&pla);
6717 	MPASS(ret == 0);
6718 
6719 	atomic_store_bool(&V_pf_pfil_eth_hooked, true);
6720 }
6721 
6722 static void
6723 hook_pf(void)
6724 {
6725 	struct pfil_hook_args pha = {
6726 		.pa_version = PFIL_VERSION,
6727 		.pa_modname = "pf",
6728 	};
6729 	struct pfil_link_args pla = {
6730 		.pa_version = PFIL_VERSION,
6731 	};
6732 	int ret __diagused;
6733 
6734 	if (atomic_load_bool(&V_pf_pfil_hooked))
6735 		return;
6736 
6737 #ifdef INET
6738 	pha.pa_type = PFIL_TYPE_IP4;
6739 	pha.pa_mbuf_chk = pf_check_in;
6740 	pha.pa_flags = PFIL_IN;
6741 	pha.pa_rulname = "default-in";
6742 	V_pf_ip4_in_hook = pfil_add_hook(&pha);
6743 	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
6744 	pla.pa_head = V_inet_pfil_head;
6745 	pla.pa_hook = V_pf_ip4_in_hook;
6746 	ret = pfil_link(&pla);
6747 	MPASS(ret == 0);
6748 	pha.pa_mbuf_chk = pf_check_out;
6749 	pha.pa_flags = PFIL_OUT;
6750 	pha.pa_rulname = "default-out";
6751 	V_pf_ip4_out_hook = pfil_add_hook(&pha);
6752 	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6753 	pla.pa_head = V_inet_pfil_head;
6754 	pla.pa_hook = V_pf_ip4_out_hook;
6755 	ret = pfil_link(&pla);
6756 	MPASS(ret == 0);
6757 	if (V_pf_filter_local) {
6758 		pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6759 		pla.pa_head = V_inet_local_pfil_head;
6760 		pla.pa_hook = V_pf_ip4_out_hook;
6761 		ret = pfil_link(&pla);
6762 		MPASS(ret == 0);
6763 	}
6764 #endif
6765 #ifdef INET6
6766 	pha.pa_type = PFIL_TYPE_IP6;
6767 	pha.pa_mbuf_chk = pf_check6_in;
6768 	pha.pa_flags = PFIL_IN;
6769 	pha.pa_rulname = "default-in6";
6770 	V_pf_ip6_in_hook = pfil_add_hook(&pha);
6771 	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
6772 	pla.pa_head = V_inet6_pfil_head;
6773 	pla.pa_hook = V_pf_ip6_in_hook;
6774 	ret = pfil_link(&pla);
6775 	MPASS(ret == 0);
6776 	pha.pa_mbuf_chk = pf_check6_out;
6777 	pha.pa_rulname = "default-out6";
6778 	pha.pa_flags = PFIL_OUT;
6779 	V_pf_ip6_out_hook = pfil_add_hook(&pha);
6780 	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6781 	pla.pa_head = V_inet6_pfil_head;
6782 	pla.pa_hook = V_pf_ip6_out_hook;
6783 	ret = pfil_link(&pla);
6784 	MPASS(ret == 0);
6785 	if (V_pf_filter_local) {
6786 		pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6787 		pla.pa_head = V_inet6_local_pfil_head;
6788 		pla.pa_hook = V_pf_ip6_out_hook;
6789 		ret = pfil_link(&pla);
6790 		MPASS(ret == 0);
6791 	}
6792 #endif
6793 
6794 	atomic_store_bool(&V_pf_pfil_hooked, true);
6795 }
6796 
6797 static void
6798 dehook_pf_eth(void)
6799 {
6800 
6801 	if (!atomic_load_bool(&V_pf_pfil_eth_hooked))
6802 		return;
6803 
6804 	pfil_remove_hook(V_pf_eth_in_hook);
6805 	pfil_remove_hook(V_pf_eth_out_hook);
6806 
6807 	atomic_store_bool(&V_pf_pfil_eth_hooked, false);
6808 }
6809 
6810 static void
6811 dehook_pf(void)
6812 {
6813 
6814 	if (!atomic_load_bool(&V_pf_pfil_hooked))
6815 		return;
6816 
6817 #ifdef INET
6818 	pfil_remove_hook(V_pf_ip4_in_hook);
6819 	pfil_remove_hook(V_pf_ip4_out_hook);
6820 #endif
6821 #ifdef INET6
6822 	pfil_remove_hook(V_pf_ip6_in_hook);
6823 	pfil_remove_hook(V_pf_ip6_out_hook);
6824 #endif
6825 
6826 	atomic_store_bool(&V_pf_pfil_hooked, false);
6827 }
6828 
6829 static void
6830 pf_load_vnet(void)
6831 {
6832 	V_pf_tag_z = uma_zcreate("pf tags", sizeof(struct pf_tagname),
6833 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
6834 
6835 	rm_init_flags(&V_pf_rules_lock, "pf rulesets", RM_RECURSE);
6836 	sx_init(&V_pf_ioctl_lock, "pf ioctl");
6837 
6838 	pf_init_tagset(&V_pf_tags, &pf_rule_tag_hashsize,
6839 	    PF_RULE_TAG_HASH_SIZE_DEFAULT);
6840 #ifdef ALTQ
6841 	pf_init_tagset(&V_pf_qids, &pf_queue_tag_hashsize,
6842 	    PF_QUEUE_TAG_HASH_SIZE_DEFAULT);
6843 #endif
6844 
6845 	V_pf_keth = &V_pf_main_keth_anchor.ruleset;
6846 
6847 	pfattach_vnet();
6848 	V_pf_vnet_active = 1;
6849 }
6850 
6851 static int
6852 pf_load(void)
6853 {
6854 	int error;
6855 
6856 	sx_init(&pf_end_lock, "pf end thread");
6857 
6858 	pf_mtag_initialize();
6859 
6860 	pf_dev = make_dev(&pf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, PF_NAME);
6861 	if (pf_dev == NULL)
6862 		return (ENOMEM);
6863 
6864 	pf_end_threads = 0;
6865 	error = kproc_create(pf_purge_thread, NULL, &pf_purge_proc, 0, 0, "pf purge");
6866 	if (error != 0)
6867 		return (error);
6868 
6869 	pfi_initialize();
6870 
6871 	return (0);
6872 }
6873 
6874 static void
6875 pf_unload_vnet(void)
6876 {
6877 	int ret __diagused;
6878 
6879 	V_pf_vnet_active = 0;
6880 	V_pf_status.running = 0;
6881 	dehook_pf();
6882 	dehook_pf_eth();
6883 
6884 	PF_RULES_WLOCK();
6885 	pf_syncookies_cleanup();
6886 	shutdown_pf();
6887 	PF_RULES_WUNLOCK();
6888 
6889 	ret = swi_remove(V_pf_swi_cookie);
6890 	MPASS(ret == 0);
6891 	ret = intr_event_destroy(V_pf_swi_ie);
6892 	MPASS(ret == 0);
6893 
6894 	pf_unload_vnet_purge();
6895 
6896 	pf_normalize_cleanup();
6897 	PF_RULES_WLOCK();
6898 	pfi_cleanup_vnet();
6899 	PF_RULES_WUNLOCK();
6900 	pfr_cleanup();
6901 	pf_osfp_flush();
6902 	pf_cleanup();
6903 	if (IS_DEFAULT_VNET(curvnet))
6904 		pf_mtag_cleanup();
6905 
6906 	pf_cleanup_tagset(&V_pf_tags);
6907 #ifdef ALTQ
6908 	pf_cleanup_tagset(&V_pf_qids);
6909 #endif
6910 	uma_zdestroy(V_pf_tag_z);
6911 
6912 #ifdef PF_WANT_32_TO_64_COUNTER
6913 	PF_RULES_WLOCK();
6914 	LIST_REMOVE(V_pf_kifmarker, pfik_allkiflist);
6915 
6916 	MPASS(LIST_EMPTY(&V_pf_allkiflist));
6917 	MPASS(V_pf_allkifcount == 0);
6918 
6919 	LIST_REMOVE(&V_pf_default_rule, allrulelist);
6920 	V_pf_allrulecount--;
6921 	LIST_REMOVE(V_pf_rulemarker, allrulelist);
6922 
6923 	MPASS(LIST_EMPTY(&V_pf_allrulelist));
6924 	MPASS(V_pf_allrulecount == 0);
6925 
6926 	PF_RULES_WUNLOCK();
6927 
6928 	free(V_pf_kifmarker, PFI_MTYPE);
6929 	free(V_pf_rulemarker, M_PFRULE);
6930 #endif
6931 
6932 	/* Free counters last as we updated them during shutdown. */
6933 	pf_counter_u64_deinit(&V_pf_default_rule.evaluations);
6934 	for (int i = 0; i < 2; i++) {
6935 		pf_counter_u64_deinit(&V_pf_default_rule.packets[i]);
6936 		pf_counter_u64_deinit(&V_pf_default_rule.bytes[i]);
6937 	}
6938 	counter_u64_free(V_pf_default_rule.states_cur);
6939 	counter_u64_free(V_pf_default_rule.states_tot);
6940 	for (pf_sn_types_t sn_type = 0; sn_type < PF_SN_MAX; sn_type++)
6941 		counter_u64_free(V_pf_default_rule.src_nodes[sn_type]);
6942 	uma_zfree_pcpu(pf_timestamp_pcpu_zone, V_pf_default_rule.timestamp);
6943 
6944 	for (int i = 0; i < PFRES_MAX; i++)
6945 		counter_u64_free(V_pf_status.counters[i]);
6946 	for (int i = 0; i < KLCNT_MAX; i++)
6947 		counter_u64_free(V_pf_status.lcounters[i]);
6948 	for (int i = 0; i < FCNT_MAX; i++)
6949 		pf_counter_u64_deinit(&V_pf_status.fcounters[i]);
6950 	for (int i = 0; i < SCNT_MAX; i++)
6951 		counter_u64_free(V_pf_status.scounters[i]);
6952 
6953 	rm_destroy(&V_pf_rules_lock);
6954 	sx_destroy(&V_pf_ioctl_lock);
6955 }
6956 
6957 static void
6958 pf_unload(void)
6959 {
6960 
6961 	sx_xlock(&pf_end_lock);
6962 	pf_end_threads = 1;
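	/*
	 * The purge thread bumps pf_end_threads past 1 on its way out;
	 * keep waking it until that handshake completes.
	 */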
6963 	while (pf_end_threads < 2) {
6964 		wakeup_one(pf_purge_thread);
6965 		sx_sleep(pf_purge_proc, &pf_end_lock, 0, "pftmo", 0);
6966 	}
6967 	sx_xunlock(&pf_end_lock);
6968 
6969 	pf_nl_unregister();
6970 
6971 	if (pf_dev != NULL)
6972 		destroy_dev(pf_dev);
6973 
6974 	pfi_cleanup();
6975 
6976 	sx_destroy(&pf_end_lock);
6977 }
6978 
6979 static void
6980 vnet_pf_init(void *unused __unused)
6981 {
6982 
6983 	pf_load_vnet();
6984 }
6985 VNET_SYSINIT(vnet_pf_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
6986     vnet_pf_init, NULL);
6987 
6988 static void
6989 vnet_pf_uninit(const void *unused __unused)
6990 {
6991 
6992 	pf_unload_vnet();
6993 }
6994 SYSUNINIT(pf_unload, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND, pf_unload, NULL);
6995 VNET_SYSUNINIT(vnet_pf_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
6996     vnet_pf_uninit, NULL);
6997 
6998 static int
6999 pf_modevent(module_t mod, int type, void *data)
7000 {
7001 	int error = 0;
7002 
7003 	switch (type) {
7004 	case MOD_LOAD:
7005 		error = pf_load();
7006 		pf_nl_register();
7007 		break;
7008 	case MOD_UNLOAD:
7009 		/* Handled in SYSUNINIT(pf_unload) so that it runs after
7010 		 * all of the vnet_pf_uninit() calls. */
7011 		break;
7012 	default:
7013 		error = EINVAL;
7014 		break;
7015 	}
7016 
7017 	return (error);
7018 }
7019 
7020 static moduledata_t pf_mod = {
7021 	"pf",
7022 	pf_modevent,
7023 	0
7024 };
7025 
7026 DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND);
7027 MODULE_DEPEND(pf, netlink, 1, 1, 1);
7028 MODULE_DEPEND(pf, crypto, 1, 1, 1);
7029 MODULE_VERSION(pf, PF_MODVER);
7030