xref: /freebsd/sys/netpfil/pf/pf_ioctl.c (revision 81f36fbc98dd74ca923938e0329919d426811b0c)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2001 Daniel Hartmeier
5  * Copyright (c) 2002,2003 Henning Brauer
6  * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  *
13  *    - Redistributions of source code must retain the above copyright
14  *      notice, this list of conditions and the following disclaimer.
15  *    - Redistributions in binary form must reproduce the above
16  *      copyright notice, this list of conditions and the following
17  *      disclaimer in the documentation and/or other materials provided
18  *      with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
30  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  *
33  * Effort sponsored in part by the Defense Advanced Research Projects
34  * Agency (DARPA) and Air Force Research Laboratory, Air Force
35  * Materiel Command, USAF, under agreement number F30602-01-2-0537.
36  *
37  *	$OpenBSD: pf_ioctl.c,v 1.213 2009/02/15 21:46:12 mbalmer Exp $
38  */
39 
40 #include <sys/cdefs.h>
41 #include "opt_inet.h"
42 #include "opt_inet6.h"
43 #include "opt_bpf.h"
44 #include "opt_pf.h"
45 
46 #include <sys/param.h>
47 #include <sys/_bitset.h>
48 #include <sys/bitset.h>
49 #include <sys/bus.h>
50 #include <sys/conf.h>
51 #include <sys/endian.h>
52 #include <sys/fcntl.h>
53 #include <sys/filio.h>
54 #include <sys/hash.h>
55 #include <sys/interrupt.h>
56 #include <sys/jail.h>
57 #include <sys/kernel.h>
58 #include <sys/kthread.h>
59 #include <sys/lock.h>
60 #include <sys/mbuf.h>
61 #include <sys/module.h>
62 #include <sys/nv.h>
63 #include <sys/proc.h>
64 #include <sys/sdt.h>
65 #include <sys/smp.h>
66 #include <sys/socket.h>
67 #include <sys/sysctl.h>
68 #include <sys/md5.h>
69 #include <sys/ucred.h>
70 
71 #include <net/if.h>
72 #include <net/if_var.h>
73 #include <net/if_private.h>
74 #include <net/vnet.h>
75 #include <net/route.h>
76 #include <net/pfil.h>
77 #include <net/pfvar.h>
78 #include <net/if_pfsync.h>
79 #include <net/if_pflog.h>
80 
81 #include <netinet/in.h>
82 #include <netinet/ip.h>
83 #include <netinet/ip_var.h>
84 #include <netinet6/ip6_var.h>
85 #include <netinet/ip_icmp.h>
86 #include <netpfil/pf/pf_nv.h>
87 
88 #ifdef INET6
89 #include <netinet/ip6.h>
90 #endif /* INET6 */
91 
92 #ifdef ALTQ
93 #include <net/altq/altq.h>
94 #endif
95 
96 SDT_PROBE_DEFINE3(pf, ioctl, ioctl, error, "int", "int", "int");
97 SDT_PROBE_DEFINE3(pf, ioctl, function, error, "char *", "int", "int");
98 SDT_PROBE_DEFINE2(pf, ioctl, addrule, error, "int", "int");
99 SDT_PROBE_DEFINE2(pf, ioctl, nvchk, error, "int", "int");
100 
101 static struct pf_kpool	*pf_get_kpool(const char *, u_int32_t, u_int8_t,
102 			    u_int32_t, u_int8_t, u_int8_t, u_int8_t);
103 
104 static void		 pf_mv_kpool(struct pf_kpalist *, struct pf_kpalist *);
105 static void		 pf_empty_kpool(struct pf_kpalist *);
106 static int		 pfioctl(struct cdev *, u_long, caddr_t, int,
107 			    struct thread *);
108 static int		 pf_begin_eth(uint32_t *, const char *);
109 static void		 pf_rollback_eth_cb(struct epoch_context *);
110 static int		 pf_rollback_eth(uint32_t, const char *);
111 static int		 pf_commit_eth(uint32_t, const char *);
112 static void		 pf_free_eth_rule(struct pf_keth_rule *);
113 #ifdef ALTQ
114 static int		 pf_begin_altq(u_int32_t *);
115 static int		 pf_rollback_altq(u_int32_t);
116 static int		 pf_commit_altq(u_int32_t);
117 static int		 pf_enable_altq(struct pf_altq *);
118 static int		 pf_disable_altq(struct pf_altq *);
119 static uint16_t		 pf_qname2qid(const char *);
120 static void		 pf_qid_unref(uint16_t);
121 #endif /* ALTQ */
122 static int		 pf_begin_rules(u_int32_t *, int, const char *);
123 static int		 pf_rollback_rules(u_int32_t, int, char *);
124 static int		 pf_setup_pfsync_matching(struct pf_kruleset *);
125 static void		 pf_hash_rule_rolling(MD5_CTX *, struct pf_krule *);
126 static void		 pf_hash_rule(struct pf_krule *);
127 static void		 pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
128 static int		 pf_commit_rules(u_int32_t, int, char *);
129 static int		 pf_addr_setup(struct pf_kruleset *,
130 			    struct pf_addr_wrap *, sa_family_t);
131 static void		 pf_addr_copyout(struct pf_addr_wrap *);
132 static void		 pf_src_node_copy(const struct pf_ksrc_node *,
133 			    struct pf_src_node *);
134 #ifdef ALTQ
135 static int		 pf_export_kaltq(struct pf_altq *,
136 			    struct pfioc_altq_v1 *, size_t);
137 static int		 pf_import_kaltq(struct pfioc_altq_v1 *,
138 			    struct pf_altq *, size_t);
139 #endif /* ALTQ */
140 
141 VNET_DEFINE(struct pf_krule,	pf_default_rule);
142 
143 static __inline int             pf_krule_compare(struct pf_krule *,
144 				    struct pf_krule *);
145 
146 RB_GENERATE(pf_krule_global, pf_krule, entry_global, pf_krule_compare);
147 
148 #ifdef ALTQ
149 VNET_DEFINE_STATIC(int,		pf_altq_running);
150 #define	V_pf_altq_running	VNET(pf_altq_running)
151 #endif
152 
153 #define	TAGID_MAX	 50000
154 struct pf_tagname {
155 	TAILQ_ENTRY(pf_tagname)	namehash_entries;
156 	TAILQ_ENTRY(pf_tagname)	taghash_entries;
157 	char			name[PF_TAG_NAME_SIZE];
158 	uint16_t		tag;
159 	int			ref;
160 };
161 
162 struct pf_tagset {
163 	TAILQ_HEAD(, pf_tagname)	*namehash;
164 	TAILQ_HEAD(, pf_tagname)	*taghash;
165 	unsigned int			 mask;
166 	uint32_t			 seed;
167 	BITSET_DEFINE(, TAGID_MAX)	 avail;
168 };
169 
170 VNET_DEFINE(struct pf_tagset, pf_tags);
171 #define	V_pf_tags	VNET(pf_tags)
172 static unsigned int	pf_rule_tag_hashsize;
173 #define	PF_RULE_TAG_HASH_SIZE_DEFAULT	128
174 SYSCTL_UINT(_net_pf, OID_AUTO, rule_tag_hashsize, CTLFLAG_RDTUN,
175     &pf_rule_tag_hashsize, PF_RULE_TAG_HASH_SIZE_DEFAULT,
176     "Size of pf(4) rule tag hashtable");
177 
178 #ifdef ALTQ
179 VNET_DEFINE(struct pf_tagset, pf_qids);
180 #define	V_pf_qids	VNET(pf_qids)
181 static unsigned int	pf_queue_tag_hashsize;
182 #define	PF_QUEUE_TAG_HASH_SIZE_DEFAULT	128
183 SYSCTL_UINT(_net_pf, OID_AUTO, queue_tag_hashsize, CTLFLAG_RDTUN,
184     &pf_queue_tag_hashsize, PF_QUEUE_TAG_HASH_SIZE_DEFAULT,
185     "Size of pf(4) queue tag hashtable");
186 #endif
187 VNET_DEFINE(uma_zone_t,	 pf_tag_z);
188 #define	V_pf_tag_z		 VNET(pf_tag_z)
189 static MALLOC_DEFINE(M_PFALTQ, "pf_altq", "pf(4) altq configuration db");
190 static MALLOC_DEFINE(M_PFRULE, "pf_rule", "pf(4) rules");
191 
192 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
193 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
194 #endif
195 
196 VNET_DEFINE_STATIC(bool, pf_filter_local) = false;
197 #define V_pf_filter_local	VNET(pf_filter_local)
198 SYSCTL_BOOL(_net_pf, OID_AUTO, filter_local, CTLFLAG_VNET | CTLFLAG_RW,
199     &VNET_NAME(pf_filter_local), false,
200     "Enable filtering for packets delivered to local network stack");
201 
202 #ifdef PF_DEFAULT_TO_DROP
203 VNET_DEFINE_STATIC(bool, default_to_drop) = true;
204 #else
205 VNET_DEFINE_STATIC(bool, default_to_drop);
206 #endif
207 #define	V_default_to_drop VNET(default_to_drop)
208 SYSCTL_BOOL(_net_pf, OID_AUTO, default_to_drop, CTLFLAG_RDTUN | CTLFLAG_VNET,
209     &VNET_NAME(default_to_drop), false,
210     "Make the default rule drop all packets.");
211 
212 static void		 pf_init_tagset(struct pf_tagset *, unsigned int *,
213 			    unsigned int);
214 static void		 pf_cleanup_tagset(struct pf_tagset *);
215 static uint16_t		 tagname2hashindex(const struct pf_tagset *, const char *);
216 static uint16_t		 tag2hashindex(const struct pf_tagset *, uint16_t);
217 static u_int16_t	 tagname2tag(struct pf_tagset *, const char *);
218 static u_int16_t	 pf_tagname2tag(const char *);
219 static void		 tag_unref(struct pf_tagset *, u_int16_t);
220 
221 #define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x
222 
223 struct cdev *pf_dev;
224 
225 /*
226  * XXX - These are new and need to be checked when moveing to a new version
227  */
228 static void		 pf_clear_all_states(void);
229 static unsigned int	 pf_clear_states(const struct pf_kstate_kill *);
230 static void		 pf_killstates(struct pf_kstate_kill *,
231 			    unsigned int *);
232 static int		 pf_killstates_row(struct pf_kstate_kill *,
233 			    struct pf_idhash *);
234 static int		 pf_killstates_nv(struct pfioc_nv *);
235 static int		 pf_clearstates_nv(struct pfioc_nv *);
236 static int		 pf_getstate(struct pfioc_nv *);
237 static int		 pf_getstatus(struct pfioc_nv *);
238 static int		 pf_clear_tables(void);
239 static void		 pf_clear_srcnodes(struct pf_ksrc_node *);
240 static void		 pf_kill_srcnodes(struct pfioc_src_node_kill *);
241 static int		 pf_keepcounters(struct pfioc_nv *);
242 static void		 pf_tbladdr_copyout(struct pf_addr_wrap *);
243 
244 /*
245  * Wrapper functions for pfil(9) hooks
246  */
247 static pfil_return_t pf_eth_check_in(struct mbuf **m, struct ifnet *ifp,
248     int flags, void *ruleset __unused, struct inpcb *inp);
249 static pfil_return_t pf_eth_check_out(struct mbuf **m, struct ifnet *ifp,
250     int flags, void *ruleset __unused, struct inpcb *inp);
251 #ifdef INET
252 static pfil_return_t pf_check_in(struct mbuf **m, struct ifnet *ifp,
253     int flags, void *ruleset __unused, struct inpcb *inp);
254 static pfil_return_t pf_check_out(struct mbuf **m, struct ifnet *ifp,
255     int flags, void *ruleset __unused, struct inpcb *inp);
256 #endif
257 #ifdef INET6
258 static pfil_return_t pf_check6_in(struct mbuf **m, struct ifnet *ifp,
259     int flags, void *ruleset __unused, struct inpcb *inp);
260 static pfil_return_t pf_check6_out(struct mbuf **m, struct ifnet *ifp,
261     int flags, void *ruleset __unused, struct inpcb *inp);
262 #endif
263 
264 static void		hook_pf_eth(void);
265 static void		hook_pf(void);
266 static void		dehook_pf_eth(void);
267 static void		dehook_pf(void);
268 static int		shutdown_pf(void);
269 static int		pf_load(void);
270 static void		pf_unload(void);
271 
272 static struct cdevsw pf_cdevsw = {
273 	.d_ioctl =	pfioctl,
274 	.d_name =	PF_NAME,
275 	.d_version =	D_VERSION,
276 };
277 
278 VNET_DEFINE_STATIC(bool, pf_pfil_hooked);
279 #define V_pf_pfil_hooked	VNET(pf_pfil_hooked)
280 VNET_DEFINE_STATIC(bool, pf_pfil_eth_hooked);
281 #define V_pf_pfil_eth_hooked	VNET(pf_pfil_eth_hooked)
282 
283 /*
284  * We need a flag that is neither hooked nor running to know when
285  * the VNET is "valid".  We primarily need this to control (global)
286  * external event, e.g., eventhandlers.
287  */
288 VNET_DEFINE(int, pf_vnet_active);
289 #define V_pf_vnet_active	VNET(pf_vnet_active)
290 
291 int pf_end_threads;
292 struct proc *pf_purge_proc;
293 
294 VNET_DEFINE(struct rmlock, pf_rules_lock);
295 VNET_DEFINE_STATIC(struct sx, pf_ioctl_lock);
296 #define	V_pf_ioctl_lock		VNET(pf_ioctl_lock)
297 struct sx			pf_end_lock;
298 
299 /* pfsync */
300 VNET_DEFINE(pfsync_state_import_t *, pfsync_state_import_ptr);
301 VNET_DEFINE(pfsync_insert_state_t *, pfsync_insert_state_ptr);
302 VNET_DEFINE(pfsync_update_state_t *, pfsync_update_state_ptr);
303 VNET_DEFINE(pfsync_delete_state_t *, pfsync_delete_state_ptr);
304 VNET_DEFINE(pfsync_clear_states_t *, pfsync_clear_states_ptr);
305 VNET_DEFINE(pfsync_defer_t *, pfsync_defer_ptr);
306 pfsync_detach_ifnet_t *pfsync_detach_ifnet_ptr;
307 
308 /* pflog */
309 pflog_packet_t			*pflog_packet_ptr = NULL;
310 
311 /*
312  * Copy a user-provided string, returning an error if truncation would occur.
313  * Avoid scanning past "sz" bytes in the source string since there's no
314  * guarantee that it's nul-terminated.
315  */
316 static int
317 pf_user_strcpy(char *dst, const char *src, size_t sz)
318 {
319 	if (strnlen(src, sz) == sz)
320 		return (EINVAL);
321 	(void)strlcpy(dst, src, sz);
322 	return (0);
323 }
324 
325 static void
326 pfattach_vnet(void)
327 {
328 	u_int32_t *my_timeout = V_pf_default_rule.timeout;
329 
330 	bzero(&V_pf_status, sizeof(V_pf_status));
331 
332 	pf_initialize();
333 	pfr_initialize();
334 	pfi_initialize_vnet();
335 	pf_normalize_init();
336 	pf_syncookies_init();
337 
338 	V_pf_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
339 	V_pf_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT;
340 
341 	RB_INIT(&V_pf_anchors);
342 	pf_init_kruleset(&pf_main_ruleset);
343 
344 	pf_init_keth(V_pf_keth);
345 
346 	/* default rule should never be garbage collected */
347 	V_pf_default_rule.entries.tqe_prev = &V_pf_default_rule.entries.tqe_next;
348 	V_pf_default_rule.action = V_default_to_drop ? PF_DROP : PF_PASS;
349 	V_pf_default_rule.nr = -1;
350 	V_pf_default_rule.rtableid = -1;
351 
352 	pf_counter_u64_init(&V_pf_default_rule.evaluations, M_WAITOK);
353 	for (int i = 0; i < 2; i++) {
354 		pf_counter_u64_init(&V_pf_default_rule.packets[i], M_WAITOK);
355 		pf_counter_u64_init(&V_pf_default_rule.bytes[i], M_WAITOK);
356 	}
357 	V_pf_default_rule.states_cur = counter_u64_alloc(M_WAITOK);
358 	V_pf_default_rule.states_tot = counter_u64_alloc(M_WAITOK);
359 	V_pf_default_rule.src_nodes = counter_u64_alloc(M_WAITOK);
360 
361 	V_pf_default_rule.timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
362 	    M_WAITOK | M_ZERO);
363 
364 #ifdef PF_WANT_32_TO_64_COUNTER
365 	V_pf_kifmarker = malloc(sizeof(*V_pf_kifmarker), PFI_MTYPE, M_WAITOK | M_ZERO);
366 	V_pf_rulemarker = malloc(sizeof(*V_pf_rulemarker), M_PFRULE, M_WAITOK | M_ZERO);
367 	PF_RULES_WLOCK();
368 	LIST_INSERT_HEAD(&V_pf_allkiflist, V_pf_kifmarker, pfik_allkiflist);
369 	LIST_INSERT_HEAD(&V_pf_allrulelist, &V_pf_default_rule, allrulelist);
370 	V_pf_allrulecount++;
371 	LIST_INSERT_HEAD(&V_pf_allrulelist, V_pf_rulemarker, allrulelist);
372 	PF_RULES_WUNLOCK();
373 #endif
374 
375 	/* initialize default timeouts */
376 	my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
377 	my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
378 	my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
379 	my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
380 	my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
381 	my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
382 	my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
383 	my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
384 	my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
385 	my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
386 	my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
387 	my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
388 	my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
389 	my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
390 	my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
391 	my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
392 	my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
393 	my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
394 	my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
395 	my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;
396 
397 	V_pf_status.debug = PF_DEBUG_URGENT;
398 	/*
399 	 * XXX This is different than in OpenBSD where reassembly is enabled by
400 	 * defult. In FreeBSD we expect people to still use scrub rules and
401 	 * switch to the new syntax later. Only when they switch they must
402 	 * explicitly enable reassemle. We could change the default once the
403 	 * scrub rule functionality is hopefully removed some day in future.
404 	 */
405 	V_pf_status.reass = 0;
406 
407 	V_pf_pfil_hooked = false;
408 	V_pf_pfil_eth_hooked = false;
409 
410 	/* XXX do our best to avoid a conflict */
411 	V_pf_status.hostid = arc4random();
412 
413 	for (int i = 0; i < PFRES_MAX; i++)
414 		V_pf_status.counters[i] = counter_u64_alloc(M_WAITOK);
415 	for (int i = 0; i < KLCNT_MAX; i++)
416 		V_pf_status.lcounters[i] = counter_u64_alloc(M_WAITOK);
417 	for (int i = 0; i < FCNT_MAX; i++)
418 		pf_counter_u64_init(&V_pf_status.fcounters[i], M_WAITOK);
419 	for (int i = 0; i < SCNT_MAX; i++)
420 		V_pf_status.scounters[i] = counter_u64_alloc(M_WAITOK);
421 
422 	if (swi_add(&V_pf_swi_ie, "pf send", pf_intr, curvnet, SWI_NET,
423 	    INTR_MPSAFE, &V_pf_swi_cookie) != 0)
424 		/* XXXGL: leaked all above. */
425 		return;
426 }
427 
428 static struct pf_kpool *
429 pf_get_kpool(const char *anchor, u_int32_t ticket, u_int8_t rule_action,
430     u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
431     u_int8_t check_ticket)
432 {
433 	struct pf_kruleset	*ruleset;
434 	struct pf_krule		*rule;
435 	int			 rs_num;
436 
437 	ruleset = pf_find_kruleset(anchor);
438 	if (ruleset == NULL)
439 		return (NULL);
440 	rs_num = pf_get_ruleset_number(rule_action);
441 	if (rs_num >= PF_RULESET_MAX)
442 		return (NULL);
443 	if (active) {
444 		if (check_ticket && ticket !=
445 		    ruleset->rules[rs_num].active.ticket)
446 			return (NULL);
447 		if (r_last)
448 			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
449 			    pf_krulequeue);
450 		else
451 			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
452 	} else {
453 		if (check_ticket && ticket !=
454 		    ruleset->rules[rs_num].inactive.ticket)
455 			return (NULL);
456 		if (r_last)
457 			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
458 			    pf_krulequeue);
459 		else
460 			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
461 	}
462 	if (!r_last) {
463 		while ((rule != NULL) && (rule->nr != rule_number))
464 			rule = TAILQ_NEXT(rule, entries);
465 	}
466 	if (rule == NULL)
467 		return (NULL);
468 
469 	return (&rule->rpool);
470 }
471 
472 static void
473 pf_mv_kpool(struct pf_kpalist *poola, struct pf_kpalist *poolb)
474 {
475 	struct pf_kpooladdr	*mv_pool_pa;
476 
477 	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
478 		TAILQ_REMOVE(poola, mv_pool_pa, entries);
479 		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
480 	}
481 }
482 
483 static void
484 pf_empty_kpool(struct pf_kpalist *poola)
485 {
486 	struct pf_kpooladdr *pa;
487 
488 	while ((pa = TAILQ_FIRST(poola)) != NULL) {
489 		switch (pa->addr.type) {
490 		case PF_ADDR_DYNIFTL:
491 			pfi_dynaddr_remove(pa->addr.p.dyn);
492 			break;
493 		case PF_ADDR_TABLE:
494 			/* XXX: this could be unfinished pooladdr on pabuf */
495 			if (pa->addr.p.tbl != NULL)
496 				pfr_detach_table(pa->addr.p.tbl);
497 			break;
498 		}
499 		if (pa->kif)
500 			pfi_kkif_unref(pa->kif);
501 		TAILQ_REMOVE(poola, pa, entries);
502 		free(pa, M_PFRULE);
503 	}
504 }
505 
506 static void
507 pf_unlink_rule_locked(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
508 {
509 
510 	PF_RULES_WASSERT();
511 	PF_UNLNKDRULES_ASSERT();
512 
513 	TAILQ_REMOVE(rulequeue, rule, entries);
514 
515 	rule->rule_ref |= PFRULE_REFS;
516 	TAILQ_INSERT_TAIL(&V_pf_unlinked_rules, rule, entries);
517 }
518 
519 static void
520 pf_unlink_rule(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
521 {
522 
523 	PF_RULES_WASSERT();
524 
525 	PF_UNLNKDRULES_LOCK();
526 	pf_unlink_rule_locked(rulequeue, rule);
527 	PF_UNLNKDRULES_UNLOCK();
528 }
529 
530 static void
531 pf_free_eth_rule(struct pf_keth_rule *rule)
532 {
533 	PF_RULES_WASSERT();
534 
535 	if (rule == NULL)
536 		return;
537 
538 	if (rule->tag)
539 		tag_unref(&V_pf_tags, rule->tag);
540 	if (rule->match_tag)
541 		tag_unref(&V_pf_tags, rule->match_tag);
542 #ifdef ALTQ
543 	pf_qid_unref(rule->qid);
544 #endif
545 
546 	if (rule->bridge_to)
547 		pfi_kkif_unref(rule->bridge_to);
548 	if (rule->kif)
549 		pfi_kkif_unref(rule->kif);
550 
551 	if (rule->ipsrc.addr.type == PF_ADDR_TABLE)
552 		pfr_detach_table(rule->ipsrc.addr.p.tbl);
553 	if (rule->ipdst.addr.type == PF_ADDR_TABLE)
554 		pfr_detach_table(rule->ipdst.addr.p.tbl);
555 
556 	counter_u64_free(rule->evaluations);
557 	for (int i = 0; i < 2; i++) {
558 		counter_u64_free(rule->packets[i]);
559 		counter_u64_free(rule->bytes[i]);
560 	}
561 	uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp);
562 	pf_keth_anchor_remove(rule);
563 
564 	free(rule, M_PFRULE);
565 }
566 
567 void
568 pf_free_rule(struct pf_krule *rule)
569 {
570 
571 	PF_RULES_WASSERT();
572 	PF_CONFIG_ASSERT();
573 
574 	if (rule->tag)
575 		tag_unref(&V_pf_tags, rule->tag);
576 	if (rule->match_tag)
577 		tag_unref(&V_pf_tags, rule->match_tag);
578 #ifdef ALTQ
579 	if (rule->pqid != rule->qid)
580 		pf_qid_unref(rule->pqid);
581 	pf_qid_unref(rule->qid);
582 #endif
583 	switch (rule->src.addr.type) {
584 	case PF_ADDR_DYNIFTL:
585 		pfi_dynaddr_remove(rule->src.addr.p.dyn);
586 		break;
587 	case PF_ADDR_TABLE:
588 		pfr_detach_table(rule->src.addr.p.tbl);
589 		break;
590 	}
591 	switch (rule->dst.addr.type) {
592 	case PF_ADDR_DYNIFTL:
593 		pfi_dynaddr_remove(rule->dst.addr.p.dyn);
594 		break;
595 	case PF_ADDR_TABLE:
596 		pfr_detach_table(rule->dst.addr.p.tbl);
597 		break;
598 	}
599 	if (rule->overload_tbl)
600 		pfr_detach_table(rule->overload_tbl);
601 	if (rule->kif)
602 		pfi_kkif_unref(rule->kif);
603 	pf_kanchor_remove(rule);
604 	pf_empty_kpool(&rule->rpool.list);
605 
606 	pf_krule_free(rule);
607 }
608 
609 static void
610 pf_init_tagset(struct pf_tagset *ts, unsigned int *tunable_size,
611     unsigned int default_size)
612 {
613 	unsigned int i;
614 	unsigned int hashsize;
615 
616 	if (*tunable_size == 0 || !powerof2(*tunable_size))
617 		*tunable_size = default_size;
618 
619 	hashsize = *tunable_size;
620 	ts->namehash = mallocarray(hashsize, sizeof(*ts->namehash), M_PFHASH,
621 	    M_WAITOK);
622 	ts->taghash = mallocarray(hashsize, sizeof(*ts->taghash), M_PFHASH,
623 	    M_WAITOK);
624 	ts->mask = hashsize - 1;
625 	ts->seed = arc4random();
626 	for (i = 0; i < hashsize; i++) {
627 		TAILQ_INIT(&ts->namehash[i]);
628 		TAILQ_INIT(&ts->taghash[i]);
629 	}
630 	BIT_FILL(TAGID_MAX, &ts->avail);
631 }
632 
633 static void
634 pf_cleanup_tagset(struct pf_tagset *ts)
635 {
636 	unsigned int i;
637 	unsigned int hashsize;
638 	struct pf_tagname *t, *tmp;
639 
640 	/*
641 	 * Only need to clean up one of the hashes as each tag is hashed
642 	 * into each table.
643 	 */
644 	hashsize = ts->mask + 1;
645 	for (i = 0; i < hashsize; i++)
646 		TAILQ_FOREACH_SAFE(t, &ts->namehash[i], namehash_entries, tmp)
647 			uma_zfree(V_pf_tag_z, t);
648 
649 	free(ts->namehash, M_PFHASH);
650 	free(ts->taghash, M_PFHASH);
651 }
652 
653 static uint16_t
654 tagname2hashindex(const struct pf_tagset *ts, const char *tagname)
655 {
656 	size_t len;
657 
658 	len = strnlen(tagname, PF_TAG_NAME_SIZE - 1);
659 	return (murmur3_32_hash(tagname, len, ts->seed) & ts->mask);
660 }
661 
662 static uint16_t
663 tag2hashindex(const struct pf_tagset *ts, uint16_t tag)
664 {
665 
666 	return (tag & ts->mask);
667 }
668 
669 static u_int16_t
670 tagname2tag(struct pf_tagset *ts, const char *tagname)
671 {
672 	struct pf_tagname	*tag;
673 	u_int32_t		 index;
674 	u_int16_t		 new_tagid;
675 
676 	PF_RULES_WASSERT();
677 
678 	index = tagname2hashindex(ts, tagname);
679 	TAILQ_FOREACH(tag, &ts->namehash[index], namehash_entries)
680 		if (strcmp(tagname, tag->name) == 0) {
681 			tag->ref++;
682 			return (tag->tag);
683 		}
684 
685 	/*
686 	 * new entry
687 	 *
688 	 * to avoid fragmentation, we do a linear search from the beginning
689 	 * and take the first free slot we find.
690 	 */
691 	new_tagid = BIT_FFS(TAGID_MAX, &ts->avail);
692 	/*
693 	 * Tags are 1-based, with valid tags in the range [1..TAGID_MAX].
694 	 * BIT_FFS() returns a 1-based bit number, with 0 indicating no bits
695 	 * set.  It may also return a bit number greater than TAGID_MAX due
696 	 * to rounding of the number of bits in the vector up to a multiple
697 	 * of the vector word size at declaration/allocation time.
698 	 */
699 	if ((new_tagid == 0) || (new_tagid > TAGID_MAX))
700 		return (0);
701 
702 	/* Mark the tag as in use.  Bits are 0-based for BIT_CLR() */
703 	BIT_CLR(TAGID_MAX, new_tagid - 1, &ts->avail);
704 
705 	/* allocate and fill new struct pf_tagname */
706 	tag = uma_zalloc(V_pf_tag_z, M_NOWAIT);
707 	if (tag == NULL)
708 		return (0);
709 	strlcpy(tag->name, tagname, sizeof(tag->name));
710 	tag->tag = new_tagid;
711 	tag->ref = 1;
712 
713 	/* Insert into namehash */
714 	TAILQ_INSERT_TAIL(&ts->namehash[index], tag, namehash_entries);
715 
716 	/* Insert into taghash */
717 	index = tag2hashindex(ts, new_tagid);
718 	TAILQ_INSERT_TAIL(&ts->taghash[index], tag, taghash_entries);
719 
720 	return (tag->tag);
721 }
722 
723 static void
724 tag_unref(struct pf_tagset *ts, u_int16_t tag)
725 {
726 	struct pf_tagname	*t;
727 	uint16_t		 index;
728 
729 	PF_RULES_WASSERT();
730 
731 	index = tag2hashindex(ts, tag);
732 	TAILQ_FOREACH(t, &ts->taghash[index], taghash_entries)
733 		if (tag == t->tag) {
734 			if (--t->ref == 0) {
735 				TAILQ_REMOVE(&ts->taghash[index], t,
736 				    taghash_entries);
737 				index = tagname2hashindex(ts, t->name);
738 				TAILQ_REMOVE(&ts->namehash[index], t,
739 				    namehash_entries);
740 				/* Bits are 0-based for BIT_SET() */
741 				BIT_SET(TAGID_MAX, tag - 1, &ts->avail);
742 				uma_zfree(V_pf_tag_z, t);
743 			}
744 			break;
745 		}
746 }
747 
748 static uint16_t
749 pf_tagname2tag(const char *tagname)
750 {
751 	return (tagname2tag(&V_pf_tags, tagname));
752 }
753 
754 static int
755 pf_begin_eth(uint32_t *ticket, const char *anchor)
756 {
757 	struct pf_keth_rule *rule, *tmp;
758 	struct pf_keth_ruleset *rs;
759 
760 	PF_RULES_WASSERT();
761 
762 	rs = pf_find_or_create_keth_ruleset(anchor);
763 	if (rs == NULL)
764 		return (EINVAL);
765 
766 	/* Purge old inactive rules. */
767 	TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries,
768 	    tmp) {
769 		TAILQ_REMOVE(rs->inactive.rules, rule,
770 		    entries);
771 		pf_free_eth_rule(rule);
772 	}
773 
774 	*ticket = ++rs->inactive.ticket;
775 	rs->inactive.open = 1;
776 
777 	return (0);
778 }
779 
780 static void
781 pf_rollback_eth_cb(struct epoch_context *ctx)
782 {
783 	struct pf_keth_ruleset *rs;
784 
785 	rs = __containerof(ctx, struct pf_keth_ruleset, epoch_ctx);
786 
787 	CURVNET_SET(rs->vnet);
788 
789 	PF_RULES_WLOCK();
790 	pf_rollback_eth(rs->inactive.ticket,
791 	    rs->anchor ? rs->anchor->path : "");
792 	PF_RULES_WUNLOCK();
793 
794 	CURVNET_RESTORE();
795 }
796 
797 static int
798 pf_rollback_eth(uint32_t ticket, const char *anchor)
799 {
800 	struct pf_keth_rule *rule, *tmp;
801 	struct pf_keth_ruleset *rs;
802 
803 	PF_RULES_WASSERT();
804 
805 	rs = pf_find_keth_ruleset(anchor);
806 	if (rs == NULL)
807 		return (EINVAL);
808 
809 	if (!rs->inactive.open ||
810 	    ticket != rs->inactive.ticket)
811 		return (0);
812 
813 	/* Purge old inactive rules. */
814 	TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries,
815 	    tmp) {
816 		TAILQ_REMOVE(rs->inactive.rules, rule, entries);
817 		pf_free_eth_rule(rule);
818 	}
819 
820 	rs->inactive.open = 0;
821 
822 	pf_remove_if_empty_keth_ruleset(rs);
823 
824 	return (0);
825 }
826 
827 #define	PF_SET_SKIP_STEPS(i)					\
828 	do {							\
829 		while (head[i] != cur) {			\
830 			head[i]->skip[i].ptr = cur;		\
831 			head[i] = TAILQ_NEXT(head[i], entries);	\
832 		}						\
833 	} while (0)
834 
835 static void
836 pf_eth_calc_skip_steps(struct pf_keth_ruleq *rules)
837 {
838 	struct pf_keth_rule *cur, *prev, *head[PFE_SKIP_COUNT];
839 	int i;
840 
841 	cur = TAILQ_FIRST(rules);
842 	prev = cur;
843 	for (i = 0; i < PFE_SKIP_COUNT; ++i)
844 		head[i] = cur;
845 	while (cur != NULL) {
846 		if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
847 			PF_SET_SKIP_STEPS(PFE_SKIP_IFP);
848 		if (cur->direction != prev->direction)
849 			PF_SET_SKIP_STEPS(PFE_SKIP_DIR);
850 		if (cur->proto != prev->proto)
851 			PF_SET_SKIP_STEPS(PFE_SKIP_PROTO);
852 		if (memcmp(&cur->src, &prev->src, sizeof(cur->src)) != 0)
853 			PF_SET_SKIP_STEPS(PFE_SKIP_SRC_ADDR);
854 		if (memcmp(&cur->dst, &prev->dst, sizeof(cur->dst)) != 0)
855 			PF_SET_SKIP_STEPS(PFE_SKIP_DST_ADDR);
856 		if (cur->ipsrc.neg != prev->ipsrc.neg ||
857 		    pf_addr_wrap_neq(&cur->ipsrc.addr, &prev->ipsrc.addr))
858 			PF_SET_SKIP_STEPS(PFE_SKIP_SRC_IP_ADDR);
859 		if (cur->ipdst.neg != prev->ipdst.neg ||
860 		    pf_addr_wrap_neq(&cur->ipdst.addr, &prev->ipdst.addr))
861 			PF_SET_SKIP_STEPS(PFE_SKIP_DST_IP_ADDR);
862 
863 		prev = cur;
864 		cur = TAILQ_NEXT(cur, entries);
865 	}
866 	for (i = 0; i < PFE_SKIP_COUNT; ++i)
867 		PF_SET_SKIP_STEPS(i);
868 }
869 
870 static int
871 pf_commit_eth(uint32_t ticket, const char *anchor)
872 {
873 	struct pf_keth_ruleq *rules;
874 	struct pf_keth_ruleset *rs;
875 
876 	rs = pf_find_keth_ruleset(anchor);
877 	if (rs == NULL) {
878 		return (EINVAL);
879 	}
880 
881 	if (!rs->inactive.open ||
882 	    ticket != rs->inactive.ticket)
883 		return (EBUSY);
884 
885 	PF_RULES_WASSERT();
886 
887 	pf_eth_calc_skip_steps(rs->inactive.rules);
888 
889 	rules = rs->active.rules;
890 	ck_pr_store_ptr(&rs->active.rules, rs->inactive.rules);
891 	rs->inactive.rules = rules;
892 	rs->inactive.ticket = rs->active.ticket;
893 
894 	/* Clean up inactive rules (i.e. previously active rules), only when
895 	 * we're sure they're no longer used. */
896 	NET_EPOCH_CALL(pf_rollback_eth_cb, &rs->epoch_ctx);
897 
898 	return (0);
899 }
900 
901 #ifdef ALTQ
902 static uint16_t
903 pf_qname2qid(const char *qname)
904 {
905 	return (tagname2tag(&V_pf_qids, qname));
906 }
907 
908 static void
909 pf_qid_unref(uint16_t qid)
910 {
911 	tag_unref(&V_pf_qids, qid);
912 }
913 
914 static int
915 pf_begin_altq(u_int32_t *ticket)
916 {
917 	struct pf_altq	*altq, *tmp;
918 	int		 error = 0;
919 
920 	PF_RULES_WASSERT();
921 
922 	/* Purge the old altq lists */
923 	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
924 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
925 			/* detach and destroy the discipline */
926 			error = altq_remove(altq);
927 		}
928 		free(altq, M_PFALTQ);
929 	}
930 	TAILQ_INIT(V_pf_altq_ifs_inactive);
931 	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
932 		pf_qid_unref(altq->qid);
933 		free(altq, M_PFALTQ);
934 	}
935 	TAILQ_INIT(V_pf_altqs_inactive);
936 	if (error)
937 		return (error);
938 	*ticket = ++V_ticket_altqs_inactive;
939 	V_altqs_inactive_open = 1;
940 	return (0);
941 }
942 
943 static int
944 pf_rollback_altq(u_int32_t ticket)
945 {
946 	struct pf_altq	*altq, *tmp;
947 	int		 error = 0;
948 
949 	PF_RULES_WASSERT();
950 
951 	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
952 		return (0);
953 	/* Purge the old altq lists */
954 	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
955 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
956 			/* detach and destroy the discipline */
957 			error = altq_remove(altq);
958 		}
959 		free(altq, M_PFALTQ);
960 	}
961 	TAILQ_INIT(V_pf_altq_ifs_inactive);
962 	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
963 		pf_qid_unref(altq->qid);
964 		free(altq, M_PFALTQ);
965 	}
966 	TAILQ_INIT(V_pf_altqs_inactive);
967 	V_altqs_inactive_open = 0;
968 	return (error);
969 }
970 
971 static int
972 pf_commit_altq(u_int32_t ticket)
973 {
974 	struct pf_altqqueue	*old_altqs, *old_altq_ifs;
975 	struct pf_altq		*altq, *tmp;
976 	int			 err, error = 0;
977 
978 	PF_RULES_WASSERT();
979 
980 	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
981 		return (EBUSY);
982 
983 	/* swap altqs, keep the old. */
984 	old_altqs = V_pf_altqs_active;
985 	old_altq_ifs = V_pf_altq_ifs_active;
986 	V_pf_altqs_active = V_pf_altqs_inactive;
987 	V_pf_altq_ifs_active = V_pf_altq_ifs_inactive;
988 	V_pf_altqs_inactive = old_altqs;
989 	V_pf_altq_ifs_inactive = old_altq_ifs;
990 	V_ticket_altqs_active = V_ticket_altqs_inactive;
991 
992 	/* Attach new disciplines */
993 	TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
994 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
995 			/* attach the discipline */
996 			error = altq_pfattach(altq);
997 			if (error == 0 && V_pf_altq_running)
998 				error = pf_enable_altq(altq);
999 			if (error != 0)
1000 				return (error);
1001 		}
1002 	}
1003 
1004 	/* Purge the old altq lists */
1005 	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
1006 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
1007 			/* detach and destroy the discipline */
1008 			if (V_pf_altq_running)
1009 				error = pf_disable_altq(altq);
1010 			err = altq_pfdetach(altq);
1011 			if (err != 0 && error == 0)
1012 				error = err;
1013 			err = altq_remove(altq);
1014 			if (err != 0 && error == 0)
1015 				error = err;
1016 		}
1017 		free(altq, M_PFALTQ);
1018 	}
1019 	TAILQ_INIT(V_pf_altq_ifs_inactive);
1020 	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
1021 		pf_qid_unref(altq->qid);
1022 		free(altq, M_PFALTQ);
1023 	}
1024 	TAILQ_INIT(V_pf_altqs_inactive);
1025 
1026 	V_altqs_inactive_open = 0;
1027 	return (error);
1028 }
1029 
1030 static int
1031 pf_enable_altq(struct pf_altq *altq)
1032 {
1033 	struct ifnet		*ifp;
1034 	struct tb_profile	 tb;
1035 	int			 error = 0;
1036 
1037 	if ((ifp = ifunit(altq->ifname)) == NULL)
1038 		return (EINVAL);
1039 
1040 	if (ifp->if_snd.altq_type != ALTQT_NONE)
1041 		error = altq_enable(&ifp->if_snd);
1042 
1043 	/* set tokenbucket regulator */
1044 	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
1045 		tb.rate = altq->ifbandwidth;
1046 		tb.depth = altq->tbrsize;
1047 		error = tbr_set(&ifp->if_snd, &tb);
1048 	}
1049 
1050 	return (error);
1051 }
1052 
1053 static int
1054 pf_disable_altq(struct pf_altq *altq)
1055 {
1056 	struct ifnet		*ifp;
1057 	struct tb_profile	 tb;
1058 	int			 error;
1059 
1060 	if ((ifp = ifunit(altq->ifname)) == NULL)
1061 		return (EINVAL);
1062 
1063 	/*
1064 	 * when the discipline is no longer referenced, it was overridden
1065 	 * by a new one.  if so, just return.
1066 	 */
1067 	if (altq->altq_disc != ifp->if_snd.altq_disc)
1068 		return (0);
1069 
1070 	error = altq_disable(&ifp->if_snd);
1071 
1072 	if (error == 0) {
1073 		/* clear tokenbucket regulator */
1074 		tb.rate = 0;
1075 		error = tbr_set(&ifp->if_snd, &tb);
1076 	}
1077 
1078 	return (error);
1079 }
1080 
1081 static int
1082 pf_altq_ifnet_event_add(struct ifnet *ifp, int remove, u_int32_t ticket,
1083     struct pf_altq *altq)
1084 {
1085 	struct ifnet	*ifp1;
1086 	int		 error = 0;
1087 
1088 	/* Deactivate the interface in question */
1089 	altq->local_flags &= ~PFALTQ_FLAG_IF_REMOVED;
1090 	if ((ifp1 = ifunit(altq->ifname)) == NULL ||
1091 	    (remove && ifp1 == ifp)) {
1092 		altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
1093 	} else {
1094 		error = altq_add(ifp1, altq);
1095 
1096 		if (ticket != V_ticket_altqs_inactive)
1097 			error = EBUSY;
1098 
1099 		if (error)
1100 			free(altq, M_PFALTQ);
1101 	}
1102 
1103 	return (error);
1104 }
1105 
1106 void
1107 pf_altq_ifnet_event(struct ifnet *ifp, int remove)
1108 {
1109 	struct pf_altq	*a1, *a2, *a3;
1110 	u_int32_t	 ticket;
1111 	int		 error = 0;
1112 
1113 	/*
1114 	 * No need to re-evaluate the configuration for events on interfaces
1115 	 * that do not support ALTQ, as it's not possible for such
1116 	 * interfaces to be part of the configuration.
1117 	 */
1118 	if (!ALTQ_IS_READY(&ifp->if_snd))
1119 		return;
1120 
1121 	/* Interrupt userland queue modifications */
1122 	if (V_altqs_inactive_open)
1123 		pf_rollback_altq(V_ticket_altqs_inactive);
1124 
1125 	/* Start new altq ruleset */
1126 	if (pf_begin_altq(&ticket))
1127 		return;
1128 
1129 	/* Copy the current active set */
1130 	TAILQ_FOREACH(a1, V_pf_altq_ifs_active, entries) {
1131 		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
1132 		if (a2 == NULL) {
1133 			error = ENOMEM;
1134 			break;
1135 		}
1136 		bcopy(a1, a2, sizeof(struct pf_altq));
1137 
1138 		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
1139 		if (error)
1140 			break;
1141 
1142 		TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, a2, entries);
1143 	}
1144 	if (error)
1145 		goto out;
1146 	TAILQ_FOREACH(a1, V_pf_altqs_active, entries) {
1147 		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
1148 		if (a2 == NULL) {
1149 			error = ENOMEM;
1150 			break;
1151 		}
1152 		bcopy(a1, a2, sizeof(struct pf_altq));
1153 
1154 		if ((a2->qid = pf_qname2qid(a2->qname)) == 0) {
1155 			error = EBUSY;
1156 			free(a2, M_PFALTQ);
1157 			break;
1158 		}
1159 		a2->altq_disc = NULL;
1160 		TAILQ_FOREACH(a3, V_pf_altq_ifs_inactive, entries) {
1161 			if (strncmp(a3->ifname, a2->ifname,
1162 				IFNAMSIZ) == 0) {
1163 				a2->altq_disc = a3->altq_disc;
1164 				break;
1165 			}
1166 		}
1167 		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
1168 		if (error)
1169 			break;
1170 
1171 		TAILQ_INSERT_TAIL(V_pf_altqs_inactive, a2, entries);
1172 	}
1173 
1174 out:
1175 	if (error != 0)
1176 		pf_rollback_altq(ticket);
1177 	else
1178 		pf_commit_altq(ticket);
1179 }
1180 #endif /* ALTQ */
1181 
1182 static struct pf_krule_global *
1183 pf_rule_tree_alloc(int flags)
1184 {
1185 	struct pf_krule_global *tree;
1186 
1187 	tree = malloc(sizeof(struct pf_krule_global), M_TEMP, flags);
1188 	if (tree == NULL)
1189 		return (NULL);
1190 	RB_INIT(tree);
1191 	return (tree);
1192 }
1193 
1194 static void
1195 pf_rule_tree_free(struct pf_krule_global *tree)
1196 {
1197 
1198 	free(tree, M_TEMP);
1199 }
1200 
1201 static int
1202 pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
1203 {
1204 	struct pf_krule_global *tree;
1205 	struct pf_kruleset	*rs;
1206 	struct pf_krule		*rule;
1207 
1208 	PF_RULES_WASSERT();
1209 
1210 	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1211 		return (EINVAL);
1212 	tree = pf_rule_tree_alloc(M_NOWAIT);
1213 	if (tree == NULL)
1214 		return (ENOMEM);
1215 	rs = pf_find_or_create_kruleset(anchor);
1216 	if (rs == NULL) {
1217 		free(tree, M_TEMP);
1218 		return (EINVAL);
1219 	}
1220 	pf_rule_tree_free(rs->rules[rs_num].inactive.tree);
1221 	rs->rules[rs_num].inactive.tree = tree;
1222 
1223 	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
1224 		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
1225 		rs->rules[rs_num].inactive.rcount--;
1226 	}
1227 	*ticket = ++rs->rules[rs_num].inactive.ticket;
1228 	rs->rules[rs_num].inactive.open = 1;
1229 	return (0);
1230 }
1231 
1232 static int
1233 pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
1234 {
1235 	struct pf_kruleset	*rs;
1236 	struct pf_krule		*rule;
1237 
1238 	PF_RULES_WASSERT();
1239 
1240 	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1241 		return (EINVAL);
1242 	rs = pf_find_kruleset(anchor);
1243 	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
1244 	    rs->rules[rs_num].inactive.ticket != ticket)
1245 		return (0);
1246 	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
1247 		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
1248 		rs->rules[rs_num].inactive.rcount--;
1249 	}
1250 	rs->rules[rs_num].inactive.open = 0;
1251 	return (0);
1252 }
1253 
1254 #define PF_MD5_UPD(st, elm)						\
1255 		MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))
1256 
1257 #define PF_MD5_UPD_STR(st, elm)						\
1258 		MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))
1259 
1260 #define PF_MD5_UPD_HTONL(st, elm, stor) do {				\
1261 		(stor) = htonl((st)->elm);				\
1262 		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
1263 } while (0)
1264 
1265 #define PF_MD5_UPD_HTONS(st, elm, stor) do {				\
1266 		(stor) = htons((st)->elm);				\
1267 		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
1268 } while (0)
1269 
1270 static void
1271 pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
1272 {
1273 	PF_MD5_UPD(pfr, addr.type);
1274 	switch (pfr->addr.type) {
1275 		case PF_ADDR_DYNIFTL:
1276 			PF_MD5_UPD(pfr, addr.v.ifname);
1277 			PF_MD5_UPD(pfr, addr.iflags);
1278 			break;
1279 		case PF_ADDR_TABLE:
1280 			PF_MD5_UPD(pfr, addr.v.tblname);
1281 			break;
1282 		case PF_ADDR_ADDRMASK:
1283 			/* XXX ignore af? */
1284 			PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
1285 			PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
1286 			break;
1287 	}
1288 
1289 	PF_MD5_UPD(pfr, port[0]);
1290 	PF_MD5_UPD(pfr, port[1]);
1291 	PF_MD5_UPD(pfr, neg);
1292 	PF_MD5_UPD(pfr, port_op);
1293 }
1294 
1295 static void
1296 pf_hash_rule_rolling(MD5_CTX *ctx, struct pf_krule *rule)
1297 {
1298 	u_int16_t x;
1299 	u_int32_t y;
1300 
1301 	pf_hash_rule_addr(ctx, &rule->src);
1302 	pf_hash_rule_addr(ctx, &rule->dst);
1303 	for (int i = 0; i < PF_RULE_MAX_LABEL_COUNT; i++)
1304 		PF_MD5_UPD_STR(rule, label[i]);
1305 	PF_MD5_UPD_STR(rule, ifname);
1306 	PF_MD5_UPD_STR(rule, match_tagname);
1307 	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
1308 	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
1309 	PF_MD5_UPD_HTONL(rule, prob, y);
1310 	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
1311 	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
1312 	PF_MD5_UPD(rule, uid.op);
1313 	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
1314 	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
1315 	PF_MD5_UPD(rule, gid.op);
1316 	PF_MD5_UPD_HTONL(rule, rule_flag, y);
1317 	PF_MD5_UPD(rule, action);
1318 	PF_MD5_UPD(rule, direction);
1319 	PF_MD5_UPD(rule, af);
1320 	PF_MD5_UPD(rule, quick);
1321 	PF_MD5_UPD(rule, ifnot);
1322 	PF_MD5_UPD(rule, match_tag_not);
1323 	PF_MD5_UPD(rule, natpass);
1324 	PF_MD5_UPD(rule, keep_state);
1325 	PF_MD5_UPD(rule, proto);
1326 	PF_MD5_UPD(rule, type);
1327 	PF_MD5_UPD(rule, code);
1328 	PF_MD5_UPD(rule, flags);
1329 	PF_MD5_UPD(rule, flagset);
1330 	PF_MD5_UPD(rule, allow_opts);
1331 	PF_MD5_UPD(rule, rt);
1332 	PF_MD5_UPD(rule, tos);
1333 	PF_MD5_UPD(rule, scrub_flags);
1334 	PF_MD5_UPD(rule, min_ttl);
1335 	PF_MD5_UPD(rule, set_tos);
1336 	if (rule->anchor != NULL)
1337 		PF_MD5_UPD_STR(rule, anchor->path);
1338 }
1339 
1340 static void
1341 pf_hash_rule(struct pf_krule *rule)
1342 {
1343 	MD5_CTX		ctx;
1344 
1345 	MD5Init(&ctx);
1346 	pf_hash_rule_rolling(&ctx, rule);
1347 	MD5Final(rule->md5sum, &ctx);
1348 }
1349 
1350 static int
1351 pf_krule_compare(struct pf_krule *a, struct pf_krule *b)
1352 {
1353 
1354 	return (memcmp(a->md5sum, b->md5sum, PF_MD5_DIGEST_LENGTH));
1355 }
1356 
1357 static int
1358 pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
1359 {
1360 	struct pf_kruleset	*rs;
1361 	struct pf_krule		*rule, **old_array, *old_rule;
1362 	struct pf_krulequeue	*old_rules;
1363 	struct pf_krule_global  *old_tree;
1364 	int			 error;
1365 	u_int32_t		 old_rcount;
1366 
1367 	PF_RULES_WASSERT();
1368 
1369 	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1370 		return (EINVAL);
1371 	rs = pf_find_kruleset(anchor);
1372 	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
1373 	    ticket != rs->rules[rs_num].inactive.ticket)
1374 		return (EBUSY);
1375 
1376 	/* Calculate checksum for the main ruleset */
1377 	if (rs == &pf_main_ruleset) {
1378 		error = pf_setup_pfsync_matching(rs);
1379 		if (error != 0)
1380 			return (error);
1381 	}
1382 
1383 	/* Swap rules, keep the old. */
1384 	old_rules = rs->rules[rs_num].active.ptr;
1385 	old_rcount = rs->rules[rs_num].active.rcount;
1386 	old_array = rs->rules[rs_num].active.ptr_array;
1387 	old_tree = rs->rules[rs_num].active.tree;
1388 
1389 	rs->rules[rs_num].active.ptr =
1390 	    rs->rules[rs_num].inactive.ptr;
1391 	rs->rules[rs_num].active.ptr_array =
1392 	    rs->rules[rs_num].inactive.ptr_array;
1393 	rs->rules[rs_num].active.tree =
1394 	    rs->rules[rs_num].inactive.tree;
1395 	rs->rules[rs_num].active.rcount =
1396 	    rs->rules[rs_num].inactive.rcount;
1397 
1398 	/* Attempt to preserve counter information. */
1399 	if (V_pf_status.keep_counters && old_tree != NULL) {
1400 		TAILQ_FOREACH(rule, rs->rules[rs_num].active.ptr,
1401 		    entries) {
1402 			old_rule = RB_FIND(pf_krule_global, old_tree, rule);
1403 			if (old_rule == NULL) {
1404 				continue;
1405 			}
1406 			pf_counter_u64_critical_enter();
1407 			pf_counter_u64_add_protected(&rule->evaluations,
1408 			    pf_counter_u64_fetch(&old_rule->evaluations));
1409 			pf_counter_u64_add_protected(&rule->packets[0],
1410 			    pf_counter_u64_fetch(&old_rule->packets[0]));
1411 			pf_counter_u64_add_protected(&rule->packets[1],
1412 			    pf_counter_u64_fetch(&old_rule->packets[1]));
1413 			pf_counter_u64_add_protected(&rule->bytes[0],
1414 			    pf_counter_u64_fetch(&old_rule->bytes[0]));
1415 			pf_counter_u64_add_protected(&rule->bytes[1],
1416 			    pf_counter_u64_fetch(&old_rule->bytes[1]));
1417 			pf_counter_u64_critical_exit();
1418 		}
1419 	}
1420 
1421 	rs->rules[rs_num].inactive.ptr = old_rules;
1422 	rs->rules[rs_num].inactive.ptr_array = old_array;
1423 	rs->rules[rs_num].inactive.tree = NULL; /* important for pf_ioctl_addrule */
1424 	rs->rules[rs_num].inactive.rcount = old_rcount;
1425 
1426 	rs->rules[rs_num].active.ticket =
1427 	    rs->rules[rs_num].inactive.ticket;
1428 	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);
1429 
1430 	/* Purge the old rule list. */
1431 	PF_UNLNKDRULES_LOCK();
1432 	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
1433 		pf_unlink_rule_locked(old_rules, rule);
1434 	PF_UNLNKDRULES_UNLOCK();
1435 	if (rs->rules[rs_num].inactive.ptr_array)
1436 		free(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
1437 	rs->rules[rs_num].inactive.ptr_array = NULL;
1438 	rs->rules[rs_num].inactive.rcount = 0;
1439 	rs->rules[rs_num].inactive.open = 0;
1440 	pf_remove_if_empty_kruleset(rs);
1441 	free(old_tree, M_TEMP);
1442 
1443 	return (0);
1444 }
1445 
1446 static int
1447 pf_setup_pfsync_matching(struct pf_kruleset *rs)
1448 {
1449 	MD5_CTX			 ctx;
1450 	struct pf_krule		*rule;
1451 	int			 rs_cnt;
1452 	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];
1453 
1454 	MD5Init(&ctx);
1455 	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
1456 		/* XXX PF_RULESET_SCRUB as well? */
1457 		if (rs_cnt == PF_RULESET_SCRUB)
1458 			continue;
1459 
1460 		if (rs->rules[rs_cnt].inactive.ptr_array)
1461 			free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
1462 		rs->rules[rs_cnt].inactive.ptr_array = NULL;
1463 
1464 		if (rs->rules[rs_cnt].inactive.rcount) {
1465 			rs->rules[rs_cnt].inactive.ptr_array =
1466 			    mallocarray(rs->rules[rs_cnt].inactive.rcount,
1467 			    sizeof(struct pf_rule **),
1468 			    M_TEMP, M_NOWAIT);
1469 
1470 			if (!rs->rules[rs_cnt].inactive.ptr_array)
1471 				return (ENOMEM);
1472 		}
1473 
1474 		TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
1475 		    entries) {
1476 			pf_hash_rule_rolling(&ctx, rule);
1477 			(rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
1478 		}
1479 	}
1480 
1481 	MD5Final(digest, &ctx);
1482 	memcpy(V_pf_status.pf_chksum, digest, sizeof(V_pf_status.pf_chksum));
1483 	return (0);
1484 }
1485 
1486 static int
1487 pf_eth_addr_setup(struct pf_keth_ruleset *ruleset, struct pf_addr_wrap *addr)
1488 {
1489 	int error = 0;
1490 
1491 	switch (addr->type) {
1492 	case PF_ADDR_TABLE:
1493 		addr->p.tbl = pfr_eth_attach_table(ruleset, addr->v.tblname);
1494 		if (addr->p.tbl == NULL)
1495 			error = ENOMEM;
1496 		break;
1497 	default:
1498 		error = EINVAL;
1499 	}
1500 
1501 	return (error);
1502 }
1503 
1504 static int
1505 pf_addr_setup(struct pf_kruleset *ruleset, struct pf_addr_wrap *addr,
1506     sa_family_t af)
1507 {
1508 	int error = 0;
1509 
1510 	switch (addr->type) {
1511 	case PF_ADDR_TABLE:
1512 		addr->p.tbl = pfr_attach_table(ruleset, addr->v.tblname);
1513 		if (addr->p.tbl == NULL)
1514 			error = ENOMEM;
1515 		break;
1516 	case PF_ADDR_DYNIFTL:
1517 		error = pfi_dynaddr_setup(addr, af);
1518 		break;
1519 	}
1520 
1521 	return (error);
1522 }
1523 
1524 static void
1525 pf_addr_copyout(struct pf_addr_wrap *addr)
1526 {
1527 
1528 	switch (addr->type) {
1529 	case PF_ADDR_DYNIFTL:
1530 		pfi_dynaddr_copyout(addr);
1531 		break;
1532 	case PF_ADDR_TABLE:
1533 		pf_tbladdr_copyout(addr);
1534 		break;
1535 	}
1536 }
1537 
1538 static void
1539 pf_src_node_copy(const struct pf_ksrc_node *in, struct pf_src_node *out)
1540 {
1541 	int	secs = time_uptime, diff;
1542 
1543 	bzero(out, sizeof(struct pf_src_node));
1544 
1545 	bcopy(&in->addr, &out->addr, sizeof(struct pf_addr));
1546 	bcopy(&in->raddr, &out->raddr, sizeof(struct pf_addr));
1547 
1548 	if (in->rule.ptr != NULL)
1549 		out->rule.nr = in->rule.ptr->nr;
1550 
1551 	for (int i = 0; i < 2; i++) {
1552 		out->bytes[i] = counter_u64_fetch(in->bytes[i]);
1553 		out->packets[i] = counter_u64_fetch(in->packets[i]);
1554 	}
1555 
1556 	out->states = in->states;
1557 	out->conn = in->conn;
1558 	out->af = in->af;
1559 	out->ruletype = in->ruletype;
1560 
1561 	out->creation = secs - in->creation;
1562 	if (out->expire > secs)
1563 		out->expire -= secs;
1564 	else
1565 		out->expire = 0;
1566 
1567 	/* Adjust the connection rate estimate. */
1568 	diff = secs - in->conn_rate.last;
1569 	if (diff >= in->conn_rate.seconds)
1570 		out->conn_rate.count = 0;
1571 	else
1572 		out->conn_rate.count -=
1573 		    in->conn_rate.count * diff /
1574 		    in->conn_rate.seconds;
1575 }
1576 
1577 #ifdef ALTQ
1578 /*
1579  * Handle export of struct pf_kaltq to user binaries that may be using any
1580  * version of struct pf_altq.
1581  */
1582 static int
1583 pf_export_kaltq(struct pf_altq *q, struct pfioc_altq_v1 *pa, size_t ioc_size)
1584 {
1585 	u_int32_t version;
1586 
1587 	if (ioc_size == sizeof(struct pfioc_altq_v0))
1588 		version = 0;
1589 	else
1590 		version = pa->version;
1591 
1592 	if (version > PFIOC_ALTQ_VERSION)
1593 		return (EINVAL);
1594 
1595 #define ASSIGN(x) exported_q->x = q->x
1596 #define COPY(x) \
1597 	bcopy(&q->x, &exported_q->x, min(sizeof(q->x), sizeof(exported_q->x)))
1598 #define SATU16(x) (u_int32_t)uqmin((x), USHRT_MAX)
1599 #define SATU32(x) (u_int32_t)uqmin((x), UINT_MAX)
1600 
1601 	switch (version) {
1602 	case 0: {
1603 		struct pf_altq_v0 *exported_q =
1604 		    &((struct pfioc_altq_v0 *)pa)->altq;
1605 
1606 		COPY(ifname);
1607 
1608 		ASSIGN(scheduler);
1609 		ASSIGN(tbrsize);
1610 		exported_q->tbrsize = SATU16(q->tbrsize);
1611 		exported_q->ifbandwidth = SATU32(q->ifbandwidth);
1612 
1613 		COPY(qname);
1614 		COPY(parent);
1615 		ASSIGN(parent_qid);
1616 		exported_q->bandwidth = SATU32(q->bandwidth);
1617 		ASSIGN(priority);
1618 		ASSIGN(local_flags);
1619 
1620 		ASSIGN(qlimit);
1621 		ASSIGN(flags);
1622 
1623 		if (q->scheduler == ALTQT_HFSC) {
1624 #define ASSIGN_OPT(x) exported_q->pq_u.hfsc_opts.x = q->pq_u.hfsc_opts.x
1625 #define ASSIGN_OPT_SATU32(x) exported_q->pq_u.hfsc_opts.x = \
1626 			    SATU32(q->pq_u.hfsc_opts.x)
1627 
1628 			ASSIGN_OPT_SATU32(rtsc_m1);
1629 			ASSIGN_OPT(rtsc_d);
1630 			ASSIGN_OPT_SATU32(rtsc_m2);
1631 
1632 			ASSIGN_OPT_SATU32(lssc_m1);
1633 			ASSIGN_OPT(lssc_d);
1634 			ASSIGN_OPT_SATU32(lssc_m2);
1635 
1636 			ASSIGN_OPT_SATU32(ulsc_m1);
1637 			ASSIGN_OPT(ulsc_d);
1638 			ASSIGN_OPT_SATU32(ulsc_m2);
1639 
1640 			ASSIGN_OPT(flags);
1641 
1642 #undef ASSIGN_OPT
1643 #undef ASSIGN_OPT_SATU32
1644 		} else
1645 			COPY(pq_u);
1646 
1647 		ASSIGN(qid);
1648 		break;
1649 	}
1650 	case 1:	{
1651 		struct pf_altq_v1 *exported_q =
1652 		    &((struct pfioc_altq_v1 *)pa)->altq;
1653 
1654 		COPY(ifname);
1655 
1656 		ASSIGN(scheduler);
1657 		ASSIGN(tbrsize);
1658 		ASSIGN(ifbandwidth);
1659 
1660 		COPY(qname);
1661 		COPY(parent);
1662 		ASSIGN(parent_qid);
1663 		ASSIGN(bandwidth);
1664 		ASSIGN(priority);
1665 		ASSIGN(local_flags);
1666 
1667 		ASSIGN(qlimit);
1668 		ASSIGN(flags);
1669 		COPY(pq_u);
1670 
1671 		ASSIGN(qid);
1672 		break;
1673 	}
1674 	default:
1675 		panic("%s: unhandled struct pfioc_altq version", __func__);
1676 		break;
1677 	}
1678 
1679 #undef ASSIGN
1680 #undef COPY
1681 #undef SATU16
1682 #undef SATU32
1683 
1684 	return (0);
1685 }
1686 
1687 /*
1688  * Handle import to struct pf_kaltq of struct pf_altq from user binaries
1689  * that may be using any version of it.
1690  */
1691 static int
1692 pf_import_kaltq(struct pfioc_altq_v1 *pa, struct pf_altq *q, size_t ioc_size)
1693 {
1694 	u_int32_t version;
1695 
1696 	if (ioc_size == sizeof(struct pfioc_altq_v0))
1697 		version = 0;
1698 	else
1699 		version = pa->version;
1700 
1701 	if (version > PFIOC_ALTQ_VERSION)
1702 		return (EINVAL);
1703 
1704 #define ASSIGN(x) q->x = imported_q->x
1705 #define COPY(x) \
1706 	bcopy(&imported_q->x, &q->x, min(sizeof(imported_q->x), sizeof(q->x)))
1707 
1708 	switch (version) {
1709 	case 0: {
1710 		struct pf_altq_v0 *imported_q =
1711 		    &((struct pfioc_altq_v0 *)pa)->altq;
1712 
1713 		COPY(ifname);
1714 
1715 		ASSIGN(scheduler);
1716 		ASSIGN(tbrsize); /* 16-bit -> 32-bit */
1717 		ASSIGN(ifbandwidth); /* 32-bit -> 64-bit */
1718 
1719 		COPY(qname);
1720 		COPY(parent);
1721 		ASSIGN(parent_qid);
1722 		ASSIGN(bandwidth); /* 32-bit -> 64-bit */
1723 		ASSIGN(priority);
1724 		ASSIGN(local_flags);
1725 
1726 		ASSIGN(qlimit);
1727 		ASSIGN(flags);
1728 
1729 		if (imported_q->scheduler == ALTQT_HFSC) {
1730 #define ASSIGN_OPT(x) q->pq_u.hfsc_opts.x = imported_q->pq_u.hfsc_opts.x
1731 
1732 			/*
1733 			 * The m1 and m2 parameters are being copied from
1734 			 * 32-bit to 64-bit.
1735 			 */
1736 			ASSIGN_OPT(rtsc_m1);
1737 			ASSIGN_OPT(rtsc_d);
1738 			ASSIGN_OPT(rtsc_m2);
1739 
1740 			ASSIGN_OPT(lssc_m1);
1741 			ASSIGN_OPT(lssc_d);
1742 			ASSIGN_OPT(lssc_m2);
1743 
1744 			ASSIGN_OPT(ulsc_m1);
1745 			ASSIGN_OPT(ulsc_d);
1746 			ASSIGN_OPT(ulsc_m2);
1747 
1748 			ASSIGN_OPT(flags);
1749 
1750 #undef ASSIGN_OPT
1751 		} else
1752 			COPY(pq_u);
1753 
1754 		ASSIGN(qid);
1755 		break;
1756 	}
1757 	case 1: {
1758 		struct pf_altq_v1 *imported_q =
1759 		    &((struct pfioc_altq_v1 *)pa)->altq;
1760 
1761 		COPY(ifname);
1762 
1763 		ASSIGN(scheduler);
1764 		ASSIGN(tbrsize);
1765 		ASSIGN(ifbandwidth);
1766 
1767 		COPY(qname);
1768 		COPY(parent);
1769 		ASSIGN(parent_qid);
1770 		ASSIGN(bandwidth);
1771 		ASSIGN(priority);
1772 		ASSIGN(local_flags);
1773 
1774 		ASSIGN(qlimit);
1775 		ASSIGN(flags);
1776 		COPY(pq_u);
1777 
1778 		ASSIGN(qid);
1779 		break;
1780 	}
1781 	default:
1782 		panic("%s: unhandled struct pfioc_altq version", __func__);
1783 		break;
1784 	}
1785 
1786 #undef ASSIGN
1787 #undef COPY
1788 
1789 	return (0);
1790 }
1791 
1792 static struct pf_altq *
1793 pf_altq_get_nth_active(u_int32_t n)
1794 {
1795 	struct pf_altq		*altq;
1796 	u_int32_t		 nr;
1797 
1798 	nr = 0;
1799 	TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
1800 		if (nr == n)
1801 			return (altq);
1802 		nr++;
1803 	}
1804 
1805 	TAILQ_FOREACH(altq, V_pf_altqs_active, entries) {
1806 		if (nr == n)
1807 			return (altq);
1808 		nr++;
1809 	}
1810 
1811 	return (NULL);
1812 }
1813 #endif /* ALTQ */
1814 
1815 struct pf_krule *
1816 pf_krule_alloc(void)
1817 {
1818 	struct pf_krule *rule;
1819 
1820 	rule = malloc(sizeof(struct pf_krule), M_PFRULE, M_WAITOK | M_ZERO);
1821 	mtx_init(&rule->rpool.mtx, "pf_krule_pool", NULL, MTX_DEF);
1822 	rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
1823 	    M_WAITOK | M_ZERO);
1824 	return (rule);
1825 }
1826 
1827 void
1828 pf_krule_free(struct pf_krule *rule)
1829 {
1830 #ifdef PF_WANT_32_TO_64_COUNTER
1831 	bool wowned;
1832 #endif
1833 
1834 	if (rule == NULL)
1835 		return;
1836 
1837 #ifdef PF_WANT_32_TO_64_COUNTER
1838 	if (rule->allrulelinked) {
1839 		wowned = PF_RULES_WOWNED();
1840 		if (!wowned)
1841 			PF_RULES_WLOCK();
1842 		LIST_REMOVE(rule, allrulelist);
1843 		V_pf_allrulecount--;
1844 		if (!wowned)
1845 			PF_RULES_WUNLOCK();
1846 	}
1847 #endif
1848 
1849 	pf_counter_u64_deinit(&rule->evaluations);
1850 	for (int i = 0; i < 2; i++) {
1851 		pf_counter_u64_deinit(&rule->packets[i]);
1852 		pf_counter_u64_deinit(&rule->bytes[i]);
1853 	}
1854 	counter_u64_free(rule->states_cur);
1855 	counter_u64_free(rule->states_tot);
1856 	counter_u64_free(rule->src_nodes);
1857 	uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp);
1858 
1859 	mtx_destroy(&rule->rpool.mtx);
1860 	free(rule, M_PFRULE);
1861 }
1862 
1863 static void
1864 pf_kpooladdr_to_pooladdr(const struct pf_kpooladdr *kpool,
1865     struct pf_pooladdr *pool)
1866 {
1867 
1868 	bzero(pool, sizeof(*pool));
1869 	bcopy(&kpool->addr, &pool->addr, sizeof(pool->addr));
1870 	strlcpy(pool->ifname, kpool->ifname, sizeof(pool->ifname));
1871 }
1872 
1873 static int
1874 pf_pooladdr_to_kpooladdr(const struct pf_pooladdr *pool,
1875     struct pf_kpooladdr *kpool)
1876 {
1877 	int ret;
1878 
1879 	bzero(kpool, sizeof(*kpool));
1880 	bcopy(&pool->addr, &kpool->addr, sizeof(kpool->addr));
1881 	ret = pf_user_strcpy(kpool->ifname, pool->ifname,
1882 	    sizeof(kpool->ifname));
1883 	return (ret);
1884 }
1885 
1886 static void
1887 pf_pool_to_kpool(const struct pf_pool *pool, struct pf_kpool *kpool)
1888 {
1889 	_Static_assert(sizeof(pool->key) == sizeof(kpool->key), "");
1890 	_Static_assert(sizeof(pool->counter) == sizeof(kpool->counter), "");
1891 
1892 	bcopy(&pool->key, &kpool->key, sizeof(kpool->key));
1893 	bcopy(&pool->counter, &kpool->counter, sizeof(kpool->counter));
1894 
1895 	kpool->tblidx = pool->tblidx;
1896 	kpool->proxy_port[0] = pool->proxy_port[0];
1897 	kpool->proxy_port[1] = pool->proxy_port[1];
1898 	kpool->opts = pool->opts;
1899 }
1900 
1901 static int
1902 pf_rule_to_krule(const struct pf_rule *rule, struct pf_krule *krule)
1903 {
1904 	int ret;
1905 
1906 #ifndef INET
1907 	if (rule->af == AF_INET) {
1908 		return (EAFNOSUPPORT);
1909 	}
1910 #endif /* INET */
1911 #ifndef INET6
1912 	if (rule->af == AF_INET6) {
1913 		return (EAFNOSUPPORT);
1914 	}
1915 #endif /* INET6 */
1916 
1917 	ret = pf_check_rule_addr(&rule->src);
1918 	if (ret != 0)
1919 		return (ret);
1920 	ret = pf_check_rule_addr(&rule->dst);
1921 	if (ret != 0)
1922 		return (ret);
1923 
1924 	bcopy(&rule->src, &krule->src, sizeof(rule->src));
1925 	bcopy(&rule->dst, &krule->dst, sizeof(rule->dst));
1926 
1927 	ret = pf_user_strcpy(krule->label[0], rule->label, sizeof(rule->label));
1928 	if (ret != 0)
1929 		return (ret);
1930 	ret = pf_user_strcpy(krule->ifname, rule->ifname, sizeof(rule->ifname));
1931 	if (ret != 0)
1932 		return (ret);
1933 	ret = pf_user_strcpy(krule->qname, rule->qname, sizeof(rule->qname));
1934 	if (ret != 0)
1935 		return (ret);
1936 	ret = pf_user_strcpy(krule->pqname, rule->pqname, sizeof(rule->pqname));
1937 	if (ret != 0)
1938 		return (ret);
1939 	ret = pf_user_strcpy(krule->tagname, rule->tagname,
1940 	    sizeof(rule->tagname));
1941 	if (ret != 0)
1942 		return (ret);
1943 	ret = pf_user_strcpy(krule->match_tagname, rule->match_tagname,
1944 	    sizeof(rule->match_tagname));
1945 	if (ret != 0)
1946 		return (ret);
1947 	ret = pf_user_strcpy(krule->overload_tblname, rule->overload_tblname,
1948 	    sizeof(rule->overload_tblname));
1949 	if (ret != 0)
1950 		return (ret);
1951 
1952 	pf_pool_to_kpool(&rule->rpool, &krule->rpool);
1953 
1954 	/* Don't allow userspace to set evaluations, packets or bytes. */
1955 	/* kif, anchor, overload_tbl are not copied over. */
1956 
1957 	krule->os_fingerprint = rule->os_fingerprint;
1958 
1959 	krule->rtableid = rule->rtableid;
1960 	bcopy(rule->timeout, krule->timeout, sizeof(krule->timeout));
1961 	krule->max_states = rule->max_states;
1962 	krule->max_src_nodes = rule->max_src_nodes;
1963 	krule->max_src_states = rule->max_src_states;
1964 	krule->max_src_conn = rule->max_src_conn;
1965 	krule->max_src_conn_rate.limit = rule->max_src_conn_rate.limit;
1966 	krule->max_src_conn_rate.seconds = rule->max_src_conn_rate.seconds;
1967 	krule->qid = rule->qid;
1968 	krule->pqid = rule->pqid;
1969 	krule->nr = rule->nr;
1970 	krule->prob = rule->prob;
1971 	krule->cuid = rule->cuid;
1972 	krule->cpid = rule->cpid;
1973 
1974 	krule->return_icmp = rule->return_icmp;
1975 	krule->return_icmp6 = rule->return_icmp6;
1976 	krule->max_mss = rule->max_mss;
1977 	krule->tag = rule->tag;
1978 	krule->match_tag = rule->match_tag;
1979 	krule->scrub_flags = rule->scrub_flags;
1980 
1981 	bcopy(&rule->uid, &krule->uid, sizeof(krule->uid));
1982 	bcopy(&rule->gid, &krule->gid, sizeof(krule->gid));
1983 
1984 	krule->rule_flag = rule->rule_flag;
1985 	krule->action = rule->action;
1986 	krule->direction = rule->direction;
1987 	krule->log = rule->log;
1988 	krule->logif = rule->logif;
1989 	krule->quick = rule->quick;
1990 	krule->ifnot = rule->ifnot;
1991 	krule->match_tag_not = rule->match_tag_not;
1992 	krule->natpass = rule->natpass;
1993 
1994 	krule->keep_state = rule->keep_state;
1995 	krule->af = rule->af;
1996 	krule->proto = rule->proto;
1997 	krule->type = rule->type;
1998 	krule->code = rule->code;
1999 	krule->flags = rule->flags;
2000 	krule->flagset = rule->flagset;
2001 	krule->min_ttl = rule->min_ttl;
2002 	krule->allow_opts = rule->allow_opts;
2003 	krule->rt = rule->rt;
2004 	krule->return_ttl = rule->return_ttl;
2005 	krule->tos = rule->tos;
2006 	krule->set_tos = rule->set_tos;
2007 
2008 	krule->flush = rule->flush;
2009 	krule->prio = rule->prio;
2010 	krule->set_prio[0] = rule->set_prio[0];
2011 	krule->set_prio[1] = rule->set_prio[1];
2012 
2013 	bcopy(&rule->divert, &krule->divert, sizeof(krule->divert));
2014 
2015 	return (0);
2016 }
2017 
2018 static int
2019 pf_ioctl_addrule(struct pf_krule *rule, uint32_t ticket,
2020     uint32_t pool_ticket, const char *anchor, const char *anchor_call,
2021     struct thread *td)
2022 {
2023 	struct pf_kruleset	*ruleset;
2024 	struct pf_krule		*tail;
2025 	struct pf_kpooladdr	*pa;
2026 	struct pfi_kkif		*kif = NULL;
2027 	int			 rs_num;
2028 	int			 error = 0;
2029 
2030 	if ((rule->return_icmp >> 8) > ICMP_MAXTYPE) {
2031 		error = EINVAL;
2032 		goto errout_unlocked;
2033 	}
2034 
2035 #define	ERROUT(x)	ERROUT_FUNCTION(errout, x)
2036 
2037 	if (rule->ifname[0])
2038 		kif = pf_kkif_create(M_WAITOK);
2039 	pf_counter_u64_init(&rule->evaluations, M_WAITOK);
2040 	for (int i = 0; i < 2; i++) {
2041 		pf_counter_u64_init(&rule->packets[i], M_WAITOK);
2042 		pf_counter_u64_init(&rule->bytes[i], M_WAITOK);
2043 	}
2044 	rule->states_cur = counter_u64_alloc(M_WAITOK);
2045 	rule->states_tot = counter_u64_alloc(M_WAITOK);
2046 	rule->src_nodes = counter_u64_alloc(M_WAITOK);
2047 	rule->cuid = td->td_ucred->cr_ruid;
2048 	rule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
2049 	TAILQ_INIT(&rule->rpool.list);
2050 
2051 	PF_CONFIG_LOCK();
2052 	PF_RULES_WLOCK();
2053 #ifdef PF_WANT_32_TO_64_COUNTER
2054 	LIST_INSERT_HEAD(&V_pf_allrulelist, rule, allrulelist);
2055 	MPASS(!rule->allrulelinked);
2056 	rule->allrulelinked = true;
2057 	V_pf_allrulecount++;
2058 #endif
2059 	ruleset = pf_find_kruleset(anchor);
2060 	if (ruleset == NULL)
2061 		ERROUT(EINVAL);
2062 	rs_num = pf_get_ruleset_number(rule->action);
2063 	if (rs_num >= PF_RULESET_MAX)
2064 		ERROUT(EINVAL);
2065 	if (ticket != ruleset->rules[rs_num].inactive.ticket) {
2066 		DPFPRINTF(PF_DEBUG_MISC,
2067 		    ("ticket: %d != [%d]%d\n", ticket, rs_num,
2068 		    ruleset->rules[rs_num].inactive.ticket));
2069 		ERROUT(EBUSY);
2070 	}
2071 	if (pool_ticket != V_ticket_pabuf) {
2072 		DPFPRINTF(PF_DEBUG_MISC,
2073 		    ("pool_ticket: %d != %d\n", pool_ticket,
2074 		    V_ticket_pabuf));
2075 		ERROUT(EBUSY);
2076 	}
2077 	/*
2078 	 * XXXMJG hack: there is no mechanism to ensure they started the
2079 	 * transaction. Ticket checked above may happen to match by accident,
2080 	 * even if nobody called DIOCXBEGIN, let alone this process.
2081 	 * Partially work around it by checking if the RB tree got allocated,
2082 	 * see pf_begin_rules.
2083 	 */
2084 	if (ruleset->rules[rs_num].inactive.tree == NULL) {
2085 		ERROUT(EINVAL);
2086 	}
2087 
2088 	tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
2089 	    pf_krulequeue);
2090 	if (tail)
2091 		rule->nr = tail->nr + 1;
2092 	else
2093 		rule->nr = 0;
2094 	if (rule->ifname[0]) {
2095 		rule->kif = pfi_kkif_attach(kif, rule->ifname);
2096 		kif = NULL;
2097 		pfi_kkif_ref(rule->kif);
2098 	} else
2099 		rule->kif = NULL;
2100 
2101 	if (rule->rtableid > 0 && rule->rtableid >= rt_numfibs)
2102 		error = EBUSY;
2103 
2104 #ifdef ALTQ
2105 	/* set queue IDs */
2106 	if (rule->qname[0] != 0) {
2107 		if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
2108 			error = EBUSY;
2109 		else if (rule->pqname[0] != 0) {
2110 			if ((rule->pqid =
2111 			    pf_qname2qid(rule->pqname)) == 0)
2112 				error = EBUSY;
2113 		} else
2114 			rule->pqid = rule->qid;
2115 	}
2116 #endif
2117 	if (rule->tagname[0])
2118 		if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
2119 			error = EBUSY;
2120 	if (rule->match_tagname[0])
2121 		if ((rule->match_tag =
2122 		    pf_tagname2tag(rule->match_tagname)) == 0)
2123 			error = EBUSY;
2124 	if (rule->rt && !rule->direction)
2125 		error = EINVAL;
2126 	if (!rule->log)
2127 		rule->logif = 0;
2128 	if (rule->logif >= PFLOGIFS_MAX)
2129 		error = EINVAL;
2130 	if (pf_addr_setup(ruleset, &rule->src.addr, rule->af))
2131 		error = ENOMEM;
2132 	if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af))
2133 		error = ENOMEM;
2134 	if (pf_kanchor_setup(rule, ruleset, anchor_call))
2135 		error = EINVAL;
2136 	if (rule->scrub_flags & PFSTATE_SETPRIO &&
2137 	    (rule->set_prio[0] > PF_PRIO_MAX ||
2138 	    rule->set_prio[1] > PF_PRIO_MAX))
2139 		error = EINVAL;
2140 	TAILQ_FOREACH(pa, &V_pf_pabuf, entries)
2141 		if (pa->addr.type == PF_ADDR_TABLE) {
2142 			pa->addr.p.tbl = pfr_attach_table(ruleset,
2143 			    pa->addr.v.tblname);
2144 			if (pa->addr.p.tbl == NULL)
2145 				error = ENOMEM;
2146 		}
2147 
2148 	rule->overload_tbl = NULL;
2149 	if (rule->overload_tblname[0]) {
2150 		if ((rule->overload_tbl = pfr_attach_table(ruleset,
2151 		    rule->overload_tblname)) == NULL)
2152 			error = EINVAL;
2153 		else
2154 			rule->overload_tbl->pfrkt_flags |=
2155 			    PFR_TFLAG_ACTIVE;
2156 	}
2157 
2158 	pf_mv_kpool(&V_pf_pabuf, &rule->rpool.list);
2159 	if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
2160 	    (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
2161 	    (rule->rt > PF_NOPFROUTE)) &&
2162 	    (TAILQ_FIRST(&rule->rpool.list) == NULL))
2163 		error = EINVAL;
2164 
2165 	if (error) {
2166 		pf_free_rule(rule);
2167 		rule = NULL;
2168 		ERROUT(error);
2169 	}
2170 
2171 	rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
2172 	TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
2173 	    rule, entries);
2174 	ruleset->rules[rs_num].inactive.rcount++;
2175 
2176 	PF_RULES_WUNLOCK();
2177 	pf_hash_rule(rule);
2178 	if (RB_INSERT(pf_krule_global, ruleset->rules[rs_num].inactive.tree, rule) != NULL) {
2179 		PF_RULES_WLOCK();
2180 		TAILQ_REMOVE(ruleset->rules[rs_num].inactive.ptr, rule, entries);
2181 		ruleset->rules[rs_num].inactive.rcount--;
2182 		pf_free_rule(rule);
2183 		rule = NULL;
2184 		ERROUT(EEXIST);
2185 	}
2186 	PF_CONFIG_UNLOCK();
2187 
2188 	return (0);
2189 
2190 #undef ERROUT
2191 errout:
2192 	PF_RULES_WUNLOCK();
2193 	PF_CONFIG_UNLOCK();
2194 errout_unlocked:
2195 	pf_kkif_free(kif);
2196 	pf_krule_free(rule);
2197 	return (error);
2198 }
2199 
2200 static bool
2201 pf_label_match(const struct pf_krule *rule, const char *label)
2202 {
2203 	int i = 0;
2204 
2205 	while (*rule->label[i]) {
2206 		if (strcmp(rule->label[i], label) == 0)
2207 			return (true);
2208 		i++;
2209 	}
2210 
2211 	return (false);
2212 }
2213 
2214 static unsigned int
2215 pf_kill_matching_state(struct pf_state_key_cmp *key, int dir)
2216 {
2217 	struct pf_kstate *s;
2218 	int more = 0;
2219 
2220 	s = pf_find_state_all(key, dir, &more);
2221 	if (s == NULL)
2222 		return (0);
2223 
2224 	if (more) {
2225 		PF_STATE_UNLOCK(s);
2226 		return (0);
2227 	}
2228 
2229 	pf_unlink_state(s);
2230 	return (1);
2231 }
2232 
2233 static int
2234 pf_killstates_row(struct pf_kstate_kill *psk, struct pf_idhash *ih)
2235 {
2236 	struct pf_kstate	*s;
2237 	struct pf_state_key	*sk;
2238 	struct pf_addr		*srcaddr, *dstaddr;
2239 	struct pf_state_key_cmp	 match_key;
2240 	int			 idx, killed = 0;
2241 	unsigned int		 dir;
2242 	u_int16_t		 srcport, dstport;
2243 	struct pfi_kkif		*kif;
2244 
2245 relock_DIOCKILLSTATES:
2246 	PF_HASHROW_LOCK(ih);
2247 	LIST_FOREACH(s, &ih->states, entry) {
2248 		/* For floating states look at the original kif. */
2249 		kif = s->kif == V_pfi_all ? s->orig_kif : s->kif;
2250 
2251 		sk = s->key[PF_SK_WIRE];
2252 		if (s->direction == PF_OUT) {
2253 			srcaddr = &sk->addr[1];
2254 			dstaddr = &sk->addr[0];
2255 			srcport = sk->port[1];
2256 			dstport = sk->port[0];
2257 		} else {
2258 			srcaddr = &sk->addr[0];
2259 			dstaddr = &sk->addr[1];
2260 			srcport = sk->port[0];
2261 			dstport = sk->port[1];
2262 		}
2263 
2264 		if (psk->psk_af && sk->af != psk->psk_af)
2265 			continue;
2266 
2267 		if (psk->psk_proto && psk->psk_proto != sk->proto)
2268 			continue;
2269 
2270 		if (! PF_MATCHA(psk->psk_src.neg, &psk->psk_src.addr.v.a.addr,
2271 		    &psk->psk_src.addr.v.a.mask, srcaddr, sk->af))
2272 			continue;
2273 
2274 		if (! PF_MATCHA(psk->psk_dst.neg, &psk->psk_dst.addr.v.a.addr,
2275 		    &psk->psk_dst.addr.v.a.mask, dstaddr, sk->af))
2276 			continue;
2277 
2278 		if (!  PF_MATCHA(psk->psk_rt_addr.neg,
2279 		    &psk->psk_rt_addr.addr.v.a.addr,
2280 		    &psk->psk_rt_addr.addr.v.a.mask,
2281 		    &s->rt_addr, sk->af))
2282 			continue;
2283 
2284 		if (psk->psk_src.port_op != 0 &&
2285 		    ! pf_match_port(psk->psk_src.port_op,
2286 		    psk->psk_src.port[0], psk->psk_src.port[1], srcport))
2287 			continue;
2288 
2289 		if (psk->psk_dst.port_op != 0 &&
2290 		    ! pf_match_port(psk->psk_dst.port_op,
2291 		    psk->psk_dst.port[0], psk->psk_dst.port[1], dstport))
2292 			continue;
2293 
2294 		if (psk->psk_label[0] &&
2295 		    ! pf_label_match(s->rule.ptr, psk->psk_label))
2296 			continue;
2297 
2298 		if (psk->psk_ifname[0] && strcmp(psk->psk_ifname,
2299 		    kif->pfik_name))
2300 			continue;
2301 
2302 		if (psk->psk_kill_match) {
2303 			/* Create the key to find matching states, with lock
2304 			 * held. */
2305 
2306 			bzero(&match_key, sizeof(match_key));
2307 
2308 			if (s->direction == PF_OUT) {
2309 				dir = PF_IN;
2310 				idx = PF_SK_STACK;
2311 			} else {
2312 				dir = PF_OUT;
2313 				idx = PF_SK_WIRE;
2314 			}
2315 
2316 			match_key.af = s->key[idx]->af;
2317 			match_key.proto = s->key[idx]->proto;
2318 			PF_ACPY(&match_key.addr[0],
2319 			    &s->key[idx]->addr[1], match_key.af);
2320 			match_key.port[0] = s->key[idx]->port[1];
2321 			PF_ACPY(&match_key.addr[1],
2322 			    &s->key[idx]->addr[0], match_key.af);
2323 			match_key.port[1] = s->key[idx]->port[0];
2324 		}
2325 
2326 		pf_unlink_state(s);
2327 		killed++;
2328 
2329 		if (psk->psk_kill_match)
2330 			killed += pf_kill_matching_state(&match_key, dir);
2331 
2332 		goto relock_DIOCKILLSTATES;
2333 	}
2334 	PF_HASHROW_UNLOCK(ih);
2335 
2336 	return (killed);
2337 }
2338 
2339 static int
2340 pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
2341 {
2342 	int			 error = 0;
2343 	PF_RULES_RLOCK_TRACKER;
2344 
2345 #define	ERROUT_IOCTL(target, x)					\
2346     do {								\
2347 	    error = (x);						\
2348 	    SDT_PROBE3(pf, ioctl, ioctl, error, cmd, error, __LINE__);	\
2349 	    goto target;						\
2350     } while (0)
2351 
2352 
2353 	/* XXX keep in sync with switch() below */
2354 	if (securelevel_gt(td->td_ucred, 2))
2355 		switch (cmd) {
2356 		case DIOCGETRULES:
2357 		case DIOCGETRULENV:
2358 		case DIOCGETADDRS:
2359 		case DIOCGETADDR:
2360 		case DIOCGETSTATE:
2361 		case DIOCGETSTATENV:
2362 		case DIOCSETSTATUSIF:
2363 		case DIOCGETSTATUSNV:
2364 		case DIOCCLRSTATUS:
2365 		case DIOCNATLOOK:
2366 		case DIOCSETDEBUG:
2367 		case DIOCGETSTATES:
2368 		case DIOCGETSTATESV2:
2369 		case DIOCGETTIMEOUT:
2370 		case DIOCCLRRULECTRS:
2371 		case DIOCGETLIMIT:
2372 		case DIOCGETALTQSV0:
2373 		case DIOCGETALTQSV1:
2374 		case DIOCGETALTQV0:
2375 		case DIOCGETALTQV1:
2376 		case DIOCGETQSTATSV0:
2377 		case DIOCGETQSTATSV1:
2378 		case DIOCGETRULESETS:
2379 		case DIOCGETRULESET:
2380 		case DIOCRGETTABLES:
2381 		case DIOCRGETTSTATS:
2382 		case DIOCRCLRTSTATS:
2383 		case DIOCRCLRADDRS:
2384 		case DIOCRADDADDRS:
2385 		case DIOCRDELADDRS:
2386 		case DIOCRSETADDRS:
2387 		case DIOCRGETADDRS:
2388 		case DIOCRGETASTATS:
2389 		case DIOCRCLRASTATS:
2390 		case DIOCRTSTADDRS:
2391 		case DIOCOSFPGET:
2392 		case DIOCGETSRCNODES:
2393 		case DIOCCLRSRCNODES:
2394 		case DIOCGETSYNCOOKIES:
2395 		case DIOCIGETIFACES:
2396 		case DIOCGIFSPEEDV0:
2397 		case DIOCGIFSPEEDV1:
2398 		case DIOCSETIFFLAG:
2399 		case DIOCCLRIFFLAG:
2400 		case DIOCGETETHRULES:
2401 		case DIOCGETETHRULE:
2402 		case DIOCGETETHRULESETS:
2403 		case DIOCGETETHRULESET:
2404 			break;
2405 		case DIOCRCLRTABLES:
2406 		case DIOCRADDTABLES:
2407 		case DIOCRDELTABLES:
2408 		case DIOCRSETTFLAGS:
2409 			if (((struct pfioc_table *)addr)->pfrio_flags &
2410 			    PFR_FLAG_DUMMY)
2411 				break; /* dummy operation ok */
2412 			return (EPERM);
2413 		default:
2414 			return (EPERM);
2415 		}
2416 
2417 	if (!(flags & FWRITE))
2418 		switch (cmd) {
2419 		case DIOCGETRULES:
2420 		case DIOCGETADDRS:
2421 		case DIOCGETADDR:
2422 		case DIOCGETSTATE:
2423 		case DIOCGETSTATENV:
2424 		case DIOCGETSTATUSNV:
2425 		case DIOCGETSTATES:
2426 		case DIOCGETSTATESV2:
2427 		case DIOCGETTIMEOUT:
2428 		case DIOCGETLIMIT:
2429 		case DIOCGETALTQSV0:
2430 		case DIOCGETALTQSV1:
2431 		case DIOCGETALTQV0:
2432 		case DIOCGETALTQV1:
2433 		case DIOCGETQSTATSV0:
2434 		case DIOCGETQSTATSV1:
2435 		case DIOCGETRULESETS:
2436 		case DIOCGETRULESET:
2437 		case DIOCNATLOOK:
2438 		case DIOCRGETTABLES:
2439 		case DIOCRGETTSTATS:
2440 		case DIOCRGETADDRS:
2441 		case DIOCRGETASTATS:
2442 		case DIOCRTSTADDRS:
2443 		case DIOCOSFPGET:
2444 		case DIOCGETSRCNODES:
2445 		case DIOCGETSYNCOOKIES:
2446 		case DIOCIGETIFACES:
2447 		case DIOCGIFSPEEDV1:
2448 		case DIOCGIFSPEEDV0:
2449 		case DIOCGETRULENV:
2450 		case DIOCGETETHRULES:
2451 		case DIOCGETETHRULE:
2452 		case DIOCGETETHRULESETS:
2453 		case DIOCGETETHRULESET:
2454 			break;
2455 		case DIOCRCLRTABLES:
2456 		case DIOCRADDTABLES:
2457 		case DIOCRDELTABLES:
2458 		case DIOCRCLRTSTATS:
2459 		case DIOCRCLRADDRS:
2460 		case DIOCRADDADDRS:
2461 		case DIOCRDELADDRS:
2462 		case DIOCRSETADDRS:
2463 		case DIOCRSETTFLAGS:
2464 			if (((struct pfioc_table *)addr)->pfrio_flags &
2465 			    PFR_FLAG_DUMMY) {
2466 				flags |= FWRITE; /* need write lock for dummy */
2467 				break; /* dummy operation ok */
2468 			}
2469 			return (EACCES);
2470 		default:
2471 			return (EACCES);
2472 		}
2473 
2474 	CURVNET_SET(TD_TO_VNET(td));
2475 
2476 	switch (cmd) {
2477 	case DIOCSTART:
2478 		sx_xlock(&V_pf_ioctl_lock);
2479 		if (V_pf_status.running)
2480 			error = EEXIST;
2481 		else {
2482 			hook_pf();
2483 			if (! TAILQ_EMPTY(V_pf_keth->active.rules))
2484 				hook_pf_eth();
2485 			V_pf_status.running = 1;
2486 			V_pf_status.since = time_second;
2487 			new_unrhdr64(&V_pf_stateid, time_second);
2488 
2489 			DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
2490 		}
2491 		break;
2492 
2493 	case DIOCSTOP:
2494 		sx_xlock(&V_pf_ioctl_lock);
2495 		if (!V_pf_status.running)
2496 			error = ENOENT;
2497 		else {
2498 			V_pf_status.running = 0;
2499 			dehook_pf();
2500 			dehook_pf_eth();
2501 			V_pf_status.since = time_second;
2502 			DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
2503 		}
2504 		break;
2505 
2506 	case DIOCGETETHRULES: {
2507 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
2508 		nvlist_t		*nvl;
2509 		void			*packed;
2510 		struct pf_keth_rule	*tail;
2511 		struct pf_keth_ruleset	*rs;
2512 		u_int32_t		 ticket, nr;
2513 		const char		*anchor = "";
2514 
2515 		nvl = NULL;
2516 		packed = NULL;
2517 
2518 #define	ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULES_error, x)
2519 
2520 		if (nv->len > pf_ioctl_maxcount)
2521 			ERROUT(ENOMEM);
2522 
2523 		/* Copy the request in */
2524 		packed = malloc(nv->len, M_NVLIST, M_WAITOK);
2525 		if (packed == NULL)
2526 			ERROUT(ENOMEM);
2527 
2528 		error = copyin(nv->data, packed, nv->len);
2529 		if (error)
2530 			ERROUT(error);
2531 
2532 		nvl = nvlist_unpack(packed, nv->len, 0);
2533 		if (nvl == NULL)
2534 			ERROUT(EBADMSG);
2535 
2536 		if (! nvlist_exists_string(nvl, "anchor"))
2537 			ERROUT(EBADMSG);
2538 
2539 		anchor = nvlist_get_string(nvl, "anchor");
2540 
2541 		rs = pf_find_keth_ruleset(anchor);
2542 
2543 		nvlist_destroy(nvl);
2544 		nvl = NULL;
2545 		free(packed, M_NVLIST);
2546 		packed = NULL;
2547 
2548 		if (rs == NULL)
2549 			ERROUT(ENOENT);
2550 
2551 		/* Reply */
2552 		nvl = nvlist_create(0);
2553 		if (nvl == NULL)
2554 			ERROUT(ENOMEM);
2555 
2556 		PF_RULES_RLOCK();
2557 
2558 		ticket = rs->active.ticket;
2559 		tail = TAILQ_LAST(rs->active.rules, pf_keth_ruleq);
2560 		if (tail)
2561 			nr = tail->nr + 1;
2562 		else
2563 			nr = 0;
2564 
2565 		PF_RULES_RUNLOCK();
2566 
2567 		nvlist_add_number(nvl, "ticket", ticket);
2568 		nvlist_add_number(nvl, "nr", nr);
2569 
2570 		packed = nvlist_pack(nvl, &nv->len);
2571 		if (packed == NULL)
2572 			ERROUT(ENOMEM);
2573 
2574 		if (nv->size == 0)
2575 			ERROUT(0);
2576 		else if (nv->size < nv->len)
2577 			ERROUT(ENOSPC);
2578 
2579 		error = copyout(packed, nv->data, nv->len);
2580 
2581 #undef ERROUT
2582 DIOCGETETHRULES_error:
2583 		free(packed, M_NVLIST);
2584 		nvlist_destroy(nvl);
2585 		break;
2586 	}
2587 
2588 	case DIOCGETETHRULE: {
2589 		struct epoch_tracker	 et;
2590 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
2591 		nvlist_t		*nvl = NULL;
2592 		void			*nvlpacked = NULL;
2593 		struct pf_keth_rule	*rule = NULL;
2594 		struct pf_keth_ruleset	*rs;
2595 		u_int32_t		 ticket, nr;
2596 		bool			 clear = false;
2597 		const char		*anchor;
2598 
2599 #define ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULE_error, x)
2600 
2601 		if (nv->len > pf_ioctl_maxcount)
2602 			ERROUT(ENOMEM);
2603 
2604 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
2605 		if (nvlpacked == NULL)
2606 			ERROUT(ENOMEM);
2607 
2608 		error = copyin(nv->data, nvlpacked, nv->len);
2609 		if (error)
2610 			ERROUT(error);
2611 
2612 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
2613 		if (nvl == NULL)
2614 			ERROUT(EBADMSG);
2615 		if (! nvlist_exists_number(nvl, "ticket"))
2616 			ERROUT(EBADMSG);
2617 		ticket = nvlist_get_number(nvl, "ticket");
2618 		if (! nvlist_exists_string(nvl, "anchor"))
2619 			ERROUT(EBADMSG);
2620 		anchor = nvlist_get_string(nvl, "anchor");
2621 
2622 		if (nvlist_exists_bool(nvl, "clear"))
2623 			clear = nvlist_get_bool(nvl, "clear");
2624 
2625 		if (clear && !(flags & FWRITE))
2626 			ERROUT(EACCES);
2627 
2628 		if (! nvlist_exists_number(nvl, "nr"))
2629 			ERROUT(EBADMSG);
2630 		nr = nvlist_get_number(nvl, "nr");
2631 
2632 		PF_RULES_RLOCK();
2633 		rs = pf_find_keth_ruleset(anchor);
2634 		if (rs == NULL) {
2635 			PF_RULES_RUNLOCK();
2636 			ERROUT(ENOENT);
2637 		}
2638 		if (ticket != rs->active.ticket) {
2639 			PF_RULES_RUNLOCK();
2640 			ERROUT(EBUSY);
2641 		}
2642 
2643 		nvlist_destroy(nvl);
2644 		nvl = NULL;
2645 		free(nvlpacked, M_NVLIST);
2646 		nvlpacked = NULL;
2647 
2648 		rule = TAILQ_FIRST(rs->active.rules);
2649 		while ((rule != NULL) && (rule->nr != nr))
2650 			rule = TAILQ_NEXT(rule, entries);
2651 		if (rule == NULL) {
2652 			PF_RULES_RUNLOCK();
2653 			ERROUT(ENOENT);
2654 		}
2655 		/* Make sure rule can't go away. */
2656 		NET_EPOCH_ENTER(et);
2657 		PF_RULES_RUNLOCK();
2658 		nvl = pf_keth_rule_to_nveth_rule(rule);
2659 		if (pf_keth_anchor_nvcopyout(rs, rule, nvl))
2660 			ERROUT(EBUSY);
2661 		NET_EPOCH_EXIT(et);
2662 		if (nvl == NULL)
2663 			ERROUT(ENOMEM);
2664 
2665 		nvlpacked = nvlist_pack(nvl, &nv->len);
2666 		if (nvlpacked == NULL)
2667 			ERROUT(ENOMEM);
2668 
2669 		if (nv->size == 0)
2670 			ERROUT(0);
2671 		else if (nv->size < nv->len)
2672 			ERROUT(ENOSPC);
2673 
2674 		error = copyout(nvlpacked, nv->data, nv->len);
2675 		if (error == 0 && clear) {
2676 			counter_u64_zero(rule->evaluations);
2677 			for (int i = 0; i < 2; i++) {
2678 				counter_u64_zero(rule->packets[i]);
2679 				counter_u64_zero(rule->bytes[i]);
2680 			}
2681 		}
2682 
2683 #undef ERROUT
2684 DIOCGETETHRULE_error:
2685 		free(nvlpacked, M_NVLIST);
2686 		nvlist_destroy(nvl);
2687 		break;
2688 	}
2689 
2690 	case DIOCADDETHRULE: {
2691 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
2692 		nvlist_t		*nvl = NULL;
2693 		void			*nvlpacked = NULL;
2694 		struct pf_keth_rule	*rule = NULL, *tail = NULL;
2695 		struct pf_keth_ruleset	*ruleset = NULL;
2696 		struct pfi_kkif		*kif = NULL, *bridge_to_kif = NULL;
2697 		const char		*anchor = "", *anchor_call = "";
2698 
2699 #define ERROUT(x)	ERROUT_IOCTL(DIOCADDETHRULE_error, x)
2700 
2701 		if (nv->len > pf_ioctl_maxcount)
2702 			ERROUT(ENOMEM);
2703 
2704 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
2705 		if (nvlpacked == NULL)
2706 			ERROUT(ENOMEM);
2707 
2708 		error = copyin(nv->data, nvlpacked, nv->len);
2709 		if (error)
2710 			ERROUT(error);
2711 
2712 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
2713 		if (nvl == NULL)
2714 			ERROUT(EBADMSG);
2715 
2716 		if (! nvlist_exists_number(nvl, "ticket"))
2717 			ERROUT(EBADMSG);
2718 
2719 		if (nvlist_exists_string(nvl, "anchor"))
2720 			anchor = nvlist_get_string(nvl, "anchor");
2721 		if (nvlist_exists_string(nvl, "anchor_call"))
2722 			anchor_call = nvlist_get_string(nvl, "anchor_call");
2723 
2724 		ruleset = pf_find_keth_ruleset(anchor);
2725 		if (ruleset == NULL)
2726 			ERROUT(EINVAL);
2727 
2728 		if (nvlist_get_number(nvl, "ticket") !=
2729 		    ruleset->inactive.ticket) {
2730 			DPFPRINTF(PF_DEBUG_MISC,
2731 			    ("ticket: %d != %d\n",
2732 			    (u_int32_t)nvlist_get_number(nvl, "ticket"),
2733 			    ruleset->inactive.ticket));
2734 			ERROUT(EBUSY);
2735 		}
2736 
2737 		rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK);
2738 		if (rule == NULL)
2739 			ERROUT(ENOMEM);
2740 		rule->timestamp = NULL;
2741 
2742 		error = pf_nveth_rule_to_keth_rule(nvl, rule);
2743 		if (error != 0)
2744 			ERROUT(error);
2745 
2746 		if (rule->ifname[0])
2747 			kif = pf_kkif_create(M_WAITOK);
2748 		if (rule->bridge_to_name[0])
2749 			bridge_to_kif = pf_kkif_create(M_WAITOK);
2750 		rule->evaluations = counter_u64_alloc(M_WAITOK);
2751 		for (int i = 0; i < 2; i++) {
2752 			rule->packets[i] = counter_u64_alloc(M_WAITOK);
2753 			rule->bytes[i] = counter_u64_alloc(M_WAITOK);
2754 		}
2755 		rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
2756 		    M_WAITOK | M_ZERO);
2757 
2758 		PF_RULES_WLOCK();
2759 
2760 		if (rule->ifname[0]) {
2761 			rule->kif = pfi_kkif_attach(kif, rule->ifname);
2762 			pfi_kkif_ref(rule->kif);
2763 		} else
2764 			rule->kif = NULL;
2765 		if (rule->bridge_to_name[0]) {
2766 			rule->bridge_to = pfi_kkif_attach(bridge_to_kif,
2767 			    rule->bridge_to_name);
2768 			pfi_kkif_ref(rule->bridge_to);
2769 		} else
2770 			rule->bridge_to = NULL;
2771 
2772 #ifdef ALTQ
2773 		/* set queue IDs */
2774 		if (rule->qname[0] != 0) {
2775 			if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
2776 				error = EBUSY;
2777 			else
2778 				rule->qid = rule->qid;
2779 		}
2780 #endif
2781 		if (rule->tagname[0])
2782 			if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
2783 				error = EBUSY;
2784 		if (rule->match_tagname[0])
2785 			if ((rule->match_tag = pf_tagname2tag(
2786 			    rule->match_tagname)) == 0)
2787 				error = EBUSY;
2788 
2789 		if (error == 0 && rule->ipdst.addr.type == PF_ADDR_TABLE)
2790 			error = pf_eth_addr_setup(ruleset, &rule->ipdst.addr);
2791 		if (error == 0 && rule->ipsrc.addr.type == PF_ADDR_TABLE)
2792 			error = pf_eth_addr_setup(ruleset, &rule->ipsrc.addr);
2793 
2794 		if (error) {
2795 			pf_free_eth_rule(rule);
2796 			PF_RULES_WUNLOCK();
2797 			ERROUT(error);
2798 		}
2799 
2800 		if (pf_keth_anchor_setup(rule, ruleset, anchor_call)) {
2801 			pf_free_eth_rule(rule);
2802 			PF_RULES_WUNLOCK();
2803 			ERROUT(EINVAL);
2804 		}
2805 
2806 		tail = TAILQ_LAST(ruleset->inactive.rules, pf_keth_ruleq);
2807 		if (tail)
2808 			rule->nr = tail->nr + 1;
2809 		else
2810 			rule->nr = 0;
2811 
2812 		TAILQ_INSERT_TAIL(ruleset->inactive.rules, rule, entries);
2813 
2814 		PF_RULES_WUNLOCK();
2815 
2816 #undef ERROUT
2817 DIOCADDETHRULE_error:
2818 		nvlist_destroy(nvl);
2819 		free(nvlpacked, M_NVLIST);
2820 		break;
2821 	}
2822 
2823 	case DIOCGETETHRULESETS: {
2824 		struct epoch_tracker	 et;
2825 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
2826 		nvlist_t		*nvl = NULL;
2827 		void			*nvlpacked = NULL;
2828 		struct pf_keth_ruleset	*ruleset;
2829 		struct pf_keth_anchor	*anchor;
2830 		int			 nr = 0;
2831 
2832 #define ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULESETS_error, x)
2833 
2834 		if (nv->len > pf_ioctl_maxcount)
2835 			ERROUT(ENOMEM);
2836 
2837 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
2838 		if (nvlpacked == NULL)
2839 			ERROUT(ENOMEM);
2840 
2841 		error = copyin(nv->data, nvlpacked, nv->len);
2842 		if (error)
2843 			ERROUT(error);
2844 
2845 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
2846 		if (nvl == NULL)
2847 			ERROUT(EBADMSG);
2848 		if (! nvlist_exists_string(nvl, "path"))
2849 			ERROUT(EBADMSG);
2850 
2851 		NET_EPOCH_ENTER(et);
2852 
2853 		if ((ruleset = pf_find_keth_ruleset(
2854 		    nvlist_get_string(nvl, "path"))) == NULL) {
2855 			NET_EPOCH_EXIT(et);
2856 			ERROUT(ENOENT);
2857 		}
2858 
2859 		if (ruleset->anchor == NULL) {
2860 			RB_FOREACH(anchor, pf_keth_anchor_global, &V_pf_keth_anchors)
2861 				if (anchor->parent == NULL)
2862 					nr++;
2863 		} else {
2864 			RB_FOREACH(anchor, pf_keth_anchor_node,
2865 			    &ruleset->anchor->children)
2866 				nr++;
2867 		}
2868 
2869 		NET_EPOCH_EXIT(et);
2870 
2871 		nvlist_destroy(nvl);
2872 		nvl = NULL;
2873 		free(nvlpacked, M_NVLIST);
2874 		nvlpacked = NULL;
2875 
2876 		nvl = nvlist_create(0);
2877 		if (nvl == NULL)
2878 			ERROUT(ENOMEM);
2879 
2880 		nvlist_add_number(nvl, "nr", nr);
2881 
2882 		nvlpacked = nvlist_pack(nvl, &nv->len);
2883 		if (nvlpacked == NULL)
2884 			ERROUT(ENOMEM);
2885 
2886 		if (nv->size == 0)
2887 			ERROUT(0);
2888 		else if (nv->size < nv->len)
2889 			ERROUT(ENOSPC);
2890 
2891 		error = copyout(nvlpacked, nv->data, nv->len);
2892 
2893 #undef ERROUT
2894 DIOCGETETHRULESETS_error:
2895 		free(nvlpacked, M_NVLIST);
2896 		nvlist_destroy(nvl);
2897 		break;
2898 	}
2899 
2900 	case DIOCGETETHRULESET: {
2901 		struct epoch_tracker	 et;
2902 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
2903 		nvlist_t		*nvl = NULL;
2904 		void			*nvlpacked = NULL;
2905 		struct pf_keth_ruleset	*ruleset;
2906 		struct pf_keth_anchor	*anchor;
2907 		int			 nr = 0, req_nr = 0;
2908 		bool			 found = false;
2909 
2910 #define ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULESET_error, x)
2911 
2912 		if (nv->len > pf_ioctl_maxcount)
2913 			ERROUT(ENOMEM);
2914 
2915 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
2916 		if (nvlpacked == NULL)
2917 			ERROUT(ENOMEM);
2918 
2919 		error = copyin(nv->data, nvlpacked, nv->len);
2920 		if (error)
2921 			ERROUT(error);
2922 
2923 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
2924 		if (nvl == NULL)
2925 			ERROUT(EBADMSG);
2926 		if (! nvlist_exists_string(nvl, "path"))
2927 			ERROUT(EBADMSG);
2928 		if (! nvlist_exists_number(nvl, "nr"))
2929 			ERROUT(EBADMSG);
2930 
2931 		req_nr = nvlist_get_number(nvl, "nr");
2932 
2933 		NET_EPOCH_ENTER(et);
2934 
2935 		if ((ruleset = pf_find_keth_ruleset(
2936 		    nvlist_get_string(nvl, "path"))) == NULL) {
2937 			NET_EPOCH_EXIT(et);
2938 			ERROUT(ENOENT);
2939 		}
2940 
2941 		nvlist_destroy(nvl);
2942 		nvl = NULL;
2943 		free(nvlpacked, M_NVLIST);
2944 		nvlpacked = NULL;
2945 
2946 		nvl = nvlist_create(0);
2947 		if (nvl == NULL) {
2948 			NET_EPOCH_EXIT(et);
2949 			ERROUT(ENOMEM);
2950 		}
2951 
2952 		if (ruleset->anchor == NULL) {
2953 			RB_FOREACH(anchor, pf_keth_anchor_global,
2954 			    &V_pf_keth_anchors) {
2955 				if (anchor->parent == NULL && nr++ == req_nr) {
2956 					found = true;
2957 					break;
2958 				}
2959 			}
2960 		} else {
2961 			RB_FOREACH(anchor, pf_keth_anchor_node,
2962 			     &ruleset->anchor->children) {
2963 				if (nr++ == req_nr) {
2964 					found = true;
2965 					break;
2966 				}
2967 			}
2968 		}
2969 
2970 		NET_EPOCH_EXIT(et);
2971 		if (found) {
2972 			nvlist_add_number(nvl, "nr", nr);
2973 			nvlist_add_string(nvl, "name", anchor->name);
2974 			if (ruleset->anchor)
2975 				nvlist_add_string(nvl, "path",
2976 				    ruleset->anchor->path);
2977 			else
2978 				nvlist_add_string(nvl, "path", "");
2979 		} else {
2980 			ERROUT(EBUSY);
2981 		}
2982 
2983 		nvlpacked = nvlist_pack(nvl, &nv->len);
2984 		if (nvlpacked == NULL)
2985 			ERROUT(ENOMEM);
2986 
2987 		if (nv->size == 0)
2988 			ERROUT(0);
2989 		else if (nv->size < nv->len)
2990 			ERROUT(ENOSPC);
2991 
2992 		error = copyout(nvlpacked, nv->data, nv->len);
2993 
2994 #undef ERROUT
2995 DIOCGETETHRULESET_error:
2996 		free(nvlpacked, M_NVLIST);
2997 		nvlist_destroy(nvl);
2998 		break;
2999 	}
3000 
3001 	case DIOCADDRULENV: {
3002 		struct pfioc_nv	*nv = (struct pfioc_nv *)addr;
3003 		nvlist_t	*nvl = NULL;
3004 		void		*nvlpacked = NULL;
3005 		struct pf_krule	*rule = NULL;
3006 		const char	*anchor = "", *anchor_call = "";
3007 		uint32_t	 ticket = 0, pool_ticket = 0;
3008 
3009 #define	ERROUT(x)	ERROUT_IOCTL(DIOCADDRULENV_error, x)
3010 
3011 		if (nv->len > pf_ioctl_maxcount)
3012 			ERROUT(ENOMEM);
3013 
3014 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3015 		error = copyin(nv->data, nvlpacked, nv->len);
3016 		if (error)
3017 			ERROUT(error);
3018 
3019 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3020 		if (nvl == NULL)
3021 			ERROUT(EBADMSG);
3022 
3023 		if (! nvlist_exists_number(nvl, "ticket"))
3024 			ERROUT(EINVAL);
3025 		ticket = nvlist_get_number(nvl, "ticket");
3026 
3027 		if (! nvlist_exists_number(nvl, "pool_ticket"))
3028 			ERROUT(EINVAL);
3029 		pool_ticket = nvlist_get_number(nvl, "pool_ticket");
3030 
3031 		if (! nvlist_exists_nvlist(nvl, "rule"))
3032 			ERROUT(EINVAL);
3033 
3034 		rule = pf_krule_alloc();
3035 		error = pf_nvrule_to_krule(nvlist_get_nvlist(nvl, "rule"),
3036 		    rule);
3037 		if (error)
3038 			ERROUT(error);
3039 
3040 		if (nvlist_exists_string(nvl, "anchor"))
3041 			anchor = nvlist_get_string(nvl, "anchor");
3042 		if (nvlist_exists_string(nvl, "anchor_call"))
3043 			anchor_call = nvlist_get_string(nvl, "anchor_call");
3044 
3045 		if ((error = nvlist_error(nvl)))
3046 			ERROUT(error);
3047 
3048 		/* Frees rule on error */
3049 		error = pf_ioctl_addrule(rule, ticket, pool_ticket, anchor,
3050 		    anchor_call, td);
3051 
3052 		nvlist_destroy(nvl);
3053 		free(nvlpacked, M_NVLIST);
3054 		break;
3055 #undef ERROUT
3056 DIOCADDRULENV_error:
3057 		pf_krule_free(rule);
3058 		nvlist_destroy(nvl);
3059 		free(nvlpacked, M_NVLIST);
3060 
3061 		break;
3062 	}
3063 	case DIOCADDRULE: {
3064 		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
3065 		struct pf_krule		*rule;
3066 
3067 		rule = pf_krule_alloc();
3068 		error = pf_rule_to_krule(&pr->rule, rule);
3069 		if (error != 0) {
3070 			pf_krule_free(rule);
3071 			break;
3072 		}
3073 
3074 		pr->anchor[sizeof(pr->anchor) - 1] = 0;
3075 
3076 		/* Frees rule on error */
3077 		error = pf_ioctl_addrule(rule, pr->ticket, pr->pool_ticket,
3078 		    pr->anchor, pr->anchor_call, td);
3079 		break;
3080 	}
3081 
3082 	case DIOCGETRULES: {
3083 		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
3084 		struct pf_kruleset	*ruleset;
3085 		struct pf_krule		*tail;
3086 		int			 rs_num;
3087 
3088 		pr->anchor[sizeof(pr->anchor) - 1] = 0;
3089 
3090 		PF_RULES_WLOCK();
3091 		ruleset = pf_find_kruleset(pr->anchor);
3092 		if (ruleset == NULL) {
3093 			PF_RULES_WUNLOCK();
3094 			error = EINVAL;
3095 			break;
3096 		}
3097 		rs_num = pf_get_ruleset_number(pr->rule.action);
3098 		if (rs_num >= PF_RULESET_MAX) {
3099 			PF_RULES_WUNLOCK();
3100 			error = EINVAL;
3101 			break;
3102 		}
3103 		tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
3104 		    pf_krulequeue);
3105 		if (tail)
3106 			pr->nr = tail->nr + 1;
3107 		else
3108 			pr->nr = 0;
3109 		pr->ticket = ruleset->rules[rs_num].active.ticket;
3110 		PF_RULES_WUNLOCK();
3111 		break;
3112 	}
3113 
3114 	case DIOCGETRULENV: {
3115 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3116 		nvlist_t		*nvrule = NULL;
3117 		nvlist_t		*nvl = NULL;
3118 		struct pf_kruleset	*ruleset;
3119 		struct pf_krule		*rule;
3120 		void			*nvlpacked = NULL;
3121 		int			 rs_num, nr;
3122 		bool			 clear_counter = false;
3123 
3124 #define	ERROUT(x)	ERROUT_IOCTL(DIOCGETRULENV_error, x)
3125 
3126 		if (nv->len > pf_ioctl_maxcount)
3127 			ERROUT(ENOMEM);
3128 
3129 		/* Copy the request in */
3130 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3131 		if (nvlpacked == NULL)
3132 			ERROUT(ENOMEM);
3133 
3134 		error = copyin(nv->data, nvlpacked, nv->len);
3135 		if (error)
3136 			ERROUT(error);
3137 
3138 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3139 		if (nvl == NULL)
3140 			ERROUT(EBADMSG);
3141 
3142 		if (! nvlist_exists_string(nvl, "anchor"))
3143 			ERROUT(EBADMSG);
3144 		if (! nvlist_exists_number(nvl, "ruleset"))
3145 			ERROUT(EBADMSG);
3146 		if (! nvlist_exists_number(nvl, "ticket"))
3147 			ERROUT(EBADMSG);
3148 		if (! nvlist_exists_number(nvl, "nr"))
3149 			ERROUT(EBADMSG);
3150 
3151 		if (nvlist_exists_bool(nvl, "clear_counter"))
3152 			clear_counter = nvlist_get_bool(nvl, "clear_counter");
3153 
3154 		if (clear_counter && !(flags & FWRITE))
3155 			ERROUT(EACCES);
3156 
3157 		nr = nvlist_get_number(nvl, "nr");
3158 
3159 		PF_RULES_WLOCK();
3160 		ruleset = pf_find_kruleset(nvlist_get_string(nvl, "anchor"));
3161 		if (ruleset == NULL) {
3162 			PF_RULES_WUNLOCK();
3163 			ERROUT(ENOENT);
3164 		}
3165 
3166 		rs_num = pf_get_ruleset_number(nvlist_get_number(nvl, "ruleset"));
3167 		if (rs_num >= PF_RULESET_MAX) {
3168 			PF_RULES_WUNLOCK();
3169 			ERROUT(EINVAL);
3170 		}
3171 
3172 		if (nvlist_get_number(nvl, "ticket") !=
3173 		    ruleset->rules[rs_num].active.ticket) {
3174 			PF_RULES_WUNLOCK();
3175 			ERROUT(EBUSY);
3176 		}
3177 
3178 		if ((error = nvlist_error(nvl))) {
3179 			PF_RULES_WUNLOCK();
3180 			ERROUT(error);
3181 		}
3182 
3183 		rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
3184 		while ((rule != NULL) && (rule->nr != nr))
3185 			rule = TAILQ_NEXT(rule, entries);
3186 		if (rule == NULL) {
3187 			PF_RULES_WUNLOCK();
3188 			ERROUT(EBUSY);
3189 		}
3190 
3191 		nvrule = pf_krule_to_nvrule(rule);
3192 
3193 		nvlist_destroy(nvl);
3194 		nvl = nvlist_create(0);
3195 		if (nvl == NULL) {
3196 			PF_RULES_WUNLOCK();
3197 			ERROUT(ENOMEM);
3198 		}
3199 		nvlist_add_number(nvl, "nr", nr);
3200 		nvlist_add_nvlist(nvl, "rule", nvrule);
3201 		nvlist_destroy(nvrule);
3202 		nvrule = NULL;
3203 		if (pf_kanchor_nvcopyout(ruleset, rule, nvl)) {
3204 			PF_RULES_WUNLOCK();
3205 			ERROUT(EBUSY);
3206 		}
3207 
3208 		free(nvlpacked, M_NVLIST);
3209 		nvlpacked = nvlist_pack(nvl, &nv->len);
3210 		if (nvlpacked == NULL) {
3211 			PF_RULES_WUNLOCK();
3212 			ERROUT(ENOMEM);
3213 		}
3214 
3215 		if (nv->size == 0) {
3216 			PF_RULES_WUNLOCK();
3217 			ERROUT(0);
3218 		}
3219 		else if (nv->size < nv->len) {
3220 			PF_RULES_WUNLOCK();
3221 			ERROUT(ENOSPC);
3222 		}
3223 
3224 		if (clear_counter) {
3225 			pf_counter_u64_zero(&rule->evaluations);
3226 			for (int i = 0; i < 2; i++) {
3227 				pf_counter_u64_zero(&rule->packets[i]);
3228 				pf_counter_u64_zero(&rule->bytes[i]);
3229 			}
3230 			counter_u64_zero(rule->states_tot);
3231 		}
3232 		PF_RULES_WUNLOCK();
3233 
3234 		error = copyout(nvlpacked, nv->data, nv->len);
3235 
3236 #undef ERROUT
3237 DIOCGETRULENV_error:
3238 		free(nvlpacked, M_NVLIST);
3239 		nvlist_destroy(nvrule);
3240 		nvlist_destroy(nvl);
3241 
3242 		break;
3243 	}
3244 
3245 	case DIOCCHANGERULE: {
3246 		struct pfioc_rule	*pcr = (struct pfioc_rule *)addr;
3247 		struct pf_kruleset	*ruleset;
3248 		struct pf_krule		*oldrule = NULL, *newrule = NULL;
3249 		struct pfi_kkif		*kif = NULL;
3250 		struct pf_kpooladdr	*pa;
3251 		u_int32_t		 nr = 0;
3252 		int			 rs_num;
3253 
3254 		pcr->anchor[sizeof(pcr->anchor) - 1] = 0;
3255 
3256 		if (pcr->action < PF_CHANGE_ADD_HEAD ||
3257 		    pcr->action > PF_CHANGE_GET_TICKET) {
3258 			error = EINVAL;
3259 			break;
3260 		}
3261 		if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
3262 			error = EINVAL;
3263 			break;
3264 		}
3265 
3266 		if (pcr->action != PF_CHANGE_REMOVE) {
3267 			newrule = pf_krule_alloc();
3268 			error = pf_rule_to_krule(&pcr->rule, newrule);
3269 			if (error != 0) {
3270 				pf_krule_free(newrule);
3271 				break;
3272 			}
3273 
3274 			if (newrule->ifname[0])
3275 				kif = pf_kkif_create(M_WAITOK);
3276 			pf_counter_u64_init(&newrule->evaluations, M_WAITOK);
3277 			for (int i = 0; i < 2; i++) {
3278 				pf_counter_u64_init(&newrule->packets[i], M_WAITOK);
3279 				pf_counter_u64_init(&newrule->bytes[i], M_WAITOK);
3280 			}
3281 			newrule->states_cur = counter_u64_alloc(M_WAITOK);
3282 			newrule->states_tot = counter_u64_alloc(M_WAITOK);
3283 			newrule->src_nodes = counter_u64_alloc(M_WAITOK);
3284 			newrule->cuid = td->td_ucred->cr_ruid;
3285 			newrule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
3286 			TAILQ_INIT(&newrule->rpool.list);
3287 		}
3288 #define	ERROUT(x)	ERROUT_IOCTL(DIOCCHANGERULE_error, x)
3289 
3290 		PF_CONFIG_LOCK();
3291 		PF_RULES_WLOCK();
3292 #ifdef PF_WANT_32_TO_64_COUNTER
3293 		if (newrule != NULL) {
3294 			LIST_INSERT_HEAD(&V_pf_allrulelist, newrule, allrulelist);
3295 			newrule->allrulelinked = true;
3296 			V_pf_allrulecount++;
3297 		}
3298 #endif
3299 
3300 		if (!(pcr->action == PF_CHANGE_REMOVE ||
3301 		    pcr->action == PF_CHANGE_GET_TICKET) &&
3302 		    pcr->pool_ticket != V_ticket_pabuf)
3303 			ERROUT(EBUSY);
3304 
3305 		ruleset = pf_find_kruleset(pcr->anchor);
3306 		if (ruleset == NULL)
3307 			ERROUT(EINVAL);
3308 
3309 		rs_num = pf_get_ruleset_number(pcr->rule.action);
3310 		if (rs_num >= PF_RULESET_MAX)
3311 			ERROUT(EINVAL);
3312 
3313 		/*
3314 		 * XXXMJG: there is no guarantee that the ruleset was
3315 		 * created by the usual route of calling DIOCXBEGIN.
3316 		 * As a result it is possible the rule tree will not
3317 		 * be allocated yet. Hack around it by doing it here.
3318 		 * Note it is fine to let the tree persist in case of
3319 		 * error as it will be freed down the road on future
3320 		 * updates (if need be).
3321 		 */
3322 		if (ruleset->rules[rs_num].active.tree == NULL) {
3323 			ruleset->rules[rs_num].active.tree = pf_rule_tree_alloc(M_NOWAIT);
3324 			if (ruleset->rules[rs_num].active.tree == NULL) {
3325 				ERROUT(ENOMEM);
3326 			}
3327 		}
3328 
3329 		if (pcr->action == PF_CHANGE_GET_TICKET) {
3330 			pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
3331 			ERROUT(0);
3332 		} else if (pcr->ticket !=
3333 			    ruleset->rules[rs_num].active.ticket)
3334 				ERROUT(EINVAL);
3335 
3336 		if (pcr->action != PF_CHANGE_REMOVE) {
3337 			if (newrule->ifname[0]) {
3338 				newrule->kif = pfi_kkif_attach(kif,
3339 				    newrule->ifname);
3340 				kif = NULL;
3341 				pfi_kkif_ref(newrule->kif);
3342 			} else
3343 				newrule->kif = NULL;
3344 
3345 			if (newrule->rtableid > 0 &&
3346 			    newrule->rtableid >= rt_numfibs)
3347 				error = EBUSY;
3348 
3349 #ifdef ALTQ
3350 			/* set queue IDs */
3351 			if (newrule->qname[0] != 0) {
3352 				if ((newrule->qid =
3353 				    pf_qname2qid(newrule->qname)) == 0)
3354 					error = EBUSY;
3355 				else if (newrule->pqname[0] != 0) {
3356 					if ((newrule->pqid =
3357 					    pf_qname2qid(newrule->pqname)) == 0)
3358 						error = EBUSY;
3359 				} else
3360 					newrule->pqid = newrule->qid;
3361 			}
3362 #endif /* ALTQ */
3363 			if (newrule->tagname[0])
3364 				if ((newrule->tag =
3365 				    pf_tagname2tag(newrule->tagname)) == 0)
3366 					error = EBUSY;
3367 			if (newrule->match_tagname[0])
3368 				if ((newrule->match_tag = pf_tagname2tag(
3369 				    newrule->match_tagname)) == 0)
3370 					error = EBUSY;
3371 			if (newrule->rt && !newrule->direction)
3372 				error = EINVAL;
3373 			if (!newrule->log)
3374 				newrule->logif = 0;
3375 			if (newrule->logif >= PFLOGIFS_MAX)
3376 				error = EINVAL;
3377 			if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af))
3378 				error = ENOMEM;
3379 			if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af))
3380 				error = ENOMEM;
3381 			if (pf_kanchor_setup(newrule, ruleset, pcr->anchor_call))
3382 				error = EINVAL;
3383 			TAILQ_FOREACH(pa, &V_pf_pabuf, entries)
3384 				if (pa->addr.type == PF_ADDR_TABLE) {
3385 					pa->addr.p.tbl =
3386 					    pfr_attach_table(ruleset,
3387 					    pa->addr.v.tblname);
3388 					if (pa->addr.p.tbl == NULL)
3389 						error = ENOMEM;
3390 				}
3391 
3392 			newrule->overload_tbl = NULL;
3393 			if (newrule->overload_tblname[0]) {
3394 				if ((newrule->overload_tbl = pfr_attach_table(
3395 				    ruleset, newrule->overload_tblname)) ==
3396 				    NULL)
3397 					error = EINVAL;
3398 				else
3399 					newrule->overload_tbl->pfrkt_flags |=
3400 					    PFR_TFLAG_ACTIVE;
3401 			}
3402 
3403 			pf_mv_kpool(&V_pf_pabuf, &newrule->rpool.list);
3404 			if (((((newrule->action == PF_NAT) ||
3405 			    (newrule->action == PF_RDR) ||
3406 			    (newrule->action == PF_BINAT) ||
3407 			    (newrule->rt > PF_NOPFROUTE)) &&
3408 			    !newrule->anchor)) &&
3409 			    (TAILQ_FIRST(&newrule->rpool.list) == NULL))
3410 				error = EINVAL;
3411 
3412 			if (error) {
3413 				pf_free_rule(newrule);
3414 				PF_RULES_WUNLOCK();
3415 				PF_CONFIG_UNLOCK();
3416 				break;
3417 			}
3418 
3419 			newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
3420 		}
3421 		pf_empty_kpool(&V_pf_pabuf);
3422 
3423 		if (pcr->action == PF_CHANGE_ADD_HEAD)
3424 			oldrule = TAILQ_FIRST(
3425 			    ruleset->rules[rs_num].active.ptr);
3426 		else if (pcr->action == PF_CHANGE_ADD_TAIL)
3427 			oldrule = TAILQ_LAST(
3428 			    ruleset->rules[rs_num].active.ptr, pf_krulequeue);
3429 		else {
3430 			oldrule = TAILQ_FIRST(
3431 			    ruleset->rules[rs_num].active.ptr);
3432 			while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
3433 				oldrule = TAILQ_NEXT(oldrule, entries);
3434 			if (oldrule == NULL) {
3435 				if (newrule != NULL)
3436 					pf_free_rule(newrule);
3437 				PF_RULES_WUNLOCK();
3438 				PF_CONFIG_UNLOCK();
3439 				error = EINVAL;
3440 				break;
3441 			}
3442 		}
3443 
3444 		if (pcr->action == PF_CHANGE_REMOVE) {
3445 			pf_unlink_rule(ruleset->rules[rs_num].active.ptr,
3446 			    oldrule);
3447 			RB_REMOVE(pf_krule_global,
3448 			    ruleset->rules[rs_num].active.tree, oldrule);
3449 			ruleset->rules[rs_num].active.rcount--;
3450 		} else {
3451 			pf_hash_rule(newrule);
3452 			if (RB_INSERT(pf_krule_global,
3453 			    ruleset->rules[rs_num].active.tree, newrule) != NULL) {
3454 				pf_free_rule(newrule);
3455 				PF_RULES_WUNLOCK();
3456 				PF_CONFIG_UNLOCK();
3457 				error = EEXIST;
3458 				break;
3459 			}
3460 
3461 			if (oldrule == NULL)
3462 				TAILQ_INSERT_TAIL(
3463 				    ruleset->rules[rs_num].active.ptr,
3464 				    newrule, entries);
3465 			else if (pcr->action == PF_CHANGE_ADD_HEAD ||
3466 			    pcr->action == PF_CHANGE_ADD_BEFORE)
3467 				TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
3468 			else
3469 				TAILQ_INSERT_AFTER(
3470 				    ruleset->rules[rs_num].active.ptr,
3471 				    oldrule, newrule, entries);
3472 			ruleset->rules[rs_num].active.rcount++;
3473 		}
3474 
3475 		nr = 0;
3476 		TAILQ_FOREACH(oldrule,
3477 		    ruleset->rules[rs_num].active.ptr, entries)
3478 			oldrule->nr = nr++;
3479 
3480 		ruleset->rules[rs_num].active.ticket++;
3481 
3482 		pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
3483 		pf_remove_if_empty_kruleset(ruleset);
3484 
3485 		PF_RULES_WUNLOCK();
3486 		PF_CONFIG_UNLOCK();
3487 		break;
3488 
3489 #undef ERROUT
3490 DIOCCHANGERULE_error:
3491 		PF_RULES_WUNLOCK();
3492 		PF_CONFIG_UNLOCK();
3493 		pf_krule_free(newrule);
3494 		pf_kkif_free(kif);
3495 		break;
3496 	}
3497 
3498 	case DIOCCLRSTATESNV: {
3499 		error = pf_clearstates_nv((struct pfioc_nv *)addr);
3500 		break;
3501 	}
3502 
3503 	case DIOCKILLSTATESNV: {
3504 		error = pf_killstates_nv((struct pfioc_nv *)addr);
3505 		break;
3506 	}
3507 
3508 	case DIOCADDSTATE: {
3509 		struct pfioc_state		*ps = (struct pfioc_state *)addr;
3510 		struct pfsync_state_1301	*sp = &ps->state;
3511 
3512 		if (sp->timeout >= PFTM_MAX) {
3513 			error = EINVAL;
3514 			break;
3515 		}
3516 		if (V_pfsync_state_import_ptr != NULL) {
3517 			PF_RULES_RLOCK();
3518 			error = V_pfsync_state_import_ptr(
3519 			    (union pfsync_state_union *)sp, PFSYNC_SI_IOCTL,
3520 			    PFSYNC_MSG_VERSION_1301);
3521 			PF_RULES_RUNLOCK();
3522 		} else
3523 			error = EOPNOTSUPP;
3524 		break;
3525 	}
3526 
3527 	case DIOCGETSTATE: {
3528 		struct pfioc_state	*ps = (struct pfioc_state *)addr;
3529 		struct pf_kstate	*s;
3530 
3531 		s = pf_find_state_byid(ps->state.id, ps->state.creatorid);
3532 		if (s == NULL) {
3533 			error = ENOENT;
3534 			break;
3535 		}
3536 
3537 		pfsync_state_export((union pfsync_state_union*)&ps->state,
3538 		    s, PFSYNC_MSG_VERSION_1301);
3539 		PF_STATE_UNLOCK(s);
3540 		break;
3541 	}
3542 
3543 	case DIOCGETSTATENV: {
3544 		error = pf_getstate((struct pfioc_nv *)addr);
3545 		break;
3546 	}
3547 
3548 	case DIOCGETSTATES: {
3549 		struct pfioc_states	*ps = (struct pfioc_states *)addr;
3550 		struct pf_kstate	*s;
3551 		struct pfsync_state_1301	*pstore, *p;
3552 		int			 i, nr;
3553 		size_t			 slice_count = 16, count;
3554 		void			*out;
3555 
3556 		if (ps->ps_len <= 0) {
3557 			nr = uma_zone_get_cur(V_pf_state_z);
3558 			ps->ps_len = sizeof(struct pfsync_state_1301) * nr;
3559 			break;
3560 		}
3561 
3562 		out = ps->ps_states;
3563 		pstore = mallocarray(slice_count,
3564 		    sizeof(struct pfsync_state_1301), M_TEMP, M_WAITOK | M_ZERO);
3565 		nr = 0;
3566 
3567 		for (i = 0; i <= pf_hashmask; i++) {
3568 			struct pf_idhash *ih = &V_pf_idhash[i];
3569 
3570 DIOCGETSTATES_retry:
3571 			p = pstore;
3572 
3573 			if (LIST_EMPTY(&ih->states))
3574 				continue;
3575 
3576 			PF_HASHROW_LOCK(ih);
3577 			count = 0;
3578 			LIST_FOREACH(s, &ih->states, entry) {
3579 				if (s->timeout == PFTM_UNLINKED)
3580 					continue;
3581 				count++;
3582 			}
3583 
3584 			if (count > slice_count) {
3585 				PF_HASHROW_UNLOCK(ih);
3586 				free(pstore, M_TEMP);
3587 				slice_count = count * 2;
3588 				pstore = mallocarray(slice_count,
3589 				    sizeof(struct pfsync_state_1301), M_TEMP,
3590 				    M_WAITOK | M_ZERO);
3591 				goto DIOCGETSTATES_retry;
3592 			}
3593 
3594 			if ((nr+count) * sizeof(*p) > ps->ps_len) {
3595 				PF_HASHROW_UNLOCK(ih);
3596 				goto DIOCGETSTATES_full;
3597 			}
3598 
3599 			LIST_FOREACH(s, &ih->states, entry) {
3600 				if (s->timeout == PFTM_UNLINKED)
3601 					continue;
3602 
3603 				pfsync_state_export((union pfsync_state_union*)p,
3604 				    s, PFSYNC_MSG_VERSION_1301);
3605 				p++;
3606 				nr++;
3607 			}
3608 			PF_HASHROW_UNLOCK(ih);
3609 			error = copyout(pstore, out,
3610 			    sizeof(struct pfsync_state_1301) * count);
3611 			if (error)
3612 				break;
3613 			out = ps->ps_states + nr;
3614 		}
3615 DIOCGETSTATES_full:
3616 		ps->ps_len = sizeof(struct pfsync_state_1301) * nr;
3617 		free(pstore, M_TEMP);
3618 
3619 		break;
3620 	}
3621 
3622 	case DIOCGETSTATESV2: {
3623 		struct pfioc_states_v2	*ps = (struct pfioc_states_v2 *)addr;
3624 		struct pf_kstate	*s;
3625 		struct pf_state_export	*pstore, *p;
3626 		int i, nr;
3627 		size_t slice_count = 16, count;
3628 		void *out;
3629 
3630 		if (ps->ps_req_version > PF_STATE_VERSION) {
3631 			error = ENOTSUP;
3632 			break;
3633 		}
3634 
3635 		if (ps->ps_len <= 0) {
3636 			nr = uma_zone_get_cur(V_pf_state_z);
3637 			ps->ps_len = sizeof(struct pf_state_export) * nr;
3638 			break;
3639 		}
3640 
3641 		out = ps->ps_states;
3642 		pstore = mallocarray(slice_count,
3643 		    sizeof(struct pf_state_export), M_TEMP, M_WAITOK | M_ZERO);
3644 		nr = 0;
3645 
3646 		for (i = 0; i <= pf_hashmask; i++) {
3647 			struct pf_idhash *ih = &V_pf_idhash[i];
3648 
3649 DIOCGETSTATESV2_retry:
3650 			p = pstore;
3651 
3652 			if (LIST_EMPTY(&ih->states))
3653 				continue;
3654 
3655 			PF_HASHROW_LOCK(ih);
3656 			count = 0;
3657 			LIST_FOREACH(s, &ih->states, entry) {
3658 				if (s->timeout == PFTM_UNLINKED)
3659 					continue;
3660 				count++;
3661 			}
3662 
3663 			if (count > slice_count) {
3664 				PF_HASHROW_UNLOCK(ih);
3665 				free(pstore, M_TEMP);
3666 				slice_count = count * 2;
3667 				pstore = mallocarray(slice_count,
3668 				    sizeof(struct pf_state_export), M_TEMP,
3669 				    M_WAITOK | M_ZERO);
3670 				goto DIOCGETSTATESV2_retry;
3671 			}
3672 
3673 			if ((nr+count) * sizeof(*p) > ps->ps_len) {
3674 				PF_HASHROW_UNLOCK(ih);
3675 				goto DIOCGETSTATESV2_full;
3676 			}
3677 
3678 			LIST_FOREACH(s, &ih->states, entry) {
3679 				if (s->timeout == PFTM_UNLINKED)
3680 					continue;
3681 
3682 				pf_state_export(p, s);
3683 				p++;
3684 				nr++;
3685 			}
3686 			PF_HASHROW_UNLOCK(ih);
3687 			error = copyout(pstore, out,
3688 			    sizeof(struct pf_state_export) * count);
3689 			if (error)
3690 				break;
3691 			out = ps->ps_states + nr;
3692 		}
3693 DIOCGETSTATESV2_full:
3694 		ps->ps_len = nr * sizeof(struct pf_state_export);
3695 		free(pstore, M_TEMP);
3696 
3697 		break;
3698 	}
3699 
3700 	case DIOCGETSTATUSNV: {
3701 		error = pf_getstatus((struct pfioc_nv *)addr);
3702 		break;
3703 	}
3704 
3705 	case DIOCSETSTATUSIF: {
3706 		struct pfioc_if	*pi = (struct pfioc_if *)addr;
3707 
3708 		if (pi->ifname[0] == 0) {
3709 			bzero(V_pf_status.ifname, IFNAMSIZ);
3710 			break;
3711 		}
3712 		PF_RULES_WLOCK();
3713 		error = pf_user_strcpy(V_pf_status.ifname, pi->ifname, IFNAMSIZ);
3714 		PF_RULES_WUNLOCK();
3715 		break;
3716 	}
3717 
3718 	case DIOCCLRSTATUS: {
3719 		PF_RULES_WLOCK();
3720 		for (int i = 0; i < PFRES_MAX; i++)
3721 			counter_u64_zero(V_pf_status.counters[i]);
3722 		for (int i = 0; i < FCNT_MAX; i++)
3723 			pf_counter_u64_zero(&V_pf_status.fcounters[i]);
3724 		for (int i = 0; i < SCNT_MAX; i++)
3725 			counter_u64_zero(V_pf_status.scounters[i]);
3726 		for (int i = 0; i < KLCNT_MAX; i++)
3727 			counter_u64_zero(V_pf_status.lcounters[i]);
3728 		V_pf_status.since = time_second;
3729 		if (*V_pf_status.ifname)
3730 			pfi_update_status(V_pf_status.ifname, NULL);
3731 		PF_RULES_WUNLOCK();
3732 		break;
3733 	}
3734 
3735 	case DIOCNATLOOK: {
3736 		struct pfioc_natlook	*pnl = (struct pfioc_natlook *)addr;
3737 		struct pf_state_key	*sk;
3738 		struct pf_kstate	*state;
3739 		struct pf_state_key_cmp	 key;
3740 		int			 m = 0, direction = pnl->direction;
3741 		int			 sidx, didx;
3742 
3743 		/* NATLOOK src and dst are reversed, so reverse sidx/didx */
3744 		sidx = (direction == PF_IN) ? 1 : 0;
3745 		didx = (direction == PF_IN) ? 0 : 1;
3746 
3747 		if (!pnl->proto ||
3748 		    PF_AZERO(&pnl->saddr, pnl->af) ||
3749 		    PF_AZERO(&pnl->daddr, pnl->af) ||
3750 		    ((pnl->proto == IPPROTO_TCP ||
3751 		    pnl->proto == IPPROTO_UDP) &&
3752 		    (!pnl->dport || !pnl->sport)))
3753 			error = EINVAL;
3754 		else {
3755 			bzero(&key, sizeof(key));
3756 			key.af = pnl->af;
3757 			key.proto = pnl->proto;
3758 			PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af);
3759 			key.port[sidx] = pnl->sport;
3760 			PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af);
3761 			key.port[didx] = pnl->dport;
3762 
3763 			state = pf_find_state_all(&key, direction, &m);
3764 			if (state == NULL) {
3765 				error = ENOENT;
3766 			} else {
3767 				if (m > 1) {
3768 					PF_STATE_UNLOCK(state);
3769 					error = E2BIG;	/* more than one state */
3770 				} else {
3771 					sk = state->key[sidx];
3772 					PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af);
3773 					pnl->rsport = sk->port[sidx];
3774 					PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af);
3775 					pnl->rdport = sk->port[didx];
3776 					PF_STATE_UNLOCK(state);
3777 				}
3778 			}
3779 		}
3780 		break;
3781 	}
3782 
3783 	case DIOCSETTIMEOUT: {
3784 		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;
3785 		int		 old;
3786 
3787 		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
3788 		    pt->seconds < 0) {
3789 			error = EINVAL;
3790 			break;
3791 		}
3792 		PF_RULES_WLOCK();
3793 		old = V_pf_default_rule.timeout[pt->timeout];
3794 		if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
3795 			pt->seconds = 1;
3796 		V_pf_default_rule.timeout[pt->timeout] = pt->seconds;
3797 		if (pt->timeout == PFTM_INTERVAL && pt->seconds < old)
3798 			wakeup(pf_purge_thread);
3799 		pt->seconds = old;
3800 		PF_RULES_WUNLOCK();
3801 		break;
3802 	}
3803 
3804 	case DIOCGETTIMEOUT: {
3805 		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;
3806 
3807 		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
3808 			error = EINVAL;
3809 			break;
3810 		}
3811 		PF_RULES_RLOCK();
3812 		pt->seconds = V_pf_default_rule.timeout[pt->timeout];
3813 		PF_RULES_RUNLOCK();
3814 		break;
3815 	}
3816 
3817 	case DIOCGETLIMIT: {
3818 		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;
3819 
3820 		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
3821 			error = EINVAL;
3822 			break;
3823 		}
3824 		PF_RULES_RLOCK();
3825 		pl->limit = V_pf_limits[pl->index].limit;
3826 		PF_RULES_RUNLOCK();
3827 		break;
3828 	}
3829 
3830 	case DIOCSETLIMIT: {
3831 		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;
3832 		int			 old_limit;
3833 
3834 		PF_RULES_WLOCK();
3835 		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
3836 		    V_pf_limits[pl->index].zone == NULL) {
3837 			PF_RULES_WUNLOCK();
3838 			error = EINVAL;
3839 			break;
3840 		}
3841 		uma_zone_set_max(V_pf_limits[pl->index].zone, pl->limit);
3842 		old_limit = V_pf_limits[pl->index].limit;
3843 		V_pf_limits[pl->index].limit = pl->limit;
3844 		pl->limit = old_limit;
3845 		PF_RULES_WUNLOCK();
3846 		break;
3847 	}
3848 
3849 	case DIOCSETDEBUG: {
3850 		u_int32_t	*level = (u_int32_t *)addr;
3851 
3852 		PF_RULES_WLOCK();
3853 		V_pf_status.debug = *level;
3854 		PF_RULES_WUNLOCK();
3855 		break;
3856 	}
3857 
3858 	case DIOCCLRRULECTRS: {
3859 		/* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
3860 		struct pf_kruleset	*ruleset = &pf_main_ruleset;
3861 		struct pf_krule		*rule;
3862 
3863 		PF_RULES_WLOCK();
3864 		TAILQ_FOREACH(rule,
3865 		    ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
3866 			pf_counter_u64_zero(&rule->evaluations);
3867 			for (int i = 0; i < 2; i++) {
3868 				pf_counter_u64_zero(&rule->packets[i]);
3869 				pf_counter_u64_zero(&rule->bytes[i]);
3870 			}
3871 		}
3872 		PF_RULES_WUNLOCK();
3873 		break;
3874 	}
3875 
3876 	case DIOCGIFSPEEDV0:
3877 	case DIOCGIFSPEEDV1: {
3878 		struct pf_ifspeed_v1	*psp = (struct pf_ifspeed_v1 *)addr;
3879 		struct pf_ifspeed_v1	ps;
3880 		struct ifnet		*ifp;
3881 
3882 		if (psp->ifname[0] == '\0') {
3883 			error = EINVAL;
3884 			break;
3885 		}
3886 
3887 		error = pf_user_strcpy(ps.ifname, psp->ifname, IFNAMSIZ);
3888 		if (error != 0)
3889 			break;
3890 		ifp = ifunit(ps.ifname);
3891 		if (ifp != NULL) {
3892 			psp->baudrate32 =
3893 			    (u_int32_t)uqmin(ifp->if_baudrate, UINT_MAX);
3894 			if (cmd == DIOCGIFSPEEDV1)
3895 				psp->baudrate = ifp->if_baudrate;
3896 		} else {
3897 			error = EINVAL;
3898 		}
3899 		break;
3900 	}
3901 
3902 #ifdef ALTQ
3903 	case DIOCSTARTALTQ: {
3904 		struct pf_altq		*altq;
3905 
3906 		PF_RULES_WLOCK();
3907 		/* enable all altq interfaces on active list */
3908 		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
3909 			if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
3910 				error = pf_enable_altq(altq);
3911 				if (error != 0)
3912 					break;
3913 			}
3914 		}
3915 		if (error == 0)
3916 			V_pf_altq_running = 1;
3917 		PF_RULES_WUNLOCK();
3918 		DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
3919 		break;
3920 	}
3921 
3922 	case DIOCSTOPALTQ: {
3923 		struct pf_altq		*altq;
3924 
3925 		PF_RULES_WLOCK();
3926 		/* disable all altq interfaces on active list */
3927 		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
3928 			if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
3929 				error = pf_disable_altq(altq);
3930 				if (error != 0)
3931 					break;
3932 			}
3933 		}
3934 		if (error == 0)
3935 			V_pf_altq_running = 0;
3936 		PF_RULES_WUNLOCK();
3937 		DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
3938 		break;
3939 	}
3940 
3941 	case DIOCADDALTQV0:
3942 	case DIOCADDALTQV1: {
3943 		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
3944 		struct pf_altq		*altq, *a;
3945 		struct ifnet		*ifp;
3946 
3947 		altq = malloc(sizeof(*altq), M_PFALTQ, M_WAITOK | M_ZERO);
3948 		error = pf_import_kaltq(pa, altq, IOCPARM_LEN(cmd));
3949 		if (error)
3950 			break;
3951 		altq->local_flags = 0;
3952 
3953 		PF_RULES_WLOCK();
3954 		if (pa->ticket != V_ticket_altqs_inactive) {
3955 			PF_RULES_WUNLOCK();
3956 			free(altq, M_PFALTQ);
3957 			error = EBUSY;
3958 			break;
3959 		}
3960 
3961 		/*
3962 		 * if this is for a queue, find the discipline and
3963 		 * copy the necessary fields
3964 		 */
3965 		if (altq->qname[0] != 0) {
3966 			if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
3967 				PF_RULES_WUNLOCK();
3968 				error = EBUSY;
3969 				free(altq, M_PFALTQ);
3970 				break;
3971 			}
3972 			altq->altq_disc = NULL;
3973 			TAILQ_FOREACH(a, V_pf_altq_ifs_inactive, entries) {
3974 				if (strncmp(a->ifname, altq->ifname,
3975 				    IFNAMSIZ) == 0) {
3976 					altq->altq_disc = a->altq_disc;
3977 					break;
3978 				}
3979 			}
3980 		}
3981 
3982 		if ((ifp = ifunit(altq->ifname)) == NULL)
3983 			altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
3984 		else
3985 			error = altq_add(ifp, altq);
3986 
3987 		if (error) {
3988 			PF_RULES_WUNLOCK();
3989 			free(altq, M_PFALTQ);
3990 			break;
3991 		}
3992 
3993 		if (altq->qname[0] != 0)
3994 			TAILQ_INSERT_TAIL(V_pf_altqs_inactive, altq, entries);
3995 		else
3996 			TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, altq, entries);
3997 		/* version error check done on import above */
3998 		pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
3999 		PF_RULES_WUNLOCK();
4000 		break;
4001 	}
4002 
4003 	case DIOCGETALTQSV0:
4004 	case DIOCGETALTQSV1: {
4005 		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
4006 		struct pf_altq		*altq;
4007 
4008 		PF_RULES_RLOCK();
4009 		pa->nr = 0;
4010 		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries)
4011 			pa->nr++;
4012 		TAILQ_FOREACH(altq, V_pf_altqs_active, entries)
4013 			pa->nr++;
4014 		pa->ticket = V_ticket_altqs_active;
4015 		PF_RULES_RUNLOCK();
4016 		break;
4017 	}
4018 
4019 	case DIOCGETALTQV0:
4020 	case DIOCGETALTQV1: {
4021 		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
4022 		struct pf_altq		*altq;
4023 
4024 		PF_RULES_RLOCK();
4025 		if (pa->ticket != V_ticket_altqs_active) {
4026 			PF_RULES_RUNLOCK();
4027 			error = EBUSY;
4028 			break;
4029 		}
4030 		altq = pf_altq_get_nth_active(pa->nr);
4031 		if (altq == NULL) {
4032 			PF_RULES_RUNLOCK();
4033 			error = EBUSY;
4034 			break;
4035 		}
4036 		pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
4037 		PF_RULES_RUNLOCK();
4038 		break;
4039 	}
4040 
4041 	case DIOCCHANGEALTQV0:
4042 	case DIOCCHANGEALTQV1:
4043 		/* CHANGEALTQ not supported yet! */
4044 		error = ENODEV;
4045 		break;
4046 
4047 	case DIOCGETQSTATSV0:
4048 	case DIOCGETQSTATSV1: {
4049 		struct pfioc_qstats_v1	*pq = (struct pfioc_qstats_v1 *)addr;
4050 		struct pf_altq		*altq;
4051 		int			 nbytes;
4052 		u_int32_t		 version;
4053 
4054 		PF_RULES_RLOCK();
4055 		if (pq->ticket != V_ticket_altqs_active) {
4056 			PF_RULES_RUNLOCK();
4057 			error = EBUSY;
4058 			break;
4059 		}
4060 		nbytes = pq->nbytes;
4061 		altq = pf_altq_get_nth_active(pq->nr);
4062 		if (altq == NULL) {
4063 			PF_RULES_RUNLOCK();
4064 			error = EBUSY;
4065 			break;
4066 		}
4067 
4068 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) != 0) {
4069 			PF_RULES_RUNLOCK();
4070 			error = ENXIO;
4071 			break;
4072 		}
4073 		PF_RULES_RUNLOCK();
4074 		if (cmd == DIOCGETQSTATSV0)
4075 			version = 0;  /* DIOCGETQSTATSV0 means stats struct v0 */
4076 		else
4077 			version = pq->version;
4078 		error = altq_getqstats(altq, pq->buf, &nbytes, version);
4079 		if (error == 0) {
4080 			pq->scheduler = altq->scheduler;
4081 			pq->nbytes = nbytes;
4082 		}
4083 		break;
4084 	}
4085 #endif /* ALTQ */
4086 
4087 	case DIOCBEGINADDRS: {
4088 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4089 
4090 		PF_RULES_WLOCK();
4091 		pf_empty_kpool(&V_pf_pabuf);
4092 		pp->ticket = ++V_ticket_pabuf;
4093 		PF_RULES_WUNLOCK();
4094 		break;
4095 	}
4096 
4097 	case DIOCADDADDR: {
4098 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4099 		struct pf_kpooladdr	*pa;
4100 		struct pfi_kkif		*kif = NULL;
4101 
4102 #ifndef INET
4103 		if (pp->af == AF_INET) {
4104 			error = EAFNOSUPPORT;
4105 			break;
4106 		}
4107 #endif /* INET */
4108 #ifndef INET6
4109 		if (pp->af == AF_INET6) {
4110 			error = EAFNOSUPPORT;
4111 			break;
4112 		}
4113 #endif /* INET6 */
4114 		if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
4115 		    pp->addr.addr.type != PF_ADDR_DYNIFTL &&
4116 		    pp->addr.addr.type != PF_ADDR_TABLE) {
4117 			error = EINVAL;
4118 			break;
4119 		}
4120 		if (pp->addr.addr.p.dyn != NULL) {
4121 			error = EINVAL;
4122 			break;
4123 		}
4124 		pa = malloc(sizeof(*pa), M_PFRULE, M_WAITOK);
4125 		error = pf_pooladdr_to_kpooladdr(&pp->addr, pa);
4126 		if (error != 0)
4127 			break;
4128 		if (pa->ifname[0])
4129 			kif = pf_kkif_create(M_WAITOK);
4130 		PF_RULES_WLOCK();
4131 		if (pp->ticket != V_ticket_pabuf) {
4132 			PF_RULES_WUNLOCK();
4133 			if (pa->ifname[0])
4134 				pf_kkif_free(kif);
4135 			free(pa, M_PFRULE);
4136 			error = EBUSY;
4137 			break;
4138 		}
4139 		if (pa->ifname[0]) {
4140 			pa->kif = pfi_kkif_attach(kif, pa->ifname);
4141 			kif = NULL;
4142 			pfi_kkif_ref(pa->kif);
4143 		} else
4144 			pa->kif = NULL;
4145 		if (pa->addr.type == PF_ADDR_DYNIFTL && ((error =
4146 		    pfi_dynaddr_setup(&pa->addr, pp->af)) != 0)) {
4147 			if (pa->ifname[0])
4148 				pfi_kkif_unref(pa->kif);
4149 			PF_RULES_WUNLOCK();
4150 			free(pa, M_PFRULE);
4151 			break;
4152 		}
4153 		TAILQ_INSERT_TAIL(&V_pf_pabuf, pa, entries);
4154 		PF_RULES_WUNLOCK();
4155 		break;
4156 	}
4157 
4158 	case DIOCGETADDRS: {
4159 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4160 		struct pf_kpool		*pool;
4161 		struct pf_kpooladdr	*pa;
4162 
4163 		pp->anchor[sizeof(pp->anchor) - 1] = 0;
4164 		pp->nr = 0;
4165 
4166 		PF_RULES_RLOCK();
4167 		pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
4168 		    pp->r_num, 0, 1, 0);
4169 		if (pool == NULL) {
4170 			PF_RULES_RUNLOCK();
4171 			error = EBUSY;
4172 			break;
4173 		}
4174 		TAILQ_FOREACH(pa, &pool->list, entries)
4175 			pp->nr++;
4176 		PF_RULES_RUNLOCK();
4177 		break;
4178 	}
4179 
4180 	case DIOCGETADDR: {
4181 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4182 		struct pf_kpool		*pool;
4183 		struct pf_kpooladdr	*pa;
4184 		u_int32_t		 nr = 0;
4185 
4186 		pp->anchor[sizeof(pp->anchor) - 1] = 0;
4187 
4188 		PF_RULES_RLOCK();
4189 		pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
4190 		    pp->r_num, 0, 1, 1);
4191 		if (pool == NULL) {
4192 			PF_RULES_RUNLOCK();
4193 			error = EBUSY;
4194 			break;
4195 		}
4196 		pa = TAILQ_FIRST(&pool->list);
4197 		while ((pa != NULL) && (nr < pp->nr)) {
4198 			pa = TAILQ_NEXT(pa, entries);
4199 			nr++;
4200 		}
4201 		if (pa == NULL) {
4202 			PF_RULES_RUNLOCK();
4203 			error = EBUSY;
4204 			break;
4205 		}
4206 		pf_kpooladdr_to_pooladdr(pa, &pp->addr);
4207 		pf_addr_copyout(&pp->addr.addr);
4208 		PF_RULES_RUNLOCK();
4209 		break;
4210 	}
4211 
4212 	case DIOCCHANGEADDR: {
4213 		struct pfioc_pooladdr	*pca = (struct pfioc_pooladdr *)addr;
4214 		struct pf_kpool		*pool;
4215 		struct pf_kpooladdr	*oldpa = NULL, *newpa = NULL;
4216 		struct pf_kruleset	*ruleset;
4217 		struct pfi_kkif		*kif = NULL;
4218 
4219 		pca->anchor[sizeof(pca->anchor) - 1] = 0;
4220 
4221 		if (pca->action < PF_CHANGE_ADD_HEAD ||
4222 		    pca->action > PF_CHANGE_REMOVE) {
4223 			error = EINVAL;
4224 			break;
4225 		}
4226 		if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
4227 		    pca->addr.addr.type != PF_ADDR_DYNIFTL &&
4228 		    pca->addr.addr.type != PF_ADDR_TABLE) {
4229 			error = EINVAL;
4230 			break;
4231 		}
4232 		if (pca->addr.addr.p.dyn != NULL) {
4233 			error = EINVAL;
4234 			break;
4235 		}
4236 
4237 		if (pca->action != PF_CHANGE_REMOVE) {
4238 #ifndef INET
4239 			if (pca->af == AF_INET) {
4240 				error = EAFNOSUPPORT;
4241 				break;
4242 			}
4243 #endif /* INET */
4244 #ifndef INET6
4245 			if (pca->af == AF_INET6) {
4246 				error = EAFNOSUPPORT;
4247 				break;
4248 			}
4249 #endif /* INET6 */
4250 			newpa = malloc(sizeof(*newpa), M_PFRULE, M_WAITOK);
4251 			bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
4252 			if (newpa->ifname[0])
4253 				kif = pf_kkif_create(M_WAITOK);
4254 			newpa->kif = NULL;
4255 		}
4256 #define	ERROUT(x)	ERROUT_IOCTL(DIOCCHANGEADDR_error, x)
4257 		PF_RULES_WLOCK();
4258 		ruleset = pf_find_kruleset(pca->anchor);
4259 		if (ruleset == NULL)
4260 			ERROUT(EBUSY);
4261 
4262 		pool = pf_get_kpool(pca->anchor, pca->ticket, pca->r_action,
4263 		    pca->r_num, pca->r_last, 1, 1);
4264 		if (pool == NULL)
4265 			ERROUT(EBUSY);
4266 
4267 		if (pca->action != PF_CHANGE_REMOVE) {
4268 			if (newpa->ifname[0]) {
4269 				newpa->kif = pfi_kkif_attach(kif, newpa->ifname);
4270 				pfi_kkif_ref(newpa->kif);
4271 				kif = NULL;
4272 			}
4273 
4274 			switch (newpa->addr.type) {
4275 			case PF_ADDR_DYNIFTL:
4276 				error = pfi_dynaddr_setup(&newpa->addr,
4277 				    pca->af);
4278 				break;
4279 			case PF_ADDR_TABLE:
4280 				newpa->addr.p.tbl = pfr_attach_table(ruleset,
4281 				    newpa->addr.v.tblname);
4282 				if (newpa->addr.p.tbl == NULL)
4283 					error = ENOMEM;
4284 				break;
4285 			}
4286 			if (error)
4287 				goto DIOCCHANGEADDR_error;
4288 		}
4289 
4290 		switch (pca->action) {
4291 		case PF_CHANGE_ADD_HEAD:
4292 			oldpa = TAILQ_FIRST(&pool->list);
4293 			break;
4294 		case PF_CHANGE_ADD_TAIL:
4295 			oldpa = TAILQ_LAST(&pool->list, pf_kpalist);
4296 			break;
4297 		default:
4298 			oldpa = TAILQ_FIRST(&pool->list);
4299 			for (int i = 0; oldpa && i < pca->nr; i++)
4300 				oldpa = TAILQ_NEXT(oldpa, entries);
4301 
4302 			if (oldpa == NULL)
4303 				ERROUT(EINVAL);
4304 		}
4305 
4306 		if (pca->action == PF_CHANGE_REMOVE) {
4307 			TAILQ_REMOVE(&pool->list, oldpa, entries);
4308 			switch (oldpa->addr.type) {
4309 			case PF_ADDR_DYNIFTL:
4310 				pfi_dynaddr_remove(oldpa->addr.p.dyn);
4311 				break;
4312 			case PF_ADDR_TABLE:
4313 				pfr_detach_table(oldpa->addr.p.tbl);
4314 				break;
4315 			}
4316 			if (oldpa->kif)
4317 				pfi_kkif_unref(oldpa->kif);
4318 			free(oldpa, M_PFRULE);
4319 		} else {
4320 			if (oldpa == NULL)
4321 				TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
4322 			else if (pca->action == PF_CHANGE_ADD_HEAD ||
4323 			    pca->action == PF_CHANGE_ADD_BEFORE)
4324 				TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
4325 			else
4326 				TAILQ_INSERT_AFTER(&pool->list, oldpa,
4327 				    newpa, entries);
4328 		}
4329 
4330 		pool->cur = TAILQ_FIRST(&pool->list);
4331 		PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, pca->af);
4332 		PF_RULES_WUNLOCK();
4333 		break;
4334 
4335 #undef ERROUT
4336 DIOCCHANGEADDR_error:
4337 		if (newpa != NULL) {
4338 			if (newpa->kif)
4339 				pfi_kkif_unref(newpa->kif);
4340 			free(newpa, M_PFRULE);
4341 		}
4342 		PF_RULES_WUNLOCK();
4343 		pf_kkif_free(kif);
4344 		break;
4345 	}
4346 
4347 	case DIOCGETRULESETS: {
4348 		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
4349 		struct pf_kruleset	*ruleset;
4350 		struct pf_kanchor	*anchor;
4351 
4352 		pr->path[sizeof(pr->path) - 1] = 0;
4353 
4354 		PF_RULES_RLOCK();
4355 		if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
4356 			PF_RULES_RUNLOCK();
4357 			error = ENOENT;
4358 			break;
4359 		}
4360 		pr->nr = 0;
4361 		if (ruleset->anchor == NULL) {
4362 			/* XXX kludge for pf_main_ruleset */
4363 			RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
4364 				if (anchor->parent == NULL)
4365 					pr->nr++;
4366 		} else {
4367 			RB_FOREACH(anchor, pf_kanchor_node,
4368 			    &ruleset->anchor->children)
4369 				pr->nr++;
4370 		}
4371 		PF_RULES_RUNLOCK();
4372 		break;
4373 	}
4374 
4375 	case DIOCGETRULESET: {
4376 		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
4377 		struct pf_kruleset	*ruleset;
4378 		struct pf_kanchor	*anchor;
4379 		u_int32_t		 nr = 0;
4380 
4381 		pr->path[sizeof(pr->path) - 1] = 0;
4382 
4383 		PF_RULES_RLOCK();
4384 		if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
4385 			PF_RULES_RUNLOCK();
4386 			error = ENOENT;
4387 			break;
4388 		}
4389 		pr->name[0] = 0;
4390 		if (ruleset->anchor == NULL) {
4391 			/* XXX kludge for pf_main_ruleset */
4392 			RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
4393 				if (anchor->parent == NULL && nr++ == pr->nr) {
4394 					strlcpy(pr->name, anchor->name,
4395 					    sizeof(pr->name));
4396 					break;
4397 				}
4398 		} else {
4399 			RB_FOREACH(anchor, pf_kanchor_node,
4400 			    &ruleset->anchor->children)
4401 				if (nr++ == pr->nr) {
4402 					strlcpy(pr->name, anchor->name,
4403 					    sizeof(pr->name));
4404 					break;
4405 				}
4406 		}
4407 		if (!pr->name[0])
4408 			error = EBUSY;
4409 		PF_RULES_RUNLOCK();
4410 		break;
4411 	}
4412 
4413 	case DIOCRCLRTABLES: {
4414 		struct pfioc_table *io = (struct pfioc_table *)addr;
4415 
4416 		if (io->pfrio_esize != 0) {
4417 			error = ENODEV;
4418 			break;
4419 		}
4420 		PF_RULES_WLOCK();
4421 		error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
4422 		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
4423 		PF_RULES_WUNLOCK();
4424 		break;
4425 	}
4426 
4427 	case DIOCRADDTABLES: {
4428 		struct pfioc_table *io = (struct pfioc_table *)addr;
4429 		struct pfr_table *pfrts;
4430 		size_t totlen;
4431 
4432 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4433 			error = ENODEV;
4434 			break;
4435 		}
4436 
4437 		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4438 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4439 			error = ENOMEM;
4440 			break;
4441 		}
4442 
4443 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4444 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4445 		    M_TEMP, M_WAITOK);
4446 		error = copyin(io->pfrio_buffer, pfrts, totlen);
4447 		if (error) {
4448 			free(pfrts, M_TEMP);
4449 			break;
4450 		}
4451 		PF_RULES_WLOCK();
4452 		error = pfr_add_tables(pfrts, io->pfrio_size,
4453 		    &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4454 		PF_RULES_WUNLOCK();
4455 		free(pfrts, M_TEMP);
4456 		break;
4457 	}
4458 
4459 	case DIOCRDELTABLES: {
4460 		struct pfioc_table *io = (struct pfioc_table *)addr;
4461 		struct pfr_table *pfrts;
4462 		size_t totlen;
4463 
4464 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4465 			error = ENODEV;
4466 			break;
4467 		}
4468 
4469 		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4470 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4471 			error = ENOMEM;
4472 			break;
4473 		}
4474 
4475 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4476 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4477 		    M_TEMP, M_WAITOK);
4478 		error = copyin(io->pfrio_buffer, pfrts, totlen);
4479 		if (error) {
4480 			free(pfrts, M_TEMP);
4481 			break;
4482 		}
4483 		PF_RULES_WLOCK();
4484 		error = pfr_del_tables(pfrts, io->pfrio_size,
4485 		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4486 		PF_RULES_WUNLOCK();
4487 		free(pfrts, M_TEMP);
4488 		break;
4489 	}
4490 
4491 	case DIOCRGETTABLES: {
4492 		struct pfioc_table *io = (struct pfioc_table *)addr;
4493 		struct pfr_table *pfrts;
4494 		size_t totlen;
4495 		int n;
4496 
4497 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4498 			error = ENODEV;
4499 			break;
4500 		}
4501 		PF_RULES_RLOCK();
4502 		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4503 		if (n < 0) {
4504 			PF_RULES_RUNLOCK();
4505 			error = EINVAL;
4506 			break;
4507 		}
4508 		io->pfrio_size = min(io->pfrio_size, n);
4509 
4510 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4511 
4512 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4513 		    M_TEMP, M_NOWAIT | M_ZERO);
4514 		if (pfrts == NULL) {
4515 			error = ENOMEM;
4516 			PF_RULES_RUNLOCK();
4517 			break;
4518 		}
4519 		error = pfr_get_tables(&io->pfrio_table, pfrts,
4520 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4521 		PF_RULES_RUNLOCK();
4522 		if (error == 0)
4523 			error = copyout(pfrts, io->pfrio_buffer, totlen);
4524 		free(pfrts, M_TEMP);
4525 		break;
4526 	}
4527 
4528 	case DIOCRGETTSTATS: {
4529 		struct pfioc_table *io = (struct pfioc_table *)addr;
4530 		struct pfr_tstats *pfrtstats;
4531 		size_t totlen;
4532 		int n;
4533 
4534 		if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
4535 			error = ENODEV;
4536 			break;
4537 		}
4538 		PF_TABLE_STATS_LOCK();
4539 		PF_RULES_RLOCK();
4540 		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4541 		if (n < 0) {
4542 			PF_RULES_RUNLOCK();
4543 			PF_TABLE_STATS_UNLOCK();
4544 			error = EINVAL;
4545 			break;
4546 		}
4547 		io->pfrio_size = min(io->pfrio_size, n);
4548 
4549 		totlen = io->pfrio_size * sizeof(struct pfr_tstats);
4550 		pfrtstats = mallocarray(io->pfrio_size,
4551 		    sizeof(struct pfr_tstats), M_TEMP, M_NOWAIT | M_ZERO);
4552 		if (pfrtstats == NULL) {
4553 			error = ENOMEM;
4554 			PF_RULES_RUNLOCK();
4555 			PF_TABLE_STATS_UNLOCK();
4556 			break;
4557 		}
4558 		error = pfr_get_tstats(&io->pfrio_table, pfrtstats,
4559 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4560 		PF_RULES_RUNLOCK();
4561 		PF_TABLE_STATS_UNLOCK();
4562 		if (error == 0)
4563 			error = copyout(pfrtstats, io->pfrio_buffer, totlen);
4564 		free(pfrtstats, M_TEMP);
4565 		break;
4566 	}
4567 
4568 	case DIOCRCLRTSTATS: {
4569 		struct pfioc_table *io = (struct pfioc_table *)addr;
4570 		struct pfr_table *pfrts;
4571 		size_t totlen;
4572 
4573 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4574 			error = ENODEV;
4575 			break;
4576 		}
4577 
4578 		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4579 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4580 			/* We used to count tables and use the minimum required
4581 			 * size, so we didn't fail on overly large requests.
4582 			 * Keep doing so. */
4583 			io->pfrio_size = pf_ioctl_maxcount;
4584 			break;
4585 		}
4586 
4587 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4588 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4589 		    M_TEMP, M_WAITOK);
4590 		error = copyin(io->pfrio_buffer, pfrts, totlen);
4591 		if (error) {
4592 			free(pfrts, M_TEMP);
4593 			break;
4594 		}
4595 
4596 		PF_TABLE_STATS_LOCK();
4597 		PF_RULES_RLOCK();
4598 		error = pfr_clr_tstats(pfrts, io->pfrio_size,
4599 		    &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4600 		PF_RULES_RUNLOCK();
4601 		PF_TABLE_STATS_UNLOCK();
4602 		free(pfrts, M_TEMP);
4603 		break;
4604 	}
4605 
4606 	case DIOCRSETTFLAGS: {
4607 		struct pfioc_table *io = (struct pfioc_table *)addr;
4608 		struct pfr_table *pfrts;
4609 		size_t totlen;
4610 		int n;
4611 
4612 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4613 			error = ENODEV;
4614 			break;
4615 		}
4616 
4617 		PF_RULES_RLOCK();
4618 		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4619 		if (n < 0) {
4620 			PF_RULES_RUNLOCK();
4621 			error = EINVAL;
4622 			break;
4623 		}
4624 
4625 		io->pfrio_size = min(io->pfrio_size, n);
4626 		PF_RULES_RUNLOCK();
4627 
4628 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4629 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4630 		    M_TEMP, M_WAITOK);
4631 		error = copyin(io->pfrio_buffer, pfrts, totlen);
4632 		if (error) {
4633 			free(pfrts, M_TEMP);
4634 			break;
4635 		}
4636 		PF_RULES_WLOCK();
4637 		error = pfr_set_tflags(pfrts, io->pfrio_size,
4638 		    io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
4639 		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4640 		PF_RULES_WUNLOCK();
4641 		free(pfrts, M_TEMP);
4642 		break;
4643 	}
4644 
4645 	case DIOCRCLRADDRS: {
4646 		struct pfioc_table *io = (struct pfioc_table *)addr;
4647 
4648 		if (io->pfrio_esize != 0) {
4649 			error = ENODEV;
4650 			break;
4651 		}
4652 		PF_RULES_WLOCK();
4653 		error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
4654 		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
4655 		PF_RULES_WUNLOCK();
4656 		break;
4657 	}
4658 
4659 	case DIOCRADDADDRS: {
4660 		struct pfioc_table *io = (struct pfioc_table *)addr;
4661 		struct pfr_addr *pfras;
4662 		size_t totlen;
4663 
4664 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4665 			error = ENODEV;
4666 			break;
4667 		}
4668 		if (io->pfrio_size < 0 ||
4669 		    io->pfrio_size > pf_ioctl_maxcount ||
4670 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4671 			error = EINVAL;
4672 			break;
4673 		}
4674 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
4675 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4676 		    M_TEMP, M_WAITOK);
4677 		error = copyin(io->pfrio_buffer, pfras, totlen);
4678 		if (error) {
4679 			free(pfras, M_TEMP);
4680 			break;
4681 		}
4682 		PF_RULES_WLOCK();
4683 		error = pfr_add_addrs(&io->pfrio_table, pfras,
4684 		    io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
4685 		    PFR_FLAG_USERIOCTL);
4686 		PF_RULES_WUNLOCK();
4687 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
4688 			error = copyout(pfras, io->pfrio_buffer, totlen);
4689 		free(pfras, M_TEMP);
4690 		break;
4691 	}
4692 
4693 	case DIOCRDELADDRS: {
4694 		struct pfioc_table *io = (struct pfioc_table *)addr;
4695 		struct pfr_addr *pfras;
4696 		size_t totlen;
4697 
4698 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4699 			error = ENODEV;
4700 			break;
4701 		}
4702 		if (io->pfrio_size < 0 ||
4703 		    io->pfrio_size > pf_ioctl_maxcount ||
4704 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4705 			error = EINVAL;
4706 			break;
4707 		}
4708 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
4709 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4710 		    M_TEMP, M_WAITOK);
4711 		error = copyin(io->pfrio_buffer, pfras, totlen);
4712 		if (error) {
4713 			free(pfras, M_TEMP);
4714 			break;
4715 		}
4716 		PF_RULES_WLOCK();
4717 		error = pfr_del_addrs(&io->pfrio_table, pfras,
4718 		    io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
4719 		    PFR_FLAG_USERIOCTL);
4720 		PF_RULES_WUNLOCK();
4721 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
4722 			error = copyout(pfras, io->pfrio_buffer, totlen);
4723 		free(pfras, M_TEMP);
4724 		break;
4725 	}
4726 
4727 	case DIOCRSETADDRS: {
4728 		struct pfioc_table *io = (struct pfioc_table *)addr;
4729 		struct pfr_addr *pfras;
4730 		size_t totlen, count;
4731 
4732 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4733 			error = ENODEV;
4734 			break;
4735 		}
4736 		if (io->pfrio_size < 0 || io->pfrio_size2 < 0) {
4737 			error = EINVAL;
4738 			break;
4739 		}
4740 		count = max(io->pfrio_size, io->pfrio_size2);
4741 		if (count > pf_ioctl_maxcount ||
4742 		    WOULD_OVERFLOW(count, sizeof(struct pfr_addr))) {
4743 			error = EINVAL;
4744 			break;
4745 		}
4746 		totlen = count * sizeof(struct pfr_addr);
4747 		pfras = mallocarray(count, sizeof(struct pfr_addr), M_TEMP,
4748 		    M_WAITOK);
4749 		error = copyin(io->pfrio_buffer, pfras, totlen);
4750 		if (error) {
4751 			free(pfras, M_TEMP);
4752 			break;
4753 		}
4754 		PF_RULES_WLOCK();
4755 		error = pfr_set_addrs(&io->pfrio_table, pfras,
4756 		    io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
4757 		    &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
4758 		    PFR_FLAG_USERIOCTL, 0);
4759 		PF_RULES_WUNLOCK();
4760 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
4761 			error = copyout(pfras, io->pfrio_buffer, totlen);
4762 		free(pfras, M_TEMP);
4763 		break;
4764 	}
4765 
4766 	case DIOCRGETADDRS: {
4767 		struct pfioc_table *io = (struct pfioc_table *)addr;
4768 		struct pfr_addr *pfras;
4769 		size_t totlen;
4770 
4771 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4772 			error = ENODEV;
4773 			break;
4774 		}
4775 		if (io->pfrio_size < 0 ||
4776 		    io->pfrio_size > pf_ioctl_maxcount ||
4777 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4778 			error = EINVAL;
4779 			break;
4780 		}
4781 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
4782 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4783 		    M_TEMP, M_WAITOK | M_ZERO);
4784 		PF_RULES_RLOCK();
4785 		error = pfr_get_addrs(&io->pfrio_table, pfras,
4786 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4787 		PF_RULES_RUNLOCK();
4788 		if (error == 0)
4789 			error = copyout(pfras, io->pfrio_buffer, totlen);
4790 		free(pfras, M_TEMP);
4791 		break;
4792 	}
4793 
4794 	case DIOCRGETASTATS: {
4795 		struct pfioc_table *io = (struct pfioc_table *)addr;
4796 		struct pfr_astats *pfrastats;
4797 		size_t totlen;
4798 
4799 		if (io->pfrio_esize != sizeof(struct pfr_astats)) {
4800 			error = ENODEV;
4801 			break;
4802 		}
4803 		if (io->pfrio_size < 0 ||
4804 		    io->pfrio_size > pf_ioctl_maxcount ||
4805 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_astats))) {
4806 			error = EINVAL;
4807 			break;
4808 		}
4809 		totlen = io->pfrio_size * sizeof(struct pfr_astats);
4810 		pfrastats = mallocarray(io->pfrio_size,
4811 		    sizeof(struct pfr_astats), M_TEMP, M_WAITOK | M_ZERO);
4812 		PF_RULES_RLOCK();
4813 		error = pfr_get_astats(&io->pfrio_table, pfrastats,
4814 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4815 		PF_RULES_RUNLOCK();
4816 		if (error == 0)
4817 			error = copyout(pfrastats, io->pfrio_buffer, totlen);
4818 		free(pfrastats, M_TEMP);
4819 		break;
4820 	}
4821 
4822 	case DIOCRCLRASTATS: {
4823 		struct pfioc_table *io = (struct pfioc_table *)addr;
4824 		struct pfr_addr *pfras;
4825 		size_t totlen;
4826 
4827 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4828 			error = ENODEV;
4829 			break;
4830 		}
4831 		if (io->pfrio_size < 0 ||
4832 		    io->pfrio_size > pf_ioctl_maxcount ||
4833 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4834 			error = EINVAL;
4835 			break;
4836 		}
4837 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
4838 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4839 		    M_TEMP, M_WAITOK);
4840 		error = copyin(io->pfrio_buffer, pfras, totlen);
4841 		if (error) {
4842 			free(pfras, M_TEMP);
4843 			break;
4844 		}
4845 		PF_RULES_WLOCK();
4846 		error = pfr_clr_astats(&io->pfrio_table, pfras,
4847 		    io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
4848 		    PFR_FLAG_USERIOCTL);
4849 		PF_RULES_WUNLOCK();
4850 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
4851 			error = copyout(pfras, io->pfrio_buffer, totlen);
4852 		free(pfras, M_TEMP);
4853 		break;
4854 	}
4855 
4856 	case DIOCRTSTADDRS: {
4857 		struct pfioc_table *io = (struct pfioc_table *)addr;
4858 		struct pfr_addr *pfras;
4859 		size_t totlen;
4860 
4861 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4862 			error = ENODEV;
4863 			break;
4864 		}
4865 		if (io->pfrio_size < 0 ||
4866 		    io->pfrio_size > pf_ioctl_maxcount ||
4867 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4868 			error = EINVAL;
4869 			break;
4870 		}
4871 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
4872 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4873 		    M_TEMP, M_WAITOK);
4874 		error = copyin(io->pfrio_buffer, pfras, totlen);
4875 		if (error) {
4876 			free(pfras, M_TEMP);
4877 			break;
4878 		}
4879 		PF_RULES_RLOCK();
4880 		error = pfr_tst_addrs(&io->pfrio_table, pfras,
4881 		    io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
4882 		    PFR_FLAG_USERIOCTL);
4883 		PF_RULES_RUNLOCK();
4884 		if (error == 0)
4885 			error = copyout(pfras, io->pfrio_buffer, totlen);
4886 		free(pfras, M_TEMP);
4887 		break;
4888 	}
4889 
4890 	case DIOCRINADEFINE: {
4891 		struct pfioc_table *io = (struct pfioc_table *)addr;
4892 		struct pfr_addr *pfras;
4893 		size_t totlen;
4894 
4895 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4896 			error = ENODEV;
4897 			break;
4898 		}
4899 		if (io->pfrio_size < 0 ||
4900 		    io->pfrio_size > pf_ioctl_maxcount ||
4901 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4902 			error = EINVAL;
4903 			break;
4904 		}
4905 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
4906 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4907 		    M_TEMP, M_WAITOK);
4908 		error = copyin(io->pfrio_buffer, pfras, totlen);
4909 		if (error) {
4910 			free(pfras, M_TEMP);
4911 			break;
4912 		}
4913 		PF_RULES_WLOCK();
4914 		error = pfr_ina_define(&io->pfrio_table, pfras,
4915 		    io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
4916 		    io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4917 		PF_RULES_WUNLOCK();
4918 		free(pfras, M_TEMP);
4919 		break;
4920 	}
4921 
4922 	case DIOCOSFPADD: {
4923 		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
4924 		PF_RULES_WLOCK();
4925 		error = pf_osfp_add(io);
4926 		PF_RULES_WUNLOCK();
4927 		break;
4928 	}
4929 
4930 	case DIOCOSFPGET: {
4931 		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
4932 		PF_RULES_RLOCK();
4933 		error = pf_osfp_get(io);
4934 		PF_RULES_RUNLOCK();
4935 		break;
4936 	}
4937 
4938 	case DIOCXBEGIN: {
4939 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
4940 		struct pfioc_trans_e	*ioes, *ioe;
4941 		size_t			 totlen;
4942 		int			 i;
4943 
4944 		if (io->esize != sizeof(*ioe)) {
4945 			error = ENODEV;
4946 			break;
4947 		}
4948 		if (io->size < 0 ||
4949 		    io->size > pf_ioctl_maxcount ||
4950 		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
4951 			error = EINVAL;
4952 			break;
4953 		}
4954 		totlen = sizeof(struct pfioc_trans_e) * io->size;
4955 		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
4956 		    M_TEMP, M_WAITOK);
4957 		error = copyin(io->array, ioes, totlen);
4958 		if (error) {
4959 			free(ioes, M_TEMP);
4960 			break;
4961 		}
4962 		/* Ensure there's no more ethernet rules to clean up. */
4963 		NET_EPOCH_DRAIN_CALLBACKS();
4964 		PF_RULES_WLOCK();
4965 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
4966 			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
4967 			switch (ioe->rs_num) {
4968 			case PF_RULESET_ETH:
4969 				if ((error = pf_begin_eth(&ioe->ticket, ioe->anchor))) {
4970 					PF_RULES_WUNLOCK();
4971 					free(ioes, M_TEMP);
4972 					goto fail;
4973 				}
4974 				break;
4975 #ifdef ALTQ
4976 			case PF_RULESET_ALTQ:
4977 				if (ioe->anchor[0]) {
4978 					PF_RULES_WUNLOCK();
4979 					free(ioes, M_TEMP);
4980 					error = EINVAL;
4981 					goto fail;
4982 				}
4983 				if ((error = pf_begin_altq(&ioe->ticket))) {
4984 					PF_RULES_WUNLOCK();
4985 					free(ioes, M_TEMP);
4986 					goto fail;
4987 				}
4988 				break;
4989 #endif /* ALTQ */
4990 			case PF_RULESET_TABLE:
4991 			    {
4992 				struct pfr_table table;
4993 
4994 				bzero(&table, sizeof(table));
4995 				strlcpy(table.pfrt_anchor, ioe->anchor,
4996 				    sizeof(table.pfrt_anchor));
4997 				if ((error = pfr_ina_begin(&table,
4998 				    &ioe->ticket, NULL, 0))) {
4999 					PF_RULES_WUNLOCK();
5000 					free(ioes, M_TEMP);
5001 					goto fail;
5002 				}
5003 				break;
5004 			    }
5005 			default:
5006 				if ((error = pf_begin_rules(&ioe->ticket,
5007 				    ioe->rs_num, ioe->anchor))) {
5008 					PF_RULES_WUNLOCK();
5009 					free(ioes, M_TEMP);
5010 					goto fail;
5011 				}
5012 				break;
5013 			}
5014 		}
5015 		PF_RULES_WUNLOCK();
5016 		error = copyout(ioes, io->array, totlen);
5017 		free(ioes, M_TEMP);
5018 		break;
5019 	}
5020 
5021 	case DIOCXROLLBACK: {
5022 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
5023 		struct pfioc_trans_e	*ioe, *ioes;
5024 		size_t			 totlen;
5025 		int			 i;
5026 
5027 		if (io->esize != sizeof(*ioe)) {
5028 			error = ENODEV;
5029 			break;
5030 		}
5031 		if (io->size < 0 ||
5032 		    io->size > pf_ioctl_maxcount ||
5033 		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
5034 			error = EINVAL;
5035 			break;
5036 		}
5037 		totlen = sizeof(struct pfioc_trans_e) * io->size;
5038 		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
5039 		    M_TEMP, M_WAITOK);
5040 		error = copyin(io->array, ioes, totlen);
5041 		if (error) {
5042 			free(ioes, M_TEMP);
5043 			break;
5044 		}
5045 		PF_RULES_WLOCK();
5046 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5047 			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
5048 			switch (ioe->rs_num) {
5049 			case PF_RULESET_ETH:
5050 				if ((error = pf_rollback_eth(ioe->ticket,
5051 				    ioe->anchor))) {
5052 					PF_RULES_WUNLOCK();
5053 					free(ioes, M_TEMP);
5054 					goto fail; /* really bad */
5055 				}
5056 				break;
5057 #ifdef ALTQ
5058 			case PF_RULESET_ALTQ:
5059 				if (ioe->anchor[0]) {
5060 					PF_RULES_WUNLOCK();
5061 					free(ioes, M_TEMP);
5062 					error = EINVAL;
5063 					goto fail;
5064 				}
5065 				if ((error = pf_rollback_altq(ioe->ticket))) {
5066 					PF_RULES_WUNLOCK();
5067 					free(ioes, M_TEMP);
5068 					goto fail; /* really bad */
5069 				}
5070 				break;
5071 #endif /* ALTQ */
5072 			case PF_RULESET_TABLE:
5073 			    {
5074 				struct pfr_table table;
5075 
5076 				bzero(&table, sizeof(table));
5077 				strlcpy(table.pfrt_anchor, ioe->anchor,
5078 				    sizeof(table.pfrt_anchor));
5079 				if ((error = pfr_ina_rollback(&table,
5080 				    ioe->ticket, NULL, 0))) {
5081 					PF_RULES_WUNLOCK();
5082 					free(ioes, M_TEMP);
5083 					goto fail; /* really bad */
5084 				}
5085 				break;
5086 			    }
5087 			default:
5088 				if ((error = pf_rollback_rules(ioe->ticket,
5089 				    ioe->rs_num, ioe->anchor))) {
5090 					PF_RULES_WUNLOCK();
5091 					free(ioes, M_TEMP);
5092 					goto fail; /* really bad */
5093 				}
5094 				break;
5095 			}
5096 		}
5097 		PF_RULES_WUNLOCK();
5098 		free(ioes, M_TEMP);
5099 		break;
5100 	}
5101 
5102 	case DIOCXCOMMIT: {
5103 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
5104 		struct pfioc_trans_e	*ioe, *ioes;
5105 		struct pf_kruleset	*rs;
5106 		struct pf_keth_ruleset	*ers;
5107 		size_t			 totlen;
5108 		int			 i;
5109 
5110 		if (io->esize != sizeof(*ioe)) {
5111 			error = ENODEV;
5112 			break;
5113 		}
5114 
5115 		if (io->size < 0 ||
5116 		    io->size > pf_ioctl_maxcount ||
5117 		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
5118 			error = EINVAL;
5119 			break;
5120 		}
5121 
5122 		totlen = sizeof(struct pfioc_trans_e) * io->size;
5123 		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
5124 		    M_TEMP, M_WAITOK);
5125 		error = copyin(io->array, ioes, totlen);
5126 		if (error) {
5127 			free(ioes, M_TEMP);
5128 			break;
5129 		}
5130 		PF_RULES_WLOCK();
5131 		/* First makes sure everything will succeed. */
5132 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5133 			ioe->anchor[sizeof(ioe->anchor) - 1] = 0;
5134 			switch (ioe->rs_num) {
5135 			case PF_RULESET_ETH:
5136 				ers = pf_find_keth_ruleset(ioe->anchor);
5137 				if (ers == NULL || ioe->ticket == 0 ||
5138 				    ioe->ticket != ers->inactive.ticket) {
5139 					PF_RULES_WUNLOCK();
5140 					free(ioes, M_TEMP);
5141 					error = EINVAL;
5142 					goto fail;
5143 				}
5144 				break;
5145 #ifdef ALTQ
5146 			case PF_RULESET_ALTQ:
5147 				if (ioe->anchor[0]) {
5148 					PF_RULES_WUNLOCK();
5149 					free(ioes, M_TEMP);
5150 					error = EINVAL;
5151 					goto fail;
5152 				}
5153 				if (!V_altqs_inactive_open || ioe->ticket !=
5154 				    V_ticket_altqs_inactive) {
5155 					PF_RULES_WUNLOCK();
5156 					free(ioes, M_TEMP);
5157 					error = EBUSY;
5158 					goto fail;
5159 				}
5160 				break;
5161 #endif /* ALTQ */
5162 			case PF_RULESET_TABLE:
5163 				rs = pf_find_kruleset(ioe->anchor);
5164 				if (rs == NULL || !rs->topen || ioe->ticket !=
5165 				    rs->tticket) {
5166 					PF_RULES_WUNLOCK();
5167 					free(ioes, M_TEMP);
5168 					error = EBUSY;
5169 					goto fail;
5170 				}
5171 				break;
5172 			default:
5173 				if (ioe->rs_num < 0 || ioe->rs_num >=
5174 				    PF_RULESET_MAX) {
5175 					PF_RULES_WUNLOCK();
5176 					free(ioes, M_TEMP);
5177 					error = EINVAL;
5178 					goto fail;
5179 				}
5180 				rs = pf_find_kruleset(ioe->anchor);
5181 				if (rs == NULL ||
5182 				    !rs->rules[ioe->rs_num].inactive.open ||
5183 				    rs->rules[ioe->rs_num].inactive.ticket !=
5184 				    ioe->ticket) {
5185 					PF_RULES_WUNLOCK();
5186 					free(ioes, M_TEMP);
5187 					error = EBUSY;
5188 					goto fail;
5189 				}
5190 				break;
5191 			}
5192 		}
5193 		/* Now do the commit - no errors should happen here. */
5194 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5195 			switch (ioe->rs_num) {
5196 			case PF_RULESET_ETH:
5197 				if ((error = pf_commit_eth(ioe->ticket, ioe->anchor))) {
5198 					PF_RULES_WUNLOCK();
5199 					free(ioes, M_TEMP);
5200 					goto fail; /* really bad */
5201 				}
5202 				break;
5203 #ifdef ALTQ
5204 			case PF_RULESET_ALTQ:
5205 				if ((error = pf_commit_altq(ioe->ticket))) {
5206 					PF_RULES_WUNLOCK();
5207 					free(ioes, M_TEMP);
5208 					goto fail; /* really bad */
5209 				}
5210 				break;
5211 #endif /* ALTQ */
5212 			case PF_RULESET_TABLE:
5213 			    {
5214 				struct pfr_table table;
5215 
5216 				bzero(&table, sizeof(table));
5217 				(void)strlcpy(table.pfrt_anchor, ioe->anchor,
5218 				    sizeof(table.pfrt_anchor));
5219 				if ((error = pfr_ina_commit(&table,
5220 				    ioe->ticket, NULL, NULL, 0))) {
5221 					PF_RULES_WUNLOCK();
5222 					free(ioes, M_TEMP);
5223 					goto fail; /* really bad */
5224 				}
5225 				break;
5226 			    }
5227 			default:
5228 				if ((error = pf_commit_rules(ioe->ticket,
5229 				    ioe->rs_num, ioe->anchor))) {
5230 					PF_RULES_WUNLOCK();
5231 					free(ioes, M_TEMP);
5232 					goto fail; /* really bad */
5233 				}
5234 				break;
5235 			}
5236 		}
5237 		PF_RULES_WUNLOCK();
5238 
5239 		/* Only hook into EtherNet taffic if we've got rules for it. */
5240 		if (! TAILQ_EMPTY(V_pf_keth->active.rules))
5241 			hook_pf_eth();
5242 		else
5243 			dehook_pf_eth();
5244 
5245 		free(ioes, M_TEMP);
5246 		break;
5247 	}
5248 
5249 	case DIOCGETSRCNODES: {
5250 		struct pfioc_src_nodes	*psn = (struct pfioc_src_nodes *)addr;
5251 		struct pf_srchash	*sh;
5252 		struct pf_ksrc_node	*n;
5253 		struct pf_src_node	*p, *pstore;
5254 		uint32_t		 i, nr = 0;
5255 
5256 		for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
5257 				i++, sh++) {
5258 			PF_HASHROW_LOCK(sh);
5259 			LIST_FOREACH(n, &sh->nodes, entry)
5260 				nr++;
5261 			PF_HASHROW_UNLOCK(sh);
5262 		}
5263 
5264 		psn->psn_len = min(psn->psn_len,
5265 		    sizeof(struct pf_src_node) * nr);
5266 
5267 		if (psn->psn_len == 0) {
5268 			psn->psn_len = sizeof(struct pf_src_node) * nr;
5269 			break;
5270 		}
5271 
5272 		nr = 0;
5273 
5274 		p = pstore = malloc(psn->psn_len, M_TEMP, M_WAITOK | M_ZERO);
5275 		for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
5276 		    i++, sh++) {
5277 		    PF_HASHROW_LOCK(sh);
5278 		    LIST_FOREACH(n, &sh->nodes, entry) {
5279 
5280 			if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
5281 				break;
5282 
5283 			pf_src_node_copy(n, p);
5284 
5285 			p++;
5286 			nr++;
5287 		    }
5288 		    PF_HASHROW_UNLOCK(sh);
5289 		}
5290 		error = copyout(pstore, psn->psn_src_nodes,
5291 		    sizeof(struct pf_src_node) * nr);
5292 		if (error) {
5293 			free(pstore, M_TEMP);
5294 			break;
5295 		}
5296 		psn->psn_len = sizeof(struct pf_src_node) * nr;
5297 		free(pstore, M_TEMP);
5298 		break;
5299 	}
5300 
5301 	case DIOCCLRSRCNODES: {
5302 		pf_clear_srcnodes(NULL);
5303 		pf_purge_expired_src_nodes();
5304 		break;
5305 	}
5306 
5307 	case DIOCKILLSRCNODES:
5308 		pf_kill_srcnodes((struct pfioc_src_node_kill *)addr);
5309 		break;
5310 
5311 #ifdef COMPAT_FREEBSD13
5312 	case DIOCKEEPCOUNTERS_FREEBSD13:
5313 #endif
5314 	case DIOCKEEPCOUNTERS:
5315 		error = pf_keepcounters((struct pfioc_nv *)addr);
5316 		break;
5317 
5318 	case DIOCGETSYNCOOKIES:
5319 		error = pf_get_syncookies((struct pfioc_nv *)addr);
5320 		break;
5321 
5322 	case DIOCSETSYNCOOKIES:
5323 		error = pf_set_syncookies((struct pfioc_nv *)addr);
5324 		break;
5325 
5326 	case DIOCSETHOSTID: {
5327 		u_int32_t	*hostid = (u_int32_t *)addr;
5328 
5329 		PF_RULES_WLOCK();
5330 		if (*hostid == 0)
5331 			V_pf_status.hostid = arc4random();
5332 		else
5333 			V_pf_status.hostid = *hostid;
5334 		PF_RULES_WUNLOCK();
5335 		break;
5336 	}
5337 
5338 	case DIOCOSFPFLUSH:
5339 		PF_RULES_WLOCK();
5340 		pf_osfp_flush();
5341 		PF_RULES_WUNLOCK();
5342 		break;
5343 
5344 	case DIOCIGETIFACES: {
5345 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
5346 		struct pfi_kif *ifstore;
5347 		size_t bufsiz;
5348 
5349 		if (io->pfiio_esize != sizeof(struct pfi_kif)) {
5350 			error = ENODEV;
5351 			break;
5352 		}
5353 
5354 		if (io->pfiio_size < 0 ||
5355 		    io->pfiio_size > pf_ioctl_maxcount ||
5356 		    WOULD_OVERFLOW(io->pfiio_size, sizeof(struct pfi_kif))) {
5357 			error = EINVAL;
5358 			break;
5359 		}
5360 
5361 		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
5362 
5363 		bufsiz = io->pfiio_size * sizeof(struct pfi_kif);
5364 		ifstore = mallocarray(io->pfiio_size, sizeof(struct pfi_kif),
5365 		    M_TEMP, M_WAITOK | M_ZERO);
5366 
5367 		PF_RULES_RLOCK();
5368 		pfi_get_ifaces(io->pfiio_name, ifstore, &io->pfiio_size);
5369 		PF_RULES_RUNLOCK();
5370 		error = copyout(ifstore, io->pfiio_buffer, bufsiz);
5371 		free(ifstore, M_TEMP);
5372 		break;
5373 	}
5374 
5375 	case DIOCSETIFFLAG: {
5376 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
5377 
5378 		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
5379 
5380 		PF_RULES_WLOCK();
5381 		error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
5382 		PF_RULES_WUNLOCK();
5383 		break;
5384 	}
5385 
5386 	case DIOCCLRIFFLAG: {
5387 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
5388 
5389 		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
5390 
5391 		PF_RULES_WLOCK();
5392 		error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
5393 		PF_RULES_WUNLOCK();
5394 		break;
5395 	}
5396 
5397 	case DIOCSETREASS: {
5398 		u_int32_t	*reass = (u_int32_t *)addr;
5399 
5400 		V_pf_status.reass = *reass & (PF_REASS_ENABLED|PF_REASS_NODF);
5401 		/* Removal of DF flag without reassembly enabled is not a
5402 		 * valid combination. Disable reassembly in such case. */
5403 		if (!(V_pf_status.reass & PF_REASS_ENABLED))
5404 			V_pf_status.reass = 0;
5405 		break;
5406 	}
5407 
5408 	default:
5409 		error = ENODEV;
5410 		break;
5411 	}
5412 fail:
5413 	if (sx_xlocked(&V_pf_ioctl_lock))
5414 		sx_xunlock(&V_pf_ioctl_lock);
5415 	CURVNET_RESTORE();
5416 
5417 #undef ERROUT_IOCTL
5418 
5419 	return (error);
5420 }
5421 
5422 void
5423 pfsync_state_export(union pfsync_state_union *sp, struct pf_kstate *st, int msg_version)
5424 {
5425 	bzero(sp, sizeof(union pfsync_state_union));
5426 
5427 	/* copy from state key */
5428 	sp->pfs_1301.key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
5429 	sp->pfs_1301.key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
5430 	sp->pfs_1301.key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
5431 	sp->pfs_1301.key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
5432 	sp->pfs_1301.key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
5433 	sp->pfs_1301.key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
5434 	sp->pfs_1301.key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
5435 	sp->pfs_1301.key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
5436 	sp->pfs_1301.proto = st->key[PF_SK_WIRE]->proto;
5437 	sp->pfs_1301.af = st->key[PF_SK_WIRE]->af;
5438 
5439 	/* copy from state */
5440 	strlcpy(sp->pfs_1301.ifname, st->kif->pfik_name, sizeof(sp->pfs_1301.ifname));
5441 	bcopy(&st->rt_addr, &sp->pfs_1301.rt_addr, sizeof(sp->pfs_1301.rt_addr));
5442 	sp->pfs_1301.creation = htonl(time_uptime - st->creation);
5443 	sp->pfs_1301.expire = pf_state_expires(st);
5444 	if (sp->pfs_1301.expire <= time_uptime)
5445 		sp->pfs_1301.expire = htonl(0);
5446 	else
5447 		sp->pfs_1301.expire = htonl(sp->pfs_1301.expire - time_uptime);
5448 
5449 	sp->pfs_1301.direction = st->direction;
5450 	sp->pfs_1301.log = st->act.log;
5451 	sp->pfs_1301.timeout = st->timeout;
5452 
5453 	switch (msg_version) {
5454 		case PFSYNC_MSG_VERSION_1301:
5455 			sp->pfs_1301.state_flags = st->state_flags;
5456 			break;
5457 		case PFSYNC_MSG_VERSION_1400:
5458 			sp->pfs_1400.state_flags = htons(st->state_flags);
5459 			sp->pfs_1400.qid = htons(st->act.qid);
5460 			sp->pfs_1400.pqid = htons(st->act.pqid);
5461 			sp->pfs_1400.dnpipe = htons(st->act.dnpipe);
5462 			sp->pfs_1400.dnrpipe = htons(st->act.dnrpipe);
5463 			sp->pfs_1400.rtableid = htonl(st->act.rtableid);
5464 			sp->pfs_1400.min_ttl = st->act.min_ttl;
5465 			sp->pfs_1400.set_tos = st->act.set_tos;
5466 			sp->pfs_1400.max_mss = htons(st->act.max_mss);
5467 			sp->pfs_1400.set_prio[0] = st->act.set_prio[0];
5468 			sp->pfs_1400.set_prio[1] = st->act.set_prio[1];
5469 			sp->pfs_1400.rt = st->rt;
5470 			if (st->rt_kif)
5471 				strlcpy(sp->pfs_1400.rt_ifname,
5472 				    st->rt_kif->pfik_name,
5473 				    sizeof(sp->pfs_1400.rt_ifname));
5474 			break;
5475 		default:
5476 			panic("%s: Unsupported pfsync_msg_version %d",
5477 			    __func__, msg_version);
5478 	}
5479 
5480 	if (st->src_node)
5481 		sp->pfs_1301.sync_flags |= PFSYNC_FLAG_SRCNODE;
5482 	if (st->nat_src_node)
5483 		sp->pfs_1301.sync_flags |= PFSYNC_FLAG_NATSRCNODE;
5484 
5485 	sp->pfs_1301.id = st->id;
5486 	sp->pfs_1301.creatorid = st->creatorid;
5487 	pf_state_peer_hton(&st->src, &sp->pfs_1301.src);
5488 	pf_state_peer_hton(&st->dst, &sp->pfs_1301.dst);
5489 
5490 	if (st->rule.ptr == NULL)
5491 		sp->pfs_1301.rule = htonl(-1);
5492 	else
5493 		sp->pfs_1301.rule = htonl(st->rule.ptr->nr);
5494 	if (st->anchor.ptr == NULL)
5495 		sp->pfs_1301.anchor = htonl(-1);
5496 	else
5497 		sp->pfs_1301.anchor = htonl(st->anchor.ptr->nr);
5498 	if (st->nat_rule.ptr == NULL)
5499 		sp->pfs_1301.nat_rule = htonl(-1);
5500 	else
5501 		sp->pfs_1301.nat_rule = htonl(st->nat_rule.ptr->nr);
5502 
5503 	pf_state_counter_hton(st->packets[0], sp->pfs_1301.packets[0]);
5504 	pf_state_counter_hton(st->packets[1], sp->pfs_1301.packets[1]);
5505 	pf_state_counter_hton(st->bytes[0], sp->pfs_1301.bytes[0]);
5506 	pf_state_counter_hton(st->bytes[1], sp->pfs_1301.bytes[1]);
5507 }
5508 
5509 void
5510 pf_state_export(struct pf_state_export *sp, struct pf_kstate *st)
5511 {
5512 	bzero(sp, sizeof(*sp));
5513 
5514 	sp->version = PF_STATE_VERSION;
5515 
5516 	/* copy from state key */
5517 	sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
5518 	sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
5519 	sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
5520 	sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
5521 	sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
5522 	sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
5523 	sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
5524 	sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
5525 	sp->proto = st->key[PF_SK_WIRE]->proto;
5526 	sp->af = st->key[PF_SK_WIRE]->af;
5527 
5528 	/* copy from state */
5529 	strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
5530 	strlcpy(sp->orig_ifname, st->orig_kif->pfik_name,
5531 	    sizeof(sp->orig_ifname));
5532 	bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr));
5533 	sp->creation = htonl(time_uptime - st->creation);
5534 	sp->expire = pf_state_expires(st);
5535 	if (sp->expire <= time_uptime)
5536 		sp->expire = htonl(0);
5537 	else
5538 		sp->expire = htonl(sp->expire - time_uptime);
5539 
5540 	sp->direction = st->direction;
5541 	sp->log = st->act.log;
5542 	sp->timeout = st->timeout;
5543 	/* 8 bits for the old libpfctl, 16 bits for the new libpfctl */
5544 	sp->state_flags_compat = st->state_flags;
5545 	sp->state_flags = htons(st->state_flags);
5546 	if (st->src_node)
5547 		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
5548 	if (st->nat_src_node)
5549 		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;
5550 
5551 	sp->id = st->id;
5552 	sp->creatorid = st->creatorid;
5553 	pf_state_peer_hton(&st->src, &sp->src);
5554 	pf_state_peer_hton(&st->dst, &sp->dst);
5555 
5556 	if (st->rule.ptr == NULL)
5557 		sp->rule = htonl(-1);
5558 	else
5559 		sp->rule = htonl(st->rule.ptr->nr);
5560 	if (st->anchor.ptr == NULL)
5561 		sp->anchor = htonl(-1);
5562 	else
5563 		sp->anchor = htonl(st->anchor.ptr->nr);
5564 	if (st->nat_rule.ptr == NULL)
5565 		sp->nat_rule = htonl(-1);
5566 	else
5567 		sp->nat_rule = htonl(st->nat_rule.ptr->nr);
5568 
5569 	sp->packets[0] = st->packets[0];
5570 	sp->packets[1] = st->packets[1];
5571 	sp->bytes[0] = st->bytes[0];
5572 	sp->bytes[1] = st->bytes[1];
5573 
5574 	sp->qid = htons(st->act.qid);
5575 	sp->pqid = htons(st->act.pqid);
5576 	sp->dnpipe = htons(st->act.dnpipe);
5577 	sp->dnrpipe = htons(st->act.dnrpipe);
5578 	sp->rtableid = htonl(st->act.rtableid);
5579 	sp->min_ttl = st->act.min_ttl;
5580 	sp->set_tos = st->act.set_tos;
5581 	sp->max_mss = htons(st->act.max_mss);
5582 	sp->rt = st->rt;
5583 	if (st->rt_kif)
5584 		strlcpy(sp->rt_ifname, st->rt_kif->pfik_name,
5585 		    sizeof(sp->rt_ifname));
5586 	sp->set_prio[0] = st->act.set_prio[0];
5587 	sp->set_prio[1] = st->act.set_prio[1];
5588 
5589 }
5590 
5591 static void
5592 pf_tbladdr_copyout(struct pf_addr_wrap *aw)
5593 {
5594 	struct pfr_ktable *kt;
5595 
5596 	KASSERT(aw->type == PF_ADDR_TABLE, ("%s: type %u", __func__, aw->type));
5597 
5598 	kt = aw->p.tbl;
5599 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
5600 		kt = kt->pfrkt_root;
5601 	aw->p.tbl = NULL;
5602 	aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
5603 		kt->pfrkt_cnt : -1;
5604 }
5605 
5606 static int
5607 pf_add_status_counters(nvlist_t *nvl, const char *name, counter_u64_t *counters,
5608     size_t number, char **names)
5609 {
5610 	nvlist_t        *nvc;
5611 
5612 	nvc = nvlist_create(0);
5613 	if (nvc == NULL)
5614 		return (ENOMEM);
5615 
5616 	for (int i = 0; i < number; i++) {
5617 		nvlist_append_number_array(nvc, "counters",
5618 		    counter_u64_fetch(counters[i]));
5619 		nvlist_append_string_array(nvc, "names",
5620 		    names[i]);
5621 		nvlist_append_number_array(nvc, "ids",
5622 		    i);
5623 	}
5624 	nvlist_add_nvlist(nvl, name, nvc);
5625 	nvlist_destroy(nvc);
5626 
5627 	return (0);
5628 }
5629 
5630 static int
5631 pf_getstatus(struct pfioc_nv *nv)
5632 {
5633 	nvlist_t        *nvl = NULL, *nvc = NULL;
5634 	void            *nvlpacked = NULL;
5635 	int              error;
5636 	struct pf_status s;
5637 	char *pf_reasons[PFRES_MAX+1] = PFRES_NAMES;
5638 	char *pf_lcounter[KLCNT_MAX+1] = KLCNT_NAMES;
5639 	char *pf_fcounter[FCNT_MAX+1] = FCNT_NAMES;
5640 	PF_RULES_RLOCK_TRACKER;
5641 
5642 #define ERROUT(x)      ERROUT_FUNCTION(errout, x)
5643 
5644 	PF_RULES_RLOCK();
5645 
5646 	nvl = nvlist_create(0);
5647 	if (nvl == NULL)
5648 		ERROUT(ENOMEM);
5649 
5650 	nvlist_add_bool(nvl, "running", V_pf_status.running);
5651 	nvlist_add_number(nvl, "since", V_pf_status.since);
5652 	nvlist_add_number(nvl, "debug", V_pf_status.debug);
5653 	nvlist_add_number(nvl, "hostid", V_pf_status.hostid);
5654 	nvlist_add_number(nvl, "states", V_pf_status.states);
5655 	nvlist_add_number(nvl, "src_nodes", V_pf_status.src_nodes);
5656 	nvlist_add_number(nvl, "reass", V_pf_status.reass);
5657 	nvlist_add_bool(nvl, "syncookies_active",
5658 	    V_pf_status.syncookies_active);
5659 
5660 	/* counters */
5661 	error = pf_add_status_counters(nvl, "counters", V_pf_status.counters,
5662 	    PFRES_MAX, pf_reasons);
5663 	if (error != 0)
5664 		ERROUT(error);
5665 
5666 	/* lcounters */
5667 	error = pf_add_status_counters(nvl, "lcounters", V_pf_status.lcounters,
5668 	    KLCNT_MAX, pf_lcounter);
5669 	if (error != 0)
5670 		ERROUT(error);
5671 
5672 	/* fcounters */
5673 	nvc = nvlist_create(0);
5674 	if (nvc == NULL)
5675 		ERROUT(ENOMEM);
5676 
5677 	for (int i = 0; i < FCNT_MAX; i++) {
5678 		nvlist_append_number_array(nvc, "counters",
5679 		    pf_counter_u64_fetch(&V_pf_status.fcounters[i]));
5680 		nvlist_append_string_array(nvc, "names",
5681 		    pf_fcounter[i]);
5682 		nvlist_append_number_array(nvc, "ids",
5683 		    i);
5684 	}
5685 	nvlist_add_nvlist(nvl, "fcounters", nvc);
5686 	nvlist_destroy(nvc);
5687 	nvc = NULL;
5688 
5689 	/* scounters */
5690 	error = pf_add_status_counters(nvl, "scounters", V_pf_status.scounters,
5691 	    SCNT_MAX, pf_fcounter);
5692 	if (error != 0)
5693 		ERROUT(error);
5694 
5695 	nvlist_add_string(nvl, "ifname", V_pf_status.ifname);
5696 	nvlist_add_binary(nvl, "chksum", V_pf_status.pf_chksum,
5697 	    PF_MD5_DIGEST_LENGTH);
5698 
5699 	pfi_update_status(V_pf_status.ifname, &s);
5700 
5701 	/* pcounters / bcounters */
5702 	for (int i = 0; i < 2; i++) {
5703 		for (int j = 0; j < 2; j++) {
5704 			for (int k = 0; k < 2; k++) {
5705 				nvlist_append_number_array(nvl, "pcounters",
5706 				    s.pcounters[i][j][k]);
5707 			}
5708 			nvlist_append_number_array(nvl, "bcounters",
5709 			    s.bcounters[i][j]);
5710 		}
5711 	}
5712 
5713 	nvlpacked = nvlist_pack(nvl, &nv->len);
5714 	if (nvlpacked == NULL)
5715 		ERROUT(ENOMEM);
5716 
5717 	if (nv->size == 0)
5718 		ERROUT(0);
5719 	else if (nv->size < nv->len)
5720 		ERROUT(ENOSPC);
5721 
5722 	PF_RULES_RUNLOCK();
5723 	error = copyout(nvlpacked, nv->data, nv->len);
5724 	goto done;
5725 
5726 #undef ERROUT
5727 errout:
5728 	PF_RULES_RUNLOCK();
5729 done:
5730 	free(nvlpacked, M_NVLIST);
5731 	nvlist_destroy(nvc);
5732 	nvlist_destroy(nvl);
5733 
5734 	return (error);
5735 }
5736 
5737 /*
5738  * XXX - Check for version mismatch!!!
5739  */
5740 static void
5741 pf_clear_all_states(void)
5742 {
5743 	struct pf_kstate	*s;
5744 	u_int i;
5745 
5746 	for (i = 0; i <= pf_hashmask; i++) {
5747 		struct pf_idhash *ih = &V_pf_idhash[i];
5748 relock:
5749 		PF_HASHROW_LOCK(ih);
5750 		LIST_FOREACH(s, &ih->states, entry) {
5751 			s->timeout = PFTM_PURGE;
5752 			/* Don't send out individual delete messages. */
5753 			s->state_flags |= PFSTATE_NOSYNC;
5754 			pf_unlink_state(s);
5755 			goto relock;
5756 		}
5757 		PF_HASHROW_UNLOCK(ih);
5758 	}
5759 }
5760 
5761 static int
5762 pf_clear_tables(void)
5763 {
5764 	struct pfioc_table io;
5765 	int error;
5766 
5767 	bzero(&io, sizeof(io));
5768 
5769 	error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel,
5770 	    io.pfrio_flags);
5771 
5772 	return (error);
5773 }
5774 
5775 static void
5776 pf_clear_srcnodes(struct pf_ksrc_node *n)
5777 {
5778 	struct pf_kstate *s;
5779 	int i;
5780 
5781 	for (i = 0; i <= pf_hashmask; i++) {
5782 		struct pf_idhash *ih = &V_pf_idhash[i];
5783 
5784 		PF_HASHROW_LOCK(ih);
5785 		LIST_FOREACH(s, &ih->states, entry) {
5786 			if (n == NULL || n == s->src_node)
5787 				s->src_node = NULL;
5788 			if (n == NULL || n == s->nat_src_node)
5789 				s->nat_src_node = NULL;
5790 		}
5791 		PF_HASHROW_UNLOCK(ih);
5792 	}
5793 
5794 	if (n == NULL) {
5795 		struct pf_srchash *sh;
5796 
5797 		for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
5798 		    i++, sh++) {
5799 			PF_HASHROW_LOCK(sh);
5800 			LIST_FOREACH(n, &sh->nodes, entry) {
5801 				n->expire = 1;
5802 				n->states = 0;
5803 			}
5804 			PF_HASHROW_UNLOCK(sh);
5805 		}
5806 	} else {
5807 		/* XXX: hash slot should already be locked here. */
5808 		n->expire = 1;
5809 		n->states = 0;
5810 	}
5811 }
5812 
5813 static void
5814 pf_kill_srcnodes(struct pfioc_src_node_kill *psnk)
5815 {
5816 	struct pf_ksrc_node_list	 kill;
5817 
5818 	LIST_INIT(&kill);
5819 	for (int i = 0; i <= pf_srchashmask; i++) {
5820 		struct pf_srchash *sh = &V_pf_srchash[i];
5821 		struct pf_ksrc_node *sn, *tmp;
5822 
5823 		PF_HASHROW_LOCK(sh);
5824 		LIST_FOREACH_SAFE(sn, &sh->nodes, entry, tmp)
5825 			if (PF_MATCHA(psnk->psnk_src.neg,
5826 			      &psnk->psnk_src.addr.v.a.addr,
5827 			      &psnk->psnk_src.addr.v.a.mask,
5828 			      &sn->addr, sn->af) &&
5829 			    PF_MATCHA(psnk->psnk_dst.neg,
5830 			      &psnk->psnk_dst.addr.v.a.addr,
5831 			      &psnk->psnk_dst.addr.v.a.mask,
5832 			      &sn->raddr, sn->af)) {
5833 				pf_unlink_src_node(sn);
5834 				LIST_INSERT_HEAD(&kill, sn, entry);
5835 				sn->expire = 1;
5836 			}
5837 		PF_HASHROW_UNLOCK(sh);
5838 	}
5839 
5840 	for (int i = 0; i <= pf_hashmask; i++) {
5841 		struct pf_idhash *ih = &V_pf_idhash[i];
5842 		struct pf_kstate *s;
5843 
5844 		PF_HASHROW_LOCK(ih);
5845 		LIST_FOREACH(s, &ih->states, entry) {
5846 			if (s->src_node && s->src_node->expire == 1)
5847 				s->src_node = NULL;
5848 			if (s->nat_src_node && s->nat_src_node->expire == 1)
5849 				s->nat_src_node = NULL;
5850 		}
5851 		PF_HASHROW_UNLOCK(ih);
5852 	}
5853 
5854 	psnk->psnk_killed = pf_free_src_nodes(&kill);
5855 }
5856 
5857 static int
5858 pf_keepcounters(struct pfioc_nv *nv)
5859 {
5860 	nvlist_t	*nvl = NULL;
5861 	void		*nvlpacked = NULL;
5862 	int		 error = 0;
5863 
5864 #define	ERROUT(x)	ERROUT_FUNCTION(on_error, x)
5865 
5866 	if (nv->len > pf_ioctl_maxcount)
5867 		ERROUT(ENOMEM);
5868 
5869 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
5870 	if (nvlpacked == NULL)
5871 		ERROUT(ENOMEM);
5872 
5873 	error = copyin(nv->data, nvlpacked, nv->len);
5874 	if (error)
5875 		ERROUT(error);
5876 
5877 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
5878 	if (nvl == NULL)
5879 		ERROUT(EBADMSG);
5880 
5881 	if (! nvlist_exists_bool(nvl, "keep_counters"))
5882 		ERROUT(EBADMSG);
5883 
5884 	V_pf_status.keep_counters = nvlist_get_bool(nvl, "keep_counters");
5885 
5886 on_error:
5887 	nvlist_destroy(nvl);
5888 	free(nvlpacked, M_NVLIST);
5889 	return (error);
5890 }
5891 
5892 static unsigned int
5893 pf_clear_states(const struct pf_kstate_kill *kill)
5894 {
5895 	struct pf_state_key_cmp	 match_key;
5896 	struct pf_kstate	*s;
5897 	struct pfi_kkif	*kif;
5898 	int		 idx;
5899 	unsigned int	 killed = 0, dir;
5900 
5901 	for (unsigned int i = 0; i <= pf_hashmask; i++) {
5902 		struct pf_idhash *ih = &V_pf_idhash[i];
5903 
5904 relock_DIOCCLRSTATES:
5905 		PF_HASHROW_LOCK(ih);
5906 		LIST_FOREACH(s, &ih->states, entry) {
5907 			/* For floating states look at the original kif. */
5908 			kif = s->kif == V_pfi_all ? s->orig_kif : s->kif;
5909 
5910 			if (kill->psk_ifname[0] &&
5911 			    strcmp(kill->psk_ifname,
5912 			    kif->pfik_name))
5913 				continue;
5914 
5915 			if (kill->psk_kill_match) {
5916 				bzero(&match_key, sizeof(match_key));
5917 
5918 				if (s->direction == PF_OUT) {
5919 					dir = PF_IN;
5920 					idx = PF_SK_STACK;
5921 				} else {
5922 					dir = PF_OUT;
5923 					idx = PF_SK_WIRE;
5924 				}
5925 
5926 				match_key.af = s->key[idx]->af;
5927 				match_key.proto = s->key[idx]->proto;
5928 				PF_ACPY(&match_key.addr[0],
5929 				    &s->key[idx]->addr[1], match_key.af);
5930 				match_key.port[0] = s->key[idx]->port[1];
5931 				PF_ACPY(&match_key.addr[1],
5932 				    &s->key[idx]->addr[0], match_key.af);
5933 				match_key.port[1] = s->key[idx]->port[0];
5934 			}
5935 
5936 			/*
5937 			 * Don't send out individual
5938 			 * delete messages.
5939 			 */
5940 			s->state_flags |= PFSTATE_NOSYNC;
5941 			pf_unlink_state(s);
5942 			killed++;
5943 
5944 			if (kill->psk_kill_match)
5945 				killed += pf_kill_matching_state(&match_key,
5946 				    dir);
5947 
5948 			goto relock_DIOCCLRSTATES;
5949 		}
5950 		PF_HASHROW_UNLOCK(ih);
5951 	}
5952 
5953 	if (V_pfsync_clear_states_ptr != NULL)
5954 		V_pfsync_clear_states_ptr(V_pf_status.hostid, kill->psk_ifname);
5955 
5956 	return (killed);
5957 }
5958 
5959 static void
5960 pf_killstates(struct pf_kstate_kill *kill, unsigned int *killed)
5961 {
5962 	struct pf_kstate	*s;
5963 
5964 	if (kill->psk_pfcmp.id) {
5965 		if (kill->psk_pfcmp.creatorid == 0)
5966 			kill->psk_pfcmp.creatorid = V_pf_status.hostid;
5967 		if ((s = pf_find_state_byid(kill->psk_pfcmp.id,
5968 		    kill->psk_pfcmp.creatorid))) {
5969 			pf_unlink_state(s);
5970 			*killed = 1;
5971 		}
5972 		return;
5973 	}
5974 
5975 	for (unsigned int i = 0; i <= pf_hashmask; i++)
5976 		*killed += pf_killstates_row(kill, &V_pf_idhash[i]);
5977 
5978 	return;
5979 }
5980 
5981 static int
5982 pf_killstates_nv(struct pfioc_nv *nv)
5983 {
5984 	struct pf_kstate_kill	 kill;
5985 	nvlist_t		*nvl = NULL;
5986 	void			*nvlpacked = NULL;
5987 	int			 error = 0;
5988 	unsigned int		 killed = 0;
5989 
5990 #define ERROUT(x)	ERROUT_FUNCTION(on_error, x)
5991 
5992 	if (nv->len > pf_ioctl_maxcount)
5993 		ERROUT(ENOMEM);
5994 
5995 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
5996 	if (nvlpacked == NULL)
5997 		ERROUT(ENOMEM);
5998 
5999 	error = copyin(nv->data, nvlpacked, nv->len);
6000 	if (error)
6001 		ERROUT(error);
6002 
6003 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
6004 	if (nvl == NULL)
6005 		ERROUT(EBADMSG);
6006 
6007 	error = pf_nvstate_kill_to_kstate_kill(nvl, &kill);
6008 	if (error)
6009 		ERROUT(error);
6010 
6011 	pf_killstates(&kill, &killed);
6012 
6013 	free(nvlpacked, M_NVLIST);
6014 	nvlpacked = NULL;
6015 	nvlist_destroy(nvl);
6016 	nvl = nvlist_create(0);
6017 	if (nvl == NULL)
6018 		ERROUT(ENOMEM);
6019 
6020 	nvlist_add_number(nvl, "killed", killed);
6021 
6022 	nvlpacked = nvlist_pack(nvl, &nv->len);
6023 	if (nvlpacked == NULL)
6024 		ERROUT(ENOMEM);
6025 
6026 	if (nv->size == 0)
6027 		ERROUT(0);
6028 	else if (nv->size < nv->len)
6029 		ERROUT(ENOSPC);
6030 
6031 	error = copyout(nvlpacked, nv->data, nv->len);
6032 
6033 on_error:
6034 	nvlist_destroy(nvl);
6035 	free(nvlpacked, M_NVLIST);
6036 	return (error);
6037 }
6038 
6039 static int
6040 pf_clearstates_nv(struct pfioc_nv *nv)
6041 {
6042 	struct pf_kstate_kill	 kill;
6043 	nvlist_t		*nvl = NULL;
6044 	void			*nvlpacked = NULL;
6045 	int			 error = 0;
6046 	unsigned int		 killed;
6047 
6048 #define ERROUT(x)	ERROUT_FUNCTION(on_error, x)
6049 
6050 	if (nv->len > pf_ioctl_maxcount)
6051 		ERROUT(ENOMEM);
6052 
6053 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
6054 	if (nvlpacked == NULL)
6055 		ERROUT(ENOMEM);
6056 
6057 	error = copyin(nv->data, nvlpacked, nv->len);
6058 	if (error)
6059 		ERROUT(error);
6060 
6061 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
6062 	if (nvl == NULL)
6063 		ERROUT(EBADMSG);
6064 
6065 	error = pf_nvstate_kill_to_kstate_kill(nvl, &kill);
6066 	if (error)
6067 		ERROUT(error);
6068 
6069 	killed = pf_clear_states(&kill);
6070 
6071 	free(nvlpacked, M_NVLIST);
6072 	nvlpacked = NULL;
6073 	nvlist_destroy(nvl);
6074 	nvl = nvlist_create(0);
6075 	if (nvl == NULL)
6076 		ERROUT(ENOMEM);
6077 
6078 	nvlist_add_number(nvl, "killed", killed);
6079 
6080 	nvlpacked = nvlist_pack(nvl, &nv->len);
6081 	if (nvlpacked == NULL)
6082 		ERROUT(ENOMEM);
6083 
6084 	if (nv->size == 0)
6085 		ERROUT(0);
6086 	else if (nv->size < nv->len)
6087 		ERROUT(ENOSPC);
6088 
6089 	error = copyout(nvlpacked, nv->data, nv->len);
6090 
6091 #undef ERROUT
6092 on_error:
6093 	nvlist_destroy(nvl);
6094 	free(nvlpacked, M_NVLIST);
6095 	return (error);
6096 }
6097 
6098 static int
6099 pf_getstate(struct pfioc_nv *nv)
6100 {
6101 	nvlist_t		*nvl = NULL, *nvls;
6102 	void			*nvlpacked = NULL;
6103 	struct pf_kstate	*s = NULL;
6104 	int			 error = 0;
6105 	uint64_t		 id, creatorid;
6106 
6107 #define ERROUT(x)	ERROUT_FUNCTION(errout, x)
6108 
6109 	if (nv->len > pf_ioctl_maxcount)
6110 		ERROUT(ENOMEM);
6111 
6112 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
6113 	if (nvlpacked == NULL)
6114 		ERROUT(ENOMEM);
6115 
6116 	error = copyin(nv->data, nvlpacked, nv->len);
6117 	if (error)
6118 		ERROUT(error);
6119 
6120 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
6121 	if (nvl == NULL)
6122 		ERROUT(EBADMSG);
6123 
6124 	PFNV_CHK(pf_nvuint64(nvl, "id", &id));
6125 	PFNV_CHK(pf_nvuint64(nvl, "creatorid", &creatorid));
6126 
6127 	s = pf_find_state_byid(id, creatorid);
6128 	if (s == NULL)
6129 		ERROUT(ENOENT);
6130 
6131 	free(nvlpacked, M_NVLIST);
6132 	nvlpacked = NULL;
6133 	nvlist_destroy(nvl);
6134 	nvl = nvlist_create(0);
6135 	if (nvl == NULL)
6136 		ERROUT(ENOMEM);
6137 
6138 	nvls = pf_state_to_nvstate(s);
6139 	if (nvls == NULL)
6140 		ERROUT(ENOMEM);
6141 
6142 	nvlist_add_nvlist(nvl, "state", nvls);
6143 	nvlist_destroy(nvls);
6144 
6145 	nvlpacked = nvlist_pack(nvl, &nv->len);
6146 	if (nvlpacked == NULL)
6147 		ERROUT(ENOMEM);
6148 
6149 	if (nv->size == 0)
6150 		ERROUT(0);
6151 	else if (nv->size < nv->len)
6152 		ERROUT(ENOSPC);
6153 
6154 	error = copyout(nvlpacked, nv->data, nv->len);
6155 
6156 #undef ERROUT
6157 errout:
6158 	if (s != NULL)
6159 		PF_STATE_UNLOCK(s);
6160 	free(nvlpacked, M_NVLIST);
6161 	nvlist_destroy(nvl);
6162 	return (error);
6163 }
6164 
6165 /*
6166  * XXX - Check for version mismatch!!!
6167  */
6168 
6169 /*
6170  * Duplicate pfctl -Fa operation to get rid of as much as we can.
6171  */
6172 static int
6173 shutdown_pf(void)
6174 {
6175 	int error = 0;
6176 	u_int32_t t[5];
6177 	char nn = '\0';
6178 
6179 	do {
6180 		if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn))
6181 		    != 0) {
6182 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n"));
6183 			break;
6184 		}
6185 		if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn))
6186 		    != 0) {
6187 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n"));
6188 			break;		/* XXX: rollback? */
6189 		}
6190 		if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn))
6191 		    != 0) {
6192 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n"));
6193 			break;		/* XXX: rollback? */
6194 		}
6195 		if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn))
6196 		    != 0) {
6197 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n"));
6198 			break;		/* XXX: rollback? */
6199 		}
6200 		if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn))
6201 		    != 0) {
6202 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n"));
6203 			break;		/* XXX: rollback? */
6204 		}
6205 
6206 		/* XXX: these should always succeed here */
6207 		pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn);
6208 		pf_commit_rules(t[1], PF_RULESET_FILTER, &nn);
6209 		pf_commit_rules(t[2], PF_RULESET_NAT, &nn);
6210 		pf_commit_rules(t[3], PF_RULESET_BINAT, &nn);
6211 		pf_commit_rules(t[4], PF_RULESET_RDR, &nn);
6212 
6213 		if ((error = pf_clear_tables()) != 0)
6214 			break;
6215 
6216 		if ((error = pf_begin_eth(&t[0], &nn)) != 0) {
6217 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: eth\n"));
6218 			break;
6219 		}
6220 		pf_commit_eth(t[0], &nn);
6221 
6222 #ifdef ALTQ
6223 		if ((error = pf_begin_altq(&t[0])) != 0) {
6224 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n"));
6225 			break;
6226 		}
6227 		pf_commit_altq(t[0]);
6228 #endif
6229 
6230 		pf_clear_all_states();
6231 
6232 		pf_clear_srcnodes(NULL);
6233 
6234 		/* status does not use malloced mem so no need to cleanup */
6235 		/* fingerprints and interfaces have their own cleanup code */
6236 	} while(0);
6237 
6238 	return (error);
6239 }
6240 
6241 static pfil_return_t
6242 pf_check_return(int chk, struct mbuf **m)
6243 {
6244 
6245 	switch (chk) {
6246 	case PF_PASS:
6247 		if (*m == NULL)
6248 			return (PFIL_CONSUMED);
6249 		else
6250 			return (PFIL_PASS);
6251 		break;
6252 	default:
6253 		if (*m != NULL) {
6254 			m_freem(*m);
6255 			*m = NULL;
6256 		}
6257 		return (PFIL_DROPPED);
6258 	}
6259 }
6260 
6261 static pfil_return_t
6262 pf_eth_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
6263     void *ruleset __unused, struct inpcb *inp)
6264 {
6265 	int chk;
6266 
6267 	chk = pf_test_eth(PF_IN, flags, ifp, m, inp);
6268 
6269 	return (pf_check_return(chk, m));
6270 }
6271 
6272 static pfil_return_t
6273 pf_eth_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
6274     void *ruleset __unused, struct inpcb *inp)
6275 {
6276 	int chk;
6277 
6278 	chk = pf_test_eth(PF_OUT, flags, ifp, m, inp);
6279 
6280 	return (pf_check_return(chk, m));
6281 }
6282 
6283 #ifdef INET
6284 static pfil_return_t
6285 pf_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
6286     void *ruleset __unused, struct inpcb *inp)
6287 {
6288 	int chk;
6289 
6290 	chk = pf_test(PF_IN, flags, ifp, m, inp, NULL);
6291 
6292 	return (pf_check_return(chk, m));
6293 }
6294 
6295 static pfil_return_t
6296 pf_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
6297     void *ruleset __unused,  struct inpcb *inp)
6298 {
6299 	int chk;
6300 
6301 	chk = pf_test(PF_OUT, flags, ifp, m, inp, NULL);
6302 
6303 	return (pf_check_return(chk, m));
6304 }
6305 #endif
6306 
6307 #ifdef INET6
6308 static pfil_return_t
6309 pf_check6_in(struct mbuf **m, struct ifnet *ifp, int flags,
6310     void *ruleset __unused,  struct inpcb *inp)
6311 {
6312 	int chk;
6313 
6314 	/*
6315 	 * In case of loopback traffic IPv6 uses the real interface in
6316 	 * order to support scoped addresses. In order to support stateful
6317 	 * filtering we have change this to lo0 as it is the case in IPv4.
6318 	 */
6319 	CURVNET_SET(ifp->if_vnet);
6320 	chk = pf_test6(PF_IN, flags, (*m)->m_flags & M_LOOP ? V_loif : ifp,
6321 	    m, inp, NULL);
6322 	CURVNET_RESTORE();
6323 
6324 	return (pf_check_return(chk, m));
6325 }
6326 
6327 static pfil_return_t
6328 pf_check6_out(struct mbuf **m, struct ifnet *ifp, int flags,
6329     void *ruleset __unused,  struct inpcb *inp)
6330 {
6331 	int chk;
6332 
6333 	CURVNET_SET(ifp->if_vnet);
6334 	chk = pf_test6(PF_OUT, flags, ifp, m, inp, NULL);
6335 	CURVNET_RESTORE();
6336 
6337 	return (pf_check_return(chk, m));
6338 }
6339 #endif /* INET6 */
6340 
6341 VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_in_hook);
6342 VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_out_hook);
6343 #define	V_pf_eth_in_hook	VNET(pf_eth_in_hook)
6344 #define	V_pf_eth_out_hook	VNET(pf_eth_out_hook)
6345 
6346 #ifdef INET
6347 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_in_hook);
6348 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_out_hook);
6349 #define	V_pf_ip4_in_hook	VNET(pf_ip4_in_hook)
6350 #define	V_pf_ip4_out_hook	VNET(pf_ip4_out_hook)
6351 #endif
6352 #ifdef INET6
6353 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_in_hook);
6354 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_out_hook);
6355 #define	V_pf_ip6_in_hook	VNET(pf_ip6_in_hook)
6356 #define	V_pf_ip6_out_hook	VNET(pf_ip6_out_hook)
6357 #endif
6358 
6359 static void
6360 hook_pf_eth(void)
6361 {
6362 	struct pfil_hook_args pha = {
6363 		.pa_version = PFIL_VERSION,
6364 		.pa_modname = "pf",
6365 		.pa_type = PFIL_TYPE_ETHERNET,
6366 	};
6367 	struct pfil_link_args pla = {
6368 		.pa_version = PFIL_VERSION,
6369 	};
6370 	int ret __diagused;
6371 
6372 	if (atomic_load_bool(&V_pf_pfil_eth_hooked))
6373 		return;
6374 
6375 	pha.pa_mbuf_chk = pf_eth_check_in;
6376 	pha.pa_flags = PFIL_IN;
6377 	pha.pa_rulname = "eth-in";
6378 	V_pf_eth_in_hook = pfil_add_hook(&pha);
6379 	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
6380 	pla.pa_head = V_link_pfil_head;
6381 	pla.pa_hook = V_pf_eth_in_hook;
6382 	ret = pfil_link(&pla);
6383 	MPASS(ret == 0);
6384 	pha.pa_mbuf_chk = pf_eth_check_out;
6385 	pha.pa_flags = PFIL_OUT;
6386 	pha.pa_rulname = "eth-out";
6387 	V_pf_eth_out_hook = pfil_add_hook(&pha);
6388 	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6389 	pla.pa_head = V_link_pfil_head;
6390 	pla.pa_hook = V_pf_eth_out_hook;
6391 	ret = pfil_link(&pla);
6392 	MPASS(ret == 0);
6393 
6394 	atomic_store_bool(&V_pf_pfil_eth_hooked, true);
6395 }
6396 
6397 static void
6398 hook_pf(void)
6399 {
6400 	struct pfil_hook_args pha = {
6401 		.pa_version = PFIL_VERSION,
6402 		.pa_modname = "pf",
6403 	};
6404 	struct pfil_link_args pla = {
6405 		.pa_version = PFIL_VERSION,
6406 	};
6407 	int ret __diagused;
6408 
6409 	if (atomic_load_bool(&V_pf_pfil_hooked))
6410 		return;
6411 
6412 #ifdef INET
6413 	pha.pa_type = PFIL_TYPE_IP4;
6414 	pha.pa_mbuf_chk = pf_check_in;
6415 	pha.pa_flags = PFIL_IN;
6416 	pha.pa_rulname = "default-in";
6417 	V_pf_ip4_in_hook = pfil_add_hook(&pha);
6418 	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
6419 	pla.pa_head = V_inet_pfil_head;
6420 	pla.pa_hook = V_pf_ip4_in_hook;
6421 	ret = pfil_link(&pla);
6422 	MPASS(ret == 0);
6423 	pha.pa_mbuf_chk = pf_check_out;
6424 	pha.pa_flags = PFIL_OUT;
6425 	pha.pa_rulname = "default-out";
6426 	V_pf_ip4_out_hook = pfil_add_hook(&pha);
6427 	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6428 	pla.pa_head = V_inet_pfil_head;
6429 	pla.pa_hook = V_pf_ip4_out_hook;
6430 	ret = pfil_link(&pla);
6431 	MPASS(ret == 0);
6432 	if (V_pf_filter_local) {
6433 		pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6434 		pla.pa_head = V_inet_local_pfil_head;
6435 		pla.pa_hook = V_pf_ip4_out_hook;
6436 		ret = pfil_link(&pla);
6437 		MPASS(ret == 0);
6438 	}
6439 #endif
6440 #ifdef INET6
6441 	pha.pa_type = PFIL_TYPE_IP6;
6442 	pha.pa_mbuf_chk = pf_check6_in;
6443 	pha.pa_flags = PFIL_IN;
6444 	pha.pa_rulname = "default-in6";
6445 	V_pf_ip6_in_hook = pfil_add_hook(&pha);
6446 	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
6447 	pla.pa_head = V_inet6_pfil_head;
6448 	pla.pa_hook = V_pf_ip6_in_hook;
6449 	ret = pfil_link(&pla);
6450 	MPASS(ret == 0);
6451 	pha.pa_mbuf_chk = pf_check6_out;
6452 	pha.pa_rulname = "default-out6";
6453 	pha.pa_flags = PFIL_OUT;
6454 	V_pf_ip6_out_hook = pfil_add_hook(&pha);
6455 	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6456 	pla.pa_head = V_inet6_pfil_head;
6457 	pla.pa_hook = V_pf_ip6_out_hook;
6458 	ret = pfil_link(&pla);
6459 	MPASS(ret == 0);
6460 	if (V_pf_filter_local) {
6461 		pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6462 		pla.pa_head = V_inet6_local_pfil_head;
6463 		pla.pa_hook = V_pf_ip6_out_hook;
6464 		ret = pfil_link(&pla);
6465 		MPASS(ret == 0);
6466 	}
6467 #endif
6468 
6469 	atomic_store_bool(&V_pf_pfil_hooked, true);
6470 }
6471 
6472 static void
6473 dehook_pf_eth(void)
6474 {
6475 
6476 	if (!atomic_load_bool(&V_pf_pfil_eth_hooked))
6477 		return;
6478 
6479 	pfil_remove_hook(V_pf_eth_in_hook);
6480 	pfil_remove_hook(V_pf_eth_out_hook);
6481 
6482 	atomic_store_bool(&V_pf_pfil_eth_hooked, false);
6483 }
6484 
6485 static void
6486 dehook_pf(void)
6487 {
6488 
6489 	if (!atomic_load_bool(&V_pf_pfil_hooked))
6490 		return;
6491 
6492 #ifdef INET
6493 	pfil_remove_hook(V_pf_ip4_in_hook);
6494 	pfil_remove_hook(V_pf_ip4_out_hook);
6495 #endif
6496 #ifdef INET6
6497 	pfil_remove_hook(V_pf_ip6_in_hook);
6498 	pfil_remove_hook(V_pf_ip6_out_hook);
6499 #endif
6500 
6501 	atomic_store_bool(&V_pf_pfil_hooked, false);
6502 }
6503 
6504 static void
6505 pf_load_vnet(void)
6506 {
6507 	V_pf_tag_z = uma_zcreate("pf tags", sizeof(struct pf_tagname),
6508 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
6509 
6510 	rm_init_flags(&V_pf_rules_lock, "pf rulesets", RM_RECURSE);
6511 	sx_init(&V_pf_ioctl_lock, "pf ioctl");
6512 
6513 	pf_init_tagset(&V_pf_tags, &pf_rule_tag_hashsize,
6514 	    PF_RULE_TAG_HASH_SIZE_DEFAULT);
6515 #ifdef ALTQ
6516 	pf_init_tagset(&V_pf_qids, &pf_queue_tag_hashsize,
6517 	    PF_QUEUE_TAG_HASH_SIZE_DEFAULT);
6518 #endif
6519 
6520 	V_pf_keth = &V_pf_main_keth_anchor.ruleset;
6521 
6522 	pfattach_vnet();
6523 	V_pf_vnet_active = 1;
6524 }
6525 
6526 static int
6527 pf_load(void)
6528 {
6529 	int error;
6530 
6531 	sx_init(&pf_end_lock, "pf end thread");
6532 
6533 	pf_mtag_initialize();
6534 
6535 	pf_dev = make_dev(&pf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, PF_NAME);
6536 	if (pf_dev == NULL)
6537 		return (ENOMEM);
6538 
6539 	pf_end_threads = 0;
6540 	error = kproc_create(pf_purge_thread, NULL, &pf_purge_proc, 0, 0, "pf purge");
6541 	if (error != 0)
6542 		return (error);
6543 
6544 	pfi_initialize();
6545 
6546 	return (0);
6547 }
6548 
6549 static void
6550 pf_unload_vnet(void)
6551 {
6552 	int ret __diagused;
6553 
6554 	V_pf_vnet_active = 0;
6555 	V_pf_status.running = 0;
6556 	dehook_pf();
6557 	dehook_pf_eth();
6558 
6559 	PF_RULES_WLOCK();
6560 	pf_syncookies_cleanup();
6561 	shutdown_pf();
6562 	PF_RULES_WUNLOCK();
6563 
6564 	/* Make sure we've cleaned up ethernet rules before we continue. */
6565 	NET_EPOCH_DRAIN_CALLBACKS();
6566 
6567 	ret = swi_remove(V_pf_swi_cookie);
6568 	MPASS(ret == 0);
6569 	ret = intr_event_destroy(V_pf_swi_ie);
6570 	MPASS(ret == 0);
6571 
6572 	pf_unload_vnet_purge();
6573 
6574 	pf_normalize_cleanup();
6575 	PF_RULES_WLOCK();
6576 	pfi_cleanup_vnet();
6577 	PF_RULES_WUNLOCK();
6578 	pfr_cleanup();
6579 	pf_osfp_flush();
6580 	pf_cleanup();
6581 	if (IS_DEFAULT_VNET(curvnet))
6582 		pf_mtag_cleanup();
6583 
6584 	pf_cleanup_tagset(&V_pf_tags);
6585 #ifdef ALTQ
6586 	pf_cleanup_tagset(&V_pf_qids);
6587 #endif
6588 	uma_zdestroy(V_pf_tag_z);
6589 
6590 #ifdef PF_WANT_32_TO_64_COUNTER
6591 	PF_RULES_WLOCK();
6592 	LIST_REMOVE(V_pf_kifmarker, pfik_allkiflist);
6593 
6594 	MPASS(LIST_EMPTY(&V_pf_allkiflist));
6595 	MPASS(V_pf_allkifcount == 0);
6596 
6597 	LIST_REMOVE(&V_pf_default_rule, allrulelist);
6598 	V_pf_allrulecount--;
6599 	LIST_REMOVE(V_pf_rulemarker, allrulelist);
6600 
6601 	/*
6602 	 * There are known pf rule leaks when running the test suite.
6603 	 */
6604 #ifdef notyet
6605 	MPASS(LIST_EMPTY(&V_pf_allrulelist));
6606 	MPASS(V_pf_allrulecount == 0);
6607 #endif
6608 
6609 	PF_RULES_WUNLOCK();
6610 
6611 	free(V_pf_kifmarker, PFI_MTYPE);
6612 	free(V_pf_rulemarker, M_PFRULE);
6613 #endif
6614 
6615 	/* Free counters last as we updated them during shutdown. */
6616 	pf_counter_u64_deinit(&V_pf_default_rule.evaluations);
6617 	for (int i = 0; i < 2; i++) {
6618 		pf_counter_u64_deinit(&V_pf_default_rule.packets[i]);
6619 		pf_counter_u64_deinit(&V_pf_default_rule.bytes[i]);
6620 	}
6621 	counter_u64_free(V_pf_default_rule.states_cur);
6622 	counter_u64_free(V_pf_default_rule.states_tot);
6623 	counter_u64_free(V_pf_default_rule.src_nodes);
6624 	uma_zfree_pcpu(pf_timestamp_pcpu_zone, V_pf_default_rule.timestamp);
6625 
6626 	for (int i = 0; i < PFRES_MAX; i++)
6627 		counter_u64_free(V_pf_status.counters[i]);
6628 	for (int i = 0; i < KLCNT_MAX; i++)
6629 		counter_u64_free(V_pf_status.lcounters[i]);
6630 	for (int i = 0; i < FCNT_MAX; i++)
6631 		pf_counter_u64_deinit(&V_pf_status.fcounters[i]);
6632 	for (int i = 0; i < SCNT_MAX; i++)
6633 		counter_u64_free(V_pf_status.scounters[i]);
6634 
6635 	rm_destroy(&V_pf_rules_lock);
6636 	sx_destroy(&V_pf_ioctl_lock);
6637 }
6638 
6639 static void
6640 pf_unload(void)
6641 {
6642 
6643 	sx_xlock(&pf_end_lock);
6644 	pf_end_threads = 1;
6645 	while (pf_end_threads < 2) {
6646 		wakeup_one(pf_purge_thread);
6647 		sx_sleep(pf_purge_proc, &pf_end_lock, 0, "pftmo", 0);
6648 	}
6649 	sx_xunlock(&pf_end_lock);
6650 
6651 	if (pf_dev != NULL)
6652 		destroy_dev(pf_dev);
6653 
6654 	pfi_cleanup();
6655 
6656 	sx_destroy(&pf_end_lock);
6657 }
6658 
6659 static void
6660 vnet_pf_init(void *unused __unused)
6661 {
6662 
6663 	pf_load_vnet();
6664 }
6665 VNET_SYSINIT(vnet_pf_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
6666     vnet_pf_init, NULL);
6667 
6668 static void
6669 vnet_pf_uninit(const void *unused __unused)
6670 {
6671 
6672 	pf_unload_vnet();
6673 }
6674 SYSUNINIT(pf_unload, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND, pf_unload, NULL);
6675 VNET_SYSUNINIT(vnet_pf_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
6676     vnet_pf_uninit, NULL);
6677 
6678 static int
6679 pf_modevent(module_t mod, int type, void *data)
6680 {
6681 	int error = 0;
6682 
6683 	switch(type) {
6684 	case MOD_LOAD:
6685 		error = pf_load();
6686 		break;
6687 	case MOD_UNLOAD:
6688 		/* Handled in SYSUNINIT(pf_unload) to ensure it's done after
6689 		 * the vnet_pf_uninit()s */
6690 		break;
6691 	default:
6692 		error = EINVAL;
6693 		break;
6694 	}
6695 
6696 	return (error);
6697 }
6698 
6699 static moduledata_t pf_mod = {
6700 	"pf",
6701 	pf_modevent,
6702 	0
6703 };
6704 
6705 DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND);
6706 MODULE_VERSION(pf, PF_MODVER);
6707