xref: /freebsd/sys/netpfil/pf/pf_ioctl.c (revision afd74c400075d94e01dd3430844bb290834660ef)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2001 Daniel Hartmeier
5  * Copyright (c) 2002,2003 Henning Brauer
6  * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  *
13  *    - Redistributions of source code must retain the above copyright
14  *      notice, this list of conditions and the following disclaimer.
15  *    - Redistributions in binary form must reproduce the above
16  *      copyright notice, this list of conditions and the following
17  *      disclaimer in the documentation and/or other materials provided
18  *      with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
30  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  *
33  * Effort sponsored in part by the Defense Advanced Research Projects
34  * Agency (DARPA) and Air Force Research Laboratory, Air Force
35  * Materiel Command, USAF, under agreement number F30602-01-2-0537.
36  *
37  *	$OpenBSD: pf_ioctl.c,v 1.213 2009/02/15 21:46:12 mbalmer Exp $
38  */
39 
40 #include <sys/cdefs.h>
41 #include "opt_inet.h"
42 #include "opt_inet6.h"
43 #include "opt_bpf.h"
44 #include "opt_pf.h"
45 
46 #include <sys/param.h>
47 #include <sys/_bitset.h>
48 #include <sys/bitset.h>
49 #include <sys/bus.h>
50 #include <sys/conf.h>
51 #include <sys/endian.h>
52 #include <sys/fcntl.h>
53 #include <sys/filio.h>
54 #include <sys/hash.h>
55 #include <sys/interrupt.h>
56 #include <sys/jail.h>
57 #include <sys/kernel.h>
58 #include <sys/kthread.h>
59 #include <sys/lock.h>
60 #include <sys/mbuf.h>
61 #include <sys/module.h>
62 #include <sys/nv.h>
63 #include <sys/proc.h>
64 #include <sys/sdt.h>
65 #include <sys/smp.h>
66 #include <sys/socket.h>
67 #include <sys/sysctl.h>
68 #include <sys/md5.h>
69 #include <sys/ucred.h>
70 
71 #include <net/if.h>
72 #include <net/if_var.h>
73 #include <net/if_private.h>
74 #include <net/vnet.h>
75 #include <net/route.h>
76 #include <net/pfil.h>
77 #include <net/pfvar.h>
78 #include <net/if_pfsync.h>
79 #include <net/if_pflog.h>
80 
81 #include <netinet/in.h>
82 #include <netinet/ip.h>
83 #include <netinet/ip_var.h>
84 #include <netinet6/ip6_var.h>
85 #include <netinet/ip_icmp.h>
86 #include <netpfil/pf/pf_nl.h>
87 #include <netpfil/pf/pf_nv.h>
88 
89 #ifdef INET6
90 #include <netinet/ip6.h>
91 #endif /* INET6 */
92 
93 #ifdef ALTQ
94 #include <net/altq/altq.h>
95 #endif
96 
97 SDT_PROBE_DEFINE3(pf, ioctl, ioctl, error, "int", "int", "int");
98 SDT_PROBE_DEFINE3(pf, ioctl, function, error, "char *", "int", "int");
99 SDT_PROBE_DEFINE2(pf, ioctl, addrule, error, "int", "int");
100 SDT_PROBE_DEFINE2(pf, ioctl, nvchk, error, "int", "int");
101 
102 static struct pf_kpool	*pf_get_kpool(const char *, u_int32_t, u_int8_t,
103 			    u_int32_t, u_int8_t, u_int8_t, u_int8_t);
104 
105 static void		 pf_mv_kpool(struct pf_kpalist *, struct pf_kpalist *);
106 static void		 pf_empty_kpool(struct pf_kpalist *);
107 static int		 pfioctl(struct cdev *, u_long, caddr_t, int,
108 			    struct thread *);
109 static int		 pf_begin_eth(uint32_t *, const char *);
110 static void		 pf_rollback_eth_cb(struct epoch_context *);
111 static int		 pf_rollback_eth(uint32_t, const char *);
112 static int		 pf_commit_eth(uint32_t, const char *);
113 static void		 pf_free_eth_rule(struct pf_keth_rule *);
114 #ifdef ALTQ
115 static int		 pf_begin_altq(u_int32_t *);
116 static int		 pf_rollback_altq(u_int32_t);
117 static int		 pf_commit_altq(u_int32_t);
118 static int		 pf_enable_altq(struct pf_altq *);
119 static int		 pf_disable_altq(struct pf_altq *);
120 static uint16_t		 pf_qname2qid(const char *);
121 static void		 pf_qid_unref(uint16_t);
122 #endif /* ALTQ */
123 static int		 pf_begin_rules(u_int32_t *, int, const char *);
124 static int		 pf_rollback_rules(u_int32_t, int, char *);
125 static int		 pf_setup_pfsync_matching(struct pf_kruleset *);
126 static void		 pf_hash_rule_rolling(MD5_CTX *, struct pf_krule *);
127 static void		 pf_hash_rule(struct pf_krule *);
128 static void		 pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
129 static int		 pf_commit_rules(u_int32_t, int, char *);
130 static int		 pf_addr_setup(struct pf_kruleset *,
131 			    struct pf_addr_wrap *, sa_family_t);
132 static void		 pf_addr_copyout(struct pf_addr_wrap *);
133 static void		 pf_src_node_copy(const struct pf_ksrc_node *,
134 			    struct pf_src_node *);
135 #ifdef ALTQ
136 static int		 pf_export_kaltq(struct pf_altq *,
137 			    struct pfioc_altq_v1 *, size_t);
138 static int		 pf_import_kaltq(struct pfioc_altq_v1 *,
139 			    struct pf_altq *, size_t);
140 #endif /* ALTQ */
141 
142 VNET_DEFINE(struct pf_krule,	pf_default_rule);
143 
144 static __inline int             pf_krule_compare(struct pf_krule *,
145 				    struct pf_krule *);
146 
147 RB_GENERATE(pf_krule_global, pf_krule, entry_global, pf_krule_compare);
148 
149 #ifdef ALTQ
150 VNET_DEFINE_STATIC(int,		pf_altq_running);
151 #define	V_pf_altq_running	VNET(pf_altq_running)
152 #endif
153 
154 #define	TAGID_MAX	 50000
155 struct pf_tagname {
156 	TAILQ_ENTRY(pf_tagname)	namehash_entries;
157 	TAILQ_ENTRY(pf_tagname)	taghash_entries;
158 	char			name[PF_TAG_NAME_SIZE];
159 	uint16_t		tag;
160 	int			ref;
161 };
162 
163 struct pf_tagset {
164 	TAILQ_HEAD(, pf_tagname)	*namehash;
165 	TAILQ_HEAD(, pf_tagname)	*taghash;
166 	unsigned int			 mask;
167 	uint32_t			 seed;
168 	BITSET_DEFINE(, TAGID_MAX)	 avail;
169 };
170 
171 VNET_DEFINE(struct pf_tagset, pf_tags);
172 #define	V_pf_tags	VNET(pf_tags)
173 static unsigned int	pf_rule_tag_hashsize;
174 #define	PF_RULE_TAG_HASH_SIZE_DEFAULT	128
175 SYSCTL_UINT(_net_pf, OID_AUTO, rule_tag_hashsize, CTLFLAG_RDTUN,
176     &pf_rule_tag_hashsize, PF_RULE_TAG_HASH_SIZE_DEFAULT,
177     "Size of pf(4) rule tag hashtable");
178 
179 #ifdef ALTQ
180 VNET_DEFINE(struct pf_tagset, pf_qids);
181 #define	V_pf_qids	VNET(pf_qids)
182 static unsigned int	pf_queue_tag_hashsize;
183 #define	PF_QUEUE_TAG_HASH_SIZE_DEFAULT	128
184 SYSCTL_UINT(_net_pf, OID_AUTO, queue_tag_hashsize, CTLFLAG_RDTUN,
185     &pf_queue_tag_hashsize, PF_QUEUE_TAG_HASH_SIZE_DEFAULT,
186     "Size of pf(4) queue tag hashtable");
187 #endif
188 VNET_DEFINE(uma_zone_t,	 pf_tag_z);
189 #define	V_pf_tag_z		 VNET(pf_tag_z)
190 static MALLOC_DEFINE(M_PFALTQ, "pf_altq", "pf(4) altq configuration db");
191 static MALLOC_DEFINE(M_PFRULE, "pf_rule", "pf(4) rules");
192 
193 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
194 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
195 #endif
196 
197 VNET_DEFINE_STATIC(bool, pf_filter_local) = false;
198 #define V_pf_filter_local	VNET(pf_filter_local)
199 SYSCTL_BOOL(_net_pf, OID_AUTO, filter_local, CTLFLAG_VNET | CTLFLAG_RW,
200     &VNET_NAME(pf_filter_local), false,
201     "Enable filtering for packets delivered to local network stack");
202 
203 #ifdef PF_DEFAULT_TO_DROP
204 VNET_DEFINE_STATIC(bool, default_to_drop) = true;
205 #else
206 VNET_DEFINE_STATIC(bool, default_to_drop);
207 #endif
208 #define	V_default_to_drop VNET(default_to_drop)
209 SYSCTL_BOOL(_net_pf, OID_AUTO, default_to_drop, CTLFLAG_RDTUN | CTLFLAG_VNET,
210     &VNET_NAME(default_to_drop), false,
211     "Make the default rule drop all packets.");
212 
213 static void		 pf_init_tagset(struct pf_tagset *, unsigned int *,
214 			    unsigned int);
215 static void		 pf_cleanup_tagset(struct pf_tagset *);
216 static uint16_t		 tagname2hashindex(const struct pf_tagset *, const char *);
217 static uint16_t		 tag2hashindex(const struct pf_tagset *, uint16_t);
218 static u_int16_t	 tagname2tag(struct pf_tagset *, const char *);
219 static u_int16_t	 pf_tagname2tag(const char *);
220 static void		 tag_unref(struct pf_tagset *, u_int16_t);
221 
222 #define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x
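
/*
 * Illustrative usage (editorial sketch, not in the original source):
 * the second macro argument is a complete, parenthesized printf()
 * argument list, e.g.
 *
 *	DPFPRINTF(PF_DEBUG_MISC, ("pf: tag %u not found\n", tag));
 *
 * which prints only when the configured debug level is at least
 * PF_DEBUG_MISC.
 */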
223 
224 struct cdev *pf_dev;
225 
226 /*
227  * XXX - These are new and need to be checked when moving to a new version
228  */
229 static void		 pf_clear_all_states(void);
230 static unsigned int	 pf_clear_states(const struct pf_kstate_kill *);
231 static void		 pf_killstates(struct pf_kstate_kill *,
232 			    unsigned int *);
233 static int		 pf_killstates_row(struct pf_kstate_kill *,
234 			    struct pf_idhash *);
235 static int		 pf_killstates_nv(struct pfioc_nv *);
236 static int		 pf_clearstates_nv(struct pfioc_nv *);
237 static int		 pf_getstate(struct pfioc_nv *);
238 static int		 pf_getstatus(struct pfioc_nv *);
239 static int		 pf_clear_tables(void);
240 static void		 pf_clear_srcnodes(struct pf_ksrc_node *);
241 static void		 pf_kill_srcnodes(struct pfioc_src_node_kill *);
242 static int		 pf_keepcounters(struct pfioc_nv *);
243 static void		 pf_tbladdr_copyout(struct pf_addr_wrap *);
244 
245 /*
246  * Wrapper functions for pfil(9) hooks
247  */
248 static pfil_return_t pf_eth_check_in(struct mbuf **m, struct ifnet *ifp,
249     int flags, void *ruleset __unused, struct inpcb *inp);
250 static pfil_return_t pf_eth_check_out(struct mbuf **m, struct ifnet *ifp,
251     int flags, void *ruleset __unused, struct inpcb *inp);
252 #ifdef INET
253 static pfil_return_t pf_check_in(struct mbuf **m, struct ifnet *ifp,
254     int flags, void *ruleset __unused, struct inpcb *inp);
255 static pfil_return_t pf_check_out(struct mbuf **m, struct ifnet *ifp,
256     int flags, void *ruleset __unused, struct inpcb *inp);
257 #endif
258 #ifdef INET6
259 static pfil_return_t pf_check6_in(struct mbuf **m, struct ifnet *ifp,
260     int flags, void *ruleset __unused, struct inpcb *inp);
261 static pfil_return_t pf_check6_out(struct mbuf **m, struct ifnet *ifp,
262     int flags, void *ruleset __unused, struct inpcb *inp);
263 #endif
264 
265 static void		hook_pf_eth(void);
266 static void		hook_pf(void);
267 static void		dehook_pf_eth(void);
268 static void		dehook_pf(void);
269 static int		shutdown_pf(void);
270 static int		pf_load(void);
271 static void		pf_unload(void);
272 
273 static struct cdevsw pf_cdevsw = {
274 	.d_ioctl =	pfioctl,
275 	.d_name =	PF_NAME,
276 	.d_version =	D_VERSION,
277 };
278 
279 VNET_DEFINE_STATIC(bool, pf_pfil_hooked);
280 #define V_pf_pfil_hooked	VNET(pf_pfil_hooked)
281 VNET_DEFINE_STATIC(bool, pf_pfil_eth_hooked);
282 #define V_pf_pfil_eth_hooked	VNET(pf_pfil_eth_hooked)
283 
284 /*
285  * We need a flag that is neither hooked nor running to know when
286  * the VNET is "valid".  We primarily need this to control (global)
287  * external events, e.g., eventhandlers.
288  */
289 VNET_DEFINE(int, pf_vnet_active);
290 #define V_pf_vnet_active	VNET(pf_vnet_active)
291 
292 int pf_end_threads;
293 struct proc *pf_purge_proc;
294 
295 VNET_DEFINE(struct rmlock, pf_rules_lock);
296 VNET_DEFINE_STATIC(struct sx, pf_ioctl_lock);
297 #define	V_pf_ioctl_lock		VNET(pf_ioctl_lock)
298 struct sx			pf_end_lock;
299 
300 /* pfsync */
301 VNET_DEFINE(pfsync_state_import_t *, pfsync_state_import_ptr);
302 VNET_DEFINE(pfsync_insert_state_t *, pfsync_insert_state_ptr);
303 VNET_DEFINE(pfsync_update_state_t *, pfsync_update_state_ptr);
304 VNET_DEFINE(pfsync_delete_state_t *, pfsync_delete_state_ptr);
305 VNET_DEFINE(pfsync_clear_states_t *, pfsync_clear_states_ptr);
306 VNET_DEFINE(pfsync_defer_t *, pfsync_defer_ptr);
307 pfsync_detach_ifnet_t *pfsync_detach_ifnet_ptr;
308 
309 /* pflog */
310 pflog_packet_t			*pflog_packet_ptr = NULL;
311 
312 /*
313  * Copy a user-provided string, returning an error if truncation would occur.
314  * Avoid scanning past "sz" bytes in the source string since there's no
315  * guarantee that it's nul-terminated.
316  */
317 static int
318 pf_user_strcpy(char *dst, const char *src, size_t sz)
319 {
320 	if (strnlen(src, sz) == sz)
321 		return (EINVAL);
322 	(void)strlcpy(dst, src, sz);
323 	return (0);
324 }
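
/*
 * Behavior sketch (editorial note, not in the original source): with a
 * 4-byte destination, a string that fits is copied and one that would
 * be truncated is rejected before anything is written:
 *
 *	char buf[4];
 *
 *	pf_user_strcpy(buf, "abc", sizeof(buf));	returns 0
 *	pf_user_strcpy(buf, "abcd", sizeof(buf));	returns EINVAL
 */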
325 
326 static void
327 pfattach_vnet(void)
328 {
329 	u_int32_t *my_timeout = V_pf_default_rule.timeout;
330 
331 	bzero(&V_pf_status, sizeof(V_pf_status));
332 
333 	pf_initialize();
334 	pfr_initialize();
335 	pfi_initialize_vnet();
336 	pf_normalize_init();
337 	pf_syncookies_init();
338 
339 	V_pf_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
340 	V_pf_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT;
341 
342 	RB_INIT(&V_pf_anchors);
343 	pf_init_kruleset(&pf_main_ruleset);
344 
345 	pf_init_keth(V_pf_keth);
346 
347 	/* default rule should never be garbage collected */
348 	V_pf_default_rule.entries.tqe_prev = &V_pf_default_rule.entries.tqe_next;
349 	V_pf_default_rule.action = V_default_to_drop ? PF_DROP : PF_PASS;
350 	V_pf_default_rule.nr = -1;
351 	V_pf_default_rule.rtableid = -1;
352 
353 	pf_counter_u64_init(&V_pf_default_rule.evaluations, M_WAITOK);
354 	for (int i = 0; i < 2; i++) {
355 		pf_counter_u64_init(&V_pf_default_rule.packets[i], M_WAITOK);
356 		pf_counter_u64_init(&V_pf_default_rule.bytes[i], M_WAITOK);
357 	}
358 	V_pf_default_rule.states_cur = counter_u64_alloc(M_WAITOK);
359 	V_pf_default_rule.states_tot = counter_u64_alloc(M_WAITOK);
360 	V_pf_default_rule.src_nodes = counter_u64_alloc(M_WAITOK);
361 
362 	V_pf_default_rule.timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
363 	    M_WAITOK | M_ZERO);
364 
365 #ifdef PF_WANT_32_TO_64_COUNTER
366 	V_pf_kifmarker = malloc(sizeof(*V_pf_kifmarker), PFI_MTYPE, M_WAITOK | M_ZERO);
367 	V_pf_rulemarker = malloc(sizeof(*V_pf_rulemarker), M_PFRULE, M_WAITOK | M_ZERO);
368 	PF_RULES_WLOCK();
369 	LIST_INSERT_HEAD(&V_pf_allkiflist, V_pf_kifmarker, pfik_allkiflist);
370 	LIST_INSERT_HEAD(&V_pf_allrulelist, &V_pf_default_rule, allrulelist);
371 	V_pf_allrulecount++;
372 	LIST_INSERT_HEAD(&V_pf_allrulelist, V_pf_rulemarker, allrulelist);
373 	PF_RULES_WUNLOCK();
374 #endif
375 
376 	/* initialize default timeouts */
377 	my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
378 	my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
379 	my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
380 	my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
381 	my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
382 	my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
383 	my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
384 	my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
385 	my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
386 	my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
387 	my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
388 	my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
389 	my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
390 	my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
391 	my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
392 	my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
393 	my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
394 	my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
395 	my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
396 	my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;
397 
398 	V_pf_status.debug = PF_DEBUG_URGENT;
399 	/*
400 	 * XXX This is different from OpenBSD, where reassembly is enabled by
401 	 * default. In FreeBSD we expect people to keep using scrub rules and
402 	 * switch to the new syntax later; only when they switch must they
403 	 * explicitly enable reassembly. We could change the default once the
404 	 * scrub rule functionality is hopefully removed some day in the future.
405 	 */
406 	V_pf_status.reass = 0;
407 
408 	V_pf_pfil_hooked = false;
409 	V_pf_pfil_eth_hooked = false;
410 
411 	/* XXX do our best to avoid a conflict */
412 	V_pf_status.hostid = arc4random();
413 
414 	for (int i = 0; i < PFRES_MAX; i++)
415 		V_pf_status.counters[i] = counter_u64_alloc(M_WAITOK);
416 	for (int i = 0; i < KLCNT_MAX; i++)
417 		V_pf_status.lcounters[i] = counter_u64_alloc(M_WAITOK);
418 	for (int i = 0; i < FCNT_MAX; i++)
419 		pf_counter_u64_init(&V_pf_status.fcounters[i], M_WAITOK);
420 	for (int i = 0; i < SCNT_MAX; i++)
421 		V_pf_status.scounters[i] = counter_u64_alloc(M_WAITOK);
422 
423 	if (swi_add(&V_pf_swi_ie, "pf send", pf_intr, curvnet, SWI_NET,
424 	    INTR_MPSAFE, &V_pf_swi_cookie) != 0)
425 		/* XXXGL: leaked all above. */
426 		return;
427 }
428 
429 static struct pf_kpool *
430 pf_get_kpool(const char *anchor, u_int32_t ticket, u_int8_t rule_action,
431     u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
432     u_int8_t check_ticket)
433 {
434 	struct pf_kruleset	*ruleset;
435 	struct pf_krule		*rule;
436 	int			 rs_num;
437 
438 	ruleset = pf_find_kruleset(anchor);
439 	if (ruleset == NULL)
440 		return (NULL);
441 	rs_num = pf_get_ruleset_number(rule_action);
442 	if (rs_num >= PF_RULESET_MAX)
443 		return (NULL);
444 	if (active) {
445 		if (check_ticket && ticket !=
446 		    ruleset->rules[rs_num].active.ticket)
447 			return (NULL);
448 		if (r_last)
449 			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
450 			    pf_krulequeue);
451 		else
452 			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
453 	} else {
454 		if (check_ticket && ticket !=
455 		    ruleset->rules[rs_num].inactive.ticket)
456 			return (NULL);
457 		if (r_last)
458 			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
459 			    pf_krulequeue);
460 		else
461 			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
462 	}
463 	if (!r_last) {
464 		while ((rule != NULL) && (rule->nr != rule_number))
465 			rule = TAILQ_NEXT(rule, entries);
466 	}
467 	if (rule == NULL)
468 		return (NULL);
469 
470 	return (&rule->rpool);
471 }
472 
473 static void
474 pf_mv_kpool(struct pf_kpalist *poola, struct pf_kpalist *poolb)
475 {
476 	struct pf_kpooladdr	*mv_pool_pa;
477 
478 	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
479 		TAILQ_REMOVE(poola, mv_pool_pa, entries);
480 		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
481 	}
482 }
483 
484 static void
485 pf_empty_kpool(struct pf_kpalist *poola)
486 {
487 	struct pf_kpooladdr *pa;
488 
489 	while ((pa = TAILQ_FIRST(poola)) != NULL) {
490 		switch (pa->addr.type) {
491 		case PF_ADDR_DYNIFTL:
492 			pfi_dynaddr_remove(pa->addr.p.dyn);
493 			break;
494 		case PF_ADDR_TABLE:
495 			/* XXX: this could be unfinished pooladdr on pabuf */
496 			if (pa->addr.p.tbl != NULL)
497 				pfr_detach_table(pa->addr.p.tbl);
498 			break;
499 		}
500 		if (pa->kif)
501 			pfi_kkif_unref(pa->kif);
502 		TAILQ_REMOVE(poola, pa, entries);
503 		free(pa, M_PFRULE);
504 	}
505 }
506 
507 static void
508 pf_unlink_rule_locked(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
509 {
510 
511 	PF_RULES_WASSERT();
512 	PF_UNLNKDRULES_ASSERT();
513 
514 	TAILQ_REMOVE(rulequeue, rule, entries);
515 
516 	rule->rule_ref |= PFRULE_REFS;
517 	TAILQ_INSERT_TAIL(&V_pf_unlinked_rules, rule, entries);
518 }
519 
520 static void
521 pf_unlink_rule(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
522 {
523 
524 	PF_RULES_WASSERT();
525 
526 	PF_UNLNKDRULES_LOCK();
527 	pf_unlink_rule_locked(rulequeue, rule);
528 	PF_UNLNKDRULES_UNLOCK();
529 }
530 
531 static void
532 pf_free_eth_rule(struct pf_keth_rule *rule)
533 {
534 	PF_RULES_WASSERT();
535 
536 	if (rule == NULL)
537 		return;
538 
539 	if (rule->tag)
540 		tag_unref(&V_pf_tags, rule->tag);
541 	if (rule->match_tag)
542 		tag_unref(&V_pf_tags, rule->match_tag);
543 #ifdef ALTQ
544 	pf_qid_unref(rule->qid);
545 #endif
546 
547 	if (rule->bridge_to)
548 		pfi_kkif_unref(rule->bridge_to);
549 	if (rule->kif)
550 		pfi_kkif_unref(rule->kif);
551 
552 	if (rule->ipsrc.addr.type == PF_ADDR_TABLE)
553 		pfr_detach_table(rule->ipsrc.addr.p.tbl);
554 	if (rule->ipdst.addr.type == PF_ADDR_TABLE)
555 		pfr_detach_table(rule->ipdst.addr.p.tbl);
556 
557 	counter_u64_free(rule->evaluations);
558 	for (int i = 0; i < 2; i++) {
559 		counter_u64_free(rule->packets[i]);
560 		counter_u64_free(rule->bytes[i]);
561 	}
562 	uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp);
563 	pf_keth_anchor_remove(rule);
564 
565 	free(rule, M_PFRULE);
566 }
567 
568 void
569 pf_free_rule(struct pf_krule *rule)
570 {
571 
572 	PF_RULES_WASSERT();
573 	PF_CONFIG_ASSERT();
574 
575 	if (rule->tag)
576 		tag_unref(&V_pf_tags, rule->tag);
577 	if (rule->match_tag)
578 		tag_unref(&V_pf_tags, rule->match_tag);
579 #ifdef ALTQ
580 	if (rule->pqid != rule->qid)
581 		pf_qid_unref(rule->pqid);
582 	pf_qid_unref(rule->qid);
583 #endif
584 	switch (rule->src.addr.type) {
585 	case PF_ADDR_DYNIFTL:
586 		pfi_dynaddr_remove(rule->src.addr.p.dyn);
587 		break;
588 	case PF_ADDR_TABLE:
589 		pfr_detach_table(rule->src.addr.p.tbl);
590 		break;
591 	}
592 	switch (rule->dst.addr.type) {
593 	case PF_ADDR_DYNIFTL:
594 		pfi_dynaddr_remove(rule->dst.addr.p.dyn);
595 		break;
596 	case PF_ADDR_TABLE:
597 		pfr_detach_table(rule->dst.addr.p.tbl);
598 		break;
599 	}
600 	if (rule->overload_tbl)
601 		pfr_detach_table(rule->overload_tbl);
602 	if (rule->kif)
603 		pfi_kkif_unref(rule->kif);
604 	pf_kanchor_remove(rule);
605 	pf_empty_kpool(&rule->rpool.list);
606 
607 	pf_krule_free(rule);
608 }
609 
610 static void
611 pf_init_tagset(struct pf_tagset *ts, unsigned int *tunable_size,
612     unsigned int default_size)
613 {
614 	unsigned int i;
615 	unsigned int hashsize;
616 
617 	if (*tunable_size == 0 || !powerof2(*tunable_size))
618 		*tunable_size = default_size;
619 
620 	hashsize = *tunable_size;
621 	ts->namehash = mallocarray(hashsize, sizeof(*ts->namehash), M_PFHASH,
622 	    M_WAITOK);
623 	ts->taghash = mallocarray(hashsize, sizeof(*ts->taghash), M_PFHASH,
624 	    M_WAITOK);
625 	ts->mask = hashsize - 1;
626 	ts->seed = arc4random();
627 	for (i = 0; i < hashsize; i++) {
628 		TAILQ_INIT(&ts->namehash[i]);
629 		TAILQ_INIT(&ts->taghash[i]);
630 	}
631 	BIT_FILL(TAGID_MAX, &ts->avail);
632 }
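
/*
 * Editorial note: because the size is forced to a power of two above,
 * "mask" can stand in for a modulo: with the default size of 128 the
 * mask is 0x7f, and a bucket index is computed as (hash & 0x7f).
 */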
633 
634 static void
635 pf_cleanup_tagset(struct pf_tagset *ts)
636 {
637 	unsigned int i;
638 	unsigned int hashsize;
639 	struct pf_tagname *t, *tmp;
640 
641 	/*
642 	 * Only need to clean up one of the hashes as each tag is hashed
643 	 * into each table.
644 	 */
645 	hashsize = ts->mask + 1;
646 	for (i = 0; i < hashsize; i++)
647 		TAILQ_FOREACH_SAFE(t, &ts->namehash[i], namehash_entries, tmp)
648 			uma_zfree(V_pf_tag_z, t);
649 
650 	free(ts->namehash, M_PFHASH);
651 	free(ts->taghash, M_PFHASH);
652 }
653 
654 static uint16_t
655 tagname2hashindex(const struct pf_tagset *ts, const char *tagname)
656 {
657 	size_t len;
658 
659 	len = strnlen(tagname, PF_TAG_NAME_SIZE - 1);
660 	return (murmur3_32_hash(tagname, len, ts->seed) & ts->mask);
661 }
662 
663 static uint16_t
664 tag2hashindex(const struct pf_tagset *ts, uint16_t tag)
665 {
666 
667 	return (tag & ts->mask);
668 }
669 
670 static u_int16_t
671 tagname2tag(struct pf_tagset *ts, const char *tagname)
672 {
673 	struct pf_tagname	*tag;
674 	u_int32_t		 index;
675 	u_int16_t		 new_tagid;
676 
677 	PF_RULES_WASSERT();
678 
679 	index = tagname2hashindex(ts, tagname);
680 	TAILQ_FOREACH(tag, &ts->namehash[index], namehash_entries)
681 		if (strcmp(tagname, tag->name) == 0) {
682 			tag->ref++;
683 			return (tag->tag);
684 		}
685 
686 	/*
687 	 * new entry
688 	 *
689 	 * to avoid fragmentation, we do a linear search from the beginning
690 	 * and take the first free slot we find.
691 	 */
692 	new_tagid = BIT_FFS(TAGID_MAX, &ts->avail);
693 	/*
694 	 * Tags are 1-based, with valid tags in the range [1..TAGID_MAX].
695 	 * BIT_FFS() returns a 1-based bit number, with 0 indicating no bits
696 	 * set.  It may also return a bit number greater than TAGID_MAX due
697 	 * to rounding of the number of bits in the vector up to a multiple
698 	 * of the vector word size at declaration/allocation time.
699 	 */
700 	if ((new_tagid == 0) || (new_tagid > TAGID_MAX))
701 		return (0);
702 
703 	/* Mark the tag as in use.  Bits are 0-based for BIT_CLR() */
704 	BIT_CLR(TAGID_MAX, new_tagid - 1, &ts->avail);
705 
706 	/* allocate and fill new struct pf_tagname */
707 	tag = uma_zalloc(V_pf_tag_z, M_NOWAIT);
708 	if (tag == NULL)
709 		return (0);
710 	strlcpy(tag->name, tagname, sizeof(tag->name));
711 	tag->tag = new_tagid;
712 	tag->ref = 1;
713 
714 	/* Insert into namehash */
715 	TAILQ_INSERT_TAIL(&ts->namehash[index], tag, namehash_entries);
716 
717 	/* Insert into taghash */
718 	index = tag2hashindex(ts, new_tagid);
719 	TAILQ_INSERT_TAIL(&ts->taghash[index], tag, taghash_entries);
720 
721 	return (tag->tag);
722 }
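
/*
 * Allocation sketch (editorial note, not in the original source): on a
 * fresh tagset every bit in "avail" is set, so distinct names receive
 * tags 1, 2, 3, ... while a repeated name only gains a reference:
 *
 *	tagname2tag(ts, "http");	returns 1 (ref 1)
 *	tagname2tag(ts, "http");	returns 1 (ref 2)
 *	tagname2tag(ts, "ssh");		returns 2 (ref 1)
 */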
723 
724 static void
725 tag_unref(struct pf_tagset *ts, u_int16_t tag)
726 {
727 	struct pf_tagname	*t;
728 	uint16_t		 index;
729 
730 	PF_RULES_WASSERT();
731 
732 	index = tag2hashindex(ts, tag);
733 	TAILQ_FOREACH(t, &ts->taghash[index], taghash_entries)
734 		if (tag == t->tag) {
735 			if (--t->ref == 0) {
736 				TAILQ_REMOVE(&ts->taghash[index], t,
737 				    taghash_entries);
738 				index = tagname2hashindex(ts, t->name);
739 				TAILQ_REMOVE(&ts->namehash[index], t,
740 				    namehash_entries);
741 				/* Bits are 0-based for BIT_SET() */
742 				BIT_SET(TAGID_MAX, tag - 1, &ts->avail);
743 				uma_zfree(V_pf_tag_z, t);
744 			}
745 			break;
746 		}
747 }
748 
749 static uint16_t
750 pf_tagname2tag(const char *tagname)
751 {
752 	return (tagname2tag(&V_pf_tags, tagname));
753 }
754 
755 static int
756 pf_begin_eth(uint32_t *ticket, const char *anchor)
757 {
758 	struct pf_keth_rule *rule, *tmp;
759 	struct pf_keth_ruleset *rs;
760 
761 	PF_RULES_WASSERT();
762 
763 	rs = pf_find_or_create_keth_ruleset(anchor);
764 	if (rs == NULL)
765 		return (EINVAL);
766 
767 	/* Purge old inactive rules. */
768 	TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries,
769 	    tmp) {
770 		TAILQ_REMOVE(rs->inactive.rules, rule,
771 		    entries);
772 		pf_free_eth_rule(rule);
773 	}
774 
775 	*ticket = ++rs->inactive.ticket;
776 	rs->inactive.open = 1;
777 
778 	return (0);
779 }
780 
781 static void
782 pf_rollback_eth_cb(struct epoch_context *ctx)
783 {
784 	struct pf_keth_ruleset *rs;
785 
786 	rs = __containerof(ctx, struct pf_keth_ruleset, epoch_ctx);
787 
788 	CURVNET_SET(rs->vnet);
789 
790 	PF_RULES_WLOCK();
791 	pf_rollback_eth(rs->inactive.ticket,
792 	    rs->anchor ? rs->anchor->path : "");
793 	PF_RULES_WUNLOCK();
794 
795 	CURVNET_RESTORE();
796 }
797 
798 static int
799 pf_rollback_eth(uint32_t ticket, const char *anchor)
800 {
801 	struct pf_keth_rule *rule, *tmp;
802 	struct pf_keth_ruleset *rs;
803 
804 	PF_RULES_WASSERT();
805 
806 	rs = pf_find_keth_ruleset(anchor);
807 	if (rs == NULL)
808 		return (EINVAL);
809 
810 	if (!rs->inactive.open ||
811 	    ticket != rs->inactive.ticket)
812 		return (0);
813 
814 	/* Purge old inactive rules. */
815 	TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries,
816 	    tmp) {
817 		TAILQ_REMOVE(rs->inactive.rules, rule, entries);
818 		pf_free_eth_rule(rule);
819 	}
820 
821 	rs->inactive.open = 0;
822 
823 	pf_remove_if_empty_keth_ruleset(rs);
824 
825 	return (0);
826 }
827 
828 #define	PF_SET_SKIP_STEPS(i)					\
829 	do {							\
830 		while (head[i] != cur) {			\
831 			head[i]->skip[i].ptr = cur;		\
832 			head[i] = TAILQ_NEXT(head[i], entries);	\
833 		}						\
834 	} while (0)
835 
836 static void
837 pf_eth_calc_skip_steps(struct pf_keth_ruleq *rules)
838 {
839 	struct pf_keth_rule *cur, *prev, *head[PFE_SKIP_COUNT];
840 	int i;
841 
842 	cur = TAILQ_FIRST(rules);
843 	prev = cur;
844 	for (i = 0; i < PFE_SKIP_COUNT; ++i)
845 		head[i] = cur;
846 	while (cur != NULL) {
847 		if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
848 			PF_SET_SKIP_STEPS(PFE_SKIP_IFP);
849 		if (cur->direction != prev->direction)
850 			PF_SET_SKIP_STEPS(PFE_SKIP_DIR);
851 		if (cur->proto != prev->proto)
852 			PF_SET_SKIP_STEPS(PFE_SKIP_PROTO);
853 		if (memcmp(&cur->src, &prev->src, sizeof(cur->src)) != 0)
854 			PF_SET_SKIP_STEPS(PFE_SKIP_SRC_ADDR);
855 		if (memcmp(&cur->dst, &prev->dst, sizeof(cur->dst)) != 0)
856 			PF_SET_SKIP_STEPS(PFE_SKIP_DST_ADDR);
857 		if (cur->ipsrc.neg != prev->ipsrc.neg ||
858 		    pf_addr_wrap_neq(&cur->ipsrc.addr, &prev->ipsrc.addr))
859 			PF_SET_SKIP_STEPS(PFE_SKIP_SRC_IP_ADDR);
860 		if (cur->ipdst.neg != prev->ipdst.neg ||
861 		    pf_addr_wrap_neq(&cur->ipdst.addr, &prev->ipdst.addr))
862 			PF_SET_SKIP_STEPS(PFE_SKIP_DST_IP_ADDR);
863 
864 		prev = cur;
865 		cur = TAILQ_NEXT(cur, entries);
866 	}
867 	for (i = 0; i < PFE_SKIP_COUNT; ++i)
868 		PF_SET_SKIP_STEPS(i);
869 }
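
/*
 * Editorial sketch: for each field, a run of consecutive rules with
 * the same value ends up with skip pointers to the first rule where
 * the value changes.  If rules 1-3 all match on em0 and rule 4 on em1
 * (interface names hypothetical), a packet received on another
 * interface fails the test at rule 1 and follows skip[PFE_SKIP_IFP]
 * directly to rule 4, never evaluating rules 2 and 3.
 */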
870 
871 static int
872 pf_commit_eth(uint32_t ticket, const char *anchor)
873 {
874 	struct pf_keth_ruleq *rules;
875 	struct pf_keth_ruleset *rs;
876 
877 	rs = pf_find_keth_ruleset(anchor);
878 	if (rs == NULL) {
879 		return (EINVAL);
880 	}
881 
882 	if (!rs->inactive.open ||
883 	    ticket != rs->inactive.ticket)
884 		return (EBUSY);
885 
886 	PF_RULES_WASSERT();
887 
888 	pf_eth_calc_skip_steps(rs->inactive.rules);
889 
890 	rules = rs->active.rules;
891 	ck_pr_store_ptr(&rs->active.rules, rs->inactive.rules);
892 	rs->inactive.rules = rules;
893 	rs->inactive.ticket = rs->active.ticket;
894 
895 	/* Clean up inactive rules (i.e. previously active rules), only when
896 	 * we're sure they're no longer used. */
897 	NET_EPOCH_CALL(pf_rollback_eth_cb, &rs->epoch_ctx);
898 
899 	return (0);
900 }
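
/*
 * Editorial note: the commit publishes the new rules with a single
 * ck_pr_store_ptr() while readers walk the list inside a net epoch
 * section, so NET_EPOCH_CALL() defers the rollback callback (which
 * frees the previously active rules) until all readers that might
 * still hold the old pointer have left their epoch sections.
 */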
901 
902 #ifdef ALTQ
903 static uint16_t
904 pf_qname2qid(const char *qname)
905 {
906 	return (tagname2tag(&V_pf_qids, qname));
907 }
908 
909 static void
910 pf_qid_unref(uint16_t qid)
911 {
912 	tag_unref(&V_pf_qids, qid);
913 }
914 
915 static int
916 pf_begin_altq(u_int32_t *ticket)
917 {
918 	struct pf_altq	*altq, *tmp;
919 	int		 error = 0;
920 
921 	PF_RULES_WASSERT();
922 
923 	/* Purge the old altq lists */
924 	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
925 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
926 			/* detach and destroy the discipline */
927 			error = altq_remove(altq);
928 		}
929 		free(altq, M_PFALTQ);
930 	}
931 	TAILQ_INIT(V_pf_altq_ifs_inactive);
932 	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
933 		pf_qid_unref(altq->qid);
934 		free(altq, M_PFALTQ);
935 	}
936 	TAILQ_INIT(V_pf_altqs_inactive);
937 	if (error)
938 		return (error);
939 	*ticket = ++V_ticket_altqs_inactive;
940 	V_altqs_inactive_open = 1;
941 	return (0);
942 }
943 
944 static int
945 pf_rollback_altq(u_int32_t ticket)
946 {
947 	struct pf_altq	*altq, *tmp;
948 	int		 error = 0;
949 
950 	PF_RULES_WASSERT();
951 
952 	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
953 		return (0);
954 	/* Purge the old altq lists */
955 	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
956 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
957 			/* detach and destroy the discipline */
958 			error = altq_remove(altq);
959 		}
960 		free(altq, M_PFALTQ);
961 	}
962 	TAILQ_INIT(V_pf_altq_ifs_inactive);
963 	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
964 		pf_qid_unref(altq->qid);
965 		free(altq, M_PFALTQ);
966 	}
967 	TAILQ_INIT(V_pf_altqs_inactive);
968 	V_altqs_inactive_open = 0;
969 	return (error);
970 }
971 
972 static int
973 pf_commit_altq(u_int32_t ticket)
974 {
975 	struct pf_altqqueue	*old_altqs, *old_altq_ifs;
976 	struct pf_altq		*altq, *tmp;
977 	int			 err, error = 0;
978 
979 	PF_RULES_WASSERT();
980 
981 	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
982 		return (EBUSY);
983 
984 	/* swap altqs, keep the old. */
985 	old_altqs = V_pf_altqs_active;
986 	old_altq_ifs = V_pf_altq_ifs_active;
987 	V_pf_altqs_active = V_pf_altqs_inactive;
988 	V_pf_altq_ifs_active = V_pf_altq_ifs_inactive;
989 	V_pf_altqs_inactive = old_altqs;
990 	V_pf_altq_ifs_inactive = old_altq_ifs;
991 	V_ticket_altqs_active = V_ticket_altqs_inactive;
992 
993 	/* Attach new disciplines */
994 	TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
995 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
996 			/* attach the discipline */
997 			error = altq_pfattach(altq);
998 			if (error == 0 && V_pf_altq_running)
999 				error = pf_enable_altq(altq);
1000 			if (error != 0)
1001 				return (error);
1002 		}
1003 	}
1004 
1005 	/* Purge the old altq lists */
1006 	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
1007 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
1008 			/* detach and destroy the discipline */
1009 			if (V_pf_altq_running)
1010 				error = pf_disable_altq(altq);
1011 			err = altq_pfdetach(altq);
1012 			if (err != 0 && error == 0)
1013 				error = err;
1014 			err = altq_remove(altq);
1015 			if (err != 0 && error == 0)
1016 				error = err;
1017 		}
1018 		free(altq, M_PFALTQ);
1019 	}
1020 	TAILQ_INIT(V_pf_altq_ifs_inactive);
1021 	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
1022 		pf_qid_unref(altq->qid);
1023 		free(altq, M_PFALTQ);
1024 	}
1025 	TAILQ_INIT(V_pf_altqs_inactive);
1026 
1027 	V_altqs_inactive_open = 0;
1028 	return (error);
1029 }
1030 
1031 static int
1032 pf_enable_altq(struct pf_altq *altq)
1033 {
1034 	struct ifnet		*ifp;
1035 	struct tb_profile	 tb;
1036 	int			 error = 0;
1037 
1038 	if ((ifp = ifunit(altq->ifname)) == NULL)
1039 		return (EINVAL);
1040 
1041 	if (ifp->if_snd.altq_type != ALTQT_NONE)
1042 		error = altq_enable(&ifp->if_snd);
1043 
1044 	/* set tokenbucket regulator */
1045 	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
1046 		tb.rate = altq->ifbandwidth;
1047 		tb.depth = altq->tbrsize;
1048 		error = tbr_set(&ifp->if_snd, &tb);
1049 	}
1050 
1051 	return (error);
1052 }
1053 
1054 static int
1055 pf_disable_altq(struct pf_altq *altq)
1056 {
1057 	struct ifnet		*ifp;
1058 	struct tb_profile	 tb;
1059 	int			 error;
1060 
1061 	if ((ifp = ifunit(altq->ifname)) == NULL)
1062 		return (EINVAL);
1063 
1064 	/*
1065 	 * When the discipline is no longer referenced, it has been
1066 	 * overridden by a new one.  If so, just return.
1067 	 */
1068 	if (altq->altq_disc != ifp->if_snd.altq_disc)
1069 		return (0);
1070 
1071 	error = altq_disable(&ifp->if_snd);
1072 
1073 	if (error == 0) {
1074 		/* clear tokenbucket regulator */
1075 		tb.rate = 0;
1076 		error = tbr_set(&ifp->if_snd, &tb);
1077 	}
1078 
1079 	return (error);
1080 }
1081 
1082 static int
1083 pf_altq_ifnet_event_add(struct ifnet *ifp, int remove, u_int32_t ticket,
1084     struct pf_altq *altq)
1085 {
1086 	struct ifnet	*ifp1;
1087 	int		 error = 0;
1088 
1089 	/* Deactivate the interface in question */
1090 	altq->local_flags &= ~PFALTQ_FLAG_IF_REMOVED;
1091 	if ((ifp1 = ifunit(altq->ifname)) == NULL ||
1092 	    (remove && ifp1 == ifp)) {
1093 		altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
1094 	} else {
1095 		error = altq_add(ifp1, altq);
1096 
1097 		if (ticket != V_ticket_altqs_inactive)
1098 			error = EBUSY;
1099 
1100 		if (error)
1101 			free(altq, M_PFALTQ);
1102 	}
1103 
1104 	return (error);
1105 }
1106 
1107 void
1108 pf_altq_ifnet_event(struct ifnet *ifp, int remove)
1109 {
1110 	struct pf_altq	*a1, *a2, *a3;
1111 	u_int32_t	 ticket;
1112 	int		 error = 0;
1113 
1114 	/*
1115 	 * No need to re-evaluate the configuration for events on interfaces
1116 	 * that do not support ALTQ, as it's not possible for such
1117 	 * interfaces to be part of the configuration.
1118 	 */
1119 	if (!ALTQ_IS_READY(&ifp->if_snd))
1120 		return;
1121 
1122 	/* Interrupt userland queue modifications */
1123 	if (V_altqs_inactive_open)
1124 		pf_rollback_altq(V_ticket_altqs_inactive);
1125 
1126 	/* Start new altq ruleset */
1127 	if (pf_begin_altq(&ticket))
1128 		return;
1129 
1130 	/* Copy the current active set */
1131 	TAILQ_FOREACH(a1, V_pf_altq_ifs_active, entries) {
1132 		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
1133 		if (a2 == NULL) {
1134 			error = ENOMEM;
1135 			break;
1136 		}
1137 		bcopy(a1, a2, sizeof(struct pf_altq));
1138 
1139 		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
1140 		if (error)
1141 			break;
1142 
1143 		TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, a2, entries);
1144 	}
1145 	if (error)
1146 		goto out;
1147 	TAILQ_FOREACH(a1, V_pf_altqs_active, entries) {
1148 		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
1149 		if (a2 == NULL) {
1150 			error = ENOMEM;
1151 			break;
1152 		}
1153 		bcopy(a1, a2, sizeof(struct pf_altq));
1154 
1155 		if ((a2->qid = pf_qname2qid(a2->qname)) == 0) {
1156 			error = EBUSY;
1157 			free(a2, M_PFALTQ);
1158 			break;
1159 		}
1160 		a2->altq_disc = NULL;
1161 		TAILQ_FOREACH(a3, V_pf_altq_ifs_inactive, entries) {
1162 			if (strncmp(a3->ifname, a2->ifname,
1163 				IFNAMSIZ) == 0) {
1164 				a2->altq_disc = a3->altq_disc;
1165 				break;
1166 			}
1167 		}
1168 		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
1169 		if (error)
1170 			break;
1171 
1172 		TAILQ_INSERT_TAIL(V_pf_altqs_inactive, a2, entries);
1173 	}
1174 
1175 out:
1176 	if (error != 0)
1177 		pf_rollback_altq(ticket);
1178 	else
1179 		pf_commit_altq(ticket);
1180 }
1181 #endif /* ALTQ */
1182 
1183 static struct pf_krule_global *
1184 pf_rule_tree_alloc(int flags)
1185 {
1186 	struct pf_krule_global *tree;
1187 
1188 	tree = malloc(sizeof(struct pf_krule_global), M_TEMP, flags);
1189 	if (tree == NULL)
1190 		return (NULL);
1191 	RB_INIT(tree);
1192 	return (tree);
1193 }
1194 
1195 static void
1196 pf_rule_tree_free(struct pf_krule_global *tree)
1197 {
1198 
1199 	free(tree, M_TEMP);
1200 }
1201 
1202 static int
1203 pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
1204 {
1205 	struct pf_krule_global *tree;
1206 	struct pf_kruleset	*rs;
1207 	struct pf_krule		*rule;
1208 
1209 	PF_RULES_WASSERT();
1210 
1211 	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1212 		return (EINVAL);
1213 	tree = pf_rule_tree_alloc(M_NOWAIT);
1214 	if (tree == NULL)
1215 		return (ENOMEM);
1216 	rs = pf_find_or_create_kruleset(anchor);
1217 	if (rs == NULL) {
1218 		free(tree, M_TEMP);
1219 		return (EINVAL);
1220 	}
1221 	pf_rule_tree_free(rs->rules[rs_num].inactive.tree);
1222 	rs->rules[rs_num].inactive.tree = tree;
1223 
1224 	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
1225 		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
1226 		rs->rules[rs_num].inactive.rcount--;
1227 	}
1228 	*ticket = ++rs->rules[rs_num].inactive.ticket;
1229 	rs->rules[rs_num].inactive.open = 1;
1230 	return (0);
1231 }
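
/*
 * Transaction sketch (editorial note): the ticket makes ruleset
 * replacement transactional.  A userland flow, roughly as pfctl(8)
 * performs it, is:
 *
 *	DIOCXBEGIN	-> pf_begin_rules() hands out ticket T
 *	DIOCADDRULE	-> each added rule must present ticket T
 *	DIOCXCOMMIT	-> pf_commit_rules(T, ...) swaps the rulesets
 *
 * A mismatched ticket, e.g. after a concurrent begin, fails the
 * commit with EBUSY.
 */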
1232 
1233 static int
1234 pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
1235 {
1236 	struct pf_kruleset	*rs;
1237 	struct pf_krule		*rule;
1238 
1239 	PF_RULES_WASSERT();
1240 
1241 	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1242 		return (EINVAL);
1243 	rs = pf_find_kruleset(anchor);
1244 	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
1245 	    rs->rules[rs_num].inactive.ticket != ticket)
1246 		return (0);
1247 	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
1248 		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
1249 		rs->rules[rs_num].inactive.rcount--;
1250 	}
1251 	rs->rules[rs_num].inactive.open = 0;
1252 	return (0);
1253 }
1254 
1255 #define PF_MD5_UPD(st, elm)						\
1256 		MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))
1257 
1258 #define PF_MD5_UPD_STR(st, elm)						\
1259 		MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))
1260 
1261 #define PF_MD5_UPD_HTONL(st, elm, stor) do {				\
1262 		(stor) = htonl((st)->elm);				\
1263 		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
1264 } while (0)
1265 
1266 #define PF_MD5_UPD_HTONS(st, elm, stor) do {				\
1267 		(stor) = htons((st)->elm);				\
1268 		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
1269 } while (0)
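
/*
 * Editorial note: the HTONL/HTONS variants feed fields to MD5 in
 * network byte order, keeping the resulting ruleset checksum
 * independent of host endianness so it can be compared between
 * pfsync peers.
 */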
1270 
1271 static void
1272 pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
1273 {
1274 	PF_MD5_UPD(pfr, addr.type);
1275 	switch (pfr->addr.type) {
1276 		case PF_ADDR_DYNIFTL:
1277 			PF_MD5_UPD(pfr, addr.v.ifname);
1278 			PF_MD5_UPD(pfr, addr.iflags);
1279 			break;
1280 		case PF_ADDR_TABLE:
1281 			PF_MD5_UPD(pfr, addr.v.tblname);
1282 			break;
1283 		case PF_ADDR_ADDRMASK:
1284 			/* XXX ignore af? */
1285 			PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
1286 			PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
1287 			break;
1288 	}
1289 
1290 	PF_MD5_UPD(pfr, port[0]);
1291 	PF_MD5_UPD(pfr, port[1]);
1292 	PF_MD5_UPD(pfr, neg);
1293 	PF_MD5_UPD(pfr, port_op);
1294 }
1295 
1296 static void
1297 pf_hash_rule_rolling(MD5_CTX *ctx, struct pf_krule *rule)
1298 {
1299 	u_int16_t x;
1300 	u_int32_t y;
1301 
1302 	pf_hash_rule_addr(ctx, &rule->src);
1303 	pf_hash_rule_addr(ctx, &rule->dst);
1304 	for (int i = 0; i < PF_RULE_MAX_LABEL_COUNT; i++)
1305 		PF_MD5_UPD_STR(rule, label[i]);
1306 	PF_MD5_UPD_STR(rule, ifname);
1307 	PF_MD5_UPD_STR(rule, match_tagname);
1308 	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
1309 	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
1310 	PF_MD5_UPD_HTONL(rule, prob, y);
1311 	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
1312 	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
1313 	PF_MD5_UPD(rule, uid.op);
1314 	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
1315 	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
1316 	PF_MD5_UPD(rule, gid.op);
1317 	PF_MD5_UPD_HTONL(rule, rule_flag, y);
1318 	PF_MD5_UPD(rule, action);
1319 	PF_MD5_UPD(rule, direction);
1320 	PF_MD5_UPD(rule, af);
1321 	PF_MD5_UPD(rule, quick);
1322 	PF_MD5_UPD(rule, ifnot);
1323 	PF_MD5_UPD(rule, match_tag_not);
1324 	PF_MD5_UPD(rule, natpass);
1325 	PF_MD5_UPD(rule, keep_state);
1326 	PF_MD5_UPD(rule, proto);
1327 	PF_MD5_UPD(rule, type);
1328 	PF_MD5_UPD(rule, code);
1329 	PF_MD5_UPD(rule, flags);
1330 	PF_MD5_UPD(rule, flagset);
1331 	PF_MD5_UPD(rule, allow_opts);
1332 	PF_MD5_UPD(rule, rt);
1333 	PF_MD5_UPD(rule, tos);
1334 	PF_MD5_UPD(rule, scrub_flags);
1335 	PF_MD5_UPD(rule, min_ttl);
1336 	PF_MD5_UPD(rule, set_tos);
1337 	if (rule->anchor != NULL)
1338 		PF_MD5_UPD_STR(rule, anchor->path);
1339 }
1340 
1341 static void
1342 pf_hash_rule(struct pf_krule *rule)
1343 {
1344 	MD5_CTX		ctx;
1345 
1346 	MD5Init(&ctx);
1347 	pf_hash_rule_rolling(&ctx, rule);
1348 	MD5Final(rule->md5sum, &ctx);
1349 }
1350 
1351 static int
1352 pf_krule_compare(struct pf_krule *a, struct pf_krule *b)
1353 {
1354 
1355 	return (memcmp(a->md5sum, b->md5sum, PF_MD5_DIGEST_LENGTH));
1356 }
1357 
1358 static int
1359 pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
1360 {
1361 	struct pf_kruleset	*rs;
1362 	struct pf_krule		*rule, **old_array, *old_rule;
1363 	struct pf_krulequeue	*old_rules;
1364 	struct pf_krule_global  *old_tree;
1365 	int			 error;
1366 	u_int32_t		 old_rcount;
1367 
1368 	PF_RULES_WASSERT();
1369 
1370 	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1371 		return (EINVAL);
1372 	rs = pf_find_kruleset(anchor);
1373 	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
1374 	    ticket != rs->rules[rs_num].inactive.ticket)
1375 		return (EBUSY);
1376 
1377 	/* Calculate checksum for the main ruleset */
1378 	if (rs == &pf_main_ruleset) {
1379 		error = pf_setup_pfsync_matching(rs);
1380 		if (error != 0)
1381 			return (error);
1382 	}
1383 
1384 	/* Swap rules, keep the old. */
1385 	old_rules = rs->rules[rs_num].active.ptr;
1386 	old_rcount = rs->rules[rs_num].active.rcount;
1387 	old_array = rs->rules[rs_num].active.ptr_array;
1388 	old_tree = rs->rules[rs_num].active.tree;
1389 
1390 	rs->rules[rs_num].active.ptr =
1391 	    rs->rules[rs_num].inactive.ptr;
1392 	rs->rules[rs_num].active.ptr_array =
1393 	    rs->rules[rs_num].inactive.ptr_array;
1394 	rs->rules[rs_num].active.tree =
1395 	    rs->rules[rs_num].inactive.tree;
1396 	rs->rules[rs_num].active.rcount =
1397 	    rs->rules[rs_num].inactive.rcount;
1398 
1399 	/* Attempt to preserve counter information. */
1400 	if (V_pf_status.keep_counters && old_tree != NULL) {
1401 		TAILQ_FOREACH(rule, rs->rules[rs_num].active.ptr,
1402 		    entries) {
1403 			old_rule = RB_FIND(pf_krule_global, old_tree, rule);
1404 			if (old_rule == NULL) {
1405 				continue;
1406 			}
1407 			pf_counter_u64_critical_enter();
1408 			pf_counter_u64_add_protected(&rule->evaluations,
1409 			    pf_counter_u64_fetch(&old_rule->evaluations));
1410 			pf_counter_u64_add_protected(&rule->packets[0],
1411 			    pf_counter_u64_fetch(&old_rule->packets[0]));
1412 			pf_counter_u64_add_protected(&rule->packets[1],
1413 			    pf_counter_u64_fetch(&old_rule->packets[1]));
1414 			pf_counter_u64_add_protected(&rule->bytes[0],
1415 			    pf_counter_u64_fetch(&old_rule->bytes[0]));
1416 			pf_counter_u64_add_protected(&rule->bytes[1],
1417 			    pf_counter_u64_fetch(&old_rule->bytes[1]));
1418 			pf_counter_u64_critical_exit();
1419 		}
1420 	}
1421 
1422 	rs->rules[rs_num].inactive.ptr = old_rules;
1423 	rs->rules[rs_num].inactive.ptr_array = old_array;
1424 	rs->rules[rs_num].inactive.tree = NULL; /* important for pf_ioctl_addrule */
1425 	rs->rules[rs_num].inactive.rcount = old_rcount;
1426 
1427 	rs->rules[rs_num].active.ticket =
1428 	    rs->rules[rs_num].inactive.ticket;
1429 	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);
1430 
1431 	/* Purge the old rule list. */
1432 	PF_UNLNKDRULES_LOCK();
1433 	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
1434 		pf_unlink_rule_locked(old_rules, rule);
1435 	PF_UNLNKDRULES_UNLOCK();
1436 	if (rs->rules[rs_num].inactive.ptr_array)
1437 		free(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
1438 	rs->rules[rs_num].inactive.ptr_array = NULL;
1439 	rs->rules[rs_num].inactive.rcount = 0;
1440 	rs->rules[rs_num].inactive.open = 0;
1441 	pf_remove_if_empty_kruleset(rs);
1442 	free(old_tree, M_TEMP);
1443 
1444 	return (0);
1445 }
1446 
1447 static int
1448 pf_setup_pfsync_matching(struct pf_kruleset *rs)
1449 {
1450 	MD5_CTX			 ctx;
1451 	struct pf_krule		*rule;
1452 	int			 rs_cnt;
1453 	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];
1454 
1455 	MD5Init(&ctx);
1456 	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
1457 		/* XXX PF_RULESET_SCRUB as well? */
1458 		if (rs_cnt == PF_RULESET_SCRUB)
1459 			continue;
1460 
1461 		if (rs->rules[rs_cnt].inactive.ptr_array)
1462 			free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
1463 		rs->rules[rs_cnt].inactive.ptr_array = NULL;
1464 
1465 		if (rs->rules[rs_cnt].inactive.rcount) {
1466 			rs->rules[rs_cnt].inactive.ptr_array =
1467 			    mallocarray(rs->rules[rs_cnt].inactive.rcount,
1468 			    sizeof(struct pf_rule **),
1469 			    M_TEMP, M_NOWAIT);
1470 
1471 			if (!rs->rules[rs_cnt].inactive.ptr_array)
1472 				return (ENOMEM);
1473 		}
1474 
1475 		TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
1476 		    entries) {
1477 			pf_hash_rule_rolling(&ctx, rule);
1478 			(rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
1479 		}
1480 	}
1481 
1482 	MD5Final(digest, &ctx);
1483 	memcpy(V_pf_status.pf_chksum, digest, sizeof(V_pf_status.pf_chksum));
1484 	return (0);
1485 }
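
/*
 * Editorial note: the digest lands in V_pf_status.pf_chksum, which
 * gives pfsync peers a way to check that their main rulesets match
 * before associating each other's state updates with local rules.
 */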
1486 
1487 static int
1488 pf_eth_addr_setup(struct pf_keth_ruleset *ruleset, struct pf_addr_wrap *addr)
1489 {
1490 	int error = 0;
1491 
1492 	switch (addr->type) {
1493 	case PF_ADDR_TABLE:
1494 		addr->p.tbl = pfr_eth_attach_table(ruleset, addr->v.tblname);
1495 		if (addr->p.tbl == NULL)
1496 			error = ENOMEM;
1497 		break;
1498 	default:
1499 		error = EINVAL;
1500 	}
1501 
1502 	return (error);
1503 }
1504 
1505 static int
1506 pf_addr_setup(struct pf_kruleset *ruleset, struct pf_addr_wrap *addr,
1507     sa_family_t af)
1508 {
1509 	int error = 0;
1510 
1511 	switch (addr->type) {
1512 	case PF_ADDR_TABLE:
1513 		addr->p.tbl = pfr_attach_table(ruleset, addr->v.tblname);
1514 		if (addr->p.tbl == NULL)
1515 			error = ENOMEM;
1516 		break;
1517 	case PF_ADDR_DYNIFTL:
1518 		error = pfi_dynaddr_setup(addr, af);
1519 		break;
1520 	}
1521 
1522 	return (error);
1523 }
1524 
1525 static void
1526 pf_addr_copyout(struct pf_addr_wrap *addr)
1527 {
1528 
1529 	switch (addr->type) {
1530 	case PF_ADDR_DYNIFTL:
1531 		pfi_dynaddr_copyout(addr);
1532 		break;
1533 	case PF_ADDR_TABLE:
1534 		pf_tbladdr_copyout(addr);
1535 		break;
1536 	}
1537 }
1538 
1539 static void
1540 pf_src_node_copy(const struct pf_ksrc_node *in, struct pf_src_node *out)
1541 {
1542 	int	secs = time_uptime, diff;
1543 
1544 	bzero(out, sizeof(struct pf_src_node));
1545 
1546 	bcopy(&in->addr, &out->addr, sizeof(struct pf_addr));
1547 	bcopy(&in->raddr, &out->raddr, sizeof(struct pf_addr));
1548 
1549 	if (in->rule.ptr != NULL)
1550 		out->rule.nr = in->rule.ptr->nr;
1551 
1552 	for (int i = 0; i < 2; i++) {
1553 		out->bytes[i] = counter_u64_fetch(in->bytes[i]);
1554 		out->packets[i] = counter_u64_fetch(in->packets[i]);
1555 	}
1556 
1557 	out->states = in->states;
1558 	out->conn = in->conn;
1559 	out->af = in->af;
1560 	out->ruletype = in->ruletype;
1561 
1562 	out->creation = secs - in->creation;
	out->expire = in->expire;	/* copy before converting to relative time */
1563 	if (out->expire > secs)
1564 		out->expire -= secs;
1565 	else
1566 		out->expire = 0;
1567 
1568 	/* Adjust the connection rate estimate. */
	out->conn_rate = in->conn_rate;	/* start from the node's current estimate */
1569 	diff = secs - in->conn_rate.last;
1570 	if (diff >= in->conn_rate.seconds)
1571 		out->conn_rate.count = 0;
1572 	else
1573 		out->conn_rate.count -=
1574 		    in->conn_rate.count * diff /
1575 		    in->conn_rate.seconds;
1576 }
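
/*
 * Worked example of the decay above (editorial note): the counter is
 * aged linearly across the measurement window.  With count 60,
 * seconds 30 and the last update 10 seconds ago, 60 * 10 / 30 = 20 is
 * subtracted, leaving 40; once the full 30 seconds have elapsed the
 * count is reset to 0.
 */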
1577 
1578 #ifdef ALTQ
1579 /*
1580  * Handle export of struct pf_kaltq to user binaries that may be using any
1581  * version of struct pf_altq.
1582  */
1583 static int
1584 pf_export_kaltq(struct pf_altq *q, struct pfioc_altq_v1 *pa, size_t ioc_size)
1585 {
1586 	u_int32_t version;
1587 
1588 	if (ioc_size == sizeof(struct pfioc_altq_v0))
1589 		version = 0;
1590 	else
1591 		version = pa->version;
1592 
1593 	if (version > PFIOC_ALTQ_VERSION)
1594 		return (EINVAL);
1595 
1596 #define ASSIGN(x) exported_q->x = q->x
1597 #define COPY(x) \
1598 	bcopy(&q->x, &exported_q->x, min(sizeof(q->x), sizeof(exported_q->x)))
1599 #define SATU16(x) (u_int32_t)uqmin((x), USHRT_MAX)
1600 #define SATU32(x) (u_int32_t)uqmin((x), UINT_MAX)
1601 
1602 	switch (version) {
1603 	case 0: {
1604 		struct pf_altq_v0 *exported_q =
1605 		    &((struct pfioc_altq_v0 *)pa)->altq;
1606 
1607 		COPY(ifname);
1608 
1609 		ASSIGN(scheduler);
1611 		exported_q->tbrsize = SATU16(q->tbrsize);
1612 		exported_q->ifbandwidth = SATU32(q->ifbandwidth);
1613 
1614 		COPY(qname);
1615 		COPY(parent);
1616 		ASSIGN(parent_qid);
1617 		exported_q->bandwidth = SATU32(q->bandwidth);
1618 		ASSIGN(priority);
1619 		ASSIGN(local_flags);
1620 
1621 		ASSIGN(qlimit);
1622 		ASSIGN(flags);
1623 
1624 		if (q->scheduler == ALTQT_HFSC) {
1625 #define ASSIGN_OPT(x) exported_q->pq_u.hfsc_opts.x = q->pq_u.hfsc_opts.x
1626 #define ASSIGN_OPT_SATU32(x) exported_q->pq_u.hfsc_opts.x = \
1627 			    SATU32(q->pq_u.hfsc_opts.x)
1628 
1629 			ASSIGN_OPT_SATU32(rtsc_m1);
1630 			ASSIGN_OPT(rtsc_d);
1631 			ASSIGN_OPT_SATU32(rtsc_m2);
1632 
1633 			ASSIGN_OPT_SATU32(lssc_m1);
1634 			ASSIGN_OPT(lssc_d);
1635 			ASSIGN_OPT_SATU32(lssc_m2);
1636 
1637 			ASSIGN_OPT_SATU32(ulsc_m1);
1638 			ASSIGN_OPT(ulsc_d);
1639 			ASSIGN_OPT_SATU32(ulsc_m2);
1640 
1641 			ASSIGN_OPT(flags);
1642 
1643 #undef ASSIGN_OPT
1644 #undef ASSIGN_OPT_SATU32
1645 		} else
1646 			COPY(pq_u);
1647 
1648 		ASSIGN(qid);
1649 		break;
1650 	}
1651 	case 1:	{
1652 		struct pf_altq_v1 *exported_q =
1653 		    &((struct pfioc_altq_v1 *)pa)->altq;
1654 
1655 		COPY(ifname);
1656 
1657 		ASSIGN(scheduler);
1658 		ASSIGN(tbrsize);
1659 		ASSIGN(ifbandwidth);
1660 
1661 		COPY(qname);
1662 		COPY(parent);
1663 		ASSIGN(parent_qid);
1664 		ASSIGN(bandwidth);
1665 		ASSIGN(priority);
1666 		ASSIGN(local_flags);
1667 
1668 		ASSIGN(qlimit);
1669 		ASSIGN(flags);
1670 		COPY(pq_u);
1671 
1672 		ASSIGN(qid);
1673 		break;
1674 	}
1675 	default:
1676 		panic("%s: unhandled struct pfioc_altq version", __func__);
1677 		break;
1678 	}
1679 
1680 #undef ASSIGN
1681 #undef COPY
1682 #undef SATU16
1683 #undef SATU32
1684 
1685 	return (0);
1686 }
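
/*
 * Editorial sketch: a binary built against the v0 ABI passes
 * sizeof(struct pfioc_altq_v0), so version is taken as 0 and wide
 * kernel fields are saturated rather than truncated; e.g. a 10 Gbit/s
 * ifbandwidth (10000000000) exceeds UINT_MAX and is exported as
 * 4294967295.
 */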
1687 
1688 /*
1689  * Handle import into struct pf_kaltq of a struct pf_altq from user
1690  * binaries that may be using any version of it.
1691  */
1692 static int
1693 pf_import_kaltq(struct pfioc_altq_v1 *pa, struct pf_altq *q, size_t ioc_size)
1694 {
1695 	u_int32_t version;
1696 
1697 	if (ioc_size == sizeof(struct pfioc_altq_v0))
1698 		version = 0;
1699 	else
1700 		version = pa->version;
1701 
1702 	if (version > PFIOC_ALTQ_VERSION)
1703 		return (EINVAL);
1704 
1705 #define ASSIGN(x) q->x = imported_q->x
1706 #define COPY(x) \
1707 	bcopy(&imported_q->x, &q->x, min(sizeof(imported_q->x), sizeof(q->x)))
1708 
1709 	switch (version) {
1710 	case 0: {
1711 		struct pf_altq_v0 *imported_q =
1712 		    &((struct pfioc_altq_v0 *)pa)->altq;
1713 
1714 		COPY(ifname);
1715 
1716 		ASSIGN(scheduler);
1717 		ASSIGN(tbrsize); /* 16-bit -> 32-bit */
1718 		ASSIGN(ifbandwidth); /* 32-bit -> 64-bit */
1719 
1720 		COPY(qname);
1721 		COPY(parent);
1722 		ASSIGN(parent_qid);
1723 		ASSIGN(bandwidth); /* 32-bit -> 64-bit */
1724 		ASSIGN(priority);
1725 		ASSIGN(local_flags);
1726 
1727 		ASSIGN(qlimit);
1728 		ASSIGN(flags);
1729 
1730 		if (imported_q->scheduler == ALTQT_HFSC) {
1731 #define ASSIGN_OPT(x) q->pq_u.hfsc_opts.x = imported_q->pq_u.hfsc_opts.x
1732 
1733 			/*
1734 			 * The m1 and m2 parameters are being copied from
1735 			 * 32-bit to 64-bit.
1736 			 */
1737 			ASSIGN_OPT(rtsc_m1);
1738 			ASSIGN_OPT(rtsc_d);
1739 			ASSIGN_OPT(rtsc_m2);
1740 
1741 			ASSIGN_OPT(lssc_m1);
1742 			ASSIGN_OPT(lssc_d);
1743 			ASSIGN_OPT(lssc_m2);
1744 
1745 			ASSIGN_OPT(ulsc_m1);
1746 			ASSIGN_OPT(ulsc_d);
1747 			ASSIGN_OPT(ulsc_m2);
1748 
1749 			ASSIGN_OPT(flags);
1750 
1751 #undef ASSIGN_OPT
1752 		} else
1753 			COPY(pq_u);
1754 
1755 		ASSIGN(qid);
1756 		break;
1757 	}
1758 	case 1: {
1759 		struct pf_altq_v1 *imported_q =
1760 		    &((struct pfioc_altq_v1 *)pa)->altq;
1761 
1762 		COPY(ifname);
1763 
1764 		ASSIGN(scheduler);
1765 		ASSIGN(tbrsize);
1766 		ASSIGN(ifbandwidth);
1767 
1768 		COPY(qname);
1769 		COPY(parent);
1770 		ASSIGN(parent_qid);
1771 		ASSIGN(bandwidth);
1772 		ASSIGN(priority);
1773 		ASSIGN(local_flags);
1774 
1775 		ASSIGN(qlimit);
1776 		ASSIGN(flags);
1777 		COPY(pq_u);
1778 
1779 		ASSIGN(qid);
1780 		break;
1781 	}
1782 	default:
1783 		panic("%s: unhandled struct pfioc_altq version", __func__);
1784 		break;
1785 	}
1786 
1787 #undef ASSIGN
1788 #undef COPY
1789 
1790 	return (0);
1791 }
1792 
1793 static struct pf_altq *
1794 pf_altq_get_nth_active(u_int32_t n)
1795 {
1796 	struct pf_altq		*altq;
1797 	u_int32_t		 nr;
1798 
1799 	nr = 0;
1800 	TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
1801 		if (nr == n)
1802 			return (altq);
1803 		nr++;
1804 	}
1805 
1806 	TAILQ_FOREACH(altq, V_pf_altqs_active, entries) {
1807 		if (nr == n)
1808 			return (altq);
1809 		nr++;
1810 	}
1811 
1812 	return (NULL);
1813 }
1814 #endif /* ALTQ */
1815 
1816 struct pf_krule *
1817 pf_krule_alloc(void)
1818 {
1819 	struct pf_krule *rule;
1820 
1821 	rule = malloc(sizeof(struct pf_krule), M_PFRULE, M_WAITOK | M_ZERO);
1822 	mtx_init(&rule->rpool.mtx, "pf_krule_pool", NULL, MTX_DEF);
1823 	rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
1824 	    M_WAITOK | M_ZERO);
1825 	return (rule);
1826 }
1827 
1828 void
1829 pf_krule_free(struct pf_krule *rule)
1830 {
1831 #ifdef PF_WANT_32_TO_64_COUNTER
1832 	bool wowned;
1833 #endif
1834 
1835 	if (rule == NULL)
1836 		return;
1837 
1838 #ifdef PF_WANT_32_TO_64_COUNTER
1839 	if (rule->allrulelinked) {
1840 		wowned = PF_RULES_WOWNED();
1841 		if (!wowned)
1842 			PF_RULES_WLOCK();
1843 		LIST_REMOVE(rule, allrulelist);
1844 		V_pf_allrulecount--;
1845 		if (!wowned)
1846 			PF_RULES_WUNLOCK();
1847 	}
1848 #endif
1849 
1850 	pf_counter_u64_deinit(&rule->evaluations);
1851 	for (int i = 0; i < 2; i++) {
1852 		pf_counter_u64_deinit(&rule->packets[i]);
1853 		pf_counter_u64_deinit(&rule->bytes[i]);
1854 	}
1855 	counter_u64_free(rule->states_cur);
1856 	counter_u64_free(rule->states_tot);
1857 	counter_u64_free(rule->src_nodes);
1858 	uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp);
1859 
1860 	mtx_destroy(&rule->rpool.mtx);
1861 	free(rule, M_PFRULE);
1862 }
1863 
1864 static void
1865 pf_kpooladdr_to_pooladdr(const struct pf_kpooladdr *kpool,
1866     struct pf_pooladdr *pool)
1867 {
1868 
1869 	bzero(pool, sizeof(*pool));
1870 	bcopy(&kpool->addr, &pool->addr, sizeof(pool->addr));
1871 	strlcpy(pool->ifname, kpool->ifname, sizeof(pool->ifname));
1872 }
1873 
1874 static int
1875 pf_pooladdr_to_kpooladdr(const struct pf_pooladdr *pool,
1876     struct pf_kpooladdr *kpool)
1877 {
1878 	int ret;
1879 
1880 	bzero(kpool, sizeof(*kpool));
1881 	bcopy(&pool->addr, &kpool->addr, sizeof(kpool->addr));
1882 	ret = pf_user_strcpy(kpool->ifname, pool->ifname,
1883 	    sizeof(kpool->ifname));
1884 	return (ret);
1885 }
1886 
1887 static void
1888 pf_pool_to_kpool(const struct pf_pool *pool, struct pf_kpool *kpool)
1889 {
1890 	_Static_assert(sizeof(pool->key) == sizeof(kpool->key), "");
1891 	_Static_assert(sizeof(pool->counter) == sizeof(kpool->counter), "");
1892 
1893 	bcopy(&pool->key, &kpool->key, sizeof(kpool->key));
1894 	bcopy(&pool->counter, &kpool->counter, sizeof(kpool->counter));
1895 
1896 	kpool->tblidx = pool->tblidx;
1897 	kpool->proxy_port[0] = pool->proxy_port[0];
1898 	kpool->proxy_port[1] = pool->proxy_port[1];
1899 	kpool->opts = pool->opts;
1900 }
1901 
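/*
 * Translate a pf_rule coming from userspace into a pf_krule. Address
 * families compiled out of the kernel are rejected with EAFNOSUPPORT,
 * the rule addresses are sanity-checked, and the single userspace
 * label is stored as krule->label[0].
 */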
1902 static int
1903 pf_rule_to_krule(const struct pf_rule *rule, struct pf_krule *krule)
1904 {
1905 	int ret;
1906 
1907 #ifndef INET
1908 	if (rule->af == AF_INET) {
1909 		return (EAFNOSUPPORT);
1910 	}
1911 #endif /* INET */
1912 #ifndef INET6
1913 	if (rule->af == AF_INET6) {
1914 		return (EAFNOSUPPORT);
1915 	}
1916 #endif /* INET6 */
1917 
1918 	ret = pf_check_rule_addr(&rule->src);
1919 	if (ret != 0)
1920 		return (ret);
1921 	ret = pf_check_rule_addr(&rule->dst);
1922 	if (ret != 0)
1923 		return (ret);
1924 
1925 	bcopy(&rule->src, &krule->src, sizeof(rule->src));
1926 	bcopy(&rule->dst, &krule->dst, sizeof(rule->dst));
1927 
1928 	ret = pf_user_strcpy(krule->label[0], rule->label, sizeof(rule->label));
1929 	if (ret != 0)
1930 		return (ret);
1931 	ret = pf_user_strcpy(krule->ifname, rule->ifname, sizeof(rule->ifname));
1932 	if (ret != 0)
1933 		return (ret);
1934 	ret = pf_user_strcpy(krule->qname, rule->qname, sizeof(rule->qname));
1935 	if (ret != 0)
1936 		return (ret);
1937 	ret = pf_user_strcpy(krule->pqname, rule->pqname, sizeof(rule->pqname));
1938 	if (ret != 0)
1939 		return (ret);
1940 	ret = pf_user_strcpy(krule->tagname, rule->tagname,
1941 	    sizeof(rule->tagname));
1942 	if (ret != 0)
1943 		return (ret);
1944 	ret = pf_user_strcpy(krule->match_tagname, rule->match_tagname,
1945 	    sizeof(rule->match_tagname));
1946 	if (ret != 0)
1947 		return (ret);
1948 	ret = pf_user_strcpy(krule->overload_tblname, rule->overload_tblname,
1949 	    sizeof(rule->overload_tblname));
1950 	if (ret != 0)
1951 		return (ret);
1952 
1953 	pf_pool_to_kpool(&rule->rpool, &krule->rpool);
1954 
1955 	/* Don't allow userspace to set evaluations, packets or bytes. */
1956 	/* kif, anchor, overload_tbl are not copied over. */
1957 
1958 	krule->os_fingerprint = rule->os_fingerprint;
1959 
1960 	krule->rtableid = rule->rtableid;
1961 	bcopy(rule->timeout, krule->timeout, sizeof(krule->timeout));
1962 	krule->max_states = rule->max_states;
1963 	krule->max_src_nodes = rule->max_src_nodes;
1964 	krule->max_src_states = rule->max_src_states;
1965 	krule->max_src_conn = rule->max_src_conn;
1966 	krule->max_src_conn_rate.limit = rule->max_src_conn_rate.limit;
1967 	krule->max_src_conn_rate.seconds = rule->max_src_conn_rate.seconds;
1968 	krule->qid = rule->qid;
1969 	krule->pqid = rule->pqid;
1970 	krule->nr = rule->nr;
1971 	krule->prob = rule->prob;
1972 	krule->cuid = rule->cuid;
1973 	krule->cpid = rule->cpid;
1974 
1975 	krule->return_icmp = rule->return_icmp;
1976 	krule->return_icmp6 = rule->return_icmp6;
1977 	krule->max_mss = rule->max_mss;
1978 	krule->tag = rule->tag;
1979 	krule->match_tag = rule->match_tag;
1980 	krule->scrub_flags = rule->scrub_flags;
1981 
1982 	bcopy(&rule->uid, &krule->uid, sizeof(krule->uid));
1983 	bcopy(&rule->gid, &krule->gid, sizeof(krule->gid));
1984 
1985 	krule->rule_flag = rule->rule_flag;
1986 	krule->action = rule->action;
1987 	krule->direction = rule->direction;
1988 	krule->log = rule->log;
1989 	krule->logif = rule->logif;
1990 	krule->quick = rule->quick;
1991 	krule->ifnot = rule->ifnot;
1992 	krule->match_tag_not = rule->match_tag_not;
1993 	krule->natpass = rule->natpass;
1994 
1995 	krule->keep_state = rule->keep_state;
1996 	krule->af = rule->af;
1997 	krule->proto = rule->proto;
1998 	krule->type = rule->type;
1999 	krule->code = rule->code;
2000 	krule->flags = rule->flags;
2001 	krule->flagset = rule->flagset;
2002 	krule->min_ttl = rule->min_ttl;
2003 	krule->allow_opts = rule->allow_opts;
2004 	krule->rt = rule->rt;
2005 	krule->return_ttl = rule->return_ttl;
2006 	krule->tos = rule->tos;
2007 	krule->set_tos = rule->set_tos;
2008 
2009 	krule->flush = rule->flush;
2010 	krule->prio = rule->prio;
2011 	krule->set_prio[0] = rule->set_prio[0];
2012 	krule->set_prio[1] = rule->set_prio[1];
2013 
2014 	bcopy(&rule->divert, &krule->divert, sizeof(krule->divert));
2015 
2016 	return (0);
2017 }
2018 
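/*
 * Append a rule to the inactive ruleset selected by ticket/anchor.
 * On success the ruleset owns the rule; on failure the rule is freed
 * here, so the caller must not touch it once an error is returned.
 */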
2019 int
2020 pf_ioctl_addrule(struct pf_krule *rule, uint32_t ticket,
2021     uint32_t pool_ticket, const char *anchor, const char *anchor_call,
2022     uid_t uid, pid_t pid)
2023 {
2024 	struct pf_kruleset	*ruleset;
2025 	struct pf_krule		*tail;
2026 	struct pf_kpooladdr	*pa;
2027 	struct pfi_kkif		*kif = NULL;
2028 	int			 rs_num;
2029 	int			 error = 0;
2030 
2031 	if ((rule->return_icmp >> 8) > ICMP_MAXTYPE) {
2032 		error = EINVAL;
2033 		goto errout_unlocked;
2034 	}
2035 
2036 #define	ERROUT(x)	ERROUT_FUNCTION(errout, x)
2037 
2038 	if (rule->ifname[0])
2039 		kif = pf_kkif_create(M_WAITOK);
2040 	pf_counter_u64_init(&rule->evaluations, M_WAITOK);
2041 	for (int i = 0; i < 2; i++) {
2042 		pf_counter_u64_init(&rule->packets[i], M_WAITOK);
2043 		pf_counter_u64_init(&rule->bytes[i], M_WAITOK);
2044 	}
2045 	rule->states_cur = counter_u64_alloc(M_WAITOK);
2046 	rule->states_tot = counter_u64_alloc(M_WAITOK);
2047 	rule->src_nodes = counter_u64_alloc(M_WAITOK);
2048 	rule->cuid = uid;
2049 	rule->cpid = pid;
2050 	TAILQ_INIT(&rule->rpool.list);
2051 
2052 	PF_CONFIG_LOCK();
2053 	PF_RULES_WLOCK();
2054 #ifdef PF_WANT_32_TO_64_COUNTER
2055 	LIST_INSERT_HEAD(&V_pf_allrulelist, rule, allrulelist);
2056 	MPASS(!rule->allrulelinked);
2057 	rule->allrulelinked = true;
2058 	V_pf_allrulecount++;
2059 #endif
2060 	ruleset = pf_find_kruleset(anchor);
2061 	if (ruleset == NULL)
2062 		ERROUT(EINVAL);
2063 	rs_num = pf_get_ruleset_number(rule->action);
2064 	if (rs_num >= PF_RULESET_MAX)
2065 		ERROUT(EINVAL);
2066 	if (ticket != ruleset->rules[rs_num].inactive.ticket) {
2067 		DPFPRINTF(PF_DEBUG_MISC,
2068 		    ("ticket: %d != [%d]%d\n", ticket, rs_num,
2069 		    ruleset->rules[rs_num].inactive.ticket));
2070 		ERROUT(EBUSY);
2071 	}
2072 	if (pool_ticket != V_ticket_pabuf) {
2073 		DPFPRINTF(PF_DEBUG_MISC,
2074 		    ("pool_ticket: %d != %d\n", pool_ticket,
2075 		    V_ticket_pabuf));
2076 		ERROUT(EBUSY);
2077 	}
2078 	/*
2079 	 * XXXMJG hack: there is no mechanism to ensure the caller actually
2080 	 * started the transaction. The ticket checked above may happen to
2081 	 * match by accident, even if nobody called DIOCXBEGIN, let alone
2082 	 * this process. Partially work around it by checking whether the
2083 	 * RB tree got allocated, see pf_begin_rules().
2084 	 */
2085 	if (ruleset->rules[rs_num].inactive.tree == NULL) {
2086 		ERROUT(EINVAL);
2087 	}
2088 
2089 	tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
2090 	    pf_krulequeue);
2091 	if (tail)
2092 		rule->nr = tail->nr + 1;
2093 	else
2094 		rule->nr = 0;
2095 	if (rule->ifname[0]) {
2096 		rule->kif = pfi_kkif_attach(kif, rule->ifname);
2097 		kif = NULL;
2098 		pfi_kkif_ref(rule->kif);
2099 	} else
2100 		rule->kif = NULL;
2101 
2102 	if (rule->rtableid > 0 && rule->rtableid >= rt_numfibs)
2103 		error = EBUSY;
2104 
2105 #ifdef ALTQ
2106 	/* set queue IDs */
2107 	if (rule->qname[0] != 0) {
2108 		if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
2109 			error = EBUSY;
2110 		else if (rule->pqname[0] != 0) {
2111 			if ((rule->pqid =
2112 			    pf_qname2qid(rule->pqname)) == 0)
2113 				error = EBUSY;
2114 		} else
2115 			rule->pqid = rule->qid;
2116 	}
2117 #endif
2118 	if (rule->tagname[0])
2119 		if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
2120 			error = EBUSY;
2121 	if (rule->match_tagname[0])
2122 		if ((rule->match_tag =
2123 		    pf_tagname2tag(rule->match_tagname)) == 0)
2124 			error = EBUSY;
2125 	if (rule->rt && !rule->direction)
2126 		error = EINVAL;
2127 	if (!rule->log)
2128 		rule->logif = 0;
2129 	if (rule->logif >= PFLOGIFS_MAX)
2130 		error = EINVAL;
2131 	if (pf_addr_setup(ruleset, &rule->src.addr, rule->af))
2132 		error = ENOMEM;
2133 	if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af))
2134 		error = ENOMEM;
2135 	if (pf_kanchor_setup(rule, ruleset, anchor_call))
2136 		error = EINVAL;
2137 	if (rule->scrub_flags & PFSTATE_SETPRIO &&
2138 	    (rule->set_prio[0] > PF_PRIO_MAX ||
2139 	    rule->set_prio[1] > PF_PRIO_MAX))
2140 		error = EINVAL;
2141 	TAILQ_FOREACH(pa, &V_pf_pabuf, entries)
2142 		if (pa->addr.type == PF_ADDR_TABLE) {
2143 			pa->addr.p.tbl = pfr_attach_table(ruleset,
2144 			    pa->addr.v.tblname);
2145 			if (pa->addr.p.tbl == NULL)
2146 				error = ENOMEM;
2147 		}
2148 
2149 	rule->overload_tbl = NULL;
2150 	if (rule->overload_tblname[0]) {
2151 		if ((rule->overload_tbl = pfr_attach_table(ruleset,
2152 		    rule->overload_tblname)) == NULL)
2153 			error = EINVAL;
2154 		else
2155 			rule->overload_tbl->pfrkt_flags |=
2156 			    PFR_TFLAG_ACTIVE;
2157 	}
2158 
2159 	pf_mv_kpool(&V_pf_pabuf, &rule->rpool.list);
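	/*
	 * NAT, RDR and BINAT rules outside of anchors, as well as rules
	 * with a routing option (rt > PF_NOPFROUTE), must carry at
	 * least one pool address.
	 */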
2160 	if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
2161 	    (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
2162 	    (rule->rt > PF_NOPFROUTE)) &&
2163 	    (TAILQ_FIRST(&rule->rpool.list) == NULL))
2164 		error = EINVAL;
2165 
2166 	if (error) {
2167 		pf_free_rule(rule);
2168 		rule = NULL;
2169 		ERROUT(error);
2170 	}
2171 
2172 	rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
2173 	TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
2174 	    rule, entries);
2175 	ruleset->rules[rs_num].inactive.rcount++;
2176 
2177 	PF_RULES_WUNLOCK();
2178 	pf_hash_rule(rule);
2179 	if (RB_INSERT(pf_krule_global, ruleset->rules[rs_num].inactive.tree, rule) != NULL) {
2180 		PF_RULES_WLOCK();
2181 		TAILQ_REMOVE(ruleset->rules[rs_num].inactive.ptr, rule, entries);
2182 		ruleset->rules[rs_num].inactive.rcount--;
2183 		pf_free_rule(rule);
2184 		rule = NULL;
2185 		ERROUT(EEXIST);
2186 	}
2187 	PF_CONFIG_UNLOCK();
2188 
2189 	return (0);
2190 
2191 #undef ERROUT
2192 errout:
2193 	PF_RULES_WUNLOCK();
2194 	PF_CONFIG_UNLOCK();
2195 errout_unlocked:
2196 	pf_kkif_free(kif);
2197 	pf_krule_free(rule);
2198 	return (error);
2199 }
2200 
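/* Return true if any of the rule's labels equals the given label. */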
2201 static bool
2202 pf_label_match(const struct pf_krule *rule, const char *label)
2203 {
2204 	int i = 0;
2205 
2206 	while (*rule->label[i]) {
2207 		if (strcmp(rule->label[i], label) == 0)
2208 			return (true);
2209 		i++;
2210 	}
2211 
2212 	return (false);
2213 }
2214 
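/*
 * Kill the state matching `key' in the given direction, but only when
 * the match is unambiguous: if more than one state matches, nothing is
 * killed. Returns the number of states killed (0 or 1).
 */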
2215 static unsigned int
2216 pf_kill_matching_state(struct pf_state_key_cmp *key, int dir)
2217 {
2218 	struct pf_kstate *s;
2219 	int more = 0;
2220 
2221 	s = pf_find_state_all(key, dir, &more);
2222 	if (s == NULL)
2223 		return (0);
2224 
2225 	if (more) {
2226 		PF_STATE_UNLOCK(s);
2227 		return (0);
2228 	}
2229 
2230 	pf_unlink_state(s);
2231 	return (1);
2232 }
2233 
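/*
 * Scan one state hash row and unlink every state matching the filter
 * in `psk'. Unlinking a state releases the row lock, so the row is
 * relocked and rescanned from the top after each kill. Returns the
 * number of states killed, including any matching reverse states
 * killed on behalf of psk_kill_match.
 */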
2234 static int
2235 pf_killstates_row(struct pf_kstate_kill *psk, struct pf_idhash *ih)
2236 {
2237 	struct pf_kstate	*s;
2238 	struct pf_state_key	*sk;
2239 	struct pf_addr		*srcaddr, *dstaddr;
2240 	struct pf_state_key_cmp	 match_key;
2241 	int			 idx, killed = 0;
2242 	unsigned int		 dir;
2243 	u_int16_t		 srcport, dstport;
2244 	struct pfi_kkif		*kif;
2245 
2246 relock_DIOCKILLSTATES:
2247 	PF_HASHROW_LOCK(ih);
2248 	LIST_FOREACH(s, &ih->states, entry) {
2249 		/* For floating states look at the original kif. */
2250 		kif = s->kif == V_pfi_all ? s->orig_kif : s->kif;
2251 
2252 		sk = s->key[psk->psk_nat ? PF_SK_STACK : PF_SK_WIRE];
2253 		if (s->direction == PF_OUT) {
2254 			srcaddr = &sk->addr[1];
2255 			dstaddr = &sk->addr[0];
2256 			srcport = sk->port[1];
2257 			dstport = sk->port[0];
2258 		} else {
2259 			srcaddr = &sk->addr[0];
2260 			dstaddr = &sk->addr[1];
2261 			srcport = sk->port[0];
2262 			dstport = sk->port[1];
2263 		}
2264 
2265 		if (psk->psk_af && sk->af != psk->psk_af)
2266 			continue;
2267 
2268 		if (psk->psk_proto && psk->psk_proto != sk->proto)
2269 			continue;
2270 
2271 		if (! PF_MATCHA(psk->psk_src.neg, &psk->psk_src.addr.v.a.addr,
2272 		    &psk->psk_src.addr.v.a.mask, srcaddr, sk->af))
2273 			continue;
2274 
2275 		if (! PF_MATCHA(psk->psk_dst.neg, &psk->psk_dst.addr.v.a.addr,
2276 		    &psk->psk_dst.addr.v.a.mask, dstaddr, sk->af))
2277 			continue;
2278 
2279 		if (!  PF_MATCHA(psk->psk_rt_addr.neg,
2280 		    &psk->psk_rt_addr.addr.v.a.addr,
2281 		    &psk->psk_rt_addr.addr.v.a.mask,
2282 		    &s->rt_addr, sk->af))
2283 			continue;
2284 
2285 		if (psk->psk_src.port_op != 0 &&
2286 		    ! pf_match_port(psk->psk_src.port_op,
2287 		    psk->psk_src.port[0], psk->psk_src.port[1], srcport))
2288 			continue;
2289 
2290 		if (psk->psk_dst.port_op != 0 &&
2291 		    ! pf_match_port(psk->psk_dst.port_op,
2292 		    psk->psk_dst.port[0], psk->psk_dst.port[1], dstport))
2293 			continue;
2294 
2295 		if (psk->psk_label[0] &&
2296 		    ! pf_label_match(s->rule.ptr, psk->psk_label))
2297 			continue;
2298 
2299 		if (psk->psk_ifname[0] && strcmp(psk->psk_ifname,
2300 		    kif->pfik_name))
2301 			continue;
2302 
2303 		if (psk->psk_kill_match) {
2304 			/*
2305 			 * Create the key used to find matching states,
2306 			 * with the hash-row lock held.
2307 			 */
2306 
2307 			bzero(&match_key, sizeof(match_key));
2308 
2309 			if (s->direction == PF_OUT) {
2310 				dir = PF_IN;
2311 				idx = psk->psk_nat ? PF_SK_WIRE : PF_SK_STACK;
2312 			} else {
2313 				dir = PF_OUT;
2314 				idx = psk->psk_nat ? PF_SK_STACK : PF_SK_WIRE;
2315 			}
2316 
2317 			match_key.af = s->key[idx]->af;
2318 			match_key.proto = s->key[idx]->proto;
2319 			PF_ACPY(&match_key.addr[0],
2320 			    &s->key[idx]->addr[1], match_key.af);
2321 			match_key.port[0] = s->key[idx]->port[1];
2322 			PF_ACPY(&match_key.addr[1],
2323 			    &s->key[idx]->addr[0], match_key.af);
2324 			match_key.port[1] = s->key[idx]->port[0];
2325 		}
2326 
2327 		pf_unlink_state(s);
2328 		killed++;
2329 
2330 		if (psk->psk_kill_match)
2331 			killed += pf_kill_matching_state(&match_key, dir);
2332 
2333 		goto relock_DIOCKILLSTATES;
2334 	}
2335 	PF_HASHROW_UNLOCK(ih);
2336 
2337 	return (killed);
2338 }
2339 
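/*
 * Bring pf up or down for the current VNET: hook or unhook the
 * pfil(9) hooks (the Ethernet hooks only when Ethernet rules are
 * present) and flip V_pf_status.running, serialized by
 * V_pf_ioctl_lock against concurrent DIOCSTART/DIOCSTOP callers.
 */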
2340 int
2341 pf_start(void)
2342 {
2343 	int error = 0;
2344 
2345 	sx_xlock(&V_pf_ioctl_lock);
2346 	if (V_pf_status.running)
2347 		error = EEXIST;
2348 	else {
2349 		hook_pf();
2350 		if (! TAILQ_EMPTY(V_pf_keth->active.rules))
2351 			hook_pf_eth();
2352 		V_pf_status.running = 1;
2353 		V_pf_status.since = time_second;
2354 		new_unrhdr64(&V_pf_stateid, time_second);
2355 
2356 		DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
2357 	}
2358 	sx_xunlock(&V_pf_ioctl_lock);
2359 
2360 	return (error);
2361 }
2362 
2363 int
2364 pf_stop(void)
2365 {
2366 	int error = 0;
2367 
2368 	sx_xlock(&V_pf_ioctl_lock);
2369 	if (!V_pf_status.running)
2370 		error = ENOENT;
2371 	else {
2372 		V_pf_status.running = 0;
2373 		dehook_pf();
2374 		dehook_pf_eth();
2375 		V_pf_status.since = time_second;
2376 		DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
2377 	}
2378 	sx_xunlock(&V_pf_ioctl_lock);
2379 
2380 	return (error);
2381 }
2382 
2383 static int
2384 pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
2385 {
2386 	int			 error = 0;
2387 	PF_RULES_RLOCK_TRACKER;
2388 
2389 #define	ERROUT_IOCTL(target, x)					\
2390     do {								\
2391 	    error = (x);						\
2392 	    SDT_PROBE3(pf, ioctl, ioctl, error, cmd, error, __LINE__);	\
2393 	    goto target;						\
2394     } while (0)
2395 
2397 	/* XXX keep in sync with switch() below */
2398 	if (securelevel_gt(td->td_ucred, 2))
2399 		switch (cmd) {
2400 		case DIOCGETRULES:
2401 		case DIOCGETRULENV:
2402 		case DIOCGETADDRS:
2403 		case DIOCGETADDR:
2404 		case DIOCGETSTATE:
2405 		case DIOCGETSTATENV:
2406 		case DIOCSETSTATUSIF:
2407 		case DIOCGETSTATUSNV:
2408 		case DIOCCLRSTATUS:
2409 		case DIOCNATLOOK:
2410 		case DIOCSETDEBUG:
2411 #ifdef COMPAT_FREEBSD14
2412 		case DIOCGETSTATES:
2413 		case DIOCGETSTATESV2:
2414 #endif
2415 		case DIOCGETTIMEOUT:
2416 		case DIOCCLRRULECTRS:
2417 		case DIOCGETLIMIT:
2418 		case DIOCGETALTQSV0:
2419 		case DIOCGETALTQSV1:
2420 		case DIOCGETALTQV0:
2421 		case DIOCGETALTQV1:
2422 		case DIOCGETQSTATSV0:
2423 		case DIOCGETQSTATSV1:
2424 		case DIOCGETRULESETS:
2425 		case DIOCGETRULESET:
2426 		case DIOCRGETTABLES:
2427 		case DIOCRGETTSTATS:
2428 		case DIOCRCLRTSTATS:
2429 		case DIOCRCLRADDRS:
2430 		case DIOCRADDADDRS:
2431 		case DIOCRDELADDRS:
2432 		case DIOCRSETADDRS:
2433 		case DIOCRGETADDRS:
2434 		case DIOCRGETASTATS:
2435 		case DIOCRCLRASTATS:
2436 		case DIOCRTSTADDRS:
2437 		case DIOCOSFPGET:
2438 		case DIOCGETSRCNODES:
2439 		case DIOCCLRSRCNODES:
2440 		case DIOCGETSYNCOOKIES:
2441 		case DIOCIGETIFACES:
2442 		case DIOCGIFSPEEDV0:
2443 		case DIOCGIFSPEEDV1:
2444 		case DIOCSETIFFLAG:
2445 		case DIOCCLRIFFLAG:
2446 		case DIOCGETETHRULES:
2447 		case DIOCGETETHRULE:
2448 		case DIOCGETETHRULESETS:
2449 		case DIOCGETETHRULESET:
2450 			break;
2451 		case DIOCRCLRTABLES:
2452 		case DIOCRADDTABLES:
2453 		case DIOCRDELTABLES:
2454 		case DIOCRSETTFLAGS:
2455 			if (((struct pfioc_table *)addr)->pfrio_flags &
2456 			    PFR_FLAG_DUMMY)
2457 				break; /* dummy operation ok */
2458 			return (EPERM);
2459 		default:
2460 			return (EPERM);
2461 		}
2462 
2463 	if (!(flags & FWRITE))
2464 		switch (cmd) {
2465 		case DIOCGETRULES:
2466 		case DIOCGETADDRS:
2467 		case DIOCGETADDR:
2468 		case DIOCGETSTATE:
2469 		case DIOCGETSTATENV:
2470 		case DIOCGETSTATUSNV:
2471 #ifdef COMPAT_FREEBSD14
2472 		case DIOCGETSTATES:
2473 		case DIOCGETSTATESV2:
2474 #endif
2475 		case DIOCGETTIMEOUT:
2476 		case DIOCGETLIMIT:
2477 		case DIOCGETALTQSV0:
2478 		case DIOCGETALTQSV1:
2479 		case DIOCGETALTQV0:
2480 		case DIOCGETALTQV1:
2481 		case DIOCGETQSTATSV0:
2482 		case DIOCGETQSTATSV1:
2483 		case DIOCGETRULESETS:
2484 		case DIOCGETRULESET:
2485 		case DIOCNATLOOK:
2486 		case DIOCRGETTABLES:
2487 		case DIOCRGETTSTATS:
2488 		case DIOCRGETADDRS:
2489 		case DIOCRGETASTATS:
2490 		case DIOCRTSTADDRS:
2491 		case DIOCOSFPGET:
2492 		case DIOCGETSRCNODES:
2493 		case DIOCGETSYNCOOKIES:
2494 		case DIOCIGETIFACES:
2495 		case DIOCGIFSPEEDV1:
2496 		case DIOCGIFSPEEDV0:
2497 		case DIOCGETRULENV:
2498 		case DIOCGETETHRULES:
2499 		case DIOCGETETHRULE:
2500 		case DIOCGETETHRULESETS:
2501 		case DIOCGETETHRULESET:
2502 			break;
2503 		case DIOCRCLRTABLES:
2504 		case DIOCRADDTABLES:
2505 		case DIOCRDELTABLES:
2506 		case DIOCRCLRTSTATS:
2507 		case DIOCRCLRADDRS:
2508 		case DIOCRADDADDRS:
2509 		case DIOCRDELADDRS:
2510 		case DIOCRSETADDRS:
2511 		case DIOCRSETTFLAGS:
2512 			if (((struct pfioc_table *)addr)->pfrio_flags &
2513 			    PFR_FLAG_DUMMY) {
2514 				flags |= FWRITE; /* need write lock for dummy */
2515 				break; /* dummy operation ok */
2516 			}
2517 			return (EACCES);
2518 		default:
2519 			return (EACCES);
2520 		}
2521 
2522 	CURVNET_SET(TD_TO_VNET(td));
2523 
2524 	switch (cmd) {
2525 #ifdef COMPAT_FREEBSD14
2526 	case DIOCSTART:
2527 		error = pf_start();
2528 		break;
2529 
2530 	case DIOCSTOP:
2531 		error = pf_stop();
2532 		break;
2533 #endif
2534 
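	/*
	 * The nvlist-based ioctls below exchange packed nvlists through
	 * struct pfioc_nv: data/len carry the packed request in, size is
	 * the capacity of the caller's buffer and len is updated to the
	 * packed size of the reply, which is copied out only when it
	 * fits. A minimal userspace sketch for this ioctl (assuming
	 * libnv and an open /dev/pf descriptor; buffer sizing and error
	 * handling simplified):
	 *
	 *	nvlist_t *nvl = nvlist_create(0);
	 *	struct pfioc_nv nv;
	 *	char buf[4096];
	 *	void *packed;
	 *
	 *	nvlist_add_string(nvl, "anchor", "");
	 *	packed = nvlist_pack(nvl, &nv.len);
	 *	memcpy(buf, packed, nv.len);
	 *	nv.data = buf;
	 *	nv.size = sizeof(buf);
	 *	ioctl(dev, DIOCGETETHRULES, &nv);
	 *	// on success buf holds a packed reply with "ticket"/"nr"
	 */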
2535 	case DIOCGETETHRULES: {
2536 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
2537 		nvlist_t		*nvl;
2538 		void			*packed;
2539 		struct pf_keth_rule	*tail;
2540 		struct pf_keth_ruleset	*rs;
2541 		u_int32_t		 ticket, nr;
2542 		const char		*anchor = "";
2543 
2544 		nvl = NULL;
2545 		packed = NULL;
2546 
2547 #define	ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULES_error, x)
2548 
2549 		if (nv->len > pf_ioctl_maxcount)
2550 			ERROUT(ENOMEM);
2551 
2552 		/* Copy the request in */
2553 		packed = malloc(nv->len, M_NVLIST, M_WAITOK);
2554 		if (packed == NULL)
2555 			ERROUT(ENOMEM);
2556 
2557 		error = copyin(nv->data, packed, nv->len);
2558 		if (error)
2559 			ERROUT(error);
2560 
2561 		nvl = nvlist_unpack(packed, nv->len, 0);
2562 		if (nvl == NULL)
2563 			ERROUT(EBADMSG);
2564 
2565 		if (! nvlist_exists_string(nvl, "anchor"))
2566 			ERROUT(EBADMSG);
2567 
2568 		anchor = nvlist_get_string(nvl, "anchor");
2569 
2570 		rs = pf_find_keth_ruleset(anchor);
2571 
2572 		nvlist_destroy(nvl);
2573 		nvl = NULL;
2574 		free(packed, M_NVLIST);
2575 		packed = NULL;
2576 
2577 		if (rs == NULL)
2578 			ERROUT(ENOENT);
2579 
2580 		/* Reply */
2581 		nvl = nvlist_create(0);
2582 		if (nvl == NULL)
2583 			ERROUT(ENOMEM);
2584 
2585 		PF_RULES_RLOCK();
2586 
2587 		ticket = rs->active.ticket;
2588 		tail = TAILQ_LAST(rs->active.rules, pf_keth_ruleq);
2589 		if (tail)
2590 			nr = tail->nr + 1;
2591 		else
2592 			nr = 0;
2593 
2594 		PF_RULES_RUNLOCK();
2595 
2596 		nvlist_add_number(nvl, "ticket", ticket);
2597 		nvlist_add_number(nvl, "nr", nr);
2598 
2599 		packed = nvlist_pack(nvl, &nv->len);
2600 		if (packed == NULL)
2601 			ERROUT(ENOMEM);
2602 
2603 		if (nv->size == 0)
2604 			ERROUT(0);
2605 		else if (nv->size < nv->len)
2606 			ERROUT(ENOSPC);
2607 
2608 		error = copyout(packed, nv->data, nv->len);
2609 
2610 #undef ERROUT
2611 DIOCGETETHRULES_error:
2612 		free(packed, M_NVLIST);
2613 		nvlist_destroy(nvl);
2614 		break;
2615 	}
2616 
2617 	case DIOCGETETHRULE: {
2618 		struct epoch_tracker	 et;
2619 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
2620 		nvlist_t		*nvl = NULL;
2621 		void			*nvlpacked = NULL;
2622 		struct pf_keth_rule	*rule = NULL;
2623 		struct pf_keth_ruleset	*rs;
2624 		u_int32_t		 ticket, nr;
2625 		bool			 clear = false;
2626 		const char		*anchor;
2627 
2628 #define ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULE_error, x)
2629 
2630 		if (nv->len > pf_ioctl_maxcount)
2631 			ERROUT(ENOMEM);
2632 
2633 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
2634 		if (nvlpacked == NULL)
2635 			ERROUT(ENOMEM);
2636 
2637 		error = copyin(nv->data, nvlpacked, nv->len);
2638 		if (error)
2639 			ERROUT(error);
2640 
2641 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
2642 		if (nvl == NULL)
2643 			ERROUT(EBADMSG);
2644 		if (! nvlist_exists_number(nvl, "ticket"))
2645 			ERROUT(EBADMSG);
2646 		ticket = nvlist_get_number(nvl, "ticket");
2647 		if (! nvlist_exists_string(nvl, "anchor"))
2648 			ERROUT(EBADMSG);
2649 		anchor = nvlist_get_string(nvl, "anchor");
2650 
2651 		if (nvlist_exists_bool(nvl, "clear"))
2652 			clear = nvlist_get_bool(nvl, "clear");
2653 
2654 		if (clear && !(flags & FWRITE))
2655 			ERROUT(EACCES);
2656 
2657 		if (! nvlist_exists_number(nvl, "nr"))
2658 			ERROUT(EBADMSG);
2659 		nr = nvlist_get_number(nvl, "nr");
2660 
2661 		PF_RULES_RLOCK();
2662 		rs = pf_find_keth_ruleset(anchor);
2663 		if (rs == NULL) {
2664 			PF_RULES_RUNLOCK();
2665 			ERROUT(ENOENT);
2666 		}
2667 		if (ticket != rs->active.ticket) {
2668 			PF_RULES_RUNLOCK();
2669 			ERROUT(EBUSY);
2670 		}
2671 
2672 		nvlist_destroy(nvl);
2673 		nvl = NULL;
2674 		free(nvlpacked, M_NVLIST);
2675 		nvlpacked = NULL;
2676 
2677 		rule = TAILQ_FIRST(rs->active.rules);
2678 		while ((rule != NULL) && (rule->nr != nr))
2679 			rule = TAILQ_NEXT(rule, entries);
2680 		if (rule == NULL) {
2681 			PF_RULES_RUNLOCK();
2682 			ERROUT(ENOENT);
2683 		}
2684 		/* Make sure rule can't go away. */
2685 		NET_EPOCH_ENTER(et);
2686 		PF_RULES_RUNLOCK();
2687 		nvl = pf_keth_rule_to_nveth_rule(rule);
2688 		if (nvl == NULL) {
2689 			NET_EPOCH_EXIT(et);
2690 			ERROUT(ENOMEM);
2691 		}
2692 		if (pf_keth_anchor_nvcopyout(rs, rule, nvl)) {
2693 			NET_EPOCH_EXIT(et);
2694 			ERROUT(EBUSY);
2695 		}
2696 		NET_EPOCH_EXIT(et);
2693 
2694 		nvlpacked = nvlist_pack(nvl, &nv->len);
2695 		if (nvlpacked == NULL)
2696 			ERROUT(ENOMEM);
2697 
2698 		if (nv->size == 0)
2699 			ERROUT(0);
2700 		else if (nv->size < nv->len)
2701 			ERROUT(ENOSPC);
2702 
2703 		error = copyout(nvlpacked, nv->data, nv->len);
2704 		if (error == 0 && clear) {
2705 			counter_u64_zero(rule->evaluations);
2706 			for (int i = 0; i < 2; i++) {
2707 				counter_u64_zero(rule->packets[i]);
2708 				counter_u64_zero(rule->bytes[i]);
2709 			}
2710 		}
2711 
2712 #undef ERROUT
2713 DIOCGETETHRULE_error:
2714 		free(nvlpacked, M_NVLIST);
2715 		nvlist_destroy(nvl);
2716 		break;
2717 	}
2718 
2719 	case DIOCADDETHRULE: {
2720 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
2721 		nvlist_t		*nvl = NULL;
2722 		void			*nvlpacked = NULL;
2723 		struct pf_keth_rule	*rule = NULL, *tail = NULL;
2724 		struct pf_keth_ruleset	*ruleset = NULL;
2725 		struct pfi_kkif		*kif = NULL, *bridge_to_kif = NULL;
2726 		const char		*anchor = "", *anchor_call = "";
2727 
2728 #define ERROUT(x)	ERROUT_IOCTL(DIOCADDETHRULE_error, x)
2729 
2730 		if (nv->len > pf_ioctl_maxcount)
2731 			ERROUT(ENOMEM);
2732 
2733 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
2734 		if (nvlpacked == NULL)
2735 			ERROUT(ENOMEM);
2736 
2737 		error = copyin(nv->data, nvlpacked, nv->len);
2738 		if (error)
2739 			ERROUT(error);
2740 
2741 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
2742 		if (nvl == NULL)
2743 			ERROUT(EBADMSG);
2744 
2745 		if (! nvlist_exists_number(nvl, "ticket"))
2746 			ERROUT(EBADMSG);
2747 
2748 		if (nvlist_exists_string(nvl, "anchor"))
2749 			anchor = nvlist_get_string(nvl, "anchor");
2750 		if (nvlist_exists_string(nvl, "anchor_call"))
2751 			anchor_call = nvlist_get_string(nvl, "anchor_call");
2752 
2753 		ruleset = pf_find_keth_ruleset(anchor);
2754 		if (ruleset == NULL)
2755 			ERROUT(EINVAL);
2756 
2757 		if (nvlist_get_number(nvl, "ticket") !=
2758 		    ruleset->inactive.ticket) {
2759 			DPFPRINTF(PF_DEBUG_MISC,
2760 			    ("ticket: %d != %d\n",
2761 			    (u_int32_t)nvlist_get_number(nvl, "ticket"),
2762 			    ruleset->inactive.ticket));
2763 			ERROUT(EBUSY);
2764 		}
2765 
2766 		rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK);
2767 		if (rule == NULL)
2768 			ERROUT(ENOMEM);
2769 		rule->timestamp = NULL;
2770 
2771 		error = pf_nveth_rule_to_keth_rule(nvl, rule);
2772 		if (error != 0)
2773 			ERROUT(error);
2774 
2775 		if (rule->ifname[0])
2776 			kif = pf_kkif_create(M_WAITOK);
2777 		if (rule->bridge_to_name[0])
2778 			bridge_to_kif = pf_kkif_create(M_WAITOK);
2779 		rule->evaluations = counter_u64_alloc(M_WAITOK);
2780 		for (int i = 0; i < 2; i++) {
2781 			rule->packets[i] = counter_u64_alloc(M_WAITOK);
2782 			rule->bytes[i] = counter_u64_alloc(M_WAITOK);
2783 		}
2784 		rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
2785 		    M_WAITOK | M_ZERO);
2786 
2787 		PF_RULES_WLOCK();
2788 
2789 		if (rule->ifname[0]) {
2790 			rule->kif = pfi_kkif_attach(kif, rule->ifname);
2791 			pfi_kkif_ref(rule->kif);
2792 		} else
2793 			rule->kif = NULL;
2794 		if (rule->bridge_to_name[0]) {
2795 			rule->bridge_to = pfi_kkif_attach(bridge_to_kif,
2796 			    rule->bridge_to_name);
2797 			pfi_kkif_ref(rule->bridge_to);
2798 		} else
2799 			rule->bridge_to = NULL;
2800 
2801 #ifdef ALTQ
2802 		/* set queue IDs */
2803 		if (rule->qname[0] != 0) {
2804 			if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
2805 				error = EBUSY;
2806 		}
2809 #endif
2810 		if (rule->tagname[0])
2811 			if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
2812 				error = EBUSY;
2813 		if (rule->match_tagname[0])
2814 			if ((rule->match_tag = pf_tagname2tag(
2815 			    rule->match_tagname)) == 0)
2816 				error = EBUSY;
2817 
2818 		if (error == 0 && rule->ipdst.addr.type == PF_ADDR_TABLE)
2819 			error = pf_eth_addr_setup(ruleset, &rule->ipdst.addr);
2820 		if (error == 0 && rule->ipsrc.addr.type == PF_ADDR_TABLE)
2821 			error = pf_eth_addr_setup(ruleset, &rule->ipsrc.addr);
2822 
2823 		if (error) {
2824 			pf_free_eth_rule(rule);
2825 			PF_RULES_WUNLOCK();
2826 			ERROUT(error);
2827 		}
2828 
2829 		if (pf_keth_anchor_setup(rule, ruleset, anchor_call)) {
2830 			pf_free_eth_rule(rule);
2831 			PF_RULES_WUNLOCK();
2832 			ERROUT(EINVAL);
2833 		}
2834 
2835 		tail = TAILQ_LAST(ruleset->inactive.rules, pf_keth_ruleq);
2836 		if (tail)
2837 			rule->nr = tail->nr + 1;
2838 		else
2839 			rule->nr = 0;
2840 
2841 		TAILQ_INSERT_TAIL(ruleset->inactive.rules, rule, entries);
2842 
2843 		PF_RULES_WUNLOCK();
2844 
2845 #undef ERROUT
2846 DIOCADDETHRULE_error:
2847 		nvlist_destroy(nvl);
2848 		free(nvlpacked, M_NVLIST);
2849 		break;
2850 	}
2851 
2852 	case DIOCGETETHRULESETS: {
2853 		struct epoch_tracker	 et;
2854 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
2855 		nvlist_t		*nvl = NULL;
2856 		void			*nvlpacked = NULL;
2857 		struct pf_keth_ruleset	*ruleset;
2858 		struct pf_keth_anchor	*anchor;
2859 		int			 nr = 0;
2860 
2861 #define ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULESETS_error, x)
2862 
2863 		if (nv->len > pf_ioctl_maxcount)
2864 			ERROUT(ENOMEM);
2865 
2866 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
2867 		if (nvlpacked == NULL)
2868 			ERROUT(ENOMEM);
2869 
2870 		error = copyin(nv->data, nvlpacked, nv->len);
2871 		if (error)
2872 			ERROUT(error);
2873 
2874 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
2875 		if (nvl == NULL)
2876 			ERROUT(EBADMSG);
2877 		if (! nvlist_exists_string(nvl, "path"))
2878 			ERROUT(EBADMSG);
2879 
2880 		NET_EPOCH_ENTER(et);
2881 
2882 		if ((ruleset = pf_find_keth_ruleset(
2883 		    nvlist_get_string(nvl, "path"))) == NULL) {
2884 			NET_EPOCH_EXIT(et);
2885 			ERROUT(ENOENT);
2886 		}
2887 
2888 		if (ruleset->anchor == NULL) {
2889 			RB_FOREACH(anchor, pf_keth_anchor_global, &V_pf_keth_anchors)
2890 				if (anchor->parent == NULL)
2891 					nr++;
2892 		} else {
2893 			RB_FOREACH(anchor, pf_keth_anchor_node,
2894 			    &ruleset->anchor->children)
2895 				nr++;
2896 		}
2897 
2898 		NET_EPOCH_EXIT(et);
2899 
2900 		nvlist_destroy(nvl);
2901 		nvl = NULL;
2902 		free(nvlpacked, M_NVLIST);
2903 		nvlpacked = NULL;
2904 
2905 		nvl = nvlist_create(0);
2906 		if (nvl == NULL)
2907 			ERROUT(ENOMEM);
2908 
2909 		nvlist_add_number(nvl, "nr", nr);
2910 
2911 		nvlpacked = nvlist_pack(nvl, &nv->len);
2912 		if (nvlpacked == NULL)
2913 			ERROUT(ENOMEM);
2914 
2915 		if (nv->size == 0)
2916 			ERROUT(0);
2917 		else if (nv->size < nv->len)
2918 			ERROUT(ENOSPC);
2919 
2920 		error = copyout(nvlpacked, nv->data, nv->len);
2921 
2922 #undef ERROUT
2923 DIOCGETETHRULESETS_error:
2924 		free(nvlpacked, M_NVLIST);
2925 		nvlist_destroy(nvl);
2926 		break;
2927 	}
2928 
2929 	case DIOCGETETHRULESET: {
2930 		struct epoch_tracker	 et;
2931 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
2932 		nvlist_t		*nvl = NULL;
2933 		void			*nvlpacked = NULL;
2934 		struct pf_keth_ruleset	*ruleset;
2935 		struct pf_keth_anchor	*anchor;
2936 		int			 nr = 0, req_nr = 0;
2937 		bool			 found = false;
2938 
2939 #define ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULESET_error, x)
2940 
2941 		if (nv->len > pf_ioctl_maxcount)
2942 			ERROUT(ENOMEM);
2943 
2944 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
2945 		if (nvlpacked == NULL)
2946 			ERROUT(ENOMEM);
2947 
2948 		error = copyin(nv->data, nvlpacked, nv->len);
2949 		if (error)
2950 			ERROUT(error);
2951 
2952 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
2953 		if (nvl == NULL)
2954 			ERROUT(EBADMSG);
2955 		if (! nvlist_exists_string(nvl, "path"))
2956 			ERROUT(EBADMSG);
2957 		if (! nvlist_exists_number(nvl, "nr"))
2958 			ERROUT(EBADMSG);
2959 
2960 		req_nr = nvlist_get_number(nvl, "nr");
2961 
2962 		NET_EPOCH_ENTER(et);
2963 
2964 		if ((ruleset = pf_find_keth_ruleset(
2965 		    nvlist_get_string(nvl, "path"))) == NULL) {
2966 			NET_EPOCH_EXIT(et);
2967 			ERROUT(ENOENT);
2968 		}
2969 
2970 		nvlist_destroy(nvl);
2971 		nvl = NULL;
2972 		free(nvlpacked, M_NVLIST);
2973 		nvlpacked = NULL;
2974 
2975 		nvl = nvlist_create(0);
2976 		if (nvl == NULL) {
2977 			NET_EPOCH_EXIT(et);
2978 			ERROUT(ENOMEM);
2979 		}
2980 
2981 		if (ruleset->anchor == NULL) {
2982 			RB_FOREACH(anchor, pf_keth_anchor_global,
2983 			    &V_pf_keth_anchors) {
2984 				if (anchor->parent == NULL && nr++ == req_nr) {
2985 					found = true;
2986 					break;
2987 				}
2988 			}
2989 		} else {
2990 			RB_FOREACH(anchor, pf_keth_anchor_node,
2991 			     &ruleset->anchor->children) {
2992 				if (nr++ == req_nr) {
2993 					found = true;
2994 					break;
2995 				}
2996 			}
2997 		}
2998 
2999 		NET_EPOCH_EXIT(et);
3000 		if (found) {
3001 			nvlist_add_number(nvl, "nr", nr);
3002 			nvlist_add_string(nvl, "name", anchor->name);
3003 			if (ruleset->anchor)
3004 				nvlist_add_string(nvl, "path",
3005 				    ruleset->anchor->path);
3006 			else
3007 				nvlist_add_string(nvl, "path", "");
3008 		} else {
3009 			ERROUT(EBUSY);
3010 		}
3011 
3012 		nvlpacked = nvlist_pack(nvl, &nv->len);
3013 		if (nvlpacked == NULL)
3014 			ERROUT(ENOMEM);
3015 
3016 		if (nv->size == 0)
3017 			ERROUT(0);
3018 		else if (nv->size < nv->len)
3019 			ERROUT(ENOSPC);
3020 
3021 		error = copyout(nvlpacked, nv->data, nv->len);
3022 
3023 #undef ERROUT
3024 DIOCGETETHRULESET_error:
3025 		free(nvlpacked, M_NVLIST);
3026 		nvlist_destroy(nvl);
3027 		break;
3028 	}
3029 
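	/*
	 * The request nvlist must carry numbers "ticket" and
	 * "pool_ticket" plus an nvlist "rule"; the strings "anchor" and
	 * "anchor_call" are optional and default to the main ruleset.
	 */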
3030 	case DIOCADDRULENV: {
3031 		struct pfioc_nv	*nv = (struct pfioc_nv *)addr;
3032 		nvlist_t	*nvl = NULL;
3033 		void		*nvlpacked = NULL;
3034 		struct pf_krule	*rule = NULL;
3035 		const char	*anchor = "", *anchor_call = "";
3036 		uint32_t	 ticket = 0, pool_ticket = 0;
3037 
3038 #define	ERROUT(x)	ERROUT_IOCTL(DIOCADDRULENV_error, x)
3039 
3040 		if (nv->len > pf_ioctl_maxcount)
3041 			ERROUT(ENOMEM);
3042 
3043 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3044 		error = copyin(nv->data, nvlpacked, nv->len);
3045 		if (error)
3046 			ERROUT(error);
3047 
3048 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3049 		if (nvl == NULL)
3050 			ERROUT(EBADMSG);
3051 
3052 		if (! nvlist_exists_number(nvl, "ticket"))
3053 			ERROUT(EINVAL);
3054 		ticket = nvlist_get_number(nvl, "ticket");
3055 
3056 		if (! nvlist_exists_number(nvl, "pool_ticket"))
3057 			ERROUT(EINVAL);
3058 		pool_ticket = nvlist_get_number(nvl, "pool_ticket");
3059 
3060 		if (! nvlist_exists_nvlist(nvl, "rule"))
3061 			ERROUT(EINVAL);
3062 
3063 		rule = pf_krule_alloc();
3064 		error = pf_nvrule_to_krule(nvlist_get_nvlist(nvl, "rule"),
3065 		    rule);
3066 		if (error)
3067 			ERROUT(error);
3068 
3069 		if (nvlist_exists_string(nvl, "anchor"))
3070 			anchor = nvlist_get_string(nvl, "anchor");
3071 		if (nvlist_exists_string(nvl, "anchor_call"))
3072 			anchor_call = nvlist_get_string(nvl, "anchor_call");
3073 
3074 		if ((error = nvlist_error(nvl)))
3075 			ERROUT(error);
3076 
3077 		/* Frees rule on error */
3078 		error = pf_ioctl_addrule(rule, ticket, pool_ticket, anchor,
3079 		    anchor_call, td->td_ucred->cr_ruid,
3080 		    td->td_proc ? td->td_proc->p_pid : 0);
3081 
3082 		nvlist_destroy(nvl);
3083 		free(nvlpacked, M_NVLIST);
3084 		break;
3085 #undef ERROUT
3086 DIOCADDRULENV_error:
3087 		pf_krule_free(rule);
3088 		nvlist_destroy(nvl);
3089 		free(nvlpacked, M_NVLIST);
3090 
3091 		break;
3092 	}
3093 	case DIOCADDRULE: {
3094 		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
3095 		struct pf_krule		*rule;
3096 
3097 		rule = pf_krule_alloc();
3098 		error = pf_rule_to_krule(&pr->rule, rule);
3099 		if (error != 0) {
3100 			pf_krule_free(rule);
3101 			break;
3102 		}
3103 
3104 		pr->anchor[sizeof(pr->anchor) - 1] = 0;
3105 
3106 		/* Frees rule on error */
3107 		error = pf_ioctl_addrule(rule, pr->ticket, pr->pool_ticket,
3108 		    pr->anchor, pr->anchor_call, td->td_ucred->cr_ruid,
3109 		    td->td_proc ? td->td_proc->p_pid : 0);
3110 		break;
3111 	}
3112 
3113 	case DIOCGETRULES: {
3114 		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
3115 		struct pf_kruleset	*ruleset;
3116 		struct pf_krule		*tail;
3117 		int			 rs_num;
3118 
3119 		pr->anchor[sizeof(pr->anchor) - 1] = 0;
3120 
3121 		PF_RULES_WLOCK();
3122 		ruleset = pf_find_kruleset(pr->anchor);
3123 		if (ruleset == NULL) {
3124 			PF_RULES_WUNLOCK();
3125 			error = EINVAL;
3126 			break;
3127 		}
3128 		rs_num = pf_get_ruleset_number(pr->rule.action);
3129 		if (rs_num >= PF_RULESET_MAX) {
3130 			PF_RULES_WUNLOCK();
3131 			error = EINVAL;
3132 			break;
3133 		}
3134 		tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
3135 		    pf_krulequeue);
3136 		if (tail)
3137 			pr->nr = tail->nr + 1;
3138 		else
3139 			pr->nr = 0;
3140 		pr->ticket = ruleset->rules[rs_num].active.ticket;
3141 		PF_RULES_WUNLOCK();
3142 		break;
3143 	}
3144 
3145 	case DIOCGETRULENV: {
3146 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3147 		nvlist_t		*nvrule = NULL;
3148 		nvlist_t		*nvl = NULL;
3149 		struct pf_kruleset	*ruleset;
3150 		struct pf_krule		*rule;
3151 		void			*nvlpacked = NULL;
3152 		int			 rs_num, nr;
3153 		bool			 clear_counter = false;
3154 
3155 #define	ERROUT(x)	ERROUT_IOCTL(DIOCGETRULENV_error, x)
3156 
3157 		if (nv->len > pf_ioctl_maxcount)
3158 			ERROUT(ENOMEM);
3159 
3160 		/* Copy the request in */
3161 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3162 		if (nvlpacked == NULL)
3163 			ERROUT(ENOMEM);
3164 
3165 		error = copyin(nv->data, nvlpacked, nv->len);
3166 		if (error)
3167 			ERROUT(error);
3168 
3169 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3170 		if (nvl == NULL)
3171 			ERROUT(EBADMSG);
3172 
3173 		if (! nvlist_exists_string(nvl, "anchor"))
3174 			ERROUT(EBADMSG);
3175 		if (! nvlist_exists_number(nvl, "ruleset"))
3176 			ERROUT(EBADMSG);
3177 		if (! nvlist_exists_number(nvl, "ticket"))
3178 			ERROUT(EBADMSG);
3179 		if (! nvlist_exists_number(nvl, "nr"))
3180 			ERROUT(EBADMSG);
3181 
3182 		if (nvlist_exists_bool(nvl, "clear_counter"))
3183 			clear_counter = nvlist_get_bool(nvl, "clear_counter");
3184 
3185 		if (clear_counter && !(flags & FWRITE))
3186 			ERROUT(EACCES);
3187 
3188 		nr = nvlist_get_number(nvl, "nr");
3189 
3190 		PF_RULES_WLOCK();
3191 		ruleset = pf_find_kruleset(nvlist_get_string(nvl, "anchor"));
3192 		if (ruleset == NULL) {
3193 			PF_RULES_WUNLOCK();
3194 			ERROUT(ENOENT);
3195 		}
3196 
3197 		rs_num = pf_get_ruleset_number(nvlist_get_number(nvl, "ruleset"));
3198 		if (rs_num >= PF_RULESET_MAX) {
3199 			PF_RULES_WUNLOCK();
3200 			ERROUT(EINVAL);
3201 		}
3202 
3203 		if (nvlist_get_number(nvl, "ticket") !=
3204 		    ruleset->rules[rs_num].active.ticket) {
3205 			PF_RULES_WUNLOCK();
3206 			ERROUT(EBUSY);
3207 		}
3208 
3209 		if ((error = nvlist_error(nvl))) {
3210 			PF_RULES_WUNLOCK();
3211 			ERROUT(error);
3212 		}
3213 
3214 		rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
3215 		while ((rule != NULL) && (rule->nr != nr))
3216 			rule = TAILQ_NEXT(rule, entries);
3217 		if (rule == NULL) {
3218 			PF_RULES_WUNLOCK();
3219 			ERROUT(EBUSY);
3220 		}
3221 
3222 		nvrule = pf_krule_to_nvrule(rule);
3223 
3224 		nvlist_destroy(nvl);
3225 		nvl = nvlist_create(0);
3226 		if (nvl == NULL) {
3227 			PF_RULES_WUNLOCK();
3228 			ERROUT(ENOMEM);
3229 		}
3230 		nvlist_add_number(nvl, "nr", nr);
3231 		nvlist_add_nvlist(nvl, "rule", nvrule);
3232 		nvlist_destroy(nvrule);
3233 		nvrule = NULL;
3234 		if (pf_kanchor_nvcopyout(ruleset, rule, nvl)) {
3235 			PF_RULES_WUNLOCK();
3236 			ERROUT(EBUSY);
3237 		}
3238 
3239 		free(nvlpacked, M_NVLIST);
3240 		nvlpacked = nvlist_pack(nvl, &nv->len);
3241 		if (nvlpacked == NULL) {
3242 			PF_RULES_WUNLOCK();
3243 			ERROUT(ENOMEM);
3244 		}
3245 
3246 		if (nv->size == 0) {
3247 			PF_RULES_WUNLOCK();
3248 			ERROUT(0);
3249 		} else if (nv->size < nv->len) {
3251 			PF_RULES_WUNLOCK();
3252 			ERROUT(ENOSPC);
3253 		}
3254 
3255 		if (clear_counter) {
3256 			pf_counter_u64_zero(&rule->evaluations);
3257 			for (int i = 0; i < 2; i++) {
3258 				pf_counter_u64_zero(&rule->packets[i]);
3259 				pf_counter_u64_zero(&rule->bytes[i]);
3260 			}
3261 			counter_u64_zero(rule->states_tot);
3262 		}
3263 		PF_RULES_WUNLOCK();
3264 
3265 		error = copyout(nvlpacked, nv->data, nv->len);
3266 
3267 #undef ERROUT
3268 DIOCGETRULENV_error:
3269 		free(nvlpacked, M_NVLIST);
3270 		nvlist_destroy(nvrule);
3271 		nvlist_destroy(nvl);
3272 
3273 		break;
3274 	}
3275 
3276 	case DIOCCHANGERULE: {
3277 		struct pfioc_rule	*pcr = (struct pfioc_rule *)addr;
3278 		struct pf_kruleset	*ruleset;
3279 		struct pf_krule		*oldrule = NULL, *newrule = NULL;
3280 		struct pfi_kkif		*kif = NULL;
3281 		struct pf_kpooladdr	*pa;
3282 		u_int32_t		 nr = 0;
3283 		int			 rs_num;
3284 
3285 		pcr->anchor[sizeof(pcr->anchor) - 1] = 0;
3286 
3287 		if (pcr->action < PF_CHANGE_ADD_HEAD ||
3288 		    pcr->action > PF_CHANGE_GET_TICKET) {
3289 			error = EINVAL;
3290 			break;
3291 		}
3292 		if ((pcr->rule.return_icmp >> 8) > ICMP_MAXTYPE) {
3293 			error = EINVAL;
3294 			break;
3295 		}
3296 
3297 		if (pcr->action != PF_CHANGE_REMOVE) {
3298 			newrule = pf_krule_alloc();
3299 			error = pf_rule_to_krule(&pcr->rule, newrule);
3300 			if (error != 0) {
3301 				pf_krule_free(newrule);
3302 				break;
3303 			}
3304 
3305 			if (newrule->ifname[0])
3306 				kif = pf_kkif_create(M_WAITOK);
3307 			pf_counter_u64_init(&newrule->evaluations, M_WAITOK);
3308 			for (int i = 0; i < 2; i++) {
3309 				pf_counter_u64_init(&newrule->packets[i], M_WAITOK);
3310 				pf_counter_u64_init(&newrule->bytes[i], M_WAITOK);
3311 			}
3312 			newrule->states_cur = counter_u64_alloc(M_WAITOK);
3313 			newrule->states_tot = counter_u64_alloc(M_WAITOK);
3314 			newrule->src_nodes = counter_u64_alloc(M_WAITOK);
3315 			newrule->cuid = td->td_ucred->cr_ruid;
3316 			newrule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
3317 			TAILQ_INIT(&newrule->rpool.list);
3318 		}
3319 #define	ERROUT(x)	ERROUT_IOCTL(DIOCCHANGERULE_error, x)
3320 
3321 		PF_CONFIG_LOCK();
3322 		PF_RULES_WLOCK();
3323 #ifdef PF_WANT_32_TO_64_COUNTER
3324 		if (newrule != NULL) {
3325 			LIST_INSERT_HEAD(&V_pf_allrulelist, newrule, allrulelist);
3326 			newrule->allrulelinked = true;
3327 			V_pf_allrulecount++;
3328 		}
3329 #endif
3330 
3331 		if (!(pcr->action == PF_CHANGE_REMOVE ||
3332 		    pcr->action == PF_CHANGE_GET_TICKET) &&
3333 		    pcr->pool_ticket != V_ticket_pabuf)
3334 			ERROUT(EBUSY);
3335 
3336 		ruleset = pf_find_kruleset(pcr->anchor);
3337 		if (ruleset == NULL)
3338 			ERROUT(EINVAL);
3339 
3340 		rs_num = pf_get_ruleset_number(pcr->rule.action);
3341 		if (rs_num >= PF_RULESET_MAX)
3342 			ERROUT(EINVAL);
3343 
3344 		/*
3345 		 * XXXMJG: there is no guarantee that the ruleset was
3346 		 * created by the usual route of calling DIOCXBEGIN.
3347 		 * As a result the rule tree may not be allocated yet.
3348 		 * Hack around it by allocating it here. Note it is fine
3349 		 * to let the tree persist in case of error, as it will
3350 		 * be freed down the road by future updates (if need be).
3351 		 */
3353 		if (ruleset->rules[rs_num].active.tree == NULL) {
3354 			ruleset->rules[rs_num].active.tree = pf_rule_tree_alloc(M_NOWAIT);
3355 			if (ruleset->rules[rs_num].active.tree == NULL) {
3356 				ERROUT(ENOMEM);
3357 			}
3358 		}
3359 
3360 		if (pcr->action == PF_CHANGE_GET_TICKET) {
3361 			pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
3362 			ERROUT(0);
3363 		} else if (pcr->ticket !=
3364 			    ruleset->rules[rs_num].active.ticket)
3365 				ERROUT(EINVAL);
3366 
3367 		if (pcr->action != PF_CHANGE_REMOVE) {
3368 			if (newrule->ifname[0]) {
3369 				newrule->kif = pfi_kkif_attach(kif,
3370 				    newrule->ifname);
3371 				kif = NULL;
3372 				pfi_kkif_ref(newrule->kif);
3373 			} else
3374 				newrule->kif = NULL;
3375 
3376 			if (newrule->rtableid > 0 &&
3377 			    newrule->rtableid >= rt_numfibs)
3378 				error = EBUSY;
3379 
3380 #ifdef ALTQ
3381 			/* set queue IDs */
3382 			if (newrule->qname[0] != 0) {
3383 				if ((newrule->qid =
3384 				    pf_qname2qid(newrule->qname)) == 0)
3385 					error = EBUSY;
3386 				else if (newrule->pqname[0] != 0) {
3387 					if ((newrule->pqid =
3388 					    pf_qname2qid(newrule->pqname)) == 0)
3389 						error = EBUSY;
3390 				} else
3391 					newrule->pqid = newrule->qid;
3392 			}
3393 #endif /* ALTQ */
3394 			if (newrule->tagname[0])
3395 				if ((newrule->tag =
3396 				    pf_tagname2tag(newrule->tagname)) == 0)
3397 					error = EBUSY;
3398 			if (newrule->match_tagname[0])
3399 				if ((newrule->match_tag = pf_tagname2tag(
3400 				    newrule->match_tagname)) == 0)
3401 					error = EBUSY;
3402 			if (newrule->rt && !newrule->direction)
3403 				error = EINVAL;
3404 			if (!newrule->log)
3405 				newrule->logif = 0;
3406 			if (newrule->logif >= PFLOGIFS_MAX)
3407 				error = EINVAL;
3408 			if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af))
3409 				error = ENOMEM;
3410 			if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af))
3411 				error = ENOMEM;
3412 			if (pf_kanchor_setup(newrule, ruleset, pcr->anchor_call))
3413 				error = EINVAL;
3414 			TAILQ_FOREACH(pa, &V_pf_pabuf, entries)
3415 				if (pa->addr.type == PF_ADDR_TABLE) {
3416 					pa->addr.p.tbl =
3417 					    pfr_attach_table(ruleset,
3418 					    pa->addr.v.tblname);
3419 					if (pa->addr.p.tbl == NULL)
3420 						error = ENOMEM;
3421 				}
3422 
3423 			newrule->overload_tbl = NULL;
3424 			if (newrule->overload_tblname[0]) {
3425 				if ((newrule->overload_tbl = pfr_attach_table(
3426 				    ruleset, newrule->overload_tblname)) ==
3427 				    NULL)
3428 					error = EINVAL;
3429 				else
3430 					newrule->overload_tbl->pfrkt_flags |=
3431 					    PFR_TFLAG_ACTIVE;
3432 			}
3433 
3434 			pf_mv_kpool(&V_pf_pabuf, &newrule->rpool.list);
3435 			if (((((newrule->action == PF_NAT) ||
3436 			    (newrule->action == PF_RDR) ||
3437 			    (newrule->action == PF_BINAT) ||
3438 			    (newrule->rt > PF_NOPFROUTE)) &&
3439 			    !newrule->anchor)) &&
3440 			    (TAILQ_FIRST(&newrule->rpool.list) == NULL))
3441 				error = EINVAL;
3442 
3443 			if (error) {
3444 				pf_free_rule(newrule);
3445 				PF_RULES_WUNLOCK();
3446 				PF_CONFIG_UNLOCK();
3447 				break;
3448 			}
3449 
3450 			newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
3451 		}
3452 		pf_empty_kpool(&V_pf_pabuf);
3453 
3454 		if (pcr->action == PF_CHANGE_ADD_HEAD)
3455 			oldrule = TAILQ_FIRST(
3456 			    ruleset->rules[rs_num].active.ptr);
3457 		else if (pcr->action == PF_CHANGE_ADD_TAIL)
3458 			oldrule = TAILQ_LAST(
3459 			    ruleset->rules[rs_num].active.ptr, pf_krulequeue);
3460 		else {
3461 			oldrule = TAILQ_FIRST(
3462 			    ruleset->rules[rs_num].active.ptr);
3463 			while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
3464 				oldrule = TAILQ_NEXT(oldrule, entries);
3465 			if (oldrule == NULL) {
3466 				if (newrule != NULL)
3467 					pf_free_rule(newrule);
3468 				PF_RULES_WUNLOCK();
3469 				PF_CONFIG_UNLOCK();
3470 				error = EINVAL;
3471 				break;
3472 			}
3473 		}
3474 
3475 		if (pcr->action == PF_CHANGE_REMOVE) {
3476 			pf_unlink_rule(ruleset->rules[rs_num].active.ptr,
3477 			    oldrule);
3478 			RB_REMOVE(pf_krule_global,
3479 			    ruleset->rules[rs_num].active.tree, oldrule);
3480 			ruleset->rules[rs_num].active.rcount--;
3481 		} else {
3482 			pf_hash_rule(newrule);
3483 			if (RB_INSERT(pf_krule_global,
3484 			    ruleset->rules[rs_num].active.tree, newrule) != NULL) {
3485 				pf_free_rule(newrule);
3486 				PF_RULES_WUNLOCK();
3487 				PF_CONFIG_UNLOCK();
3488 				error = EEXIST;
3489 				break;
3490 			}
3491 
3492 			if (oldrule == NULL)
3493 				TAILQ_INSERT_TAIL(
3494 				    ruleset->rules[rs_num].active.ptr,
3495 				    newrule, entries);
3496 			else if (pcr->action == PF_CHANGE_ADD_HEAD ||
3497 			    pcr->action == PF_CHANGE_ADD_BEFORE)
3498 				TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
3499 			else
3500 				TAILQ_INSERT_AFTER(
3501 				    ruleset->rules[rs_num].active.ptr,
3502 				    oldrule, newrule, entries);
3503 			ruleset->rules[rs_num].active.rcount++;
3504 		}
3505 
3506 		nr = 0;
3507 		TAILQ_FOREACH(oldrule,
3508 		    ruleset->rules[rs_num].active.ptr, entries)
3509 			oldrule->nr = nr++;
3510 
3511 		ruleset->rules[rs_num].active.ticket++;
3512 
3513 		pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
3514 		pf_remove_if_empty_kruleset(ruleset);
3515 
3516 		PF_RULES_WUNLOCK();
3517 		PF_CONFIG_UNLOCK();
3518 		break;
3519 
3520 #undef ERROUT
3521 DIOCCHANGERULE_error:
3522 		PF_RULES_WUNLOCK();
3523 		PF_CONFIG_UNLOCK();
3524 		pf_krule_free(newrule);
3525 		pf_kkif_free(kif);
3526 		break;
3527 	}
3528 
3529 	case DIOCCLRSTATESNV: {
3530 		error = pf_clearstates_nv((struct pfioc_nv *)addr);
3531 		break;
3532 	}
3533 
3534 	case DIOCKILLSTATESNV: {
3535 		error = pf_killstates_nv((struct pfioc_nv *)addr);
3536 		break;
3537 	}
3538 
3539 	case DIOCADDSTATE: {
3540 		struct pfioc_state		*ps = (struct pfioc_state *)addr;
3541 		struct pfsync_state_1301	*sp = &ps->state;
3542 
3543 		if (sp->timeout >= PFTM_MAX) {
3544 			error = EINVAL;
3545 			break;
3546 		}
3547 		if (V_pfsync_state_import_ptr != NULL) {
3548 			PF_RULES_RLOCK();
3549 			error = V_pfsync_state_import_ptr(
3550 			    (union pfsync_state_union *)sp, PFSYNC_SI_IOCTL,
3551 			    PFSYNC_MSG_VERSION_1301);
3552 			PF_RULES_RUNLOCK();
3553 		} else
3554 			error = EOPNOTSUPP;
3555 		break;
3556 	}
3557 
3558 	case DIOCGETSTATE: {
3559 		struct pfioc_state	*ps = (struct pfioc_state *)addr;
3560 		struct pf_kstate	*s;
3561 
3562 		s = pf_find_state_byid(ps->state.id, ps->state.creatorid);
3563 		if (s == NULL) {
3564 			error = ENOENT;
3565 			break;
3566 		}
3567 
3568 		pfsync_state_export((union pfsync_state_union*)&ps->state,
3569 		    s, PFSYNC_MSG_VERSION_1301);
3570 		PF_STATE_UNLOCK(s);
3571 		break;
3572 	}
3573 
3574 	case DIOCGETSTATENV: {
3575 		error = pf_getstate((struct pfioc_nv *)addr);
3576 		break;
3577 	}
3578 
3579 #ifdef COMPAT_FREEBSD14
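	/*
	 * Export all states. With ps_len == 0 only the space required is
	 * reported; otherwise states are collected row by row into a
	 * slice buffer (grown, and the row rescanned, whenever a row
	 * holds more states than the current slice) and copied out in
	 * row-sized chunks until done or the caller's buffer is full.
	 */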
3580 	case DIOCGETSTATES: {
3581 		struct pfioc_states	*ps = (struct pfioc_states *)addr;
3582 		struct pf_kstate	*s;
3583 		struct pfsync_state_1301	*pstore, *p;
3584 		int			 i, nr;
3585 		size_t			 slice_count = 16, count;
3586 		void			*out;
3587 
3588 		if (ps->ps_len <= 0) {
3589 			nr = uma_zone_get_cur(V_pf_state_z);
3590 			ps->ps_len = sizeof(struct pfsync_state_1301) * nr;
3591 			break;
3592 		}
3593 
3594 		out = ps->ps_states;
3595 		pstore = mallocarray(slice_count,
3596 		    sizeof(struct pfsync_state_1301), M_TEMP, M_WAITOK | M_ZERO);
3597 		nr = 0;
3598 
3599 		for (i = 0; i <= pf_hashmask; i++) {
3600 			struct pf_idhash *ih = &V_pf_idhash[i];
3601 
3602 DIOCGETSTATES_retry:
3603 			p = pstore;
3604 
3605 			if (LIST_EMPTY(&ih->states))
3606 				continue;
3607 
3608 			PF_HASHROW_LOCK(ih);
3609 			count = 0;
3610 			LIST_FOREACH(s, &ih->states, entry) {
3611 				if (s->timeout == PFTM_UNLINKED)
3612 					continue;
3613 				count++;
3614 			}
3615 
3616 			if (count > slice_count) {
3617 				PF_HASHROW_UNLOCK(ih);
3618 				free(pstore, M_TEMP);
3619 				slice_count = count * 2;
3620 				pstore = mallocarray(slice_count,
3621 				    sizeof(struct pfsync_state_1301), M_TEMP,
3622 				    M_WAITOK | M_ZERO);
3623 				goto DIOCGETSTATES_retry;
3624 			}
3625 
3626 			if ((nr+count) * sizeof(*p) > ps->ps_len) {
3627 				PF_HASHROW_UNLOCK(ih);
3628 				goto DIOCGETSTATES_full;
3629 			}
3630 
3631 			LIST_FOREACH(s, &ih->states, entry) {
3632 				if (s->timeout == PFTM_UNLINKED)
3633 					continue;
3634 
3635 				pfsync_state_export((union pfsync_state_union*)p,
3636 				    s, PFSYNC_MSG_VERSION_1301);
3637 				p++;
3638 				nr++;
3639 			}
3640 			PF_HASHROW_UNLOCK(ih);
3641 			error = copyout(pstore, out,
3642 			    sizeof(struct pfsync_state_1301) * count);
3643 			if (error)
3644 				break;
3645 			out = ps->ps_states + nr;
3646 		}
3647 DIOCGETSTATES_full:
3648 		ps->ps_len = sizeof(struct pfsync_state_1301) * nr;
3649 		free(pstore, M_TEMP);
3650 
3651 		break;
3652 	}
3653 
3654 	case DIOCGETSTATESV2: {
3655 		struct pfioc_states_v2	*ps = (struct pfioc_states_v2 *)addr;
3656 		struct pf_kstate	*s;
3657 		struct pf_state_export	*pstore, *p;
3658 		int i, nr;
3659 		size_t slice_count = 16, count;
3660 		void *out;
3661 
3662 		if (ps->ps_req_version > PF_STATE_VERSION) {
3663 			error = ENOTSUP;
3664 			break;
3665 		}
3666 
3667 		if (ps->ps_len <= 0) {
3668 			nr = uma_zone_get_cur(V_pf_state_z);
3669 			ps->ps_len = sizeof(struct pf_state_export) * nr;
3670 			break;
3671 		}
3672 
3673 		out = ps->ps_states;
3674 		pstore = mallocarray(slice_count,
3675 		    sizeof(struct pf_state_export), M_TEMP, M_WAITOK | M_ZERO);
3676 		nr = 0;
3677 
3678 		for (i = 0; i <= pf_hashmask; i++) {
3679 			struct pf_idhash *ih = &V_pf_idhash[i];
3680 
3681 DIOCGETSTATESV2_retry:
3682 			p = pstore;
3683 
3684 			if (LIST_EMPTY(&ih->states))
3685 				continue;
3686 
3687 			PF_HASHROW_LOCK(ih);
3688 			count = 0;
3689 			LIST_FOREACH(s, &ih->states, entry) {
3690 				if (s->timeout == PFTM_UNLINKED)
3691 					continue;
3692 				count++;
3693 			}
3694 
3695 			if (count > slice_count) {
3696 				PF_HASHROW_UNLOCK(ih);
3697 				free(pstore, M_TEMP);
3698 				slice_count = count * 2;
3699 				pstore = mallocarray(slice_count,
3700 				    sizeof(struct pf_state_export), M_TEMP,
3701 				    M_WAITOK | M_ZERO);
3702 				goto DIOCGETSTATESV2_retry;
3703 			}
3704 
3705 			if ((nr+count) * sizeof(*p) > ps->ps_len) {
3706 				PF_HASHROW_UNLOCK(ih);
3707 				goto DIOCGETSTATESV2_full;
3708 			}
3709 
3710 			LIST_FOREACH(s, &ih->states, entry) {
3711 				if (s->timeout == PFTM_UNLINKED)
3712 					continue;
3713 
3714 				pf_state_export(p, s);
3715 				p++;
3716 				nr++;
3717 			}
3718 			PF_HASHROW_UNLOCK(ih);
3719 			error = copyout(pstore, out,
3720 			    sizeof(struct pf_state_export) * count);
3721 			if (error)
3722 				break;
3723 			out = ps->ps_states + nr;
3724 		}
3725 DIOCGETSTATESV2_full:
3726 		ps->ps_len = nr * sizeof(struct pf_state_export);
3727 		free(pstore, M_TEMP);
3728 
3729 		break;
3730 	}
3731 #endif
3732 	case DIOCGETSTATUSNV: {
3733 		error = pf_getstatus((struct pfioc_nv *)addr);
3734 		break;
3735 	}
3736 
3737 	case DIOCSETSTATUSIF: {
3738 		struct pfioc_if	*pi = (struct pfioc_if *)addr;
3739 
3740 		if (pi->ifname[0] == 0) {
3741 			bzero(V_pf_status.ifname, IFNAMSIZ);
3742 			break;
3743 		}
3744 		PF_RULES_WLOCK();
3745 		error = pf_user_strcpy(V_pf_status.ifname, pi->ifname, IFNAMSIZ);
3746 		PF_RULES_WUNLOCK();
3747 		break;
3748 	}
3749 
3750 	case DIOCCLRSTATUS: {
3751 		PF_RULES_WLOCK();
3752 		for (int i = 0; i < PFRES_MAX; i++)
3753 			counter_u64_zero(V_pf_status.counters[i]);
3754 		for (int i = 0; i < FCNT_MAX; i++)
3755 			pf_counter_u64_zero(&V_pf_status.fcounters[i]);
3756 		for (int i = 0; i < SCNT_MAX; i++)
3757 			counter_u64_zero(V_pf_status.scounters[i]);
3758 		for (int i = 0; i < KLCNT_MAX; i++)
3759 			counter_u64_zero(V_pf_status.lcounters[i]);
3760 		V_pf_status.since = time_second;
3761 		if (*V_pf_status.ifname)
3762 			pfi_update_status(V_pf_status.ifname, NULL);
3763 		PF_RULES_WUNLOCK();
3764 		break;
3765 	}
3766 
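	/*
	 * Translation lookup: the caller fills in af, proto, direction
	 * and the saddr/sport and daddr/dport of interest; on success
	 * rsaddr/rsport and rdaddr/rdport are filled in from the
	 * matching state's key. Fails with E2BIG when the key matches
	 * more than one state.
	 */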
3767 	case DIOCNATLOOK: {
3768 		struct pfioc_natlook	*pnl = (struct pfioc_natlook *)addr;
3769 		struct pf_state_key	*sk;
3770 		struct pf_kstate	*state;
3771 		struct pf_state_key_cmp	 key;
3772 		int			 m = 0, direction = pnl->direction;
3773 		int			 sidx, didx;
3774 
3775 		/* NATLOOK src and dst are reversed, so reverse sidx/didx */
3776 		sidx = (direction == PF_IN) ? 1 : 0;
3777 		didx = (direction == PF_IN) ? 0 : 1;
3778 
3779 		if (!pnl->proto ||
3780 		    PF_AZERO(&pnl->saddr, pnl->af) ||
3781 		    PF_AZERO(&pnl->daddr, pnl->af) ||
3782 		    ((pnl->proto == IPPROTO_TCP ||
3783 		    pnl->proto == IPPROTO_UDP) &&
3784 		    (!pnl->dport || !pnl->sport)))
3785 			error = EINVAL;
3786 		else {
3787 			bzero(&key, sizeof(key));
3788 			key.af = pnl->af;
3789 			key.proto = pnl->proto;
3790 			PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af);
3791 			key.port[sidx] = pnl->sport;
3792 			PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af);
3793 			key.port[didx] = pnl->dport;
3794 
3795 			state = pf_find_state_all(&key, direction, &m);
3796 			if (state == NULL) {
3797 				error = ENOENT;
3798 			} else {
3799 				if (m > 1) {
3800 					PF_STATE_UNLOCK(state);
3801 					error = E2BIG;	/* more than one state */
3802 				} else {
3803 					sk = state->key[sidx];
3804 					PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af);
3805 					pnl->rsport = sk->port[sidx];
3806 					PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af);
3807 					pnl->rdport = sk->port[didx];
3808 					PF_STATE_UNLOCK(state);
3809 				}
3810 			}
3811 		}
3812 		break;
3813 	}
3814 
3815 	case DIOCSETTIMEOUT: {
3816 		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;
3817 		int		 old;
3818 
3819 		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
3820 		    pt->seconds < 0) {
3821 			error = EINVAL;
3822 			break;
3823 		}
3824 		PF_RULES_WLOCK();
3825 		old = V_pf_default_rule.timeout[pt->timeout];
3826 		if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0)
3827 			pt->seconds = 1;
3828 		V_pf_default_rule.timeout[pt->timeout] = pt->seconds;
3829 		if (pt->timeout == PFTM_INTERVAL && pt->seconds < old)
3830 			wakeup(pf_purge_thread);
3831 		pt->seconds = old;
3832 		PF_RULES_WUNLOCK();
3833 		break;
3834 	}
3835 
3836 	case DIOCGETTIMEOUT: {
3837 		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;
3838 
3839 		if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
3840 			error = EINVAL;
3841 			break;
3842 		}
3843 		PF_RULES_RLOCK();
3844 		pt->seconds = V_pf_default_rule.timeout[pt->timeout];
3845 		PF_RULES_RUNLOCK();
3846 		break;
3847 	}
3848 
3849 	case DIOCGETLIMIT: {
3850 		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;
3851 
3852 		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
3853 			error = EINVAL;
3854 			break;
3855 		}
3856 		PF_RULES_RLOCK();
3857 		pl->limit = V_pf_limits[pl->index].limit;
3858 		PF_RULES_RUNLOCK();
3859 		break;
3860 	}
3861 
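	/*
	 * DIOCSETLIMIT: cap the UMA zone backing the given resource
	 * (states, source nodes, ...) and hand the previous limit back
	 * to the caller in pl->limit.
	 */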
3862 	case DIOCSETLIMIT: {
3863 		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;
3864 		int			 old_limit;
3865 
3866 		PF_RULES_WLOCK();
3867 		if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
3868 		    V_pf_limits[pl->index].zone == NULL) {
3869 			PF_RULES_WUNLOCK();
3870 			error = EINVAL;
3871 			break;
3872 		}
3873 		uma_zone_set_max(V_pf_limits[pl->index].zone, pl->limit);
3874 		old_limit = V_pf_limits[pl->index].limit;
3875 		V_pf_limits[pl->index].limit = pl->limit;
3876 		pl->limit = old_limit;
3877 		PF_RULES_WUNLOCK();
3878 		break;
3879 	}
3880 
3881 	case DIOCSETDEBUG: {
3882 		u_int32_t	*level = (u_int32_t *)addr;
3883 
3884 		PF_RULES_WLOCK();
3885 		V_pf_status.debug = *level;
3886 		PF_RULES_WUNLOCK();
3887 		break;
3888 	}
3889 
3890 	case DIOCCLRRULECTRS: {
3891 		/* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
3892 		struct pf_kruleset	*ruleset = &pf_main_ruleset;
3893 		struct pf_krule		*rule;
3894 
3895 		PF_RULES_WLOCK();
3896 		TAILQ_FOREACH(rule,
3897 		    ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
3898 			pf_counter_u64_zero(&rule->evaluations);
3899 			for (int i = 0; i < 2; i++) {
3900 				pf_counter_u64_zero(&rule->packets[i]);
3901 				pf_counter_u64_zero(&rule->bytes[i]);
3902 			}
3903 		}
3904 		PF_RULES_WUNLOCK();
3905 		break;
3906 	}
3907 
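	/*
	 * DIOCGIFSPEED: report the link speed of an interface.  V0 only
	 * carries a 32-bit field, so the baudrate is clamped to UINT_MAX;
	 * V1 additionally returns the full 64-bit value.
	 */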
3908 	case DIOCGIFSPEEDV0:
3909 	case DIOCGIFSPEEDV1: {
3910 		struct pf_ifspeed_v1	*psp = (struct pf_ifspeed_v1 *)addr;
3911 		struct pf_ifspeed_v1	ps;
3912 		struct ifnet		*ifp;
3913 
3914 		if (psp->ifname[0] == '\0') {
3915 			error = EINVAL;
3916 			break;
3917 		}
3918 
3919 		error = pf_user_strcpy(ps.ifname, psp->ifname, IFNAMSIZ);
3920 		if (error != 0)
3921 			break;
3922 		ifp = ifunit(ps.ifname);
3923 		if (ifp != NULL) {
3924 			psp->baudrate32 =
3925 			    (u_int32_t)uqmin(ifp->if_baudrate, UINT_MAX);
3926 			if (cmd == DIOCGIFSPEEDV1)
3927 				psp->baudrate = ifp->if_baudrate;
3928 		} else {
3929 			error = EINVAL;
3930 		}
3931 		break;
3932 	}
3933 
3934 #ifdef ALTQ
3935 	case DIOCSTARTALTQ: {
3936 		struct pf_altq		*altq;
3937 
3938 		PF_RULES_WLOCK();
3939 		/* enable all altq interfaces on active list */
3940 		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
3941 			if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
3942 				error = pf_enable_altq(altq);
3943 				if (error != 0)
3944 					break;
3945 			}
3946 		}
3947 		if (error == 0)
3948 			V_pf_altq_running = 1;
3949 		PF_RULES_WUNLOCK();
3950 		DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
3951 		break;
3952 	}
3953 
3954 	case DIOCSTOPALTQ: {
3955 		struct pf_altq		*altq;
3956 
3957 		PF_RULES_WLOCK();
3958 		/* disable all altq interfaces on active list */
3959 		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
3960 			if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
3961 				error = pf_disable_altq(altq);
3962 				if (error != 0)
3963 					break;
3964 			}
3965 		}
3966 		if (error == 0)
3967 			V_pf_altq_running = 0;
3968 		PF_RULES_WUNLOCK();
3969 		DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
3970 		break;
3971 	}
3972 
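	/*
	 * DIOCADDALTQV0/V1: stage a new ALTQ queue or interface
	 * discipline on the inactive lists.  The caller's ticket must
	 * match the current inactive ticket; the entry only becomes
	 * active once the transaction is committed.
	 */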
3973 	case DIOCADDALTQV0:
3974 	case DIOCADDALTQV1: {
3975 		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
3976 		struct pf_altq		*altq, *a;
3977 		struct ifnet		*ifp;
3978 
3979 		altq = malloc(sizeof(*altq), M_PFALTQ, M_WAITOK | M_ZERO);
3980 		error = pf_import_kaltq(pa, altq, IOCPARM_LEN(cmd));
3981 		if (error)
3982 			break;
3983 		altq->local_flags = 0;
3984 
3985 		PF_RULES_WLOCK();
3986 		if (pa->ticket != V_ticket_altqs_inactive) {
3987 			PF_RULES_WUNLOCK();
3988 			free(altq, M_PFALTQ);
3989 			error = EBUSY;
3990 			break;
3991 		}
3992 
3993 		/*
3994 		 * if this is for a queue, find the discipline and
3995 		 * copy the necessary fields
3996 		 */
3997 		if (altq->qname[0] != 0) {
3998 			if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
3999 				PF_RULES_WUNLOCK();
4000 				error = EBUSY;
4001 				free(altq, M_PFALTQ);
4002 				break;
4003 			}
4004 			altq->altq_disc = NULL;
4005 			TAILQ_FOREACH(a, V_pf_altq_ifs_inactive, entries) {
4006 				if (strncmp(a->ifname, altq->ifname,
4007 				    IFNAMSIZ) == 0) {
4008 					altq->altq_disc = a->altq_disc;
4009 					break;
4010 				}
4011 			}
4012 		}
4013 
4014 		if ((ifp = ifunit(altq->ifname)) == NULL)
4015 			altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
4016 		else
4017 			error = altq_add(ifp, altq);
4018 
4019 		if (error) {
4020 			PF_RULES_WUNLOCK();
4021 			free(altq, M_PFALTQ);
4022 			break;
4023 		}
4024 
4025 		if (altq->qname[0] != 0)
4026 			TAILQ_INSERT_TAIL(V_pf_altqs_inactive, altq, entries);
4027 		else
4028 			TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, altq, entries);
4029 		/* version error check done on import above */
4030 		pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
4031 		PF_RULES_WUNLOCK();
4032 		break;
4033 	}
4034 
4035 	case DIOCGETALTQSV0:
4036 	case DIOCGETALTQSV1: {
4037 		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
4038 		struct pf_altq		*altq;
4039 
4040 		PF_RULES_RLOCK();
4041 		pa->nr = 0;
4042 		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries)
4043 			pa->nr++;
4044 		TAILQ_FOREACH(altq, V_pf_altqs_active, entries)
4045 			pa->nr++;
4046 		pa->ticket = V_ticket_altqs_active;
4047 		PF_RULES_RUNLOCK();
4048 		break;
4049 	}
4050 
4051 	case DIOCGETALTQV0:
4052 	case DIOCGETALTQV1: {
4053 		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
4054 		struct pf_altq		*altq;
4055 
4056 		PF_RULES_RLOCK();
4057 		if (pa->ticket != V_ticket_altqs_active) {
4058 			PF_RULES_RUNLOCK();
4059 			error = EBUSY;
4060 			break;
4061 		}
4062 		altq = pf_altq_get_nth_active(pa->nr);
4063 		if (altq == NULL) {
4064 			PF_RULES_RUNLOCK();
4065 			error = EBUSY;
4066 			break;
4067 		}
4068 		pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
4069 		PF_RULES_RUNLOCK();
4070 		break;
4071 	}
4072 
4073 	case DIOCCHANGEALTQV0:
4074 	case DIOCCHANGEALTQV1:
4075 		/* CHANGEALTQ not supported yet! */
4076 		error = ENODEV;
4077 		break;
4078 
4079 	case DIOCGETQSTATSV0:
4080 	case DIOCGETQSTATSV1: {
4081 		struct pfioc_qstats_v1	*pq = (struct pfioc_qstats_v1 *)addr;
4082 		struct pf_altq		*altq;
4083 		int			 nbytes;
4084 		u_int32_t		 version;
4085 
4086 		PF_RULES_RLOCK();
4087 		if (pq->ticket != V_ticket_altqs_active) {
4088 			PF_RULES_RUNLOCK();
4089 			error = EBUSY;
4090 			break;
4091 		}
4092 		nbytes = pq->nbytes;
4093 		altq = pf_altq_get_nth_active(pq->nr);
4094 		if (altq == NULL) {
4095 			PF_RULES_RUNLOCK();
4096 			error = EBUSY;
4097 			break;
4098 		}
4099 
4100 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) != 0) {
4101 			PF_RULES_RUNLOCK();
4102 			error = ENXIO;
4103 			break;
4104 		}
4105 		PF_RULES_RUNLOCK();
4106 		if (cmd == DIOCGETQSTATSV0)
4107 			version = 0;  /* DIOCGETQSTATSV0 means stats struct v0 */
4108 		else
4109 			version = pq->version;
4110 		error = altq_getqstats(altq, pq->buf, &nbytes, version);
4111 		if (error == 0) {
4112 			pq->scheduler = altq->scheduler;
4113 			pq->nbytes = nbytes;
4114 		}
4115 		break;
4116 	}
4117 #endif /* ALTQ */
4118 
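	/*
	 * DIOCBEGINADDRS/DIOCADDADDR: two-step construction of a rule's
	 * address pool.  BEGINADDRS empties the staging buffer and
	 * issues a ticket; ADDADDR appends entries under that ticket.
	 */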
4119 	case DIOCBEGINADDRS: {
4120 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4121 
4122 		PF_RULES_WLOCK();
4123 		pf_empty_kpool(&V_pf_pabuf);
4124 		pp->ticket = ++V_ticket_pabuf;
4125 		PF_RULES_WUNLOCK();
4126 		break;
4127 	}
4128 
4129 	case DIOCADDADDR: {
4130 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4131 		struct pf_kpooladdr	*pa;
4132 		struct pfi_kkif		*kif = NULL;
4133 
4134 #ifndef INET
4135 		if (pp->af == AF_INET) {
4136 			error = EAFNOSUPPORT;
4137 			break;
4138 		}
4139 #endif /* INET */
4140 #ifndef INET6
4141 		if (pp->af == AF_INET6) {
4142 			error = EAFNOSUPPORT;
4143 			break;
4144 		}
4145 #endif /* INET6 */
4146 		if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
4147 		    pp->addr.addr.type != PF_ADDR_DYNIFTL &&
4148 		    pp->addr.addr.type != PF_ADDR_TABLE) {
4149 			error = EINVAL;
4150 			break;
4151 		}
4152 		if (pp->addr.addr.p.dyn != NULL) {
4153 			error = EINVAL;
4154 			break;
4155 		}
4156 		pa = malloc(sizeof(*pa), M_PFRULE, M_WAITOK);
4157 		error = pf_pooladdr_to_kpooladdr(&pp->addr, pa);
4158 		if (error != 0) {
			/* Don't leak pa if the conversion failed. */
			free(pa, M_PFRULE);
4159 			break;
		}
4160 		if (pa->ifname[0])
4161 			kif = pf_kkif_create(M_WAITOK);
4162 		PF_RULES_WLOCK();
4163 		if (pp->ticket != V_ticket_pabuf) {
4164 			PF_RULES_WUNLOCK();
4165 			if (pa->ifname[0])
4166 				pf_kkif_free(kif);
4167 			free(pa, M_PFRULE);
4168 			error = EBUSY;
4169 			break;
4170 		}
4171 		if (pa->ifname[0]) {
4172 			pa->kif = pfi_kkif_attach(kif, pa->ifname);
4173 			kif = NULL;
4174 			pfi_kkif_ref(pa->kif);
4175 		} else
4176 			pa->kif = NULL;
4177 		if (pa->addr.type == PF_ADDR_DYNIFTL && ((error =
4178 		    pfi_dynaddr_setup(&pa->addr, pp->af)) != 0)) {
4179 			if (pa->ifname[0])
4180 				pfi_kkif_unref(pa->kif);
4181 			PF_RULES_WUNLOCK();
4182 			free(pa, M_PFRULE);
4183 			break;
4184 		}
4185 		TAILQ_INSERT_TAIL(&V_pf_pabuf, pa, entries);
4186 		PF_RULES_WUNLOCK();
4187 		break;
4188 	}
4189 
4190 	case DIOCGETADDRS: {
4191 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4192 		struct pf_kpool		*pool;
4193 		struct pf_kpooladdr	*pa;
4194 
4195 		pp->anchor[sizeof(pp->anchor) - 1] = 0;
4196 		pp->nr = 0;
4197 
4198 		PF_RULES_RLOCK();
4199 		pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
4200 		    pp->r_num, 0, 1, 0);
4201 		if (pool == NULL) {
4202 			PF_RULES_RUNLOCK();
4203 			error = EBUSY;
4204 			break;
4205 		}
4206 		TAILQ_FOREACH(pa, &pool->list, entries)
4207 			pp->nr++;
4208 		PF_RULES_RUNLOCK();
4209 		break;
4210 	}
4211 
4212 	case DIOCGETADDR: {
4213 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4214 		struct pf_kpool		*pool;
4215 		struct pf_kpooladdr	*pa;
4216 		u_int32_t		 nr = 0;
4217 
4218 		pp->anchor[sizeof(pp->anchor) - 1] = 0;
4219 
4220 		PF_RULES_RLOCK();
4221 		pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
4222 		    pp->r_num, 0, 1, 1);
4223 		if (pool == NULL) {
4224 			PF_RULES_RUNLOCK();
4225 			error = EBUSY;
4226 			break;
4227 		}
4228 		pa = TAILQ_FIRST(&pool->list);
4229 		while ((pa != NULL) && (nr < pp->nr)) {
4230 			pa = TAILQ_NEXT(pa, entries);
4231 			nr++;
4232 		}
4233 		if (pa == NULL) {
4234 			PF_RULES_RUNLOCK();
4235 			error = EBUSY;
4236 			break;
4237 		}
4238 		pf_kpooladdr_to_pooladdr(pa, &pp->addr);
4239 		pf_addr_copyout(&pp->addr.addr);
4240 		PF_RULES_RUNLOCK();
4241 		break;
4242 	}
4243 
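	/*
	 * DIOCCHANGEADDR: insert, replace or remove a single entry in an
	 * existing pool without reloading the ruleset.  The new entry is
	 * fully constructed before the rules lock is taken; ERROUT
	 * unwinds through DIOCCHANGEADDR_error on failure.
	 */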
4244 	case DIOCCHANGEADDR: {
4245 		struct pfioc_pooladdr	*pca = (struct pfioc_pooladdr *)addr;
4246 		struct pf_kpool		*pool;
4247 		struct pf_kpooladdr	*oldpa = NULL, *newpa = NULL;
4248 		struct pf_kruleset	*ruleset;
4249 		struct pfi_kkif		*kif = NULL;
4250 
4251 		pca->anchor[sizeof(pca->anchor) - 1] = 0;
4252 
4253 		if (pca->action < PF_CHANGE_ADD_HEAD ||
4254 		    pca->action > PF_CHANGE_REMOVE) {
4255 			error = EINVAL;
4256 			break;
4257 		}
4258 		if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
4259 		    pca->addr.addr.type != PF_ADDR_DYNIFTL &&
4260 		    pca->addr.addr.type != PF_ADDR_TABLE) {
4261 			error = EINVAL;
4262 			break;
4263 		}
4264 		if (pca->addr.addr.p.dyn != NULL) {
4265 			error = EINVAL;
4266 			break;
4267 		}
4268 
4269 		if (pca->action != PF_CHANGE_REMOVE) {
4270 #ifndef INET
4271 			if (pca->af == AF_INET) {
4272 				error = EAFNOSUPPORT;
4273 				break;
4274 			}
4275 #endif /* INET */
4276 #ifndef INET6
4277 			if (pca->af == AF_INET6) {
4278 				error = EAFNOSUPPORT;
4279 				break;
4280 			}
4281 #endif /* INET6 */
4282 			newpa = malloc(sizeof(*newpa), M_PFRULE, M_WAITOK);
4283 			bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
4284 			if (newpa->ifname[0])
4285 				kif = pf_kkif_create(M_WAITOK);
4286 			newpa->kif = NULL;
4287 		}
4288 #define	ERROUT(x)	ERROUT_IOCTL(DIOCCHANGEADDR_error, x)
4289 		PF_RULES_WLOCK();
4290 		ruleset = pf_find_kruleset(pca->anchor);
4291 		if (ruleset == NULL)
4292 			ERROUT(EBUSY);
4293 
4294 		pool = pf_get_kpool(pca->anchor, pca->ticket, pca->r_action,
4295 		    pca->r_num, pca->r_last, 1, 1);
4296 		if (pool == NULL)
4297 			ERROUT(EBUSY);
4298 
4299 		if (pca->action != PF_CHANGE_REMOVE) {
4300 			if (newpa->ifname[0]) {
4301 				newpa->kif = pfi_kkif_attach(kif, newpa->ifname);
4302 				pfi_kkif_ref(newpa->kif);
4303 				kif = NULL;
4304 			}
4305 
4306 			switch (newpa->addr.type) {
4307 			case PF_ADDR_DYNIFTL:
4308 				error = pfi_dynaddr_setup(&newpa->addr,
4309 				    pca->af);
4310 				break;
4311 			case PF_ADDR_TABLE:
4312 				newpa->addr.p.tbl = pfr_attach_table(ruleset,
4313 				    newpa->addr.v.tblname);
4314 				if (newpa->addr.p.tbl == NULL)
4315 					error = ENOMEM;
4316 				break;
4317 			}
4318 			if (error)
4319 				goto DIOCCHANGEADDR_error;
4320 		}
4321 
4322 		switch (pca->action) {
4323 		case PF_CHANGE_ADD_HEAD:
4324 			oldpa = TAILQ_FIRST(&pool->list);
4325 			break;
4326 		case PF_CHANGE_ADD_TAIL:
4327 			oldpa = TAILQ_LAST(&pool->list, pf_kpalist);
4328 			break;
4329 		default:
4330 			oldpa = TAILQ_FIRST(&pool->list);
4331 			for (int i = 0; oldpa && i < pca->nr; i++)
4332 				oldpa = TAILQ_NEXT(oldpa, entries);
4333 
4334 			if (oldpa == NULL)
4335 				ERROUT(EINVAL);
4336 		}
4337 
4338 		if (pca->action == PF_CHANGE_REMOVE) {
4339 			TAILQ_REMOVE(&pool->list, oldpa, entries);
4340 			switch (oldpa->addr.type) {
4341 			case PF_ADDR_DYNIFTL:
4342 				pfi_dynaddr_remove(oldpa->addr.p.dyn);
4343 				break;
4344 			case PF_ADDR_TABLE:
4345 				pfr_detach_table(oldpa->addr.p.tbl);
4346 				break;
4347 			}
4348 			if (oldpa->kif)
4349 				pfi_kkif_unref(oldpa->kif);
4350 			free(oldpa, M_PFRULE);
4351 		} else {
4352 			if (oldpa == NULL)
4353 				TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
4354 			else if (pca->action == PF_CHANGE_ADD_HEAD ||
4355 			    pca->action == PF_CHANGE_ADD_BEFORE)
4356 				TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
4357 			else
4358 				TAILQ_INSERT_AFTER(&pool->list, oldpa,
4359 				    newpa, entries);
4360 		}
4361 
4362 		pool->cur = TAILQ_FIRST(&pool->list);
4363 		PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, pca->af);
4364 		PF_RULES_WUNLOCK();
4365 		break;
4366 
4367 #undef ERROUT
4368 DIOCCHANGEADDR_error:
4369 		if (newpa != NULL) {
4370 			if (newpa->kif)
4371 				pfi_kkif_unref(newpa->kif);
4372 			free(newpa, M_PFRULE);
4373 		}
4374 		PF_RULES_WUNLOCK();
4375 		pf_kkif_free(kif);
4376 		break;
4377 	}
4378 
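	/*
	 * DIOCGETRULESETS: count the child anchors of the ruleset at
	 * pr->path.  The main ruleset has no anchor of its own, so its
	 * children are found by scanning the global anchor tree for
	 * parentless entries.
	 */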
4379 	case DIOCGETRULESETS: {
4380 		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
4381 		struct pf_kruleset	*ruleset;
4382 		struct pf_kanchor	*anchor;
4383 
4384 		pr->path[sizeof(pr->path) - 1] = 0;
4385 
4386 		PF_RULES_RLOCK();
4387 		if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
4388 			PF_RULES_RUNLOCK();
4389 			error = ENOENT;
4390 			break;
4391 		}
4392 		pr->nr = 0;
4393 		if (ruleset->anchor == NULL) {
4394 			/* XXX kludge for pf_main_ruleset */
4395 			RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
4396 				if (anchor->parent == NULL)
4397 					pr->nr++;
4398 		} else {
4399 			RB_FOREACH(anchor, pf_kanchor_node,
4400 			    &ruleset->anchor->children)
4401 				pr->nr++;
4402 		}
4403 		PF_RULES_RUNLOCK();
4404 		break;
4405 	}
4406 
4407 	case DIOCGETRULESET: {
4408 		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
4409 		struct pf_kruleset	*ruleset;
4410 		struct pf_kanchor	*anchor;
4411 		u_int32_t		 nr = 0;
4412 
4413 		pr->path[sizeof(pr->path) - 1] = 0;
4414 
4415 		PF_RULES_RLOCK();
4416 		if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
4417 			PF_RULES_RUNLOCK();
4418 			error = ENOENT;
4419 			break;
4420 		}
4421 		pr->name[0] = 0;
4422 		if (ruleset->anchor == NULL) {
4423 			/* XXX kludge for pf_main_ruleset */
4424 			RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
4425 				if (anchor->parent == NULL && nr++ == pr->nr) {
4426 					strlcpy(pr->name, anchor->name,
4427 					    sizeof(pr->name));
4428 					break;
4429 				}
4430 		} else {
4431 			RB_FOREACH(anchor, pf_kanchor_node,
4432 			    &ruleset->anchor->children)
4433 				if (nr++ == pr->nr) {
4434 					strlcpy(pr->name, anchor->name,
4435 					    sizeof(pr->name));
4436 					break;
4437 				}
4438 		}
4439 		if (!pr->name[0])
4440 			error = EBUSY;
4441 		PF_RULES_RUNLOCK();
4442 		break;
4443 	}
4444 
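	/*
	 * The DIOCR* table ioctls below share one pattern: validate the
	 * element size (pfrio_esize) and count (bounded by
	 * pf_ioctl_maxcount and checked for multiplication overflow),
	 * copy the array in, call the matching pfr_*() routine under the
	 * rules lock, and copy results back out where the operation
	 * provides feedback.
	 */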
4445 	case DIOCRCLRTABLES: {
4446 		struct pfioc_table *io = (struct pfioc_table *)addr;
4447 
4448 		if (io->pfrio_esize != 0) {
4449 			error = ENODEV;
4450 			break;
4451 		}
4452 		PF_RULES_WLOCK();
4453 		error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
4454 		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
4455 		PF_RULES_WUNLOCK();
4456 		break;
4457 	}
4458 
4459 	case DIOCRADDTABLES: {
4460 		struct pfioc_table *io = (struct pfioc_table *)addr;
4461 		struct pfr_table *pfrts;
4462 		size_t totlen;
4463 
4464 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4465 			error = ENODEV;
4466 			break;
4467 		}
4468 
4469 		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4470 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4471 			error = ENOMEM;
4472 			break;
4473 		}
4474 
4475 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4476 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4477 		    M_TEMP, M_WAITOK);
4478 		error = copyin(io->pfrio_buffer, pfrts, totlen);
4479 		if (error) {
4480 			free(pfrts, M_TEMP);
4481 			break;
4482 		}
4483 		PF_RULES_WLOCK();
4484 		error = pfr_add_tables(pfrts, io->pfrio_size,
4485 		    &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4486 		PF_RULES_WUNLOCK();
4487 		free(pfrts, M_TEMP);
4488 		break;
4489 	}
4490 
4491 	case DIOCRDELTABLES: {
4492 		struct pfioc_table *io = (struct pfioc_table *)addr;
4493 		struct pfr_table *pfrts;
4494 		size_t totlen;
4495 
4496 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4497 			error = ENODEV;
4498 			break;
4499 		}
4500 
4501 		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4502 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4503 			error = ENOMEM;
4504 			break;
4505 		}
4506 
4507 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4508 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4509 		    M_TEMP, M_WAITOK);
4510 		error = copyin(io->pfrio_buffer, pfrts, totlen);
4511 		if (error) {
4512 			free(pfrts, M_TEMP);
4513 			break;
4514 		}
4515 		PF_RULES_WLOCK();
4516 		error = pfr_del_tables(pfrts, io->pfrio_size,
4517 		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4518 		PF_RULES_WUNLOCK();
4519 		free(pfrts, M_TEMP);
4520 		break;
4521 	}
4522 
4523 	case DIOCRGETTABLES: {
4524 		struct pfioc_table *io = (struct pfioc_table *)addr;
4525 		struct pfr_table *pfrts;
4526 		size_t totlen;
4527 		int n;
4528 
4529 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4530 			error = ENODEV;
4531 			break;
4532 		}
4533 		PF_RULES_RLOCK();
4534 		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4535 		if (n < 0) {
4536 			PF_RULES_RUNLOCK();
4537 			error = EINVAL;
4538 			break;
4539 		}
4540 		io->pfrio_size = min(io->pfrio_size, n);
4541 
4542 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4543 
4544 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4545 		    M_TEMP, M_NOWAIT | M_ZERO);
4546 		if (pfrts == NULL) {
4547 			error = ENOMEM;
4548 			PF_RULES_RUNLOCK();
4549 			break;
4550 		}
4551 		error = pfr_get_tables(&io->pfrio_table, pfrts,
4552 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4553 		PF_RULES_RUNLOCK();
4554 		if (error == 0)
4555 			error = copyout(pfrts, io->pfrio_buffer, totlen);
4556 		free(pfrts, M_TEMP);
4557 		break;
4558 	}
4559 
4560 	case DIOCRGETTSTATS: {
4561 		struct pfioc_table *io = (struct pfioc_table *)addr;
4562 		struct pfr_tstats *pfrtstats;
4563 		size_t totlen;
4564 		int n;
4565 
4566 		if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
4567 			error = ENODEV;
4568 			break;
4569 		}
4570 		PF_TABLE_STATS_LOCK();
4571 		PF_RULES_RLOCK();
4572 		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4573 		if (n < 0) {
4574 			PF_RULES_RUNLOCK();
4575 			PF_TABLE_STATS_UNLOCK();
4576 			error = EINVAL;
4577 			break;
4578 		}
4579 		io->pfrio_size = min(io->pfrio_size, n);
4580 
4581 		totlen = io->pfrio_size * sizeof(struct pfr_tstats);
4582 		pfrtstats = mallocarray(io->pfrio_size,
4583 		    sizeof(struct pfr_tstats), M_TEMP, M_NOWAIT | M_ZERO);
4584 		if (pfrtstats == NULL) {
4585 			error = ENOMEM;
4586 			PF_RULES_RUNLOCK();
4587 			PF_TABLE_STATS_UNLOCK();
4588 			break;
4589 		}
4590 		error = pfr_get_tstats(&io->pfrio_table, pfrtstats,
4591 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4592 		PF_RULES_RUNLOCK();
4593 		PF_TABLE_STATS_UNLOCK();
4594 		if (error == 0)
4595 			error = copyout(pfrtstats, io->pfrio_buffer, totlen);
4596 		free(pfrtstats, M_TEMP);
4597 		break;
4598 	}
4599 
4600 	case DIOCRCLRTSTATS: {
4601 		struct pfioc_table *io = (struct pfioc_table *)addr;
4602 		struct pfr_table *pfrts;
4603 		size_t totlen;
4604 
4605 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4606 			error = ENODEV;
4607 			break;
4608 		}
4609 
4610 		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4611 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4612 			/* We used to count tables and use the minimum required
4613 			 * size, so we didn't fail on overly large requests.
4614 			 * Keep doing so. */
4615 			io->pfrio_size = pf_ioctl_maxcount;
4616 			break;
4617 		}
4618 
4619 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4620 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4621 		    M_TEMP, M_WAITOK);
4622 		error = copyin(io->pfrio_buffer, pfrts, totlen);
4623 		if (error) {
4624 			free(pfrts, M_TEMP);
4625 			break;
4626 		}
4627 
4628 		PF_TABLE_STATS_LOCK();
4629 		PF_RULES_RLOCK();
4630 		error = pfr_clr_tstats(pfrts, io->pfrio_size,
4631 		    &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4632 		PF_RULES_RUNLOCK();
4633 		PF_TABLE_STATS_UNLOCK();
4634 		free(pfrts, M_TEMP);
4635 		break;
4636 	}
4637 
4638 	case DIOCRSETTFLAGS: {
4639 		struct pfioc_table *io = (struct pfioc_table *)addr;
4640 		struct pfr_table *pfrts;
4641 		size_t totlen;
4642 		int n;
4643 
4644 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4645 			error = ENODEV;
4646 			break;
4647 		}
4648 
4649 		PF_RULES_RLOCK();
4650 		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4651 		if (n < 0) {
4652 			PF_RULES_RUNLOCK();
4653 			error = EINVAL;
4654 			break;
4655 		}
4656 
4657 		io->pfrio_size = min(io->pfrio_size, n);
4658 		PF_RULES_RUNLOCK();
4659 
4660 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4661 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4662 		    M_TEMP, M_WAITOK);
4663 		error = copyin(io->pfrio_buffer, pfrts, totlen);
4664 		if (error) {
4665 			free(pfrts, M_TEMP);
4666 			break;
4667 		}
4668 		PF_RULES_WLOCK();
4669 		error = pfr_set_tflags(pfrts, io->pfrio_size,
4670 		    io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
4671 		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4672 		PF_RULES_WUNLOCK();
4673 		free(pfrts, M_TEMP);
4674 		break;
4675 	}
4676 
4677 	case DIOCRCLRADDRS: {
4678 		struct pfioc_table *io = (struct pfioc_table *)addr;
4679 
4680 		if (io->pfrio_esize != 0) {
4681 			error = ENODEV;
4682 			break;
4683 		}
4684 		PF_RULES_WLOCK();
4685 		error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
4686 		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
4687 		PF_RULES_WUNLOCK();
4688 		break;
4689 	}
4690 
4691 	case DIOCRADDADDRS: {
4692 		struct pfioc_table *io = (struct pfioc_table *)addr;
4693 		struct pfr_addr *pfras;
4694 		size_t totlen;
4695 
4696 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4697 			error = ENODEV;
4698 			break;
4699 		}
4700 		if (io->pfrio_size < 0 ||
4701 		    io->pfrio_size > pf_ioctl_maxcount ||
4702 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4703 			error = EINVAL;
4704 			break;
4705 		}
4706 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
4707 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4708 		    M_TEMP, M_WAITOK);
4709 		error = copyin(io->pfrio_buffer, pfras, totlen);
4710 		if (error) {
4711 			free(pfras, M_TEMP);
4712 			break;
4713 		}
4714 		PF_RULES_WLOCK();
4715 		error = pfr_add_addrs(&io->pfrio_table, pfras,
4716 		    io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
4717 		    PFR_FLAG_USERIOCTL);
4718 		PF_RULES_WUNLOCK();
4719 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
4720 			error = copyout(pfras, io->pfrio_buffer, totlen);
4721 		free(pfras, M_TEMP);
4722 		break;
4723 	}
4724 
4725 	case DIOCRDELADDRS: {
4726 		struct pfioc_table *io = (struct pfioc_table *)addr;
4727 		struct pfr_addr *pfras;
4728 		size_t totlen;
4729 
4730 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4731 			error = ENODEV;
4732 			break;
4733 		}
4734 		if (io->pfrio_size < 0 ||
4735 		    io->pfrio_size > pf_ioctl_maxcount ||
4736 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4737 			error = EINVAL;
4738 			break;
4739 		}
4740 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
4741 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4742 		    M_TEMP, M_WAITOK);
4743 		error = copyin(io->pfrio_buffer, pfras, totlen);
4744 		if (error) {
4745 			free(pfras, M_TEMP);
4746 			break;
4747 		}
4748 		PF_RULES_WLOCK();
4749 		error = pfr_del_addrs(&io->pfrio_table, pfras,
4750 		    io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
4751 		    PFR_FLAG_USERIOCTL);
4752 		PF_RULES_WUNLOCK();
4753 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
4754 			error = copyout(pfras, io->pfrio_buffer, totlen);
4755 		free(pfras, M_TEMP);
4756 		break;
4757 	}
4758 
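	/*
	 * DIOCRSETADDRS: replace a table's contents wholesale.  The
	 * temporary buffer is sized with max(pfrio_size, pfrio_size2)
	 * because the per-entry feedback written back to userland can be
	 * larger than the list being loaded.
	 */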
4759 	case DIOCRSETADDRS: {
4760 		struct pfioc_table *io = (struct pfioc_table *)addr;
4761 		struct pfr_addr *pfras;
4762 		size_t totlen, count;
4763 
4764 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4765 			error = ENODEV;
4766 			break;
4767 		}
4768 		if (io->pfrio_size < 0 || io->pfrio_size2 < 0) {
4769 			error = EINVAL;
4770 			break;
4771 		}
4772 		count = max(io->pfrio_size, io->pfrio_size2);
4773 		if (count > pf_ioctl_maxcount ||
4774 		    WOULD_OVERFLOW(count, sizeof(struct pfr_addr))) {
4775 			error = EINVAL;
4776 			break;
4777 		}
4778 		totlen = count * sizeof(struct pfr_addr);
4779 		pfras = mallocarray(count, sizeof(struct pfr_addr), M_TEMP,
4780 		    M_WAITOK);
4781 		error = copyin(io->pfrio_buffer, pfras, totlen);
4782 		if (error) {
4783 			free(pfras, M_TEMP);
4784 			break;
4785 		}
4786 		PF_RULES_WLOCK();
4787 		error = pfr_set_addrs(&io->pfrio_table, pfras,
4788 		    io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
4789 		    &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
4790 		    PFR_FLAG_USERIOCTL, 0);
4791 		PF_RULES_WUNLOCK();
4792 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
4793 			error = copyout(pfras, io->pfrio_buffer, totlen);
4794 		free(pfras, M_TEMP);
4795 		break;
4796 	}
4797 
4798 	case DIOCRGETADDRS: {
4799 		struct pfioc_table *io = (struct pfioc_table *)addr;
4800 		struct pfr_addr *pfras;
4801 		size_t totlen;
4802 
4803 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4804 			error = ENODEV;
4805 			break;
4806 		}
4807 		if (io->pfrio_size < 0 ||
4808 		    io->pfrio_size > pf_ioctl_maxcount ||
4809 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4810 			error = EINVAL;
4811 			break;
4812 		}
4813 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
4814 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4815 		    M_TEMP, M_WAITOK | M_ZERO);
4816 		PF_RULES_RLOCK();
4817 		error = pfr_get_addrs(&io->pfrio_table, pfras,
4818 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4819 		PF_RULES_RUNLOCK();
4820 		if (error == 0)
4821 			error = copyout(pfras, io->pfrio_buffer, totlen);
4822 		free(pfras, M_TEMP);
4823 		break;
4824 	}
4825 
4826 	case DIOCRGETASTATS: {
4827 		struct pfioc_table *io = (struct pfioc_table *)addr;
4828 		struct pfr_astats *pfrastats;
4829 		size_t totlen;
4830 
4831 		if (io->pfrio_esize != sizeof(struct pfr_astats)) {
4832 			error = ENODEV;
4833 			break;
4834 		}
4835 		if (io->pfrio_size < 0 ||
4836 		    io->pfrio_size > pf_ioctl_maxcount ||
4837 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_astats))) {
4838 			error = EINVAL;
4839 			break;
4840 		}
4841 		totlen = io->pfrio_size * sizeof(struct pfr_astats);
4842 		pfrastats = mallocarray(io->pfrio_size,
4843 		    sizeof(struct pfr_astats), M_TEMP, M_WAITOK | M_ZERO);
4844 		PF_RULES_RLOCK();
4845 		error = pfr_get_astats(&io->pfrio_table, pfrastats,
4846 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4847 		PF_RULES_RUNLOCK();
4848 		if (error == 0)
4849 			error = copyout(pfrastats, io->pfrio_buffer, totlen);
4850 		free(pfrastats, M_TEMP);
4851 		break;
4852 	}
4853 
4854 	case DIOCRCLRASTATS: {
4855 		struct pfioc_table *io = (struct pfioc_table *)addr;
4856 		struct pfr_addr *pfras;
4857 		size_t totlen;
4858 
4859 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4860 			error = ENODEV;
4861 			break;
4862 		}
4863 		if (io->pfrio_size < 0 ||
4864 		    io->pfrio_size > pf_ioctl_maxcount ||
4865 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4866 			error = EINVAL;
4867 			break;
4868 		}
4869 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
4870 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4871 		    M_TEMP, M_WAITOK);
4872 		error = copyin(io->pfrio_buffer, pfras, totlen);
4873 		if (error) {
4874 			free(pfras, M_TEMP);
4875 			break;
4876 		}
4877 		PF_RULES_WLOCK();
4878 		error = pfr_clr_astats(&io->pfrio_table, pfras,
4879 		    io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
4880 		    PFR_FLAG_USERIOCTL);
4881 		PF_RULES_WUNLOCK();
4882 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
4883 			error = copyout(pfras, io->pfrio_buffer, totlen);
4884 		free(pfras, M_TEMP);
4885 		break;
4886 	}
4887 
4888 	case DIOCRTSTADDRS: {
4889 		struct pfioc_table *io = (struct pfioc_table *)addr;
4890 		struct pfr_addr *pfras;
4891 		size_t totlen;
4892 
4893 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4894 			error = ENODEV;
4895 			break;
4896 		}
4897 		if (io->pfrio_size < 0 ||
4898 		    io->pfrio_size > pf_ioctl_maxcount ||
4899 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4900 			error = EINVAL;
4901 			break;
4902 		}
4903 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
4904 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4905 		    M_TEMP, M_WAITOK);
4906 		error = copyin(io->pfrio_buffer, pfras, totlen);
4907 		if (error) {
4908 			free(pfras, M_TEMP);
4909 			break;
4910 		}
4911 		PF_RULES_RLOCK();
4912 		error = pfr_tst_addrs(&io->pfrio_table, pfras,
4913 		    io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
4914 		    PFR_FLAG_USERIOCTL);
4915 		PF_RULES_RUNLOCK();
4916 		if (error == 0)
4917 			error = copyout(pfras, io->pfrio_buffer, totlen);
4918 		free(pfras, M_TEMP);
4919 		break;
4920 	}
4921 
4922 	case DIOCRINADEFINE: {
4923 		struct pfioc_table *io = (struct pfioc_table *)addr;
4924 		struct pfr_addr *pfras;
4925 		size_t totlen;
4926 
4927 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4928 			error = ENODEV;
4929 			break;
4930 		}
4931 		if (io->pfrio_size < 0 ||
4932 		    io->pfrio_size > pf_ioctl_maxcount ||
4933 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4934 			error = EINVAL;
4935 			break;
4936 		}
4937 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
4938 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4939 		    M_TEMP, M_WAITOK);
4940 		error = copyin(io->pfrio_buffer, pfras, totlen);
4941 		if (error) {
4942 			free(pfras, M_TEMP);
4943 			break;
4944 		}
4945 		PF_RULES_WLOCK();
4946 		error = pfr_ina_define(&io->pfrio_table, pfras,
4947 		    io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
4948 		    io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4949 		PF_RULES_WUNLOCK();
4950 		free(pfras, M_TEMP);
4951 		break;
4952 	}
4953 
4954 	case DIOCOSFPADD: {
4955 		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
4956 		PF_RULES_WLOCK();
4957 		error = pf_osfp_add(io);
4958 		PF_RULES_WUNLOCK();
4959 		break;
4960 	}
4961 
4962 	case DIOCOSFPGET: {
4963 		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
4964 		PF_RULES_RLOCK();
4965 		error = pf_osfp_get(io);
4966 		PF_RULES_RUNLOCK();
4967 		break;
4968 	}
4969 
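	/*
	 * DIOCXBEGIN/DIOCXROLLBACK/DIOCXCOMMIT implement atomic ruleset
	 * loads: BEGIN opens an inactive copy of each listed ruleset and
	 * returns a ticket per entry, ROLLBACK discards the copies, and
	 * COMMIT swaps them in only after every ticket has been
	 * revalidated.
	 */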
4970 	case DIOCXBEGIN: {
4971 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
4972 		struct pfioc_trans_e	*ioes, *ioe;
4973 		size_t			 totlen;
4974 		int			 i;
4975 
4976 		if (io->esize != sizeof(*ioe)) {
4977 			error = ENODEV;
4978 			break;
4979 		}
4980 		if (io->size < 0 ||
4981 		    io->size > pf_ioctl_maxcount ||
4982 		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
4983 			error = EINVAL;
4984 			break;
4985 		}
4986 		totlen = sizeof(struct pfioc_trans_e) * io->size;
4987 		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
4988 		    M_TEMP, M_WAITOK);
4989 		error = copyin(io->array, ioes, totlen);
4990 		if (error) {
4991 			free(ioes, M_TEMP);
4992 			break;
4993 		}
4994 		/* Ensure there are no more Ethernet rules to clean up. */
4995 		NET_EPOCH_DRAIN_CALLBACKS();
4996 		PF_RULES_WLOCK();
4997 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
4998 			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
4999 			switch (ioe->rs_num) {
5000 			case PF_RULESET_ETH:
5001 				if ((error = pf_begin_eth(&ioe->ticket, ioe->anchor))) {
5002 					PF_RULES_WUNLOCK();
5003 					free(ioes, M_TEMP);
5004 					goto fail;
5005 				}
5006 				break;
5007 #ifdef ALTQ
5008 			case PF_RULESET_ALTQ:
5009 				if (ioe->anchor[0]) {
5010 					PF_RULES_WUNLOCK();
5011 					free(ioes, M_TEMP);
5012 					error = EINVAL;
5013 					goto fail;
5014 				}
5015 				if ((error = pf_begin_altq(&ioe->ticket))) {
5016 					PF_RULES_WUNLOCK();
5017 					free(ioes, M_TEMP);
5018 					goto fail;
5019 				}
5020 				break;
5021 #endif /* ALTQ */
5022 			case PF_RULESET_TABLE:
5023 			    {
5024 				struct pfr_table table;
5025 
5026 				bzero(&table, sizeof(table));
5027 				strlcpy(table.pfrt_anchor, ioe->anchor,
5028 				    sizeof(table.pfrt_anchor));
5029 				if ((error = pfr_ina_begin(&table,
5030 				    &ioe->ticket, NULL, 0))) {
5031 					PF_RULES_WUNLOCK();
5032 					free(ioes, M_TEMP);
5033 					goto fail;
5034 				}
5035 				break;
5036 			    }
5037 			default:
5038 				if ((error = pf_begin_rules(&ioe->ticket,
5039 				    ioe->rs_num, ioe->anchor))) {
5040 					PF_RULES_WUNLOCK();
5041 					free(ioes, M_TEMP);
5042 					goto fail;
5043 				}
5044 				break;
5045 			}
5046 		}
5047 		PF_RULES_WUNLOCK();
5048 		error = copyout(ioes, io->array, totlen);
5049 		free(ioes, M_TEMP);
5050 		break;
5051 	}
5052 
5053 	case DIOCXROLLBACK: {
5054 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
5055 		struct pfioc_trans_e	*ioe, *ioes;
5056 		size_t			 totlen;
5057 		int			 i;
5058 
5059 		if (io->esize != sizeof(*ioe)) {
5060 			error = ENODEV;
5061 			break;
5062 		}
5063 		if (io->size < 0 ||
5064 		    io->size > pf_ioctl_maxcount ||
5065 		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
5066 			error = EINVAL;
5067 			break;
5068 		}
5069 		totlen = sizeof(struct pfioc_trans_e) * io->size;
5070 		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
5071 		    M_TEMP, M_WAITOK);
5072 		error = copyin(io->array, ioes, totlen);
5073 		if (error) {
5074 			free(ioes, M_TEMP);
5075 			break;
5076 		}
5077 		PF_RULES_WLOCK();
5078 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5079 			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
5080 			switch (ioe->rs_num) {
5081 			case PF_RULESET_ETH:
5082 				if ((error = pf_rollback_eth(ioe->ticket,
5083 				    ioe->anchor))) {
5084 					PF_RULES_WUNLOCK();
5085 					free(ioes, M_TEMP);
5086 					goto fail; /* really bad */
5087 				}
5088 				break;
5089 #ifdef ALTQ
5090 			case PF_RULESET_ALTQ:
5091 				if (ioe->anchor[0]) {
5092 					PF_RULES_WUNLOCK();
5093 					free(ioes, M_TEMP);
5094 					error = EINVAL;
5095 					goto fail;
5096 				}
5097 				if ((error = pf_rollback_altq(ioe->ticket))) {
5098 					PF_RULES_WUNLOCK();
5099 					free(ioes, M_TEMP);
5100 					goto fail; /* really bad */
5101 				}
5102 				break;
5103 #endif /* ALTQ */
5104 			case PF_RULESET_TABLE:
5105 			    {
5106 				struct pfr_table table;
5107 
5108 				bzero(&table, sizeof(table));
5109 				strlcpy(table.pfrt_anchor, ioe->anchor,
5110 				    sizeof(table.pfrt_anchor));
5111 				if ((error = pfr_ina_rollback(&table,
5112 				    ioe->ticket, NULL, 0))) {
5113 					PF_RULES_WUNLOCK();
5114 					free(ioes, M_TEMP);
5115 					goto fail; /* really bad */
5116 				}
5117 				break;
5118 			    }
5119 			default:
5120 				if ((error = pf_rollback_rules(ioe->ticket,
5121 				    ioe->rs_num, ioe->anchor))) {
5122 					PF_RULES_WUNLOCK();
5123 					free(ioes, M_TEMP);
5124 					goto fail; /* really bad */
5125 				}
5126 				break;
5127 			}
5128 		}
5129 		PF_RULES_WUNLOCK();
5130 		free(ioes, M_TEMP);
5131 		break;
5132 	}
5133 
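	/*
	 * DIOCXCOMMIT runs in two passes under a single write lock:
	 * first every ticket is checked so the commit cannot fail half
	 * way through, then the inactive rulesets are swapped in.  An
	 * error in the second pass leaves the rulesets inconsistent,
	 * hence the "really bad" markers.
	 */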
5134 	case DIOCXCOMMIT: {
5135 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
5136 		struct pfioc_trans_e	*ioe, *ioes;
5137 		struct pf_kruleset	*rs;
5138 		struct pf_keth_ruleset	*ers;
5139 		size_t			 totlen;
5140 		int			 i;
5141 
5142 		if (io->esize != sizeof(*ioe)) {
5143 			error = ENODEV;
5144 			break;
5145 		}
5146 
5147 		if (io->size < 0 ||
5148 		    io->size > pf_ioctl_maxcount ||
5149 		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
5150 			error = EINVAL;
5151 			break;
5152 		}
5153 
5154 		totlen = sizeof(struct pfioc_trans_e) * io->size;
5155 		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
5156 		    M_TEMP, M_WAITOK);
5157 		error = copyin(io->array, ioes, totlen);
5158 		if (error) {
5159 			free(ioes, M_TEMP);
5160 			break;
5161 		}
5162 		PF_RULES_WLOCK();
5163 		/* First pass: make sure everything will succeed. */
5164 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5165 			ioe->anchor[sizeof(ioe->anchor) - 1] = 0;
5166 			switch (ioe->rs_num) {
5167 			case PF_RULESET_ETH:
5168 				ers = pf_find_keth_ruleset(ioe->anchor);
5169 				if (ers == NULL || ioe->ticket == 0 ||
5170 				    ioe->ticket != ers->inactive.ticket) {
5171 					PF_RULES_WUNLOCK();
5172 					free(ioes, M_TEMP);
5173 					error = EINVAL;
5174 					goto fail;
5175 				}
5176 				break;
5177 #ifdef ALTQ
5178 			case PF_RULESET_ALTQ:
5179 				if (ioe->anchor[0]) {
5180 					PF_RULES_WUNLOCK();
5181 					free(ioes, M_TEMP);
5182 					error = EINVAL;
5183 					goto fail;
5184 				}
5185 				if (!V_altqs_inactive_open || ioe->ticket !=
5186 				    V_ticket_altqs_inactive) {
5187 					PF_RULES_WUNLOCK();
5188 					free(ioes, M_TEMP);
5189 					error = EBUSY;
5190 					goto fail;
5191 				}
5192 				break;
5193 #endif /* ALTQ */
5194 			case PF_RULESET_TABLE:
5195 				rs = pf_find_kruleset(ioe->anchor);
5196 				if (rs == NULL || !rs->topen || ioe->ticket !=
5197 				    rs->tticket) {
5198 					PF_RULES_WUNLOCK();
5199 					free(ioes, M_TEMP);
5200 					error = EBUSY;
5201 					goto fail;
5202 				}
5203 				break;
5204 			default:
5205 				if (ioe->rs_num < 0 || ioe->rs_num >=
5206 				    PF_RULESET_MAX) {
5207 					PF_RULES_WUNLOCK();
5208 					free(ioes, M_TEMP);
5209 					error = EINVAL;
5210 					goto fail;
5211 				}
5212 				rs = pf_find_kruleset(ioe->anchor);
5213 				if (rs == NULL ||
5214 				    !rs->rules[ioe->rs_num].inactive.open ||
5215 				    rs->rules[ioe->rs_num].inactive.ticket !=
5216 				    ioe->ticket) {
5217 					PF_RULES_WUNLOCK();
5218 					free(ioes, M_TEMP);
5219 					error = EBUSY;
5220 					goto fail;
5221 				}
5222 				break;
5223 			}
5224 		}
5225 		/* Now do the commit - no errors should happen here. */
5226 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5227 			switch (ioe->rs_num) {
5228 			case PF_RULESET_ETH:
5229 				if ((error = pf_commit_eth(ioe->ticket, ioe->anchor))) {
5230 					PF_RULES_WUNLOCK();
5231 					free(ioes, M_TEMP);
5232 					goto fail; /* really bad */
5233 				}
5234 				break;
5235 #ifdef ALTQ
5236 			case PF_RULESET_ALTQ:
5237 				if ((error = pf_commit_altq(ioe->ticket))) {
5238 					PF_RULES_WUNLOCK();
5239 					free(ioes, M_TEMP);
5240 					goto fail; /* really bad */
5241 				}
5242 				break;
5243 #endif /* ALTQ */
5244 			case PF_RULESET_TABLE:
5245 			    {
5246 				struct pfr_table table;
5247 
5248 				bzero(&table, sizeof(table));
5249 				(void)strlcpy(table.pfrt_anchor, ioe->anchor,
5250 				    sizeof(table.pfrt_anchor));
5251 				if ((error = pfr_ina_commit(&table,
5252 				    ioe->ticket, NULL, NULL, 0))) {
5253 					PF_RULES_WUNLOCK();
5254 					free(ioes, M_TEMP);
5255 					goto fail; /* really bad */
5256 				}
5257 				break;
5258 			    }
5259 			default:
5260 				if ((error = pf_commit_rules(ioe->ticket,
5261 				    ioe->rs_num, ioe->anchor))) {
5262 					PF_RULES_WUNLOCK();
5263 					free(ioes, M_TEMP);
5264 					goto fail; /* really bad */
5265 				}
5266 				break;
5267 			}
5268 		}
5269 		PF_RULES_WUNLOCK();
5270 
5271 		/* Only hook into Ethernet traffic if we've got rules for it. */
5272 		if (! TAILQ_EMPTY(V_pf_keth->active.rules))
5273 			hook_pf_eth();
5274 		else
5275 			dehook_pf_eth();
5276 
5277 		free(ioes, M_TEMP);
5278 		break;
5279 	}
5280 
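	/*
	 * DIOCGETSRCNODES: two-pass export of the source-node table.
	 * The first pass counts nodes so a zero-length request can be
	 * answered with the required buffer size; the second copies as
	 * many nodes as fit into psn_len.
	 */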
5281 	case DIOCGETSRCNODES: {
5282 		struct pfioc_src_nodes	*psn = (struct pfioc_src_nodes *)addr;
5283 		struct pf_srchash	*sh;
5284 		struct pf_ksrc_node	*n;
5285 		struct pf_src_node	*p, *pstore;
5286 		uint32_t		 i, nr = 0;
5287 
5288 		for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
5289 		    i++, sh++) {
5290 			PF_HASHROW_LOCK(sh);
5291 			LIST_FOREACH(n, &sh->nodes, entry)
5292 				nr++;
5293 			PF_HASHROW_UNLOCK(sh);
5294 		}
5295 
5296 		psn->psn_len = min(psn->psn_len,
5297 		    sizeof(struct pf_src_node) * nr);
5298 
5299 		if (psn->psn_len == 0) {
5300 			psn->psn_len = sizeof(struct pf_src_node) * nr;
5301 			break;
5302 		}
5303 
5304 		nr = 0;
5305 
5306 		p = pstore = malloc(psn->psn_len, M_TEMP, M_WAITOK | M_ZERO);
5307 		for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
5308 		    i++, sh++) {
5309 		    PF_HASHROW_LOCK(sh);
5310 		    LIST_FOREACH(n, &sh->nodes, entry) {
5311 
5312 			if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
5313 				break;
5314 
5315 			pf_src_node_copy(n, p);
5316 
5317 			p++;
5318 			nr++;
5319 		    }
5320 		    PF_HASHROW_UNLOCK(sh);
5321 		}
5322 		error = copyout(pstore, psn->psn_src_nodes,
5323 		    sizeof(struct pf_src_node) * nr);
5324 		if (error) {
5325 			free(pstore, M_TEMP);
5326 			break;
5327 		}
5328 		psn->psn_len = sizeof(struct pf_src_node) * nr;
5329 		free(pstore, M_TEMP);
5330 		break;
5331 	}
5332 
5333 	case DIOCCLRSRCNODES: {
5334 		pf_clear_srcnodes(NULL);
5335 		pf_purge_expired_src_nodes();
5336 		break;
5337 	}
5338 
5339 	case DIOCKILLSRCNODES:
5340 		pf_kill_srcnodes((struct pfioc_src_node_kill *)addr);
5341 		break;
5342 
5343 #ifdef COMPAT_FREEBSD13
5344 	case DIOCKEEPCOUNTERS_FREEBSD13:
5345 #endif
5346 	case DIOCKEEPCOUNTERS:
5347 		error = pf_keepcounters((struct pfioc_nv *)addr);
5348 		break;
5349 
5350 	case DIOCGETSYNCOOKIES:
5351 		error = pf_get_syncookies((struct pfioc_nv *)addr);
5352 		break;
5353 
5354 	case DIOCSETSYNCOOKIES:
5355 		error = pf_set_syncookies((struct pfioc_nv *)addr);
5356 		break;
5357 
5358 	case DIOCSETHOSTID: {
5359 		u_int32_t	*hostid = (u_int32_t *)addr;
5360 
5361 		PF_RULES_WLOCK();
5362 		if (*hostid == 0)
5363 			V_pf_status.hostid = arc4random();
5364 		else
5365 			V_pf_status.hostid = *hostid;
5366 		PF_RULES_WUNLOCK();
5367 		break;
5368 	}
5369 
5370 	case DIOCOSFPFLUSH:
5371 		PF_RULES_WLOCK();
5372 		pf_osfp_flush();
5373 		PF_RULES_WUNLOCK();
5374 		break;
5375 
5376 	case DIOCIGETIFACES: {
5377 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
5378 		struct pfi_kif *ifstore;
5379 		size_t bufsiz;
5380 
5381 		if (io->pfiio_esize != sizeof(struct pfi_kif)) {
5382 			error = ENODEV;
5383 			break;
5384 		}
5385 
5386 		if (io->pfiio_size < 0 ||
5387 		    io->pfiio_size > pf_ioctl_maxcount ||
5388 		    WOULD_OVERFLOW(io->pfiio_size, sizeof(struct pfi_kif))) {
5389 			error = EINVAL;
5390 			break;
5391 		}
5392 
5393 		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
5394 
5395 		bufsiz = io->pfiio_size * sizeof(struct pfi_kif);
5396 		ifstore = mallocarray(io->pfiio_size, sizeof(struct pfi_kif),
5397 		    M_TEMP, M_WAITOK | M_ZERO);
5398 
5399 		PF_RULES_RLOCK();
5400 		pfi_get_ifaces(io->pfiio_name, ifstore, &io->pfiio_size);
5401 		PF_RULES_RUNLOCK();
5402 		error = copyout(ifstore, io->pfiio_buffer, bufsiz);
5403 		free(ifstore, M_TEMP);
5404 		break;
5405 	}
5406 
5407 	case DIOCSETIFFLAG: {
5408 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
5409 
5410 		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
5411 
5412 		PF_RULES_WLOCK();
5413 		error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
5414 		PF_RULES_WUNLOCK();
5415 		break;
5416 	}
5417 
5418 	case DIOCCLRIFFLAG: {
5419 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
5420 
5421 		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
5422 
5423 		PF_RULES_WLOCK();
5424 		error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
5425 		PF_RULES_WUNLOCK();
5426 		break;
5427 	}
5428 
5429 	case DIOCSETREASS: {
5430 		u_int32_t	*reass = (u_int32_t *)addr;
5431 
5432 		V_pf_status.reass = *reass & (PF_REASS_ENABLED|PF_REASS_NODF);
5433 		/* Clearing the DF flag without reassembly enabled is not a
5434 		 * valid combination; disable reassembly in that case. */
5435 		if (!(V_pf_status.reass & PF_REASS_ENABLED))
5436 			V_pf_status.reass = 0;
5437 		break;
5438 	}
5439 
5440 	default:
5441 		error = ENODEV;
5442 		break;
5443 	}
5444 fail:
5445 	CURVNET_RESTORE();
5446 
5447 #undef ERROUT_IOCTL
5448 
5449 	return (error);
5450 }
5451 
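/*
 * Export a kernel state to the wire format used by pfsync.  The layout
 * of the trailing fields depends on the negotiated message version;
 * an unknown version is a programming error and panics.
 */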
5452 void
5453 pfsync_state_export(union pfsync_state_union *sp, struct pf_kstate *st, int msg_version)
5454 {
5455 	bzero(sp, sizeof(union pfsync_state_union));
5456 
5457 	/* copy from state key */
5458 	sp->pfs_1301.key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
5459 	sp->pfs_1301.key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
5460 	sp->pfs_1301.key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
5461 	sp->pfs_1301.key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
5462 	sp->pfs_1301.key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
5463 	sp->pfs_1301.key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
5464 	sp->pfs_1301.key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
5465 	sp->pfs_1301.key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
5466 	sp->pfs_1301.proto = st->key[PF_SK_WIRE]->proto;
5467 	sp->pfs_1301.af = st->key[PF_SK_WIRE]->af;
5468 
5469 	/* copy from state */
5470 	strlcpy(sp->pfs_1301.ifname, st->kif->pfik_name, sizeof(sp->pfs_1301.ifname));
5471 	bcopy(&st->rt_addr, &sp->pfs_1301.rt_addr, sizeof(sp->pfs_1301.rt_addr));
5472 	sp->pfs_1301.creation = htonl(time_uptime - st->creation);
5473 	sp->pfs_1301.expire = pf_state_expires(st);
5474 	if (sp->pfs_1301.expire <= time_uptime)
5475 		sp->pfs_1301.expire = htonl(0);
5476 	else
5477 		sp->pfs_1301.expire = htonl(sp->pfs_1301.expire - time_uptime);
5478 
5479 	sp->pfs_1301.direction = st->direction;
5480 	sp->pfs_1301.log = st->act.log;
5481 	sp->pfs_1301.timeout = st->timeout;
5482 
5483 	switch (msg_version) {
5484 		case PFSYNC_MSG_VERSION_1301:
5485 			sp->pfs_1301.state_flags = st->state_flags;
5486 			break;
5487 		case PFSYNC_MSG_VERSION_1400:
5488 			sp->pfs_1400.state_flags = htons(st->state_flags);
5489 			sp->pfs_1400.qid = htons(st->act.qid);
5490 			sp->pfs_1400.pqid = htons(st->act.pqid);
5491 			sp->pfs_1400.dnpipe = htons(st->act.dnpipe);
5492 			sp->pfs_1400.dnrpipe = htons(st->act.dnrpipe);
5493 			sp->pfs_1400.rtableid = htonl(st->act.rtableid);
5494 			sp->pfs_1400.min_ttl = st->act.min_ttl;
5495 			sp->pfs_1400.set_tos = st->act.set_tos;
5496 			sp->pfs_1400.max_mss = htons(st->act.max_mss);
5497 			sp->pfs_1400.set_prio[0] = st->act.set_prio[0];
5498 			sp->pfs_1400.set_prio[1] = st->act.set_prio[1];
5499 			sp->pfs_1400.rt = st->rt;
5500 			if (st->rt_kif)
5501 				strlcpy(sp->pfs_1400.rt_ifname,
5502 				    st->rt_kif->pfik_name,
5503 				    sizeof(sp->pfs_1400.rt_ifname));
5504 			break;
5505 		default:
5506 			panic("%s: Unsupported pfsync_msg_version %d",
5507 			    __func__, msg_version);
5508 	}
5509 
5510 	if (st->src_node)
5511 		sp->pfs_1301.sync_flags |= PFSYNC_FLAG_SRCNODE;
5512 	if (st->nat_src_node)
5513 		sp->pfs_1301.sync_flags |= PFSYNC_FLAG_NATSRCNODE;
5514 
5515 	sp->pfs_1301.id = st->id;
5516 	sp->pfs_1301.creatorid = st->creatorid;
5517 	pf_state_peer_hton(&st->src, &sp->pfs_1301.src);
5518 	pf_state_peer_hton(&st->dst, &sp->pfs_1301.dst);
5519 
5520 	if (st->rule.ptr == NULL)
5521 		sp->pfs_1301.rule = htonl(-1);
5522 	else
5523 		sp->pfs_1301.rule = htonl(st->rule.ptr->nr);
5524 	if (st->anchor.ptr == NULL)
5525 		sp->pfs_1301.anchor = htonl(-1);
5526 	else
5527 		sp->pfs_1301.anchor = htonl(st->anchor.ptr->nr);
5528 	if (st->nat_rule.ptr == NULL)
5529 		sp->pfs_1301.nat_rule = htonl(-1);
5530 	else
5531 		sp->pfs_1301.nat_rule = htonl(st->nat_rule.ptr->nr);
5532 
5533 	pf_state_counter_hton(st->packets[0], sp->pfs_1301.packets[0]);
5534 	pf_state_counter_hton(st->packets[1], sp->pfs_1301.packets[1]);
5535 	pf_state_counter_hton(st->bytes[0], sp->pfs_1301.bytes[0]);
5536 	pf_state_counter_hton(st->bytes[1], sp->pfs_1301.bytes[1]);
5537 }
5538 
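/*
 * Export a kernel state in the pf_state_export layout used by the
 * DIOCGETSTATESV2 interface.  state_flags is carried twice: an 8-bit
 * compat copy for old consumers and the full 16-bit value.
 */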
5539 void
5540 pf_state_export(struct pf_state_export *sp, struct pf_kstate *st)
5541 {
5542 	bzero(sp, sizeof(*sp));
5543 
5544 	sp->version = PF_STATE_VERSION;
5545 
5546 	/* copy from state key */
5547 	sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
5548 	sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
5549 	sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
5550 	sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
5551 	sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
5552 	sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
5553 	sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
5554 	sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
5555 	sp->proto = st->key[PF_SK_WIRE]->proto;
5556 	sp->af = st->key[PF_SK_WIRE]->af;
5557 
5558 	/* copy from state */
5559 	strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
5560 	strlcpy(sp->orig_ifname, st->orig_kif->pfik_name,
5561 	    sizeof(sp->orig_ifname));
5562 	bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr));
5563 	sp->creation = htonl(time_uptime - st->creation);
5564 	sp->expire = pf_state_expires(st);
5565 	if (sp->expire <= time_uptime)
5566 		sp->expire = htonl(0);
5567 	else
5568 		sp->expire = htonl(sp->expire - time_uptime);
5569 
5570 	sp->direction = st->direction;
5571 	sp->log = st->act.log;
5572 	sp->timeout = st->timeout;
5573 	/* 8 bits for the old libpfctl, 16 bits for the new libpfctl */
5574 	sp->state_flags_compat = st->state_flags;
5575 	sp->state_flags = htons(st->state_flags);
5576 	if (st->src_node)
5577 		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
5578 	if (st->nat_src_node)
5579 		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;
5580 
5581 	sp->id = st->id;
5582 	sp->creatorid = st->creatorid;
5583 	pf_state_peer_hton(&st->src, &sp->src);
5584 	pf_state_peer_hton(&st->dst, &sp->dst);
5585 
5586 	if (st->rule.ptr == NULL)
5587 		sp->rule = htonl(-1);
5588 	else
5589 		sp->rule = htonl(st->rule.ptr->nr);
5590 	if (st->anchor.ptr == NULL)
5591 		sp->anchor = htonl(-1);
5592 	else
5593 		sp->anchor = htonl(st->anchor.ptr->nr);
5594 	if (st->nat_rule.ptr == NULL)
5595 		sp->nat_rule = htonl(-1);
5596 	else
5597 		sp->nat_rule = htonl(st->nat_rule.ptr->nr);
5598 
5599 	sp->packets[0] = st->packets[0];
5600 	sp->packets[1] = st->packets[1];
5601 	sp->bytes[0] = st->bytes[0];
5602 	sp->bytes[1] = st->bytes[1];
5603 
5604 	sp->qid = htons(st->act.qid);
5605 	sp->pqid = htons(st->act.pqid);
5606 	sp->dnpipe = htons(st->act.dnpipe);
5607 	sp->dnrpipe = htons(st->act.dnrpipe);
5608 	sp->rtableid = htonl(st->act.rtableid);
5609 	sp->min_ttl = st->act.min_ttl;
5610 	sp->set_tos = st->act.set_tos;
5611 	sp->max_mss = htons(st->act.max_mss);
5612 	sp->rt = st->rt;
5613 	if (st->rt_kif)
5614 		strlcpy(sp->rt_ifname, st->rt_kif->pfik_name,
5615 		    sizeof(sp->rt_ifname));
5616 	sp->set_prio[0] = st->act.set_prio[0];
5617 	sp->set_prio[1] = st->act.set_prio[1];
5619 }
5620 
5621 static void
5622 pf_tbladdr_copyout(struct pf_addr_wrap *aw)
5623 {
5624 	struct pfr_ktable *kt;
5625 
5626 	KASSERT(aw->type == PF_ADDR_TABLE, ("%s: type %u", __func__, aw->type));
5627 
5628 	kt = aw->p.tbl;
5629 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
5630 		kt = kt->pfrkt_root;
5631 	aw->p.tbl = NULL;
5632 	aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
5633 		kt->pfrkt_cnt : -1;
5634 }
5635 
5636 static int
5637 pf_add_status_counters(nvlist_t *nvl, const char *name, counter_u64_t *counters,
5638     size_t number, char **names)
5639 {
5640 	nvlist_t        *nvc;
5641 
5642 	nvc = nvlist_create(0);
5643 	if (nvc == NULL)
5644 		return (ENOMEM);
5645 
5646 	for (int i = 0; i < number; i++) {
5647 		nvlist_append_number_array(nvc, "counters",
5648 		    counter_u64_fetch(counters[i]));
5649 		nvlist_append_string_array(nvc, "names",
5650 		    names[i]);
5651 		nvlist_append_number_array(nvc, "ids",
5652 		    i);
5653 	}
5654 	nvlist_add_nvlist(nvl, name, nvc);
5655 	nvlist_destroy(nvc);
5656 
5657 	return (0);
5658 }
5659 
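/*
 * Build the DIOCGETSTATUSNV reply: pack the pf status, the counter
 * arrays and the per-interface pcounters/bcounters into an nvlist.
 * A request with nv->size == 0 is a size probe and only reports the
 * required length in nv->len.
 */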
5660 static int
5661 pf_getstatus(struct pfioc_nv *nv)
5662 {
5663 	nvlist_t        *nvl = NULL, *nvc = NULL;
5664 	void            *nvlpacked = NULL;
5665 	int              error;
5666 	struct pf_status s;
5667 	char *pf_reasons[PFRES_MAX+1] = PFRES_NAMES;
5668 	char *pf_lcounter[KLCNT_MAX+1] = KLCNT_NAMES;
5669 	char *pf_fcounter[FCNT_MAX+1] = FCNT_NAMES;
5670 	PF_RULES_RLOCK_TRACKER;
5671 
5672 #define ERROUT(x)      ERROUT_FUNCTION(errout, x)
5673 
5674 	PF_RULES_RLOCK();
5675 
5676 	nvl = nvlist_create(0);
5677 	if (nvl == NULL)
5678 		ERROUT(ENOMEM);
5679 
5680 	nvlist_add_bool(nvl, "running", V_pf_status.running);
5681 	nvlist_add_number(nvl, "since", V_pf_status.since);
5682 	nvlist_add_number(nvl, "debug", V_pf_status.debug);
5683 	nvlist_add_number(nvl, "hostid", V_pf_status.hostid);
5684 	nvlist_add_number(nvl, "states", V_pf_status.states);
5685 	nvlist_add_number(nvl, "src_nodes", V_pf_status.src_nodes);
5686 	nvlist_add_number(nvl, "reass", V_pf_status.reass);
5687 	nvlist_add_bool(nvl, "syncookies_active",
5688 	    V_pf_status.syncookies_active);
5689 
5690 	/* counters */
5691 	error = pf_add_status_counters(nvl, "counters", V_pf_status.counters,
5692 	    PFRES_MAX, pf_reasons);
5693 	if (error != 0)
5694 		ERROUT(error);
5695 
5696 	/* lcounters */
5697 	error = pf_add_status_counters(nvl, "lcounters", V_pf_status.lcounters,
5698 	    KLCNT_MAX, pf_lcounter);
5699 	if (error != 0)
5700 		ERROUT(error);
5701 
5702 	/* fcounters */
5703 	nvc = nvlist_create(0);
5704 	if (nvc == NULL)
5705 		ERROUT(ENOMEM);
5706 
5707 	for (int i = 0; i < FCNT_MAX; i++) {
5708 		nvlist_append_number_array(nvc, "counters",
5709 		    pf_counter_u64_fetch(&V_pf_status.fcounters[i]));
5710 		nvlist_append_string_array(nvc, "names",
5711 		    pf_fcounter[i]);
5712 		nvlist_append_number_array(nvc, "ids",
5713 		    i);
5714 	}
5715 	nvlist_add_nvlist(nvl, "fcounters", nvc);
5716 	nvlist_destroy(nvc);
5717 	nvc = NULL;
5718 
5719 	/* scounters */
5720 	error = pf_add_status_counters(nvl, "scounters", V_pf_status.scounters,
5721 	    SCNT_MAX, pf_fcounter);
5722 	if (error != 0)
5723 		ERROUT(error);
5724 
5725 	nvlist_add_string(nvl, "ifname", V_pf_status.ifname);
5726 	nvlist_add_binary(nvl, "chksum", V_pf_status.pf_chksum,
5727 	    PF_MD5_DIGEST_LENGTH);
5728 
5729 	pfi_update_status(V_pf_status.ifname, &s);
5730 
5731 	/* pcounters / bcounters */
5732 	for (int i = 0; i < 2; i++) {
5733 		for (int j = 0; j < 2; j++) {
5734 			for (int k = 0; k < 2; k++) {
5735 				nvlist_append_number_array(nvl, "pcounters",
5736 				    s.pcounters[i][j][k]);
5737 			}
5738 			nvlist_append_number_array(nvl, "bcounters",
5739 			    s.bcounters[i][j]);
5740 		}
5741 	}
5742 
5743 	nvlpacked = nvlist_pack(nvl, &nv->len);
5744 	if (nvlpacked == NULL)
5745 		ERROUT(ENOMEM);
5746 
5747 	if (nv->size == 0)
5748 		ERROUT(0);
5749 	else if (nv->size < nv->len)
5750 		ERROUT(ENOSPC);
5751 
5752 	PF_RULES_RUNLOCK();
5753 	error = copyout(nvlpacked, nv->data, nv->len);
5754 	goto done;
5755 
5756 #undef ERROUT
5757 errout:
5758 	PF_RULES_RUNLOCK();
5759 done:
5760 	free(nvlpacked, M_NVLIST);
5761 	nvlist_destroy(nvc);
5762 	nvlist_destroy(nvl);
5763 
5764 	return (error);
5765 }
5766 
5767 /*
5768  * XXX - Check for version mismatch!!!
5769  */
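/*
 * Expire every state in every hash row. PFSTATE_NOSYNC suppresses
 * the per-state pfsync delete messages; the walk restarts from the
 * head of each bucket because unlinking a state drops the row lock.
 */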
5770 static void
5771 pf_clear_all_states(void)
5772 {
5773 	struct pf_kstate	*s;
5774 	u_int i;
5775 
5776 	for (i = 0; i <= pf_hashmask; i++) {
5777 		struct pf_idhash *ih = &V_pf_idhash[i];
5778 relock:
5779 		PF_HASHROW_LOCK(ih);
5780 		LIST_FOREACH(s, &ih->states, entry) {
5781 			s->timeout = PFTM_PURGE;
5782 			/* Don't send out individual delete messages. */
5783 			s->state_flags |= PFSTATE_NOSYNC;
5784 			pf_unlink_state(s);
5785 			goto relock;
5786 		}
5787 		PF_HASHROW_UNLOCK(ih);
5788 	}
5789 }
5790 
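/* Flush all tables, mirroring DIOCRCLRTABLES with an all-zero request. */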
5791 static int
5792 pf_clear_tables(void)
5793 {
5794 	struct pfioc_table io;
5795 	int error;
5796 
5797 	bzero(&io, sizeof(io));
5798 
5799 	error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel,
5800 	    io.pfrio_flags);
5801 
5802 	return (error);
5803 }
5804 
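/*
 * Detach source nodes from states and mark them for expiry. A NULL
 * argument clears every source node; otherwise only the given node,
 * whose hash slot the caller is expected to hold (see below).
 */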
5805 static void
5806 pf_clear_srcnodes(struct pf_ksrc_node *n)
5807 {
5808 	struct pf_kstate *s;
5809 	int i;
5810 
5811 	for (i = 0; i <= pf_hashmask; i++) {
5812 		struct pf_idhash *ih = &V_pf_idhash[i];
5813 
5814 		PF_HASHROW_LOCK(ih);
5815 		LIST_FOREACH(s, &ih->states, entry) {
5816 			if (n == NULL || n == s->src_node)
5817 				s->src_node = NULL;
5818 			if (n == NULL || n == s->nat_src_node)
5819 				s->nat_src_node = NULL;
5820 		}
5821 		PF_HASHROW_UNLOCK(ih);
5822 	}
5823 
5824 	if (n == NULL) {
5825 		struct pf_srchash *sh;
5826 
5827 		for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask;
5828 		    i++, sh++) {
5829 			PF_HASHROW_LOCK(sh);
5830 			LIST_FOREACH(n, &sh->nodes, entry) {
5831 				n->expire = 1;
5832 				n->states = 0;
5833 			}
5834 			PF_HASHROW_UNLOCK(sh);
5835 		}
5836 	} else {
5837 		/* XXX: hash slot should already be locked here. */
5838 		n->expire = 1;
5839 		n->states = 0;
5840 	}
5841 }
5842 
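/*
 * Kill the source nodes matching the src/dst address filter: unlink
 * matches onto a local list, detach them from any states that still
 * reference them, then free them and report the number killed.
 */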
5843 static void
5844 pf_kill_srcnodes(struct pfioc_src_node_kill *psnk)
5845 {
5846 	struct pf_ksrc_node_list	 kill;
5847 
5848 	LIST_INIT(&kill);
5849 	for (int i = 0; i <= pf_srchashmask; i++) {
5850 		struct pf_srchash *sh = &V_pf_srchash[i];
5851 		struct pf_ksrc_node *sn, *tmp;
5852 
5853 		PF_HASHROW_LOCK(sh);
5854 		LIST_FOREACH_SAFE(sn, &sh->nodes, entry, tmp)
5855 			if (PF_MATCHA(psnk->psnk_src.neg,
5856 			      &psnk->psnk_src.addr.v.a.addr,
5857 			      &psnk->psnk_src.addr.v.a.mask,
5858 			      &sn->addr, sn->af) &&
5859 			    PF_MATCHA(psnk->psnk_dst.neg,
5860 			      &psnk->psnk_dst.addr.v.a.addr,
5861 			      &psnk->psnk_dst.addr.v.a.mask,
5862 			      &sn->raddr, sn->af)) {
5863 				pf_unlink_src_node(sn);
5864 				LIST_INSERT_HEAD(&kill, sn, entry);
5865 				sn->expire = 1;
5866 			}
5867 		PF_HASHROW_UNLOCK(sh);
5868 	}
5869 
5870 	for (int i = 0; i <= pf_hashmask; i++) {
5871 		struct pf_idhash *ih = &V_pf_idhash[i];
5872 		struct pf_kstate *s;
5873 
5874 		PF_HASHROW_LOCK(ih);
5875 		LIST_FOREACH(s, &ih->states, entry) {
5876 			if (s->src_node && s->src_node->expire == 1)
5877 				s->src_node = NULL;
5878 			if (s->nat_src_node && s->nat_src_node->expire == 1)
5879 				s->nat_src_node = NULL;
5880 		}
5881 		PF_HASHROW_UNLOCK(ih);
5882 	}
5883 
5884 	psnk->psnk_killed = pf_free_src_nodes(&kill);
5885 }
5886 
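/*
 * Set the keep_counters flag from an nvlist request; it controls
 * whether rule counters are carried over when a ruleset is replaced.
 */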
5887 static int
5888 pf_keepcounters(struct pfioc_nv *nv)
5889 {
5890 	nvlist_t	*nvl = NULL;
5891 	void		*nvlpacked = NULL;
5892 	int		 error = 0;
5893 
5894 #define	ERROUT(x)	ERROUT_FUNCTION(on_error, x)
5895 
5896 	if (nv->len > pf_ioctl_maxcount)
5897 		ERROUT(ENOMEM);
5898 
5899 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
5900 	if (nvlpacked == NULL)
5901 		ERROUT(ENOMEM);
5902 
5903 	error = copyin(nv->data, nvlpacked, nv->len);
5904 	if (error)
5905 		ERROUT(error);
5906 
5907 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
5908 	if (nvl == NULL)
5909 		ERROUT(EBADMSG);
5910 
5911 	if (!nvlist_exists_bool(nvl, "keep_counters"))
5912 		ERROUT(EBADMSG);
5913 
5914 	V_pf_status.keep_counters = nvlist_get_bool(nvl, "keep_counters");
5915 
5916 on_error:
5917 	nvlist_destroy(nvl);
5918 	free(nvlpacked, M_NVLIST);
5919 	return (error);
5920 }
5921 
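/*
 * Remove states, optionally restricted to one interface and
 * optionally (psk_kill_match) also killing states matching the
 * reversed state key. Returns the number of states removed.
 */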
5922 static unsigned int
5923 pf_clear_states(const struct pf_kstate_kill *kill)
5924 {
5925 	struct pf_state_key_cmp	 match_key;
5926 	struct pf_kstate	*s;
5927 	struct pfi_kkif	*kif;
5928 	int		 idx;
5929 	unsigned int	 killed = 0, dir;
5930 
5931 	for (unsigned int i = 0; i <= pf_hashmask; i++) {
5932 		struct pf_idhash *ih = &V_pf_idhash[i];
5933 
5934 relock_DIOCCLRSTATES:
5935 		PF_HASHROW_LOCK(ih);
5936 		LIST_FOREACH(s, &ih->states, entry) {
5937 			/* For floating states look at the original kif. */
5938 			kif = s->kif == V_pfi_all ? s->orig_kif : s->kif;
5939 
5940 			if (kill->psk_ifname[0] &&
5941 			    strcmp(kill->psk_ifname,
5942 			    kif->pfik_name))
5943 				continue;
5944 
5945 			if (kill->psk_kill_match) {
5946 				bzero(&match_key, sizeof(match_key));
5947 
5948 				if (s->direction == PF_OUT) {
5949 					dir = PF_IN;
5950 					idx = PF_SK_STACK;
5951 				} else {
5952 					dir = PF_OUT;
5953 					idx = PF_SK_WIRE;
5954 				}
5955 
5956 				match_key.af = s->key[idx]->af;
5957 				match_key.proto = s->key[idx]->proto;
5958 				PF_ACPY(&match_key.addr[0],
5959 				    &s->key[idx]->addr[1], match_key.af);
5960 				match_key.port[0] = s->key[idx]->port[1];
5961 				PF_ACPY(&match_key.addr[1],
5962 				    &s->key[idx]->addr[0], match_key.af);
5963 				match_key.port[1] = s->key[idx]->port[0];
5964 			}
5965 
5966 			/* Don't send out individual delete messages. */
5970 			s->state_flags |= PFSTATE_NOSYNC;
5971 			pf_unlink_state(s);
5972 			killed++;
5973 
5974 			if (kill->psk_kill_match)
5975 				killed += pf_kill_matching_state(&match_key,
5976 				    dir);
5977 
5978 			goto relock_DIOCCLRSTATES;
5979 		}
5980 		PF_HASHROW_UNLOCK(ih);
5981 	}
5982 
5983 	if (V_pfsync_clear_states_ptr != NULL)
5984 		V_pfsync_clear_states_ptr(V_pf_status.hostid, kill->psk_ifname);
5985 
5986 	return (killed);
5987 }
5988 
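/*
 * Kill states either by id/creatorid, or by walking every hash row
 * with the supplied filter. A creatorid of zero means this host.
 */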
5989 static void
5990 pf_killstates(struct pf_kstate_kill *kill, unsigned int *killed)
5991 {
5992 	struct pf_kstate	*s;
5993 
5994 	if (kill->psk_pfcmp.id) {
5995 		if (kill->psk_pfcmp.creatorid == 0)
5996 			kill->psk_pfcmp.creatorid = V_pf_status.hostid;
5997 		if ((s = pf_find_state_byid(kill->psk_pfcmp.id,
5998 		    kill->psk_pfcmp.creatorid))) {
5999 			pf_unlink_state(s);
6000 			*killed = 1;
6001 		}
6002 		return;
6003 	}
6004 
6005 	for (unsigned int i = 0; i <= pf_hashmask; i++)
6006 		*killed += pf_killstates_row(kill, &V_pf_idhash[i]);
6009 }
6010 
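/*
 * nvlist wrapper around pf_killstates(): unpack the kill criteria
 * from userland, perform the kill and return the "killed" count in
 * a freshly packed nvlist.
 */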
6011 static int
6012 pf_killstates_nv(struct pfioc_nv *nv)
6013 {
6014 	struct pf_kstate_kill	 kill;
6015 	nvlist_t		*nvl = NULL;
6016 	void			*nvlpacked = NULL;
6017 	int			 error = 0;
6018 	unsigned int		 killed = 0;
6019 
6020 #define ERROUT(x)	ERROUT_FUNCTION(on_error, x)
6021 
6022 	if (nv->len > pf_ioctl_maxcount)
6023 		ERROUT(ENOMEM);
6024 
6025 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
6026 	if (nvlpacked == NULL)
6027 		ERROUT(ENOMEM);
6028 
6029 	error = copyin(nv->data, nvlpacked, nv->len);
6030 	if (error)
6031 		ERROUT(error);
6032 
6033 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
6034 	if (nvl == NULL)
6035 		ERROUT(EBADMSG);
6036 
6037 	error = pf_nvstate_kill_to_kstate_kill(nvl, &kill);
6038 	if (error)
6039 		ERROUT(error);
6040 
6041 	pf_killstates(&kill, &killed);
6042 
6043 	free(nvlpacked, M_NVLIST);
6044 	nvlpacked = NULL;
6045 	nvlist_destroy(nvl);
6046 	nvl = nvlist_create(0);
6047 	if (nvl == NULL)
6048 		ERROUT(ENOMEM);
6049 
6050 	nvlist_add_number(nvl, "killed", killed);
6051 
6052 	nvlpacked = nvlist_pack(nvl, &nv->len);
6053 	if (nvlpacked == NULL)
6054 		ERROUT(ENOMEM);
6055 
6056 	if (nv->size == 0)
6057 		ERROUT(0);
6058 	else if (nv->size < nv->len)
6059 		ERROUT(ENOSPC);
6060 
6061 	error = copyout(nvlpacked, nv->data, nv->len);
6062 
6063 on_error:
6064 	nvlist_destroy(nvl);
6065 	free(nvlpacked, M_NVLIST);
6066 	return (error);
6067 }
6068 
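/* Like pf_killstates_nv(), but flushes states via pf_clear_states(). */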
6069 static int
6070 pf_clearstates_nv(struct pfioc_nv *nv)
6071 {
6072 	struct pf_kstate_kill	 kill;
6073 	nvlist_t		*nvl = NULL;
6074 	void			*nvlpacked = NULL;
6075 	int			 error = 0;
6076 	unsigned int		 killed;
6077 
6078 #define ERROUT(x)	ERROUT_FUNCTION(on_error, x)
6079 
6080 	if (nv->len > pf_ioctl_maxcount)
6081 		ERROUT(ENOMEM);
6082 
6083 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
6084 	if (nvlpacked == NULL)
6085 		ERROUT(ENOMEM);
6086 
6087 	error = copyin(nv->data, nvlpacked, nv->len);
6088 	if (error)
6089 		ERROUT(error);
6090 
6091 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
6092 	if (nvl == NULL)
6093 		ERROUT(EBADMSG);
6094 
6095 	error = pf_nvstate_kill_to_kstate_kill(nvl, &kill);
6096 	if (error)
6097 		ERROUT(error);
6098 
6099 	killed = pf_clear_states(&kill);
6100 
6101 	free(nvlpacked, M_NVLIST);
6102 	nvlpacked = NULL;
6103 	nvlist_destroy(nvl);
6104 	nvl = nvlist_create(0);
6105 	if (nvl == NULL)
6106 		ERROUT(ENOMEM);
6107 
6108 	nvlist_add_number(nvl, "killed", killed);
6109 
6110 	nvlpacked = nvlist_pack(nvl, &nv->len);
6111 	if (nvlpacked == NULL)
6112 		ERROUT(ENOMEM);
6113 
6114 	if (nv->size == 0)
6115 		ERROUT(0);
6116 	else if (nv->size < nv->len)
6117 		ERROUT(ENOSPC);
6118 
6119 	error = copyout(nvlpacked, nv->data, nv->len);
6120 
6121 #undef ERROUT
6122 on_error:
6123 	nvlist_destroy(nvl);
6124 	free(nvlpacked, M_NVLIST);
6125 	return (error);
6126 }
6127 
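/*
 * Look up a single state by id/creatorid and export it as an nvlist.
 * pf_find_state_byid() returns the state locked; it is unlocked on
 * the way out.
 */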
6128 static int
6129 pf_getstate(struct pfioc_nv *nv)
6130 {
6131 	nvlist_t		*nvl = NULL, *nvls;
6132 	void			*nvlpacked = NULL;
6133 	struct pf_kstate	*s = NULL;
6134 	int			 error = 0;
6135 	uint64_t		 id, creatorid;
6136 
6137 #define ERROUT(x)	ERROUT_FUNCTION(errout, x)
6138 
6139 	if (nv->len > pf_ioctl_maxcount)
6140 		ERROUT(ENOMEM);
6141 
6142 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
6143 	if (nvlpacked == NULL)
6144 		ERROUT(ENOMEM);
6145 
6146 	error = copyin(nv->data, nvlpacked, nv->len);
6147 	if (error)
6148 		ERROUT(error);
6149 
6150 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
6151 	if (nvl == NULL)
6152 		ERROUT(EBADMSG);
6153 
6154 	PFNV_CHK(pf_nvuint64(nvl, "id", &id));
6155 	PFNV_CHK(pf_nvuint64(nvl, "creatorid", &creatorid));
6156 
6157 	s = pf_find_state_byid(id, creatorid);
6158 	if (s == NULL)
6159 		ERROUT(ENOENT);
6160 
6161 	free(nvlpacked, M_NVLIST);
6162 	nvlpacked = NULL;
6163 	nvlist_destroy(nvl);
6164 	nvl = nvlist_create(0);
6165 	if (nvl == NULL)
6166 		ERROUT(ENOMEM);
6167 
6168 	nvls = pf_state_to_nvstate(s);
6169 	if (nvls == NULL)
6170 		ERROUT(ENOMEM);
6171 
6172 	nvlist_add_nvlist(nvl, "state", nvls);
6173 	nvlist_destroy(nvls);
6174 
6175 	nvlpacked = nvlist_pack(nvl, &nv->len);
6176 	if (nvlpacked == NULL)
6177 		ERROUT(ENOMEM);
6178 
6179 	if (nv->size == 0)
6180 		ERROUT(0);
6181 	else if (nv->size < nv->len)
6182 		ERROUT(ENOSPC);
6183 
6184 	error = copyout(nvlpacked, nv->data, nv->len);
6185 
6186 #undef ERROUT
6187 errout:
6188 	if (s != NULL)
6189 		PF_STATE_UNLOCK(s);
6190 	free(nvlpacked, M_NVLIST);
6191 	nvlist_destroy(nvl);
6192 	return (error);
6193 }
6194 
6195 /*
6196  * XXX - Check for version mismatch!!!
6197  */
6198 
6199 /*
6200  * Duplicate the pfctl -Fa operation to flush as much state as we can.
6201  */
6202 static int
6203 shutdown_pf(void)
6204 {
6205 	int error = 0;
6206 	u_int32_t t[5];
6207 	char nn = '\0';
6208 
6209 	do {
6210 		if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn))
6211 		    != 0) {
6212 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n"));
6213 			break;
6214 		}
6215 		if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn))
6216 		    != 0) {
6217 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n"));
6218 			break;		/* XXX: rollback? */
6219 		}
6220 		if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn))
6221 		    != 0) {
6222 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n"));
6223 			break;		/* XXX: rollback? */
6224 		}
6225 		if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn))
6226 		    != 0) {
6227 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n"));
6228 			break;		/* XXX: rollback? */
6229 		}
6230 		if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn))
6231 		    != 0) {
6232 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n"));
6233 			break;		/* XXX: rollback? */
6234 		}
6235 
6236 		/* XXX: these should always succeed here */
6237 		pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn);
6238 		pf_commit_rules(t[1], PF_RULESET_FILTER, &nn);
6239 		pf_commit_rules(t[2], PF_RULESET_NAT, &nn);
6240 		pf_commit_rules(t[3], PF_RULESET_BINAT, &nn);
6241 		pf_commit_rules(t[4], PF_RULESET_RDR, &nn);
6242 
6243 		if ((error = pf_clear_tables()) != 0)
6244 			break;
6245 
6246 		if ((error = pf_begin_eth(&t[0], &nn)) != 0) {
6247 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: eth\n"));
6248 			break;
6249 		}
6250 		pf_commit_eth(t[0], &nn);
6251 
6252 #ifdef ALTQ
6253 		if ((error = pf_begin_altq(&t[0])) != 0) {
6254 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n"));
6255 			break;
6256 		}
6257 		pf_commit_altq(t[0]);
6258 #endif
6259 
6260 		pf_clear_all_states();
6261 
6262 		pf_clear_srcnodes(NULL);
6263 
6264 		/* Status uses no malloc'ed memory, so no cleanup is needed. */
6265 		/* Fingerprints and interfaces have their own cleanup code. */
6266 	} while (0);
6267 
6268 	return (error);
6269 }
6270 
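/*
 * Translate a pf_test*() verdict into a pfil return code: a passed
 * verdict with no mbuf left means pf consumed the packet; anything
 * other than PF_PASS drops (and frees) it.
 */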
6271 static pfil_return_t
6272 pf_check_return(int chk, struct mbuf **m)
6273 {
6274 
6275 	switch (chk) {
6276 	case PF_PASS:
6277 		if (*m == NULL)
6278 			return (PFIL_CONSUMED);
6279 		else
6280 			return (PFIL_PASS);
6281 		break;
6282 	default:
6283 		if (*m != NULL) {
6284 			m_freem(*m);
6285 			*m = NULL;
6286 		}
6287 		return (PFIL_DROPPED);
6288 	}
6289 }
6290 
6291 static pfil_return_t
6292 pf_eth_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
6293     void *ruleset __unused, struct inpcb *inp)
6294 {
6295 	int chk;
6296 
6297 	chk = pf_test_eth(PF_IN, flags, ifp, m, inp);
6298 
6299 	return (pf_check_return(chk, m));
6300 }
6301 
6302 static pfil_return_t
6303 pf_eth_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
6304     void *ruleset __unused, struct inpcb *inp)
6305 {
6306 	int chk;
6307 
6308 	chk = pf_test_eth(PF_OUT, flags, ifp, m, inp);
6309 
6310 	return (pf_check_return(chk, m));
6311 }
6312 
6313 #ifdef INET
6314 static pfil_return_t
6315 pf_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
6316     void *ruleset __unused, struct inpcb *inp)
6317 {
6318 	int chk;
6319 
6320 	chk = pf_test(PF_IN, flags, ifp, m, inp, NULL);
6321 
6322 	return (pf_check_return(chk, m));
6323 }
6324 
6325 static pfil_return_t
6326 pf_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
6327     void *ruleset __unused, struct inpcb *inp)
6328 {
6329 	int chk;
6330 
6331 	chk = pf_test(PF_OUT, flags, ifp, m, inp, NULL);
6332 
6333 	return (pf_check_return(chk, m));
6334 }
6335 #endif
6336 
6337 #ifdef INET6
6338 static pfil_return_t
6339 pf_check6_in(struct mbuf **m, struct ifnet *ifp, int flags,
6340     void *ruleset __unused, struct inpcb *inp)
6341 {
6342 	int chk;
6343 
6344 	/*
6345 	 * For loopback traffic IPv6 uses the real interface in order to
6346 	 * support scoped addresses. To support stateful filtering we have
6347 	 * to change this to lo0, as is the case with IPv4.
6348 	 */
6349 	CURVNET_SET(ifp->if_vnet);
6350 	chk = pf_test6(PF_IN, flags, (*m)->m_flags & M_LOOP ? V_loif : ifp,
6351 	    m, inp, NULL);
6352 	CURVNET_RESTORE();
6353 
6354 	return (pf_check_return(chk, m));
6355 }
6356 
6357 static pfil_return_t
6358 pf_check6_out(struct mbuf **m, struct ifnet *ifp, int flags,
6359     void *ruleset __unused, struct inpcb *inp)
6360 {
6361 	int chk;
6362 
6363 	CURVNET_SET(ifp->if_vnet);
6364 	chk = pf_test6(PF_OUT, flags, ifp, m, inp, NULL);
6365 	CURVNET_RESTORE();
6366 
6367 	return (pf_check_return(chk, m));
6368 }
6369 #endif /* INET6 */
6370 
6371 VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_in_hook);
6372 VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_out_hook);
6373 #define	V_pf_eth_in_hook	VNET(pf_eth_in_hook)
6374 #define	V_pf_eth_out_hook	VNET(pf_eth_out_hook)
6375 
6376 #ifdef INET
6377 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_in_hook);
6378 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_out_hook);
6379 #define	V_pf_ip4_in_hook	VNET(pf_ip4_in_hook)
6380 #define	V_pf_ip4_out_hook	VNET(pf_ip4_out_hook)
6381 #endif
6382 #ifdef INET6
6383 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_in_hook);
6384 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_out_hook);
6385 #define	V_pf_ip6_in_hook	VNET(pf_ip6_in_hook)
6386 #define	V_pf_ip6_out_hook	VNET(pf_ip6_out_hook)
6387 #endif
6388 
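/*
 * Register the Ethernet-layer pf hooks and link them to the link
 * pfil head. pfil_link() is expected to succeed here; failures only
 * trip the MPASS() assertions on INVARIANTS kernels.
 */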
6389 static void
6390 hook_pf_eth(void)
6391 {
6392 	struct pfil_hook_args pha = {
6393 		.pa_version = PFIL_VERSION,
6394 		.pa_modname = "pf",
6395 		.pa_type = PFIL_TYPE_ETHERNET,
6396 	};
6397 	struct pfil_link_args pla = {
6398 		.pa_version = PFIL_VERSION,
6399 	};
6400 	int ret __diagused;
6401 
6402 	if (atomic_load_bool(&V_pf_pfil_eth_hooked))
6403 		return;
6404 
6405 	pha.pa_mbuf_chk = pf_eth_check_in;
6406 	pha.pa_flags = PFIL_IN;
6407 	pha.pa_rulname = "eth-in";
6408 	V_pf_eth_in_hook = pfil_add_hook(&pha);
6409 	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
6410 	pla.pa_head = V_link_pfil_head;
6411 	pla.pa_hook = V_pf_eth_in_hook;
6412 	ret = pfil_link(&pla);
6413 	MPASS(ret == 0);
6414 	pha.pa_mbuf_chk = pf_eth_check_out;
6415 	pha.pa_flags = PFIL_OUT;
6416 	pha.pa_rulname = "eth-out";
6417 	V_pf_eth_out_hook = pfil_add_hook(&pha);
6418 	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6419 	pla.pa_head = V_link_pfil_head;
6420 	pla.pa_hook = V_pf_eth_out_hook;
6421 	ret = pfil_link(&pla);
6422 	MPASS(ret == 0);
6423 
6424 	atomic_store_bool(&V_pf_pfil_eth_hooked, true);
6425 }
6426 
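/*
 * Register the IPv4/IPv6 pf hooks. When V_pf_filter_local is set,
 * the output hooks are additionally linked to the inet/inet6 local
 * pfil heads.
 */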
6427 static void
6428 hook_pf(void)
6429 {
6430 	struct pfil_hook_args pha = {
6431 		.pa_version = PFIL_VERSION,
6432 		.pa_modname = "pf",
6433 	};
6434 	struct pfil_link_args pla = {
6435 		.pa_version = PFIL_VERSION,
6436 	};
6437 	int ret __diagused;
6438 
6439 	if (atomic_load_bool(&V_pf_pfil_hooked))
6440 		return;
6441 
6442 #ifdef INET
6443 	pha.pa_type = PFIL_TYPE_IP4;
6444 	pha.pa_mbuf_chk = pf_check_in;
6445 	pha.pa_flags = PFIL_IN;
6446 	pha.pa_rulname = "default-in";
6447 	V_pf_ip4_in_hook = pfil_add_hook(&pha);
6448 	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
6449 	pla.pa_head = V_inet_pfil_head;
6450 	pla.pa_hook = V_pf_ip4_in_hook;
6451 	ret = pfil_link(&pla);
6452 	MPASS(ret == 0);
6453 	pha.pa_mbuf_chk = pf_check_out;
6454 	pha.pa_flags = PFIL_OUT;
6455 	pha.pa_rulname = "default-out";
6456 	V_pf_ip4_out_hook = pfil_add_hook(&pha);
6457 	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6458 	pla.pa_head = V_inet_pfil_head;
6459 	pla.pa_hook = V_pf_ip4_out_hook;
6460 	ret = pfil_link(&pla);
6461 	MPASS(ret == 0);
6462 	if (V_pf_filter_local) {
6463 		pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6464 		pla.pa_head = V_inet_local_pfil_head;
6465 		pla.pa_hook = V_pf_ip4_out_hook;
6466 		ret = pfil_link(&pla);
6467 		MPASS(ret == 0);
6468 	}
6469 #endif
6470 #ifdef INET6
6471 	pha.pa_type = PFIL_TYPE_IP6;
6472 	pha.pa_mbuf_chk = pf_check6_in;
6473 	pha.pa_flags = PFIL_IN;
6474 	pha.pa_rulname = "default-in6";
6475 	V_pf_ip6_in_hook = pfil_add_hook(&pha);
6476 	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
6477 	pla.pa_head = V_inet6_pfil_head;
6478 	pla.pa_hook = V_pf_ip6_in_hook;
6479 	ret = pfil_link(&pla);
6480 	MPASS(ret == 0);
6481 	pha.pa_mbuf_chk = pf_check6_out;
6482 	pha.pa_rulname = "default-out6";
6483 	pha.pa_flags = PFIL_OUT;
6484 	V_pf_ip6_out_hook = pfil_add_hook(&pha);
6485 	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6486 	pla.pa_head = V_inet6_pfil_head;
6487 	pla.pa_hook = V_pf_ip6_out_hook;
6488 	ret = pfil_link(&pla);
6489 	MPASS(ret == 0);
6490 	if (V_pf_filter_local) {
6491 		pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6492 		pla.pa_head = V_inet6_local_pfil_head;
6493 		pla.pa_hook = V_pf_ip6_out_hook;
6494 		ret = pfil_link(&pla);
6495 		MPASS(ret == 0);
6496 	}
6497 #endif
6498 
6499 	atomic_store_bool(&V_pf_pfil_hooked, true);
6500 }
6501 
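/* The dehook functions remove the hooks registered above. */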
6502 static void
6503 dehook_pf_eth(void)
6504 {
6505 
6506 	if (!atomic_load_bool(&V_pf_pfil_eth_hooked))
6507 		return;
6508 
6509 	pfil_remove_hook(V_pf_eth_in_hook);
6510 	pfil_remove_hook(V_pf_eth_out_hook);
6511 
6512 	atomic_store_bool(&V_pf_pfil_eth_hooked, false);
6513 }
6514 
6515 static void
6516 dehook_pf(void)
6517 {
6518 
6519 	if (!atomic_load_bool(&V_pf_pfil_hooked))
6520 		return;
6521 
6522 #ifdef INET
6523 	pfil_remove_hook(V_pf_ip4_in_hook);
6524 	pfil_remove_hook(V_pf_ip4_out_hook);
6525 #endif
6526 #ifdef INET6
6527 	pfil_remove_hook(V_pf_ip6_in_hook);
6528 	pfil_remove_hook(V_pf_ip6_out_hook);
6529 #endif
6530 
6531 	atomic_store_bool(&V_pf_pfil_hooked, false);
6532 }
6533 
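/*
 * Per-vnet initialization: create the tag zone, the rules and ioctl
 * locks and the tag sets, then attach pf to this vnet.
 */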
6534 static void
6535 pf_load_vnet(void)
6536 {
6537 	V_pf_tag_z = uma_zcreate("pf tags", sizeof(struct pf_tagname),
6538 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
6539 
6540 	rm_init_flags(&V_pf_rules_lock, "pf rulesets", RM_RECURSE);
6541 	sx_init(&V_pf_ioctl_lock, "pf ioctl");
6542 
6543 	pf_init_tagset(&V_pf_tags, &pf_rule_tag_hashsize,
6544 	    PF_RULE_TAG_HASH_SIZE_DEFAULT);
6545 #ifdef ALTQ
6546 	pf_init_tagset(&V_pf_qids, &pf_queue_tag_hashsize,
6547 	    PF_QUEUE_TAG_HASH_SIZE_DEFAULT);
6548 #endif
6549 
6550 	V_pf_keth = &V_pf_main_keth_anchor.ruleset;
6551 
6552 	pfattach_vnet();
6553 	V_pf_vnet_active = 1;
6554 }
6555 
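/*
 * One-time global initialization: /dev/pf, the purge kproc and the
 * interface layer.
 */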
6556 static int
6557 pf_load(void)
6558 {
6559 	int error;
6560 
6561 	sx_init(&pf_end_lock, "pf end thread");
6562 
6563 	pf_mtag_initialize();
6564 
6565 	pf_dev = make_dev(&pf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, PF_NAME);
6566 	if (pf_dev == NULL)
6567 		return (ENOMEM);
6568 
6569 	pf_end_threads = 0;
6570 	error = kproc_create(pf_purge_thread, NULL, &pf_purge_proc, 0, 0, "pf purge");
6571 	if (error != 0)
6572 		return (error);
6573 
6574 	pfi_initialize();
6575 
6576 	return (0);
6577 }
6578 
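/*
 * Per-vnet teardown: unhook from pfil, flush the ruleset and states,
 * then release all per-vnet resources. Counters are freed last
 * because the shutdown path still updates them.
 */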
6579 static void
6580 pf_unload_vnet(void)
6581 {
6582 	int ret __diagused;
6583 
6584 	V_pf_vnet_active = 0;
6585 	V_pf_status.running = 0;
6586 	dehook_pf();
6587 	dehook_pf_eth();
6588 
6589 	PF_RULES_WLOCK();
6590 	pf_syncookies_cleanup();
6591 	shutdown_pf();
6592 	PF_RULES_WUNLOCK();
6593 
6594 	/* Make sure we've cleaned up ethernet rules before we continue. */
6595 	NET_EPOCH_DRAIN_CALLBACKS();
6596 
6597 	ret = swi_remove(V_pf_swi_cookie);
6598 	MPASS(ret == 0);
6599 	ret = intr_event_destroy(V_pf_swi_ie);
6600 	MPASS(ret == 0);
6601 
6602 	pf_unload_vnet_purge();
6603 
6604 	pf_normalize_cleanup();
6605 	PF_RULES_WLOCK();
6606 	pfi_cleanup_vnet();
6607 	PF_RULES_WUNLOCK();
6608 	pfr_cleanup();
6609 	pf_osfp_flush();
6610 	pf_cleanup();
6611 	if (IS_DEFAULT_VNET(curvnet))
6612 		pf_mtag_cleanup();
6613 
6614 	pf_cleanup_tagset(&V_pf_tags);
6615 #ifdef ALTQ
6616 	pf_cleanup_tagset(&V_pf_qids);
6617 #endif
6618 	uma_zdestroy(V_pf_tag_z);
6619 
6620 #ifdef PF_WANT_32_TO_64_COUNTER
6621 	PF_RULES_WLOCK();
6622 	LIST_REMOVE(V_pf_kifmarker, pfik_allkiflist);
6623 
6624 	MPASS(LIST_EMPTY(&V_pf_allkiflist));
6625 	MPASS(V_pf_allkifcount == 0);
6626 
6627 	LIST_REMOVE(&V_pf_default_rule, allrulelist);
6628 	V_pf_allrulecount--;
6629 	LIST_REMOVE(V_pf_rulemarker, allrulelist);
6630 
6631 	/*
6632 	 * There are known pf rule leaks when running the test suite.
6633 	 */
6634 #ifdef notyet
6635 	MPASS(LIST_EMPTY(&V_pf_allrulelist));
6636 	MPASS(V_pf_allrulecount == 0);
6637 #endif
6638 
6639 	PF_RULES_WUNLOCK();
6640 
6641 	free(V_pf_kifmarker, PFI_MTYPE);
6642 	free(V_pf_rulemarker, M_PFRULE);
6643 #endif
6644 
6645 	/* Free counters last as we updated them during shutdown. */
6646 	pf_counter_u64_deinit(&V_pf_default_rule.evaluations);
6647 	for (int i = 0; i < 2; i++) {
6648 		pf_counter_u64_deinit(&V_pf_default_rule.packets[i]);
6649 		pf_counter_u64_deinit(&V_pf_default_rule.bytes[i]);
6650 	}
6651 	counter_u64_free(V_pf_default_rule.states_cur);
6652 	counter_u64_free(V_pf_default_rule.states_tot);
6653 	counter_u64_free(V_pf_default_rule.src_nodes);
6654 	uma_zfree_pcpu(pf_timestamp_pcpu_zone, V_pf_default_rule.timestamp);
6655 
6656 	for (int i = 0; i < PFRES_MAX; i++)
6657 		counter_u64_free(V_pf_status.counters[i]);
6658 	for (int i = 0; i < KLCNT_MAX; i++)
6659 		counter_u64_free(V_pf_status.lcounters[i]);
6660 	for (int i = 0; i < FCNT_MAX; i++)
6661 		pf_counter_u64_deinit(&V_pf_status.fcounters[i]);
6662 	for (int i = 0; i < SCNT_MAX; i++)
6663 		counter_u64_free(V_pf_status.scounters[i]);
6664 
6665 	rm_destroy(&V_pf_rules_lock);
6666 	sx_destroy(&V_pf_ioctl_lock);
6667 }
6668 
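/*
 * Global teardown: signal the purge thread, wait for it to exit and
 * destroy the remaining global resources.
 */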
6669 static void
6670 pf_unload(void)
6671 {
6672 
6673 	sx_xlock(&pf_end_lock);
6674 	pf_end_threads = 1;
6675 	while (pf_end_threads < 2) {
6676 		wakeup_one(pf_purge_thread);
6677 		sx_sleep(pf_purge_proc, &pf_end_lock, 0, "pftmo", 0);
6678 	}
6679 	sx_xunlock(&pf_end_lock);
6680 
6681 	pf_nl_unregister();
6682 
6683 	if (pf_dev != NULL)
6684 		destroy_dev(pf_dev);
6685 
6686 	pfi_cleanup();
6687 
6688 	sx_destroy(&pf_end_lock);
6689 }
6690 
6691 static void
6692 vnet_pf_init(void *unused __unused)
6693 {
6694 
6695 	pf_load_vnet();
6696 }
6697 VNET_SYSINIT(vnet_pf_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
6698     vnet_pf_init, NULL);
6699 
6700 static void
6701 vnet_pf_uninit(const void *unused __unused)
6702 {
6703 
6704 	pf_unload_vnet();
6705 }
6706 SYSUNINIT(pf_unload, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND, pf_unload, NULL);
6707 VNET_SYSUNINIT(vnet_pf_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
6708     vnet_pf_uninit, NULL);
6709 
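/*
 * Module event handler. Unload is driven by SYSUNINIT(pf_unload)
 * rather than handled here, so MOD_UNLOAD is a no-op.
 */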
6710 static int
6711 pf_modevent(module_t mod, int type, void *data)
6712 {
6713 	int error = 0;
6714 
6715 	switch (type) {
6716 	case MOD_LOAD:
6717 		error = pf_load();
6718 		pf_nl_register();
6719 		break;
6720 	case MOD_UNLOAD:
6721 		/* Handled in SYSUNINIT(pf_unload) to ensure it's done after
6722 		 * the vnet_pf_uninit()s. */
6723 		break;
6724 	default:
6725 		error = EINVAL;
6726 		break;
6727 	}
6728 
6729 	return (error);
6730 }
6731 
6732 static moduledata_t pf_mod = {
6733 	"pf",
6734 	pf_modevent,
6735 	0
6736 };
6737 
6738 DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND);
6739 MODULE_DEPEND(pf, netlink, 1, 1, 1);
6740 MODULE_VERSION(pf, PF_MODVER);
6741