xref: /freebsd/sys/netpfil/pf/pf_ioctl.c (revision 1fc0dac54cb444d6c22102d7bbc23545de459e0f)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2001 Daniel Hartmeier
5  * Copyright (c) 2002,2003 Henning Brauer
6  * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  *
13  *    - Redistributions of source code must retain the above copyright
14  *      notice, this list of conditions and the following disclaimer.
15  *    - Redistributions in binary form must reproduce the above
16  *      copyright notice, this list of conditions and the following
17  *      disclaimer in the documentation and/or other materials provided
18  *      with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
30  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  *
33  * Effort sponsored in part by the Defense Advanced Research Projects
34  * Agency (DARPA) and Air Force Research Laboratory, Air Force
35  * Materiel Command, USAF, under agreement number F30602-01-2-0537.
36  *
37  *	$OpenBSD: pf_ioctl.c,v 1.213 2009/02/15 21:46:12 mbalmer Exp $
38  */
39 
40 #include <sys/cdefs.h>
41 #include "opt_inet.h"
42 #include "opt_inet6.h"
43 #include "opt_bpf.h"
44 #include "opt_pf.h"
45 
46 #include <sys/param.h>
47 #include <sys/_bitset.h>
48 #include <sys/bitset.h>
49 #include <sys/bus.h>
50 #include <sys/conf.h>
51 #include <sys/endian.h>
52 #include <sys/fcntl.h>
53 #include <sys/filio.h>
54 #include <sys/hash.h>
55 #include <sys/interrupt.h>
56 #include <sys/jail.h>
57 #include <sys/kernel.h>
58 #include <sys/kthread.h>
59 #include <sys/lock.h>
60 #include <sys/mbuf.h>
61 #include <sys/module.h>
62 #include <sys/nv.h>
63 #include <sys/proc.h>
64 #include <sys/sdt.h>
65 #include <sys/smp.h>
66 #include <sys/socket.h>
67 #include <sys/sysctl.h>
68 #include <sys/md5.h>
69 #include <sys/ucred.h>
70 
71 #include <net/if.h>
72 #include <net/if_var.h>
73 #include <net/if_private.h>
74 #include <net/vnet.h>
75 #include <net/route.h>
76 #include <net/pfil.h>
77 #include <net/pfvar.h>
78 #include <net/if_pfsync.h>
79 #include <net/if_pflog.h>
80 
81 #include <netinet/in.h>
82 #include <netinet/ip.h>
83 #include <netinet/ip_var.h>
84 #include <netinet6/ip6_var.h>
85 #include <netinet/ip_icmp.h>
86 #include <netpfil/pf/pf_nl.h>
87 #include <netpfil/pf/pf_nv.h>
88 
89 #ifdef INET6
90 #include <netinet/ip6.h>
91 #endif /* INET6 */
92 
93 #ifdef ALTQ
94 #include <net/altq/altq.h>
95 #endif
96 
97 SDT_PROBE_DEFINE3(pf, ioctl, ioctl, error, "int", "int", "int");
98 SDT_PROBE_DEFINE3(pf, ioctl, function, error, "char *", "int", "int");
99 SDT_PROBE_DEFINE2(pf, ioctl, addrule, error, "int", "int");
100 SDT_PROBE_DEFINE2(pf, ioctl, nvchk, error, "int", "int");
101 
102 static struct pf_kpool	*pf_get_kpool(const char *, u_int32_t, u_int8_t,
103 			    u_int32_t, u_int8_t, u_int8_t, u_int8_t);
104 
105 static void		 pf_mv_kpool(struct pf_kpalist *, struct pf_kpalist *);
106 static void		 pf_empty_kpool(struct pf_kpalist *);
107 static int		 pfioctl(struct cdev *, u_long, caddr_t, int,
108 			    struct thread *);
109 static int		 pf_begin_eth(uint32_t *, const char *);
110 static void		 pf_rollback_eth_cb(struct epoch_context *);
111 static int		 pf_rollback_eth(uint32_t, const char *);
112 static int		 pf_commit_eth(uint32_t, const char *);
113 static void		 pf_free_eth_rule(struct pf_keth_rule *);
114 #ifdef ALTQ
115 static int		 pf_begin_altq(u_int32_t *);
116 static int		 pf_rollback_altq(u_int32_t);
117 static int		 pf_commit_altq(u_int32_t);
118 static int		 pf_enable_altq(struct pf_altq *);
119 static int		 pf_disable_altq(struct pf_altq *);
120 static uint16_t		 pf_qname2qid(const char *);
121 static void		 pf_qid_unref(uint16_t);
122 #endif /* ALTQ */
123 static int		 pf_begin_rules(u_int32_t *, int, const char *);
124 static int		 pf_rollback_rules(u_int32_t, int, char *);
125 static int		 pf_setup_pfsync_matching(struct pf_kruleset *);
126 static void		 pf_hash_rule_rolling(MD5_CTX *, struct pf_krule *);
127 static void		 pf_hash_rule(struct pf_krule *);
128 static void		 pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
129 static int		 pf_commit_rules(u_int32_t, int, char *);
130 static int		 pf_addr_setup(struct pf_kruleset *,
131 			    struct pf_addr_wrap *, sa_family_t);
132 static void		 pf_src_node_copy(const struct pf_ksrc_node *,
133 			    struct pf_src_node *);
134 #ifdef ALTQ
135 static int		 pf_export_kaltq(struct pf_altq *,
136 			    struct pfioc_altq_v1 *, size_t);
137 static int		 pf_import_kaltq(struct pfioc_altq_v1 *,
138 			    struct pf_altq *, size_t);
139 #endif /* ALTQ */
140 
141 VNET_DEFINE(struct pf_krule,	pf_default_rule);
142 
143 static __inline int             pf_krule_compare(struct pf_krule *,
144 				    struct pf_krule *);
145 
146 RB_GENERATE(pf_krule_global, pf_krule, entry_global, pf_krule_compare);
147 
148 #ifdef ALTQ
149 VNET_DEFINE_STATIC(int,		pf_altq_running);
150 #define	V_pf_altq_running	VNET(pf_altq_running)
151 #endif
152 
153 #define	TAGID_MAX	 50000
154 struct pf_tagname {
155 	TAILQ_ENTRY(pf_tagname)	namehash_entries;
156 	TAILQ_ENTRY(pf_tagname)	taghash_entries;
157 	char			name[PF_TAG_NAME_SIZE];
158 	uint16_t		tag;
159 	int			ref;
160 };
161 
162 struct pf_tagset {
163 	TAILQ_HEAD(, pf_tagname)	*namehash;
164 	TAILQ_HEAD(, pf_tagname)	*taghash;
165 	unsigned int			 mask;
166 	uint32_t			 seed;
167 	BITSET_DEFINE(, TAGID_MAX)	 avail;
168 };
169 
170 VNET_DEFINE(struct pf_tagset, pf_tags);
171 #define	V_pf_tags	VNET(pf_tags)
172 static unsigned int	pf_rule_tag_hashsize;
173 #define	PF_RULE_TAG_HASH_SIZE_DEFAULT	128
174 SYSCTL_UINT(_net_pf, OID_AUTO, rule_tag_hashsize, CTLFLAG_RDTUN,
175     &pf_rule_tag_hashsize, PF_RULE_TAG_HASH_SIZE_DEFAULT,
176     "Size of pf(4) rule tag hashtable");
177 
178 #ifdef ALTQ
179 VNET_DEFINE(struct pf_tagset, pf_qids);
180 #define	V_pf_qids	VNET(pf_qids)
181 static unsigned int	pf_queue_tag_hashsize;
182 #define	PF_QUEUE_TAG_HASH_SIZE_DEFAULT	128
183 SYSCTL_UINT(_net_pf, OID_AUTO, queue_tag_hashsize, CTLFLAG_RDTUN,
184     &pf_queue_tag_hashsize, PF_QUEUE_TAG_HASH_SIZE_DEFAULT,
185     "Size of pf(4) queue tag hashtable");
186 #endif
187 VNET_DEFINE(uma_zone_t,	 pf_tag_z);
188 #define	V_pf_tag_z		 VNET(pf_tag_z)
189 static MALLOC_DEFINE(M_PFALTQ, "pf_altq", "pf(4) altq configuration db");
190 static MALLOC_DEFINE(M_PFRULE, "pf_rule", "pf(4) rules");
191 
192 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
193 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
194 #endif
195 
196 VNET_DEFINE_STATIC(bool, pf_filter_local) = false;
197 #define V_pf_filter_local	VNET(pf_filter_local)
198 SYSCTL_BOOL(_net_pf, OID_AUTO, filter_local, CTLFLAG_VNET | CTLFLAG_RW,
199     &VNET_NAME(pf_filter_local), false,
200     "Enable filtering for packets delivered to local network stack");
201 
202 #ifdef PF_DEFAULT_TO_DROP
203 VNET_DEFINE_STATIC(bool, default_to_drop) = true;
204 #else
205 VNET_DEFINE_STATIC(bool, default_to_drop);
206 #endif
207 #define	V_default_to_drop VNET(default_to_drop)
208 SYSCTL_BOOL(_net_pf, OID_AUTO, default_to_drop, CTLFLAG_RDTUN | CTLFLAG_VNET,
209     &VNET_NAME(default_to_drop), false,
210     "Make the default rule drop all packets.");
211 
212 static void		 pf_init_tagset(struct pf_tagset *, unsigned int *,
213 			    unsigned int);
214 static void		 pf_cleanup_tagset(struct pf_tagset *);
215 static uint16_t		 tagname2hashindex(const struct pf_tagset *, const char *);
216 static uint16_t		 tag2hashindex(const struct pf_tagset *, uint16_t);
217 static u_int16_t	 tagname2tag(struct pf_tagset *, const char *);
218 static u_int16_t	 pf_tagname2tag(const char *);
219 static void		 tag_unref(struct pf_tagset *, u_int16_t);
220 
221 #define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x
222 
223 struct cdev *pf_dev;
224 
225 /*
226  * XXX - These are new and need to be checked when moving to a new version
227  */
228 static void		 pf_clear_all_states(void);
229 static int		 pf_killstates_row(struct pf_kstate_kill *,
230 			    struct pf_idhash *);
231 static int		 pf_killstates_nv(struct pfioc_nv *);
232 static int		 pf_clearstates_nv(struct pfioc_nv *);
233 static int		 pf_getstate(struct pfioc_nv *);
234 static int		 pf_getstatus(struct pfioc_nv *);
235 static int		 pf_clear_tables(void);
236 static void		 pf_clear_srcnodes(struct pf_ksrc_node *);
237 static void		 pf_kill_srcnodes(struct pfioc_src_node_kill *);
238 static int		 pf_keepcounters(struct pfioc_nv *);
239 static void		 pf_tbladdr_copyout(struct pf_addr_wrap *);
240 
241 /*
242  * Wrapper functions for pfil(9) hooks
243  */
244 static pfil_return_t pf_eth_check_in(struct mbuf **m, struct ifnet *ifp,
245     int flags, void *ruleset __unused, struct inpcb *inp);
246 static pfil_return_t pf_eth_check_out(struct mbuf **m, struct ifnet *ifp,
247     int flags, void *ruleset __unused, struct inpcb *inp);
248 #ifdef INET
249 static pfil_return_t pf_check_in(struct mbuf **m, struct ifnet *ifp,
250     int flags, void *ruleset __unused, struct inpcb *inp);
251 static pfil_return_t pf_check_out(struct mbuf **m, struct ifnet *ifp,
252     int flags, void *ruleset __unused, struct inpcb *inp);
253 #endif
254 #ifdef INET6
255 static pfil_return_t pf_check6_in(struct mbuf **m, struct ifnet *ifp,
256     int flags, void *ruleset __unused, struct inpcb *inp);
257 static pfil_return_t pf_check6_out(struct mbuf **m, struct ifnet *ifp,
258     int flags, void *ruleset __unused, struct inpcb *inp);
259 #endif
260 
261 static void		hook_pf_eth(void);
262 static void		hook_pf(void);
263 static void		dehook_pf_eth(void);
264 static void		dehook_pf(void);
265 static int		shutdown_pf(void);
266 static int		pf_load(void);
267 static void		pf_unload(void);
268 
269 static struct cdevsw pf_cdevsw = {
270 	.d_ioctl =	pfioctl,
271 	.d_name =	PF_NAME,
272 	.d_version =	D_VERSION,
273 };
274 
275 VNET_DEFINE_STATIC(bool, pf_pfil_hooked);
276 #define V_pf_pfil_hooked	VNET(pf_pfil_hooked)
277 VNET_DEFINE_STATIC(bool, pf_pfil_eth_hooked);
278 #define V_pf_pfil_eth_hooked	VNET(pf_pfil_eth_hooked)
279 
280 /*
281  * We need a flag that is neither hooked nor running to know when
282  * the VNET is "valid".  We primarily need this to control (global)
283  * external events, e.g., eventhandlers.
284  */
285 VNET_DEFINE(int, pf_vnet_active);
286 #define V_pf_vnet_active	VNET(pf_vnet_active)
287 
288 int pf_end_threads;
289 struct proc *pf_purge_proc;
290 
291 VNET_DEFINE(struct rmlock, pf_rules_lock);
292 VNET_DEFINE_STATIC(struct sx, pf_ioctl_lock);
293 #define	V_pf_ioctl_lock		VNET(pf_ioctl_lock)
294 struct sx			pf_end_lock;
295 
296 /* pfsync */
297 VNET_DEFINE(pfsync_state_import_t *, pfsync_state_import_ptr);
298 VNET_DEFINE(pfsync_insert_state_t *, pfsync_insert_state_ptr);
299 VNET_DEFINE(pfsync_update_state_t *, pfsync_update_state_ptr);
300 VNET_DEFINE(pfsync_delete_state_t *, pfsync_delete_state_ptr);
301 VNET_DEFINE(pfsync_clear_states_t *, pfsync_clear_states_ptr);
302 VNET_DEFINE(pfsync_defer_t *, pfsync_defer_ptr);
303 VNET_DEFINE(pflow_export_state_t *, pflow_export_state_ptr);
304 pfsync_detach_ifnet_t *pfsync_detach_ifnet_ptr;
305 
306 /* pflog */
307 pflog_packet_t			*pflog_packet_ptr = NULL;
308 
309 /*
310  * Copy a user-provided string, returning an error if truncation would occur.
311  * Avoid scanning past "sz" bytes in the source string since there's no
312  * guarantee that it's nul-terminated.
313  */
314 static int
315 pf_user_strcpy(char *dst, const char *src, size_t sz)
316 {
317 	if (strnlen(src, sz) == sz)
318 		return (EINVAL);
319 	(void)strlcpy(dst, src, sz);
320 	return (0);
321 }
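/*
 * A minimal sketch (not compiled in) of the intended calling pattern:
 * the ioctl handlers below pass user-supplied, possibly unterminated
 * buffers here and propagate EINVAL instead of silently truncating.
 * The helper and parameter names in this sketch are illustrative only.
 */
#if 0
static int
example_copy_name(char *dst, size_t dstsz, const char *user_buf)
{
	/* Fails with EINVAL if no NUL appears in the first dstsz bytes. */
	return (pf_user_strcpy(dst, user_buf, dstsz));
}
#endif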
322 
323 static void
324 pfattach_vnet(void)
325 {
326 	u_int32_t *my_timeout = V_pf_default_rule.timeout;
327 
328 	bzero(&V_pf_status, sizeof(V_pf_status));
329 
330 	pf_initialize();
331 	pfr_initialize();
332 	pfi_initialize_vnet();
333 	pf_normalize_init();
334 	pf_syncookies_init();
335 
336 	V_pf_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
337 	V_pf_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT;
338 
339 	RB_INIT(&V_pf_anchors);
340 	pf_init_kruleset(&pf_main_ruleset);
341 
342 	pf_init_keth(V_pf_keth);
343 
344 	/* default rule should never be garbage collected */
345 	V_pf_default_rule.entries.tqe_prev = &V_pf_default_rule.entries.tqe_next;
346 	V_pf_default_rule.action = V_default_to_drop ? PF_DROP : PF_PASS;
347 	V_pf_default_rule.nr = -1;
348 	V_pf_default_rule.rtableid = -1;
349 
350 	pf_counter_u64_init(&V_pf_default_rule.evaluations, M_WAITOK);
351 	for (int i = 0; i < 2; i++) {
352 		pf_counter_u64_init(&V_pf_default_rule.packets[i], M_WAITOK);
353 		pf_counter_u64_init(&V_pf_default_rule.bytes[i], M_WAITOK);
354 	}
355 	V_pf_default_rule.states_cur = counter_u64_alloc(M_WAITOK);
356 	V_pf_default_rule.states_tot = counter_u64_alloc(M_WAITOK);
357 	V_pf_default_rule.src_nodes = counter_u64_alloc(M_WAITOK);
358 
359 	V_pf_default_rule.timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
360 	    M_WAITOK | M_ZERO);
361 
362 #ifdef PF_WANT_32_TO_64_COUNTER
363 	V_pf_kifmarker = malloc(sizeof(*V_pf_kifmarker), PFI_MTYPE, M_WAITOK | M_ZERO);
364 	V_pf_rulemarker = malloc(sizeof(*V_pf_rulemarker), M_PFRULE, M_WAITOK | M_ZERO);
365 	PF_RULES_WLOCK();
366 	LIST_INSERT_HEAD(&V_pf_allkiflist, V_pf_kifmarker, pfik_allkiflist);
367 	LIST_INSERT_HEAD(&V_pf_allrulelist, &V_pf_default_rule, allrulelist);
368 	V_pf_allrulecount++;
369 	LIST_INSERT_HEAD(&V_pf_allrulelist, V_pf_rulemarker, allrulelist);
370 	PF_RULES_WUNLOCK();
371 #endif
372 
373 	/* initialize default timeouts */
374 	my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
375 	my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
376 	my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
377 	my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
378 	my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
379 	my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
380 	my_timeout[PFTM_SCTP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
381 	my_timeout[PFTM_SCTP_OPENING] = PFTM_TCP_OPENING_VAL;
382 	my_timeout[PFTM_SCTP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
383 	my_timeout[PFTM_SCTP_CLOSING] = PFTM_TCP_CLOSING_VAL;
384 	my_timeout[PFTM_SCTP_CLOSED] = PFTM_TCP_CLOSED_VAL;
385 	my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
386 	my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
387 	my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
388 	my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
389 	my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
390 	my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
391 	my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
392 	my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
393 	my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
394 	my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
395 	my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
396 	my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
397 	my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
398 	my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;
399 
400 	V_pf_status.debug = PF_DEBUG_URGENT;
401 	/*
402 	 * XXX This is different from OpenBSD, where reassembly is enabled by
403 	 * default. In FreeBSD we expect people to still use scrub rules and
404 	 * switch to the new syntax later. Only when they switch must they
405 	 * explicitly enable reassembly. We could change the default once the
406 	 * scrub rule functionality is hopefully removed some day in the future.
407 	 */
408 	V_pf_status.reass = 0;
409 
410 	V_pf_pfil_hooked = false;
411 	V_pf_pfil_eth_hooked = false;
412 
413 	/* XXX do our best to avoid a conflict */
414 	V_pf_status.hostid = arc4random();
415 
416 	for (int i = 0; i < PFRES_MAX; i++)
417 		V_pf_status.counters[i] = counter_u64_alloc(M_WAITOK);
418 	for (int i = 0; i < KLCNT_MAX; i++)
419 		V_pf_status.lcounters[i] = counter_u64_alloc(M_WAITOK);
420 	for (int i = 0; i < FCNT_MAX; i++)
421 		pf_counter_u64_init(&V_pf_status.fcounters[i], M_WAITOK);
422 	for (int i = 0; i < SCNT_MAX; i++)
423 		V_pf_status.scounters[i] = counter_u64_alloc(M_WAITOK);
424 
425 	if (swi_add(&V_pf_swi_ie, "pf send", pf_intr, curvnet, SWI_NET,
426 	    INTR_MPSAFE, &V_pf_swi_cookie) != 0)
427 		/* XXXGL: leaked all above. */
428 		return;
429 }
430 
431 static struct pf_kpool *
432 pf_get_kpool(const char *anchor, u_int32_t ticket, u_int8_t rule_action,
433     u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
434     u_int8_t check_ticket)
435 {
436 	struct pf_kruleset	*ruleset;
437 	struct pf_krule		*rule;
438 	int			 rs_num;
439 
440 	ruleset = pf_find_kruleset(anchor);
441 	if (ruleset == NULL)
442 		return (NULL);
443 	rs_num = pf_get_ruleset_number(rule_action);
444 	if (rs_num >= PF_RULESET_MAX)
445 		return (NULL);
446 	if (active) {
447 		if (check_ticket && ticket !=
448 		    ruleset->rules[rs_num].active.ticket)
449 			return (NULL);
450 		if (r_last)
451 			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
452 			    pf_krulequeue);
453 		else
454 			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
455 	} else {
456 		if (check_ticket && ticket !=
457 		    ruleset->rules[rs_num].inactive.ticket)
458 			return (NULL);
459 		if (r_last)
460 			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
461 			    pf_krulequeue);
462 		else
463 			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
464 	}
465 	if (!r_last) {
466 		while ((rule != NULL) && (rule->nr != rule_number))
467 			rule = TAILQ_NEXT(rule, entries);
468 	}
469 	if (rule == NULL)
470 		return (NULL);
471 
472 	return (&rule->rpool);
473 }
474 
475 static void
476 pf_mv_kpool(struct pf_kpalist *poola, struct pf_kpalist *poolb)
477 {
478 	struct pf_kpooladdr	*mv_pool_pa;
479 
480 	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
481 		TAILQ_REMOVE(poola, mv_pool_pa, entries);
482 		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
483 	}
484 }
485 
486 static void
487 pf_empty_kpool(struct pf_kpalist *poola)
488 {
489 	struct pf_kpooladdr *pa;
490 
491 	while ((pa = TAILQ_FIRST(poola)) != NULL) {
492 		switch (pa->addr.type) {
493 		case PF_ADDR_DYNIFTL:
494 			pfi_dynaddr_remove(pa->addr.p.dyn);
495 			break;
496 		case PF_ADDR_TABLE:
497 			/* XXX: this could be unfinished pooladdr on pabuf */
498 			if (pa->addr.p.tbl != NULL)
499 				pfr_detach_table(pa->addr.p.tbl);
500 			break;
501 		}
502 		if (pa->kif)
503 			pfi_kkif_unref(pa->kif);
504 		TAILQ_REMOVE(poola, pa, entries);
505 		free(pa, M_PFRULE);
506 	}
507 }
508 
509 static void
510 pf_unlink_rule_locked(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
511 {
512 
513 	PF_RULES_WASSERT();
514 	PF_UNLNKDRULES_ASSERT();
515 
516 	TAILQ_REMOVE(rulequeue, rule, entries);
517 
518 	rule->rule_ref |= PFRULE_REFS;
519 	TAILQ_INSERT_TAIL(&V_pf_unlinked_rules, rule, entries);
520 }
521 
522 static void
523 pf_unlink_rule(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
524 {
525 
526 	PF_RULES_WASSERT();
527 
528 	PF_UNLNKDRULES_LOCK();
529 	pf_unlink_rule_locked(rulequeue, rule);
530 	PF_UNLNKDRULES_UNLOCK();
531 }
532 
533 static void
534 pf_free_eth_rule(struct pf_keth_rule *rule)
535 {
536 	PF_RULES_WASSERT();
537 
538 	if (rule == NULL)
539 		return;
540 
541 	if (rule->tag)
542 		tag_unref(&V_pf_tags, rule->tag);
543 	if (rule->match_tag)
544 		tag_unref(&V_pf_tags, rule->match_tag);
545 #ifdef ALTQ
546 	pf_qid_unref(rule->qid);
547 #endif
548 
549 	if (rule->bridge_to)
550 		pfi_kkif_unref(rule->bridge_to);
551 	if (rule->kif)
552 		pfi_kkif_unref(rule->kif);
553 
554 	if (rule->ipsrc.addr.type == PF_ADDR_TABLE)
555 		pfr_detach_table(rule->ipsrc.addr.p.tbl);
556 	if (rule->ipdst.addr.type == PF_ADDR_TABLE)
557 		pfr_detach_table(rule->ipdst.addr.p.tbl);
558 
559 	counter_u64_free(rule->evaluations);
560 	for (int i = 0; i < 2; i++) {
561 		counter_u64_free(rule->packets[i]);
562 		counter_u64_free(rule->bytes[i]);
563 	}
564 	uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp);
565 	pf_keth_anchor_remove(rule);
566 
567 	free(rule, M_PFRULE);
568 }
569 
570 void
571 pf_free_rule(struct pf_krule *rule)
572 {
573 
574 	PF_RULES_WASSERT();
575 	PF_CONFIG_ASSERT();
576 
577 	if (rule->tag)
578 		tag_unref(&V_pf_tags, rule->tag);
579 	if (rule->match_tag)
580 		tag_unref(&V_pf_tags, rule->match_tag);
581 #ifdef ALTQ
582 	if (rule->pqid != rule->qid)
583 		pf_qid_unref(rule->pqid);
584 	pf_qid_unref(rule->qid);
585 #endif
586 	switch (rule->src.addr.type) {
587 	case PF_ADDR_DYNIFTL:
588 		pfi_dynaddr_remove(rule->src.addr.p.dyn);
589 		break;
590 	case PF_ADDR_TABLE:
591 		pfr_detach_table(rule->src.addr.p.tbl);
592 		break;
593 	}
594 	switch (rule->dst.addr.type) {
595 	case PF_ADDR_DYNIFTL:
596 		pfi_dynaddr_remove(rule->dst.addr.p.dyn);
597 		break;
598 	case PF_ADDR_TABLE:
599 		pfr_detach_table(rule->dst.addr.p.tbl);
600 		break;
601 	}
602 	if (rule->overload_tbl)
603 		pfr_detach_table(rule->overload_tbl);
604 	if (rule->kif)
605 		pfi_kkif_unref(rule->kif);
606 	pf_kanchor_remove(rule);
607 	pf_empty_kpool(&rule->rpool.list);
608 
609 	pf_krule_free(rule);
610 }
611 
612 static void
613 pf_init_tagset(struct pf_tagset *ts, unsigned int *tunable_size,
614     unsigned int default_size)
615 {
616 	unsigned int i;
617 	unsigned int hashsize;
618 
619 	if (*tunable_size == 0 || !powerof2(*tunable_size))
620 		*tunable_size = default_size;
621 
622 	hashsize = *tunable_size;
623 	ts->namehash = mallocarray(hashsize, sizeof(*ts->namehash), M_PFHASH,
624 	    M_WAITOK);
625 	ts->taghash = mallocarray(hashsize, sizeof(*ts->taghash), M_PFHASH,
626 	    M_WAITOK);
627 	ts->mask = hashsize - 1;
628 	ts->seed = arc4random();
629 	for (i = 0; i < hashsize; i++) {
630 		TAILQ_INIT(&ts->namehash[i]);
631 		TAILQ_INIT(&ts->taghash[i]);
632 	}
633 	BIT_FILL(TAGID_MAX, &ts->avail);
634 }
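/*
 * Note: the index computations in tagname2hashindex() and
 * tag2hashindex() below rely on hashsize being a power of two, which
 * the tunable check above enforces.  For such sizes, masking equals
 * modulo reduction: h & (hashsize - 1) == h % hashsize.  With the
 * default size of 128 the mask is 0x7f.
 */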
635 
636 static void
637 pf_cleanup_tagset(struct pf_tagset *ts)
638 {
639 	unsigned int i;
640 	unsigned int hashsize;
641 	struct pf_tagname *t, *tmp;
642 
643 	/*
644 	 * Only need to clean up one of the hashes as each tag is hashed
645 	 * into each table.
646 	 */
647 	hashsize = ts->mask + 1;
648 	for (i = 0; i < hashsize; i++)
649 		TAILQ_FOREACH_SAFE(t, &ts->namehash[i], namehash_entries, tmp)
650 			uma_zfree(V_pf_tag_z, t);
651 
652 	free(ts->namehash, M_PFHASH);
653 	free(ts->taghash, M_PFHASH);
654 }
655 
656 static uint16_t
657 tagname2hashindex(const struct pf_tagset *ts, const char *tagname)
658 {
659 	size_t len;
660 
661 	len = strnlen(tagname, PF_TAG_NAME_SIZE - 1);
662 	return (murmur3_32_hash(tagname, len, ts->seed) & ts->mask);
663 }
664 
665 static uint16_t
666 tag2hashindex(const struct pf_tagset *ts, uint16_t tag)
667 {
668 
669 	return (tag & ts->mask);
670 }
671 
672 static u_int16_t
673 tagname2tag(struct pf_tagset *ts, const char *tagname)
674 {
675 	struct pf_tagname	*tag;
676 	u_int32_t		 index;
677 	u_int16_t		 new_tagid;
678 
679 	PF_RULES_WASSERT();
680 
681 	index = tagname2hashindex(ts, tagname);
682 	TAILQ_FOREACH(tag, &ts->namehash[index], namehash_entries)
683 		if (strcmp(tagname, tag->name) == 0) {
684 			tag->ref++;
685 			return (tag->tag);
686 		}
687 
688 	/*
689 	 * new entry
690 	 *
691 	 * to avoid fragmentation, we do a linear search from the beginning
692 	 * and take the first free slot we find.
693 	 */
694 	new_tagid = BIT_FFS(TAGID_MAX, &ts->avail);
695 	/*
696 	 * Tags are 1-based, with valid tags in the range [1..TAGID_MAX].
697 	 * BIT_FFS() returns a 1-based bit number, with 0 indicating no bits
698 	 * set.  It may also return a bit number greater than TAGID_MAX due
699 	 * to rounding of the number of bits in the vector up to a multiple
700 	 * of the vector word size at declaration/allocation time.
701 	 */
702 	if ((new_tagid == 0) || (new_tagid > TAGID_MAX))
703 		return (0);
704 
705 	/* Mark the tag as in use.  Bits are 0-based for BIT_CLR() */
706 	BIT_CLR(TAGID_MAX, new_tagid - 1, &ts->avail);
707 
708 	/* allocate and fill new struct pf_tagname */
709 	tag = uma_zalloc(V_pf_tag_z, M_NOWAIT);
710 	if (tag == NULL)
711 		return (0);
712 	strlcpy(tag->name, tagname, sizeof(tag->name));
713 	tag->tag = new_tagid;
714 	tag->ref = 1;
715 
716 	/* Insert into namehash */
717 	TAILQ_INSERT_TAIL(&ts->namehash[index], tag, namehash_entries);
718 
719 	/* Insert into taghash */
720 	index = tag2hashindex(ts, new_tagid);
721 	TAILQ_INSERT_TAIL(&ts->taghash[index], tag, taghash_entries);
722 
723 	return (tag->tag);
724 }
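/*
 * Worked example of the allocation idiom above: with tags 1 and 2 in
 * use, bits 0 and 1 of ts->avail are clear, so BIT_FFS() returns 3
 * (1-based) and BIT_CLR() then clears bit 2 (0-based).  When the last
 * reference is dropped, tag_unref() sets bit 2 again, making tag 3 the
 * first candidate for reuse.
 */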
725 
726 static void
727 tag_unref(struct pf_tagset *ts, u_int16_t tag)
728 {
729 	struct pf_tagname	*t;
730 	uint16_t		 index;
731 
732 	PF_RULES_WASSERT();
733 
734 	index = tag2hashindex(ts, tag);
735 	TAILQ_FOREACH(t, &ts->taghash[index], taghash_entries)
736 		if (tag == t->tag) {
737 			if (--t->ref == 0) {
738 				TAILQ_REMOVE(&ts->taghash[index], t,
739 				    taghash_entries);
740 				index = tagname2hashindex(ts, t->name);
741 				TAILQ_REMOVE(&ts->namehash[index], t,
742 				    namehash_entries);
743 				/* Bits are 0-based for BIT_SET() */
744 				BIT_SET(TAGID_MAX, tag - 1, &ts->avail);
745 				uma_zfree(V_pf_tag_z, t);
746 			}
747 			break;
748 		}
749 }
750 
751 static uint16_t
752 pf_tagname2tag(const char *tagname)
753 {
754 	return (tagname2tag(&V_pf_tags, tagname));
755 }
756 
757 static int
758 pf_begin_eth(uint32_t *ticket, const char *anchor)
759 {
760 	struct pf_keth_rule *rule, *tmp;
761 	struct pf_keth_ruleset *rs;
762 
763 	PF_RULES_WASSERT();
764 
765 	rs = pf_find_or_create_keth_ruleset(anchor);
766 	if (rs == NULL)
767 		return (EINVAL);
768 
769 	/* Purge old inactive rules. */
770 	TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries,
771 	    tmp) {
772 		TAILQ_REMOVE(rs->inactive.rules, rule,
773 		    entries);
774 		pf_free_eth_rule(rule);
775 	}
776 
777 	*ticket = ++rs->inactive.ticket;
778 	rs->inactive.open = 1;
779 
780 	return (0);
781 }
782 
783 static void
784 pf_rollback_eth_cb(struct epoch_context *ctx)
785 {
786 	struct pf_keth_ruleset *rs;
787 
788 	rs = __containerof(ctx, struct pf_keth_ruleset, epoch_ctx);
789 
790 	CURVNET_SET(rs->vnet);
791 
792 	PF_RULES_WLOCK();
793 	pf_rollback_eth(rs->inactive.ticket,
794 	    rs->anchor ? rs->anchor->path : "");
795 	PF_RULES_WUNLOCK();
796 
797 	CURVNET_RESTORE();
798 }
799 
800 static int
801 pf_rollback_eth(uint32_t ticket, const char *anchor)
802 {
803 	struct pf_keth_rule *rule, *tmp;
804 	struct pf_keth_ruleset *rs;
805 
806 	PF_RULES_WASSERT();
807 
808 	rs = pf_find_keth_ruleset(anchor);
809 	if (rs == NULL)
810 		return (EINVAL);
811 
812 	if (!rs->inactive.open ||
813 	    ticket != rs->inactive.ticket)
814 		return (0);
815 
816 	/* Purge old inactive rules. */
817 	TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries,
818 	    tmp) {
819 		TAILQ_REMOVE(rs->inactive.rules, rule, entries);
820 		pf_free_eth_rule(rule);
821 	}
822 
823 	rs->inactive.open = 0;
824 
825 	pf_remove_if_empty_keth_ruleset(rs);
826 
827 	return (0);
828 }
829 
830 #define	PF_SET_SKIP_STEPS(i)					\
831 	do {							\
832 		while (head[i] != cur) {			\
833 			head[i]->skip[i].ptr = cur;		\
834 			head[i] = TAILQ_NEXT(head[i], entries);	\
835 		}						\
836 	} while (0)
837 
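/*
 * Compute "skip steps" for an Ethernet ruleset: each rule caches, per
 * comparison field, a pointer to the next rule that differs in that
 * field.  When a field comparison fails during evaluation, the whole
 * run of rules that would fail for the same reason can then be skipped
 * at once instead of being tested one by one.
 */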
838 static void
839 pf_eth_calc_skip_steps(struct pf_keth_ruleq *rules)
840 {
841 	struct pf_keth_rule *cur, *prev, *head[PFE_SKIP_COUNT];
842 	int i;
843 
844 	cur = TAILQ_FIRST(rules);
845 	prev = cur;
846 	for (i = 0; i < PFE_SKIP_COUNT; ++i)
847 		head[i] = cur;
848 	while (cur != NULL) {
849 		if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
850 			PF_SET_SKIP_STEPS(PFE_SKIP_IFP);
851 		if (cur->direction != prev->direction)
852 			PF_SET_SKIP_STEPS(PFE_SKIP_DIR);
853 		if (cur->proto != prev->proto)
854 			PF_SET_SKIP_STEPS(PFE_SKIP_PROTO);
855 		if (memcmp(&cur->src, &prev->src, sizeof(cur->src)) != 0)
856 			PF_SET_SKIP_STEPS(PFE_SKIP_SRC_ADDR);
857 		if (memcmp(&cur->dst, &prev->dst, sizeof(cur->dst)) != 0)
858 			PF_SET_SKIP_STEPS(PFE_SKIP_DST_ADDR);
859 		if (cur->ipsrc.neg != prev->ipsrc.neg ||
860 		    pf_addr_wrap_neq(&cur->ipsrc.addr, &prev->ipsrc.addr))
861 			PF_SET_SKIP_STEPS(PFE_SKIP_SRC_IP_ADDR);
862 		if (cur->ipdst.neg != prev->ipdst.neg ||
863 		    pf_addr_wrap_neq(&cur->ipdst.addr, &prev->ipdst.addr))
864 			PF_SET_SKIP_STEPS(PFE_SKIP_DST_IP_ADDR);
865 
866 		prev = cur;
867 		cur = TAILQ_NEXT(cur, entries);
868 	}
869 	for (i = 0; i < PFE_SKIP_COUNT; ++i)
870 		PF_SET_SKIP_STEPS(i);
871 }
872 
873 static int
874 pf_commit_eth(uint32_t ticket, const char *anchor)
875 {
876 	struct pf_keth_ruleq *rules;
877 	struct pf_keth_ruleset *rs;
878 
879 	rs = pf_find_keth_ruleset(anchor);
880 	if (rs == NULL) {
881 		return (EINVAL);
882 	}
883 
884 	if (!rs->inactive.open ||
885 	    ticket != rs->inactive.ticket)
886 		return (EBUSY);
887 
888 	PF_RULES_WASSERT();
889 
890 	pf_eth_calc_skip_steps(rs->inactive.rules);
891 
892 	rules = rs->active.rules;
893 	ck_pr_store_ptr(&rs->active.rules, rs->inactive.rules);
894 	rs->inactive.rules = rules;
895 	rs->inactive.ticket = rs->active.ticket;
896 
897 	/* Clean up inactive rules (i.e. previously active rules), only when
898 	 * we're sure they're no longer used. */
899 	NET_EPOCH_CALL(pf_rollback_eth_cb, &rs->epoch_ctx);
900 
901 	return (0);
902 }
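/*
 * A minimal sketch (not compiled in) of the ticket-based transaction
 * protocol implemented by the begin/rollback/commit functions above:
 * stage rules on the inactive list under the ticket returned by
 * pf_begin_eth(), then commit; a stale ticket makes pf_commit_eth()
 * fail with EBUSY.  Rule construction is elided.
 */
#if 0
static int
example_eth_transaction(void)
{
	uint32_t ticket;
	int error;

	PF_RULES_WLOCK();
	error = pf_begin_eth(&ticket, "");
	if (error == 0) {
		/* ... append new rules to rs->inactive.rules ... */
		error = pf_commit_eth(ticket, "");
		if (error != 0)
			(void)pf_rollback_eth(ticket, "");
	}
	PF_RULES_WUNLOCK();
	return (error);
}
#endif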
903 
904 #ifdef ALTQ
905 static uint16_t
906 pf_qname2qid(const char *qname)
907 {
908 	return (tagname2tag(&V_pf_qids, qname));
909 }
910 
911 static void
912 pf_qid_unref(uint16_t qid)
913 {
914 	tag_unref(&V_pf_qids, qid);
915 }
916 
917 static int
918 pf_begin_altq(u_int32_t *ticket)
919 {
920 	struct pf_altq	*altq, *tmp;
921 	int		 error = 0;
922 
923 	PF_RULES_WASSERT();
924 
925 	/* Purge the old altq lists */
926 	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
927 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
928 			/* detach and destroy the discipline */
929 			error = altq_remove(altq);
930 		}
931 		free(altq, M_PFALTQ);
932 	}
933 	TAILQ_INIT(V_pf_altq_ifs_inactive);
934 	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
935 		pf_qid_unref(altq->qid);
936 		free(altq, M_PFALTQ);
937 	}
938 	TAILQ_INIT(V_pf_altqs_inactive);
939 	if (error)
940 		return (error);
941 	*ticket = ++V_ticket_altqs_inactive;
942 	V_altqs_inactive_open = 1;
943 	return (0);
944 }
945 
946 static int
947 pf_rollback_altq(u_int32_t ticket)
948 {
949 	struct pf_altq	*altq, *tmp;
950 	int		 error = 0;
951 
952 	PF_RULES_WASSERT();
953 
954 	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
955 		return (0);
956 	/* Purge the old altq lists */
957 	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
958 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
959 			/* detach and destroy the discipline */
960 			error = altq_remove(altq);
961 		}
962 		free(altq, M_PFALTQ);
963 	}
964 	TAILQ_INIT(V_pf_altq_ifs_inactive);
965 	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
966 		pf_qid_unref(altq->qid);
967 		free(altq, M_PFALTQ);
968 	}
969 	TAILQ_INIT(V_pf_altqs_inactive);
970 	V_altqs_inactive_open = 0;
971 	return (error);
972 }
973 
974 static int
975 pf_commit_altq(u_int32_t ticket)
976 {
977 	struct pf_altqqueue	*old_altqs, *old_altq_ifs;
978 	struct pf_altq		*altq, *tmp;
979 	int			 err, error = 0;
980 
981 	PF_RULES_WASSERT();
982 
983 	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
984 		return (EBUSY);
985 
986 	/* swap altqs, keep the old. */
987 	old_altqs = V_pf_altqs_active;
988 	old_altq_ifs = V_pf_altq_ifs_active;
989 	V_pf_altqs_active = V_pf_altqs_inactive;
990 	V_pf_altq_ifs_active = V_pf_altq_ifs_inactive;
991 	V_pf_altqs_inactive = old_altqs;
992 	V_pf_altq_ifs_inactive = old_altq_ifs;
993 	V_ticket_altqs_active = V_ticket_altqs_inactive;
994 
995 	/* Attach new disciplines */
996 	TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
997 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
998 			/* attach the discipline */
999 			error = altq_pfattach(altq);
1000 			if (error == 0 && V_pf_altq_running)
1001 				error = pf_enable_altq(altq);
1002 			if (error != 0)
1003 				return (error);
1004 		}
1005 	}
1006 
1007 	/* Purge the old altq lists */
1008 	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
1009 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
1010 			/* detach and destroy the discipline */
1011 			if (V_pf_altq_running)
1012 				error = pf_disable_altq(altq);
1013 			err = altq_pfdetach(altq);
1014 			if (err != 0 && error == 0)
1015 				error = err;
1016 			err = altq_remove(altq);
1017 			if (err != 0 && error == 0)
1018 				error = err;
1019 		}
1020 		free(altq, M_PFALTQ);
1021 	}
1022 	TAILQ_INIT(V_pf_altq_ifs_inactive);
1023 	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
1024 		pf_qid_unref(altq->qid);
1025 		free(altq, M_PFALTQ);
1026 	}
1027 	TAILQ_INIT(V_pf_altqs_inactive);
1028 
1029 	V_altqs_inactive_open = 0;
1030 	return (error);
1031 }
1032 
1033 static int
1034 pf_enable_altq(struct pf_altq *altq)
1035 {
1036 	struct ifnet		*ifp;
1037 	struct tb_profile	 tb;
1038 	int			 error = 0;
1039 
1040 	if ((ifp = ifunit(altq->ifname)) == NULL)
1041 		return (EINVAL);
1042 
1043 	if (ifp->if_snd.altq_type != ALTQT_NONE)
1044 		error = altq_enable(&ifp->if_snd);
1045 
1046 	/* set tokenbucket regulator */
1047 	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
1048 		tb.rate = altq->ifbandwidth;
1049 		tb.depth = altq->tbrsize;
1050 		error = tbr_set(&ifp->if_snd, &tb);
1051 	}
1052 
1053 	return (error);
1054 }
1055 
1056 static int
1057 pf_disable_altq(struct pf_altq *altq)
1058 {
1059 	struct ifnet		*ifp;
1060 	struct tb_profile	 tb;
1061 	int			 error;
1062 
1063 	if ((ifp = ifunit(altq->ifname)) == NULL)
1064 		return (EINVAL);
1065 
1066 	/*
1067 	 * If the discipline is no longer referenced, it has been overridden
1068 	 * by a new one; in that case, just return.
1069 	 */
1070 	if (altq->altq_disc != ifp->if_snd.altq_disc)
1071 		return (0);
1072 
1073 	error = altq_disable(&ifp->if_snd);
1074 
1075 	if (error == 0) {
1076 		/* clear tokenbucket regulator */
1077 		tb.rate = 0;
1078 		error = tbr_set(&ifp->if_snd, &tb);
1079 	}
1080 
1081 	return (error);
1082 }
1083 
1084 static int
1085 pf_altq_ifnet_event_add(struct ifnet *ifp, int remove, u_int32_t ticket,
1086     struct pf_altq *altq)
1087 {
1088 	struct ifnet	*ifp1;
1089 	int		 error = 0;
1090 
1091 	/* Deactivate the interface in question */
1092 	altq->local_flags &= ~PFALTQ_FLAG_IF_REMOVED;
1093 	if ((ifp1 = ifunit(altq->ifname)) == NULL ||
1094 	    (remove && ifp1 == ifp)) {
1095 		altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
1096 	} else {
1097 		error = altq_add(ifp1, altq);
1098 
1099 		if (ticket != V_ticket_altqs_inactive)
1100 			error = EBUSY;
1101 
1102 		if (error)
1103 			free(altq, M_PFALTQ);
1104 	}
1105 
1106 	return (error);
1107 }
1108 
1109 void
1110 pf_altq_ifnet_event(struct ifnet *ifp, int remove)
1111 {
1112 	struct pf_altq	*a1, *a2, *a3;
1113 	u_int32_t	 ticket;
1114 	int		 error = 0;
1115 
1116 	/*
1117 	 * No need to re-evaluate the configuration for events on interfaces
1118 	 * that do not support ALTQ, as it's not possible for such
1119 	 * interfaces to be part of the configuration.
1120 	 */
1121 	if (!ALTQ_IS_READY(&ifp->if_snd))
1122 		return;
1123 
1124 	/* Interrupt userland queue modifications */
1125 	if (V_altqs_inactive_open)
1126 		pf_rollback_altq(V_ticket_altqs_inactive);
1127 
1128 	/* Start new altq ruleset */
1129 	if (pf_begin_altq(&ticket))
1130 		return;
1131 
1132 	/* Copy the current active set */
1133 	TAILQ_FOREACH(a1, V_pf_altq_ifs_active, entries) {
1134 		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
1135 		if (a2 == NULL) {
1136 			error = ENOMEM;
1137 			break;
1138 		}
1139 		bcopy(a1, a2, sizeof(struct pf_altq));
1140 
1141 		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
1142 		if (error)
1143 			break;
1144 
1145 		TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, a2, entries);
1146 	}
1147 	if (error)
1148 		goto out;
1149 	TAILQ_FOREACH(a1, V_pf_altqs_active, entries) {
1150 		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
1151 		if (a2 == NULL) {
1152 			error = ENOMEM;
1153 			break;
1154 		}
1155 		bcopy(a1, a2, sizeof(struct pf_altq));
1156 
1157 		if ((a2->qid = pf_qname2qid(a2->qname)) == 0) {
1158 			error = EBUSY;
1159 			free(a2, M_PFALTQ);
1160 			break;
1161 		}
1162 		a2->altq_disc = NULL;
1163 		TAILQ_FOREACH(a3, V_pf_altq_ifs_inactive, entries) {
1164 			if (strncmp(a3->ifname, a2->ifname,
1165 				IFNAMSIZ) == 0) {
1166 				a2->altq_disc = a3->altq_disc;
1167 				break;
1168 			}
1169 		}
1170 		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
1171 		if (error)
1172 			break;
1173 
1174 		TAILQ_INSERT_TAIL(V_pf_altqs_inactive, a2, entries);
1175 	}
1176 
1177 out:
1178 	if (error != 0)
1179 		pf_rollback_altq(ticket);
1180 	else
1181 		pf_commit_altq(ticket);
1182 }
1183 #endif /* ALTQ */
1184 
1185 static struct pf_krule_global *
1186 pf_rule_tree_alloc(int flags)
1187 {
1188 	struct pf_krule_global *tree;
1189 
1190 	tree = malloc(sizeof(struct pf_krule_global), M_TEMP, flags);
1191 	if (tree == NULL)
1192 		return (NULL);
1193 	RB_INIT(tree);
1194 	return (tree);
1195 }
1196 
1197 static void
1198 pf_rule_tree_free(struct pf_krule_global *tree)
1199 {
1200 
1201 	free(tree, M_TEMP);
1202 }
1203 
1204 static int
1205 pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
1206 {
1207 	struct pf_krule_global *tree;
1208 	struct pf_kruleset	*rs;
1209 	struct pf_krule		*rule;
1210 
1211 	PF_RULES_WASSERT();
1212 
1213 	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1214 		return (EINVAL);
1215 	tree = pf_rule_tree_alloc(M_NOWAIT);
1216 	if (tree == NULL)
1217 		return (ENOMEM);
1218 	rs = pf_find_or_create_kruleset(anchor);
1219 	if (rs == NULL) {
1220 		free(tree, M_TEMP);
1221 		return (EINVAL);
1222 	}
1223 	pf_rule_tree_free(rs->rules[rs_num].inactive.tree);
1224 	rs->rules[rs_num].inactive.tree = tree;
1225 
1226 	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
1227 		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
1228 		rs->rules[rs_num].inactive.rcount--;
1229 	}
1230 	*ticket = ++rs->rules[rs_num].inactive.ticket;
1231 	rs->rules[rs_num].inactive.open = 1;
1232 	return (0);
1233 }
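/*
 * As with the Ethernet rulesets above, the ticket returned here must
 * accompany every subsequent rule insertion and the final commit;
 * pf_commit_rules() rejects a stale or missing ticket with EBUSY, so
 * an interrupted or concurrent load cannot commit a half-built
 * inactive ruleset.
 */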
1234 
1235 static int
1236 pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
1237 {
1238 	struct pf_kruleset	*rs;
1239 	struct pf_krule		*rule;
1240 
1241 	PF_RULES_WASSERT();
1242 
1243 	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1244 		return (EINVAL);
1245 	rs = pf_find_kruleset(anchor);
1246 	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
1247 	    rs->rules[rs_num].inactive.ticket != ticket)
1248 		return (0);
1249 	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
1250 		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
1251 		rs->rules[rs_num].inactive.rcount--;
1252 	}
1253 	rs->rules[rs_num].inactive.open = 0;
1254 	return (0);
1255 }
1256 
1257 #define PF_MD5_UPD(st, elm)						\
1258 		MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))
1259 
1260 #define PF_MD5_UPD_STR(st, elm)						\
1261 		MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))
1262 
1263 #define PF_MD5_UPD_HTONL(st, elm, stor) do {				\
1264 		(stor) = htonl((st)->elm);				\
1265 		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
1266 } while (0)
1267 
1268 #define PF_MD5_UPD_HTONS(st, elm, stor) do {				\
1269 		(stor) = htons((st)->elm);				\
1270 		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
1271 } while (0)
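/*
 * The HTONL/HTONS variants feed multi-byte fields into the MD5 state
 * in network byte order, so hosts of different endianness running the
 * same ruleset compute identical checksums.  pf_setup_pfsync_matching()
 * publishes the resulting digest in V_pf_status.pf_chksum, which
 * pfsync uses to detect mismatched rulesets between peers.
 */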
1272 
1273 static void
1274 pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
1275 {
1276 	PF_MD5_UPD(pfr, addr.type);
1277 	switch (pfr->addr.type) {
1278 		case PF_ADDR_DYNIFTL:
1279 			PF_MD5_UPD(pfr, addr.v.ifname);
1280 			PF_MD5_UPD(pfr, addr.iflags);
1281 			break;
1282 		case PF_ADDR_TABLE:
1283 			PF_MD5_UPD(pfr, addr.v.tblname);
1284 			break;
1285 		case PF_ADDR_ADDRMASK:
1286 			/* XXX ignore af? */
1287 			PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
1288 			PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
1289 			break;
1290 	}
1291 
1292 	PF_MD5_UPD(pfr, port[0]);
1293 	PF_MD5_UPD(pfr, port[1]);
1294 	PF_MD5_UPD(pfr, neg);
1295 	PF_MD5_UPD(pfr, port_op);
1296 }
1297 
1298 static void
1299 pf_hash_rule_rolling(MD5_CTX *ctx, struct pf_krule *rule)
1300 {
1301 	u_int16_t x;
1302 	u_int32_t y;
1303 
1304 	pf_hash_rule_addr(ctx, &rule->src);
1305 	pf_hash_rule_addr(ctx, &rule->dst);
1306 	for (int i = 0; i < PF_RULE_MAX_LABEL_COUNT; i++)
1307 		PF_MD5_UPD_STR(rule, label[i]);
1308 	PF_MD5_UPD_STR(rule, ifname);
1309 	PF_MD5_UPD_STR(rule, match_tagname);
1310 	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
1311 	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
1312 	PF_MD5_UPD_HTONL(rule, prob, y);
1313 	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
1314 	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
1315 	PF_MD5_UPD(rule, uid.op);
1316 	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
1317 	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
1318 	PF_MD5_UPD(rule, gid.op);
1319 	PF_MD5_UPD_HTONL(rule, rule_flag, y);
1320 	PF_MD5_UPD(rule, action);
1321 	PF_MD5_UPD(rule, direction);
1322 	PF_MD5_UPD(rule, af);
1323 	PF_MD5_UPD(rule, quick);
1324 	PF_MD5_UPD(rule, ifnot);
1325 	PF_MD5_UPD(rule, match_tag_not);
1326 	PF_MD5_UPD(rule, natpass);
1327 	PF_MD5_UPD(rule, keep_state);
1328 	PF_MD5_UPD(rule, proto);
1329 	PF_MD5_UPD(rule, type);
1330 	PF_MD5_UPD(rule, code);
1331 	PF_MD5_UPD(rule, flags);
1332 	PF_MD5_UPD(rule, flagset);
1333 	PF_MD5_UPD(rule, allow_opts);
1334 	PF_MD5_UPD(rule, rt);
1335 	PF_MD5_UPD(rule, tos);
1336 	PF_MD5_UPD(rule, scrub_flags);
1337 	PF_MD5_UPD(rule, min_ttl);
1338 	PF_MD5_UPD(rule, set_tos);
1339 	if (rule->anchor != NULL)
1340 		PF_MD5_UPD_STR(rule, anchor->path);
1341 }
1342 
1343 static void
1344 pf_hash_rule(struct pf_krule *rule)
1345 {
1346 	MD5_CTX		ctx;
1347 
1348 	MD5Init(&ctx);
1349 	pf_hash_rule_rolling(&ctx, rule);
1350 	MD5Final(rule->md5sum, &ctx);
1351 }
1352 
1353 static int
1354 pf_krule_compare(struct pf_krule *a, struct pf_krule *b)
1355 {
1356 
1357 	return (memcmp(a->md5sum, b->md5sum, PF_MD5_DIGEST_LENGTH));
1358 }
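/*
 * Ordering rules by MD5 digest turns the global RB tree into a
 * content-addressed index: pf_commit_rules() below looks each newly
 * committed rule up in the old tree and, on a match, rolls the old
 * rule's evaluation, packet and byte counters into the new rule, so
 * counters survive a ruleset reload when keep_counters is set.
 */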
1359 
1360 static int
1361 pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
1362 {
1363 	struct pf_kruleset	*rs;
1364 	struct pf_krule		*rule, **old_array, *old_rule;
1365 	struct pf_krulequeue	*old_rules;
1366 	struct pf_krule_global  *old_tree;
1367 	int			 error;
1368 	u_int32_t		 old_rcount;
1369 
1370 	PF_RULES_WASSERT();
1371 
1372 	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1373 		return (EINVAL);
1374 	rs = pf_find_kruleset(anchor);
1375 	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
1376 	    ticket != rs->rules[rs_num].inactive.ticket)
1377 		return (EBUSY);
1378 
1379 	/* Calculate checksum for the main ruleset */
1380 	if (rs == &pf_main_ruleset) {
1381 		error = pf_setup_pfsync_matching(rs);
1382 		if (error != 0)
1383 			return (error);
1384 	}
1385 
1386 	/* Swap rules, keep the old. */
1387 	old_rules = rs->rules[rs_num].active.ptr;
1388 	old_rcount = rs->rules[rs_num].active.rcount;
1389 	old_array = rs->rules[rs_num].active.ptr_array;
1390 	old_tree = rs->rules[rs_num].active.tree;
1391 
1392 	rs->rules[rs_num].active.ptr =
1393 	    rs->rules[rs_num].inactive.ptr;
1394 	rs->rules[rs_num].active.ptr_array =
1395 	    rs->rules[rs_num].inactive.ptr_array;
1396 	rs->rules[rs_num].active.tree =
1397 	    rs->rules[rs_num].inactive.tree;
1398 	rs->rules[rs_num].active.rcount =
1399 	    rs->rules[rs_num].inactive.rcount;
1400 
1401 	/* Attempt to preserve counter information. */
1402 	if (V_pf_status.keep_counters && old_tree != NULL) {
1403 		TAILQ_FOREACH(rule, rs->rules[rs_num].active.ptr,
1404 		    entries) {
1405 			old_rule = RB_FIND(pf_krule_global, old_tree, rule);
1406 			if (old_rule == NULL) {
1407 				continue;
1408 			}
1409 			pf_counter_u64_critical_enter();
1410 			pf_counter_u64_rollup_protected(&rule->evaluations,
1411 			    pf_counter_u64_fetch(&old_rule->evaluations));
1412 			pf_counter_u64_rollup_protected(&rule->packets[0],
1413 			    pf_counter_u64_fetch(&old_rule->packets[0]));
1414 			pf_counter_u64_rollup_protected(&rule->packets[1],
1415 			    pf_counter_u64_fetch(&old_rule->packets[1]));
1416 			pf_counter_u64_rollup_protected(&rule->bytes[0],
1417 			    pf_counter_u64_fetch(&old_rule->bytes[0]));
1418 			pf_counter_u64_rollup_protected(&rule->bytes[1],
1419 			    pf_counter_u64_fetch(&old_rule->bytes[1]));
1420 			pf_counter_u64_critical_exit();
1421 		}
1422 	}
1423 
1424 	rs->rules[rs_num].inactive.ptr = old_rules;
1425 	rs->rules[rs_num].inactive.ptr_array = old_array;
1426 	rs->rules[rs_num].inactive.tree = NULL; /* important for pf_ioctl_addrule */
1427 	rs->rules[rs_num].inactive.rcount = old_rcount;
1428 
1429 	rs->rules[rs_num].active.ticket =
1430 	    rs->rules[rs_num].inactive.ticket;
1431 	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);
1432 
1433 	/* Purge the old rule list. */
1434 	PF_UNLNKDRULES_LOCK();
1435 	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
1436 		pf_unlink_rule_locked(old_rules, rule);
1437 	PF_UNLNKDRULES_UNLOCK();
1438 	if (rs->rules[rs_num].inactive.ptr_array)
1439 		free(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
1440 	rs->rules[rs_num].inactive.ptr_array = NULL;
1441 	rs->rules[rs_num].inactive.rcount = 0;
1442 	rs->rules[rs_num].inactive.open = 0;
1443 	pf_remove_if_empty_kruleset(rs);
1444 	free(old_tree, M_TEMP);
1445 
1446 	return (0);
1447 }
1448 
1449 static int
1450 pf_setup_pfsync_matching(struct pf_kruleset *rs)
1451 {
1452 	MD5_CTX			 ctx;
1453 	struct pf_krule		*rule;
1454 	int			 rs_cnt;
1455 	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];
1456 
1457 	MD5Init(&ctx);
1458 	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
1459 		/* XXX PF_RULESET_SCRUB as well? */
1460 		if (rs_cnt == PF_RULESET_SCRUB)
1461 			continue;
1462 
1463 		if (rs->rules[rs_cnt].inactive.ptr_array)
1464 			free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
1465 		rs->rules[rs_cnt].inactive.ptr_array = NULL;
1466 
1467 		if (rs->rules[rs_cnt].inactive.rcount) {
1468 			rs->rules[rs_cnt].inactive.ptr_array =
1469 			    mallocarray(rs->rules[rs_cnt].inactive.rcount,
1470 			    sizeof(struct pf_rule **),
1471 			    M_TEMP, M_NOWAIT);
1472 
1473 			if (!rs->rules[rs_cnt].inactive.ptr_array)
1474 				return (ENOMEM);
1475 		}
1476 
1477 		TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
1478 		    entries) {
1479 			pf_hash_rule_rolling(&ctx, rule);
1480 			(rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
1481 		}
1482 	}
1483 
1484 	MD5Final(digest, &ctx);
1485 	memcpy(V_pf_status.pf_chksum, digest, sizeof(V_pf_status.pf_chksum));
1486 	return (0);
1487 }
1488 
1489 static int
1490 pf_eth_addr_setup(struct pf_keth_ruleset *ruleset, struct pf_addr_wrap *addr)
1491 {
1492 	int error = 0;
1493 
1494 	switch (addr->type) {
1495 	case PF_ADDR_TABLE:
1496 		addr->p.tbl = pfr_eth_attach_table(ruleset, addr->v.tblname);
1497 		if (addr->p.tbl == NULL)
1498 			error = ENOMEM;
1499 		break;
1500 	default:
1501 		error = EINVAL;
1502 	}
1503 
1504 	return (error);
1505 }
1506 
1507 static int
1508 pf_addr_setup(struct pf_kruleset *ruleset, struct pf_addr_wrap *addr,
1509     sa_family_t af)
1510 {
1511 	int error = 0;
1512 
1513 	switch (addr->type) {
1514 	case PF_ADDR_TABLE:
1515 		addr->p.tbl = pfr_attach_table(ruleset, addr->v.tblname);
1516 		if (addr->p.tbl == NULL)
1517 			error = ENOMEM;
1518 		break;
1519 	case PF_ADDR_DYNIFTL:
1520 		error = pfi_dynaddr_setup(addr, af);
1521 		break;
1522 	}
1523 
1524 	return (error);
1525 }
1526 
1527 void
1528 pf_addr_copyout(struct pf_addr_wrap *addr)
1529 {
1530 
1531 	switch (addr->type) {
1532 	case PF_ADDR_DYNIFTL:
1533 		pfi_dynaddr_copyout(addr);
1534 		break;
1535 	case PF_ADDR_TABLE:
1536 		pf_tbladdr_copyout(addr);
1537 		break;
1538 	}
1539 }
1540 
1541 static void
1542 pf_src_node_copy(const struct pf_ksrc_node *in, struct pf_src_node *out)
1543 {
1544 	int	secs = time_uptime, diff;
1545 
1546 	bzero(out, sizeof(struct pf_src_node));
1547 
1548 	bcopy(&in->addr, &out->addr, sizeof(struct pf_addr));
1549 	bcopy(&in->raddr, &out->raddr, sizeof(struct pf_addr));
1550 
1551 	if (in->rule.ptr != NULL)
1552 		out->rule.nr = in->rule.ptr->nr;
1553 
1554 	for (int i = 0; i < 2; i++) {
1555 		out->bytes[i] = counter_u64_fetch(in->bytes[i]);
1556 		out->packets[i] = counter_u64_fetch(in->packets[i]);
1557 	}
1558 
1559 	out->states = in->states;
1560 	out->conn = in->conn;
1561 	out->af = in->af;
1562 	out->ruletype = in->ruletype;
1563 
1564 	out->creation = secs - in->creation;
1565 	if (in->expire > secs)
1566 		out->expire = in->expire - secs;
1567 	else
1568 		out->expire = 0;
1569 
1570 	/* Adjust the connection rate estimate. */
1571 	out->conn_rate = in->conn_rate;
1572 	diff = secs - in->conn_rate.last;
1572 	if (diff >= in->conn_rate.seconds)
1573 		out->conn_rate.count = 0;
1574 	else
1575 		out->conn_rate.count -=
1576 		    in->conn_rate.count * diff /
1577 		    in->conn_rate.seconds;
1578 }
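/*
 * The decay above scales the estimate linearly across the measurement
 * window: with diff = secs - last,
 *
 *	count' = count - count * diff / seconds
 *	       = count * (1 - diff / seconds)
 *
 * e.g. a count of 30 observed 5 seconds into a 10-second window is
 * reported as 15, and anything older than the window as 0.
 */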
1579 
1580 #ifdef ALTQ
1581 /*
1582  * Handle export of struct pf_kaltq to user binaries that may be using any
1583  * version of struct pf_altq.
1584  */
1585 static int
1586 pf_export_kaltq(struct pf_altq *q, struct pfioc_altq_v1 *pa, size_t ioc_size)
1587 {
1588 	u_int32_t version;
1589 
1590 	if (ioc_size == sizeof(struct pfioc_altq_v0))
1591 		version = 0;
1592 	else
1593 		version = pa->version;
1594 
1595 	if (version > PFIOC_ALTQ_VERSION)
1596 		return (EINVAL);
1597 
1598 #define ASSIGN(x) exported_q->x = q->x
1599 #define COPY(x) \
1600 	bcopy(&q->x, &exported_q->x, min(sizeof(q->x), sizeof(exported_q->x)))
1601 #define SATU16(x) (u_int32_t)uqmin((x), USHRT_MAX)
1602 #define SATU32(x) (u_int32_t)uqmin((x), UINT_MAX)
1603 
1604 	switch (version) {
1605 	case 0: {
1606 		struct pf_altq_v0 *exported_q =
1607 		    &((struct pfioc_altq_v0 *)pa)->altq;
1608 
1609 		COPY(ifname);
1610 
1611 		ASSIGN(scheduler);
1612 		ASSIGN(tbrsize);
1613 		exported_q->tbrsize = SATU16(q->tbrsize);
1614 		exported_q->ifbandwidth = SATU32(q->ifbandwidth);
1615 
1616 		COPY(qname);
1617 		COPY(parent);
1618 		ASSIGN(parent_qid);
1619 		exported_q->bandwidth = SATU32(q->bandwidth);
1620 		ASSIGN(priority);
1621 		ASSIGN(local_flags);
1622 
1623 		ASSIGN(qlimit);
1624 		ASSIGN(flags);
1625 
1626 		if (q->scheduler == ALTQT_HFSC) {
1627 #define ASSIGN_OPT(x) exported_q->pq_u.hfsc_opts.x = q->pq_u.hfsc_opts.x
1628 #define ASSIGN_OPT_SATU32(x) exported_q->pq_u.hfsc_opts.x = \
1629 			    SATU32(q->pq_u.hfsc_opts.x)
1630 
1631 			ASSIGN_OPT_SATU32(rtsc_m1);
1632 			ASSIGN_OPT(rtsc_d);
1633 			ASSIGN_OPT_SATU32(rtsc_m2);
1634 
1635 			ASSIGN_OPT_SATU32(lssc_m1);
1636 			ASSIGN_OPT(lssc_d);
1637 			ASSIGN_OPT_SATU32(lssc_m2);
1638 
1639 			ASSIGN_OPT_SATU32(ulsc_m1);
1640 			ASSIGN_OPT(ulsc_d);
1641 			ASSIGN_OPT_SATU32(ulsc_m2);
1642 
1643 			ASSIGN_OPT(flags);
1644 
1645 #undef ASSIGN_OPT
1646 #undef ASSIGN_OPT_SATU32
1647 		} else
1648 			COPY(pq_u);
1649 
1650 		ASSIGN(qid);
1651 		break;
1652 	}
1653 	case 1:	{
1654 		struct pf_altq_v1 *exported_q =
1655 		    &((struct pfioc_altq_v1 *)pa)->altq;
1656 
1657 		COPY(ifname);
1658 
1659 		ASSIGN(scheduler);
1660 		ASSIGN(tbrsize);
1661 		ASSIGN(ifbandwidth);
1662 
1663 		COPY(qname);
1664 		COPY(parent);
1665 		ASSIGN(parent_qid);
1666 		ASSIGN(bandwidth);
1667 		ASSIGN(priority);
1668 		ASSIGN(local_flags);
1669 
1670 		ASSIGN(qlimit);
1671 		ASSIGN(flags);
1672 		COPY(pq_u);
1673 
1674 		ASSIGN(qid);
1675 		break;
1676 	}
1677 	default:
1678 		panic("%s: unhandled struct pfioc_altq version", __func__);
1679 		break;
1680 	}
1681 
1682 #undef ASSIGN
1683 #undef COPY
1684 #undef SATU16
1685 #undef SATU32
1686 
1687 	return (0);
1688 }
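/*
 * The SATU16()/SATU32() macros above clamp wider in-kernel fields to
 * the narrower v0 ABI instead of letting them wrap: for example,
 * SATU16(70000) yields 65535 (USHRT_MAX).  Old v0 binaries therefore
 * see a saturated value rather than a truncated one.
 */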
1689 
1690 /*
1691  * Handle import to struct pf_kaltq of struct pf_altq from user binaries
1692  * that may be using any version of it.
1693  */
1694 static int
1695 pf_import_kaltq(struct pfioc_altq_v1 *pa, struct pf_altq *q, size_t ioc_size)
1696 {
1697 	u_int32_t version;
1698 
1699 	if (ioc_size == sizeof(struct pfioc_altq_v0))
1700 		version = 0;
1701 	else
1702 		version = pa->version;
1703 
1704 	if (version > PFIOC_ALTQ_VERSION)
1705 		return (EINVAL);
1706 
1707 #define ASSIGN(x) q->x = imported_q->x
1708 #define COPY(x) \
1709 	bcopy(&imported_q->x, &q->x, min(sizeof(imported_q->x), sizeof(q->x)))
1710 
1711 	switch (version) {
1712 	case 0: {
1713 		struct pf_altq_v0 *imported_q =
1714 		    &((struct pfioc_altq_v0 *)pa)->altq;
1715 
1716 		COPY(ifname);
1717 
1718 		ASSIGN(scheduler);
1719 		ASSIGN(tbrsize); /* 16-bit -> 32-bit */
1720 		ASSIGN(ifbandwidth); /* 32-bit -> 64-bit */
1721 
1722 		COPY(qname);
1723 		COPY(parent);
1724 		ASSIGN(parent_qid);
1725 		ASSIGN(bandwidth); /* 32-bit -> 64-bit */
1726 		ASSIGN(priority);
1727 		ASSIGN(local_flags);
1728 
1729 		ASSIGN(qlimit);
1730 		ASSIGN(flags);
1731 
1732 		if (imported_q->scheduler == ALTQT_HFSC) {
1733 #define ASSIGN_OPT(x) q->pq_u.hfsc_opts.x = imported_q->pq_u.hfsc_opts.x
1734 
1735 			/*
1736 			 * The m1 and m2 parameters are being copied from
1737 			 * 32-bit to 64-bit.
1738 			 */
1739 			ASSIGN_OPT(rtsc_m1);
1740 			ASSIGN_OPT(rtsc_d);
1741 			ASSIGN_OPT(rtsc_m2);
1742 
1743 			ASSIGN_OPT(lssc_m1);
1744 			ASSIGN_OPT(lssc_d);
1745 			ASSIGN_OPT(lssc_m2);
1746 
1747 			ASSIGN_OPT(ulsc_m1);
1748 			ASSIGN_OPT(ulsc_d);
1749 			ASSIGN_OPT(ulsc_m2);
1750 
1751 			ASSIGN_OPT(flags);
1752 
1753 #undef ASSIGN_OPT
1754 		} else
1755 			COPY(pq_u);
1756 
1757 		ASSIGN(qid);
1758 		break;
1759 	}
1760 	case 1: {
1761 		struct pf_altq_v1 *imported_q =
1762 		    &((struct pfioc_altq_v1 *)pa)->altq;
1763 
1764 		COPY(ifname);
1765 
1766 		ASSIGN(scheduler);
1767 		ASSIGN(tbrsize);
1768 		ASSIGN(ifbandwidth);
1769 
1770 		COPY(qname);
1771 		COPY(parent);
1772 		ASSIGN(parent_qid);
1773 		ASSIGN(bandwidth);
1774 		ASSIGN(priority);
1775 		ASSIGN(local_flags);
1776 
1777 		ASSIGN(qlimit);
1778 		ASSIGN(flags);
1779 		COPY(pq_u);
1780 
1781 		ASSIGN(qid);
1782 		break;
1783 	}
1784 	default:
1785 		panic("%s: unhandled struct pfioc_altq version", __func__);
1786 		break;
1787 	}
1788 
1789 #undef ASSIGN
1790 #undef COPY
1791 
1792 	return (0);
1793 }
1794 
1795 static struct pf_altq *
1796 pf_altq_get_nth_active(u_int32_t n)
1797 {
1798 	struct pf_altq		*altq;
1799 	u_int32_t		 nr;
1800 
1801 	nr = 0;
1802 	TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
1803 		if (nr == n)
1804 			return (altq);
1805 		nr++;
1806 	}
1807 
1808 	TAILQ_FOREACH(altq, V_pf_altqs_active, entries) {
1809 		if (nr == n)
1810 			return (altq);
1811 		nr++;
1812 	}
1813 
1814 	return (NULL);
1815 }
1816 #endif /* ALTQ */
1817 
1818 struct pf_krule *
1819 pf_krule_alloc(void)
1820 {
1821 	struct pf_krule *rule;
1822 
1823 	rule = malloc(sizeof(struct pf_krule), M_PFRULE, M_WAITOK | M_ZERO);
1824 	mtx_init(&rule->rpool.mtx, "pf_krule_pool", NULL, MTX_DEF);
1825 	rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
1826 	    M_WAITOK | M_ZERO);
1827 	return (rule);
1828 }
1829 
1830 void
1831 pf_krule_free(struct pf_krule *rule)
1832 {
1833 #ifdef PF_WANT_32_TO_64_COUNTER
1834 	bool wowned;
1835 #endif
1836 
1837 	if (rule == NULL)
1838 		return;
1839 
1840 #ifdef PF_WANT_32_TO_64_COUNTER
1841 	if (rule->allrulelinked) {
1842 		wowned = PF_RULES_WOWNED();
1843 		if (!wowned)
1844 			PF_RULES_WLOCK();
1845 		LIST_REMOVE(rule, allrulelist);
1846 		V_pf_allrulecount--;
1847 		if (!wowned)
1848 			PF_RULES_WUNLOCK();
1849 	}
1850 #endif
1851 
1852 	pf_counter_u64_deinit(&rule->evaluations);
1853 	for (int i = 0; i < 2; i++) {
1854 		pf_counter_u64_deinit(&rule->packets[i]);
1855 		pf_counter_u64_deinit(&rule->bytes[i]);
1856 	}
1857 	counter_u64_free(rule->states_cur);
1858 	counter_u64_free(rule->states_tot);
1859 	counter_u64_free(rule->src_nodes);
1860 	uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp);
1861 
1862 	mtx_destroy(&rule->rpool.mtx);
1863 	free(rule, M_PFRULE);
1864 }
1865 
1866 void
1867 pf_krule_clear_counters(struct pf_krule *rule)
1868 {
1869 	pf_counter_u64_zero(&rule->evaluations);
1870 	for (int i = 0; i < 2; i++) {
1871 		pf_counter_u64_zero(&rule->packets[i]);
1872 		pf_counter_u64_zero(&rule->bytes[i]);
1873 	}
1874 	counter_u64_zero(rule->states_tot);
1875 }
1876 
1877 static void
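/*
 * Conversion helpers between the userspace pool-address structures and
 * their kernel counterparts.  Strings copied in from userspace go
 * through pf_user_strcpy(), so over-long input is rejected rather than
 * silently truncated.
 */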
1878 pf_kpooladdr_to_pooladdr(const struct pf_kpooladdr *kpool,
1879     struct pf_pooladdr *pool)
1880 {
1881 
1882 	bzero(pool, sizeof(*pool));
1883 	bcopy(&kpool->addr, &pool->addr, sizeof(pool->addr));
1884 	strlcpy(pool->ifname, kpool->ifname, sizeof(pool->ifname));
1885 }
1886 
1887 static int
1888 pf_pooladdr_to_kpooladdr(const struct pf_pooladdr *pool,
1889     struct pf_kpooladdr *kpool)
1890 {
1891 	int ret;
1892 
1893 	bzero(kpool, sizeof(*kpool));
1894 	bcopy(&pool->addr, &kpool->addr, sizeof(kpool->addr));
1895 	ret = pf_user_strcpy(kpool->ifname, pool->ifname,
1896 	    sizeof(kpool->ifname));
1897 	return (ret);
1898 }
1899 
1900 static void
1901 pf_pool_to_kpool(const struct pf_pool *pool, struct pf_kpool *kpool)
1902 {
1903 	_Static_assert(sizeof(pool->key) == sizeof(kpool->key), "");
1904 	_Static_assert(sizeof(pool->counter) == sizeof(kpool->counter), "");
1905 
1906 	bcopy(&pool->key, &kpool->key, sizeof(kpool->key));
1907 	bcopy(&pool->counter, &kpool->counter, sizeof(kpool->counter));
1908 
1909 	kpool->tblidx = pool->tblidx;
1910 	kpool->proxy_port[0] = pool->proxy_port[0];
1911 	kpool->proxy_port[1] = pool->proxy_port[1];
1912 	kpool->opts = pool->opts;
1913 }
1914 
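/*
 * Convert a userspace pf_rule into the kernel's pf_krule representation.
 * Rejects address families the kernel was not built for, validates the
 * src/dst addresses and copies string fields with pf_user_strcpy().
 * Counters and kernel-only pointers (kif, anchor, overload_tbl) are
 * deliberately not taken from userspace.
 */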
1915 static int
1916 pf_rule_to_krule(const struct pf_rule *rule, struct pf_krule *krule)
1917 {
1918 	int ret;
1919 
1920 #ifndef INET
1921 	if (rule->af == AF_INET) {
1922 		return (EAFNOSUPPORT);
1923 	}
1924 #endif /* INET */
1925 #ifndef INET6
1926 	if (rule->af == AF_INET6) {
1927 		return (EAFNOSUPPORT);
1928 	}
1929 #endif /* INET6 */
1930 
1931 	ret = pf_check_rule_addr(&rule->src);
1932 	if (ret != 0)
1933 		return (ret);
1934 	ret = pf_check_rule_addr(&rule->dst);
1935 	if (ret != 0)
1936 		return (ret);
1937 
1938 	bcopy(&rule->src, &krule->src, sizeof(rule->src));
1939 	bcopy(&rule->dst, &krule->dst, sizeof(rule->dst));
1940 
1941 	ret = pf_user_strcpy(krule->label[0], rule->label, sizeof(rule->label));
1942 	if (ret != 0)
1943 		return (ret);
1944 	ret = pf_user_strcpy(krule->ifname, rule->ifname, sizeof(rule->ifname));
1945 	if (ret != 0)
1946 		return (ret);
1947 	ret = pf_user_strcpy(krule->qname, rule->qname, sizeof(rule->qname));
1948 	if (ret != 0)
1949 		return (ret);
1950 	ret = pf_user_strcpy(krule->pqname, rule->pqname, sizeof(rule->pqname));
1951 	if (ret != 0)
1952 		return (ret);
1953 	ret = pf_user_strcpy(krule->tagname, rule->tagname,
1954 	    sizeof(rule->tagname));
1955 	if (ret != 0)
1956 		return (ret);
1957 	ret = pf_user_strcpy(krule->match_tagname, rule->match_tagname,
1958 	    sizeof(rule->match_tagname));
1959 	if (ret != 0)
1960 		return (ret);
1961 	ret = pf_user_strcpy(krule->overload_tblname, rule->overload_tblname,
1962 	    sizeof(rule->overload_tblname));
1963 	if (ret != 0)
1964 		return (ret);
1965 
1966 	pf_pool_to_kpool(&rule->rpool, &krule->rpool);
1967 
1968 	/* Don't allow userspace to set evaluations, packets or bytes. */
1969 	/* kif, anchor, overload_tbl are not copied over. */
1970 
1971 	krule->os_fingerprint = rule->os_fingerprint;
1972 
1973 	krule->rtableid = rule->rtableid;
1974 	/* pf_rule->timeout is smaller than pf_krule->timeout */
1975 	bcopy(rule->timeout, krule->timeout, sizeof(rule->timeout));
1976 	krule->max_states = rule->max_states;
1977 	krule->max_src_nodes = rule->max_src_nodes;
1978 	krule->max_src_states = rule->max_src_states;
1979 	krule->max_src_conn = rule->max_src_conn;
1980 	krule->max_src_conn_rate.limit = rule->max_src_conn_rate.limit;
1981 	krule->max_src_conn_rate.seconds = rule->max_src_conn_rate.seconds;
1982 	krule->qid = rule->qid;
1983 	krule->pqid = rule->pqid;
1984 	krule->nr = rule->nr;
1985 	krule->prob = rule->prob;
1986 	krule->cuid = rule->cuid;
1987 	krule->cpid = rule->cpid;
1988 
1989 	krule->return_icmp = rule->return_icmp;
1990 	krule->return_icmp6 = rule->return_icmp6;
1991 	krule->max_mss = rule->max_mss;
1992 	krule->tag = rule->tag;
1993 	krule->match_tag = rule->match_tag;
1994 	krule->scrub_flags = rule->scrub_flags;
1995 
1996 	bcopy(&rule->uid, &krule->uid, sizeof(krule->uid));
1997 	bcopy(&rule->gid, &krule->gid, sizeof(krule->gid));
1998 
1999 	krule->rule_flag = rule->rule_flag;
2000 	krule->action = rule->action;
2001 	krule->direction = rule->direction;
2002 	krule->log = rule->log;
2003 	krule->logif = rule->logif;
2004 	krule->quick = rule->quick;
2005 	krule->ifnot = rule->ifnot;
2006 	krule->match_tag_not = rule->match_tag_not;
2007 	krule->natpass = rule->natpass;
2008 
2009 	krule->keep_state = rule->keep_state;
2010 	krule->af = rule->af;
2011 	krule->proto = rule->proto;
2012 	krule->type = rule->type;
2013 	krule->code = rule->code;
2014 	krule->flags = rule->flags;
2015 	krule->flagset = rule->flagset;
2016 	krule->min_ttl = rule->min_ttl;
2017 	krule->allow_opts = rule->allow_opts;
2018 	krule->rt = rule->rt;
2019 	krule->return_ttl = rule->return_ttl;
2020 	krule->tos = rule->tos;
2021 	krule->set_tos = rule->set_tos;
2022 
2023 	krule->flush = rule->flush;
2024 	krule->prio = rule->prio;
2025 	krule->set_prio[0] = rule->set_prio[0];
2026 	krule->set_prio[1] = rule->set_prio[1];
2027 
2028 	bcopy(&rule->divert, &krule->divert, sizeof(krule->divert));
2029 
2030 	return (0);
2031 }
2032 
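/*
 * Report the number of rules on the active ruleset of the given anchor,
 * along with the current ticket, so userspace can iterate over the rules
 * with consistent numbering.
 */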
2033 int
2034 pf_ioctl_getrules(struct pfioc_rule *pr)
2035 {
2036 	struct pf_kruleset	*ruleset;
2037 	struct pf_krule		*tail;
2038 	int			 rs_num;
2039 
2040 	PF_RULES_WLOCK();
2041 	ruleset = pf_find_kruleset(pr->anchor);
2042 	if (ruleset == NULL) {
2043 		PF_RULES_WUNLOCK();
2044 		return (EINVAL);
2045 	}
2046 	rs_num = pf_get_ruleset_number(pr->rule.action);
2047 	if (rs_num >= PF_RULESET_MAX) {
2048 		PF_RULES_WUNLOCK();
2049 		return (EINVAL);
2050 	}
2051 	tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
2052 	    pf_krulequeue);
2053 	if (tail)
2054 		pr->nr = tail->nr + 1;
2055 	else
2056 		pr->nr = 0;
2057 	pr->ticket = ruleset->rules[rs_num].active.ticket;
2058 	PF_RULES_WUNLOCK();
2059 
2060 	return (0);
2061 }
2062 
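/*
 * Validate a fully constructed rule and append it to the inactive
 * ruleset of the given anchor.  Both the ruleset ticket and the pool
 * ticket must match the current transaction.  The rule is consumed:
 * on failure it is freed before the error is returned.
 */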
2063 int
2064 pf_ioctl_addrule(struct pf_krule *rule, uint32_t ticket,
2065     uint32_t pool_ticket, const char *anchor, const char *anchor_call,
2066     uid_t uid, pid_t pid)
2067 {
2068 	struct pf_kruleset	*ruleset;
2069 	struct pf_krule		*tail;
2070 	struct pf_kpooladdr	*pa;
2071 	struct pfi_kkif		*kif = NULL;
2072 	int			 rs_num;
2073 	int			 error = 0;
2074 
2075 	if ((rule->return_icmp >> 8) > ICMP_MAXTYPE) {
2076 		error = EINVAL;
2077 		goto errout_unlocked;
2078 	}
2079 
2080 #define	ERROUT(x)	ERROUT_FUNCTION(errout, x)
2081 
2082 	if (rule->ifname[0])
2083 		kif = pf_kkif_create(M_WAITOK);
2084 	pf_counter_u64_init(&rule->evaluations, M_WAITOK);
2085 	for (int i = 0; i < 2; i++) {
2086 		pf_counter_u64_init(&rule->packets[i], M_WAITOK);
2087 		pf_counter_u64_init(&rule->bytes[i], M_WAITOK);
2088 	}
2089 	rule->states_cur = counter_u64_alloc(M_WAITOK);
2090 	rule->states_tot = counter_u64_alloc(M_WAITOK);
2091 	rule->src_nodes = counter_u64_alloc(M_WAITOK);
2092 	rule->cuid = uid;
2093 	rule->cpid = pid;
2094 	TAILQ_INIT(&rule->rpool.list);
2095 
2096 	PF_CONFIG_LOCK();
2097 	PF_RULES_WLOCK();
2098 #ifdef PF_WANT_32_TO_64_COUNTER
2099 	LIST_INSERT_HEAD(&V_pf_allrulelist, rule, allrulelist);
2100 	MPASS(!rule->allrulelinked);
2101 	rule->allrulelinked = true;
2102 	V_pf_allrulecount++;
2103 #endif
2104 	ruleset = pf_find_kruleset(anchor);
2105 	if (ruleset == NULL)
2106 		ERROUT(EINVAL);
2107 	rs_num = pf_get_ruleset_number(rule->action);
2108 	if (rs_num >= PF_RULESET_MAX)
2109 		ERROUT(EINVAL);
2110 	if (ticket != ruleset->rules[rs_num].inactive.ticket) {
2111 		DPFPRINTF(PF_DEBUG_MISC,
2112 		    ("ticket: %d != [%d]%d\n", ticket, rs_num,
2113 		    ruleset->rules[rs_num].inactive.ticket));
2114 		ERROUT(EBUSY);
2115 	}
2116 	if (pool_ticket != V_ticket_pabuf) {
2117 		DPFPRINTF(PF_DEBUG_MISC,
2118 		    ("pool_ticket: %d != %d\n", pool_ticket,
2119 		    V_ticket_pabuf));
2120 		ERROUT(EBUSY);
2121 	}
2122 	/*
2123 	 * XXXMJG hack: there is no mechanism to ensure the caller actually
2124 	 * started the transaction.  The ticket checked above may happen to
2125 	 * match by accident, even if nobody called DIOCXBEGIN, let alone this
2126 	 * process.  Partially work around it by checking whether the RB tree
2127 	 * got allocated, see pf_begin_rules().
2128 	 */
2129 	if (ruleset->rules[rs_num].inactive.tree == NULL) {
2130 		ERROUT(EINVAL);
2131 	}
2132 
2133 	tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
2134 	    pf_krulequeue);
2135 	if (tail)
2136 		rule->nr = tail->nr + 1;
2137 	else
2138 		rule->nr = 0;
2139 	if (rule->ifname[0]) {
2140 		rule->kif = pfi_kkif_attach(kif, rule->ifname);
2141 		kif = NULL;
2142 		pfi_kkif_ref(rule->kif);
2143 	} else
2144 		rule->kif = NULL;
2145 
2146 	if (rule->rtableid > 0 && rule->rtableid >= rt_numfibs)
2147 		error = EBUSY;
2148 
2149 #ifdef ALTQ
2150 	/* set queue IDs */
2151 	if (rule->qname[0] != 0) {
2152 		if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
2153 			error = EBUSY;
2154 		else if (rule->pqname[0] != 0) {
2155 			if ((rule->pqid =
2156 			    pf_qname2qid(rule->pqname)) == 0)
2157 				error = EBUSY;
2158 		} else
2159 			rule->pqid = rule->qid;
2160 	}
2161 #endif
2162 	if (rule->tagname[0])
2163 		if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
2164 			error = EBUSY;
2165 	if (rule->match_tagname[0])
2166 		if ((rule->match_tag =
2167 		    pf_tagname2tag(rule->match_tagname)) == 0)
2168 			error = EBUSY;
2169 	if (rule->rt && !rule->direction)
2170 		error = EINVAL;
2171 	if (!rule->log)
2172 		rule->logif = 0;
2173 	if (rule->logif >= PFLOGIFS_MAX)
2174 		error = EINVAL;
2175 	if (pf_addr_setup(ruleset, &rule->src.addr, rule->af))
2176 		error = ENOMEM;
2177 	if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af))
2178 		error = ENOMEM;
2179 	if (pf_kanchor_setup(rule, ruleset, anchor_call))
2180 		error = EINVAL;
2181 	if (rule->scrub_flags & PFSTATE_SETPRIO &&
2182 	    (rule->set_prio[0] > PF_PRIO_MAX ||
2183 	    rule->set_prio[1] > PF_PRIO_MAX))
2184 		error = EINVAL;
2185 	TAILQ_FOREACH(pa, &V_pf_pabuf, entries)
2186 		if (pa->addr.type == PF_ADDR_TABLE) {
2187 			pa->addr.p.tbl = pfr_attach_table(ruleset,
2188 			    pa->addr.v.tblname);
2189 			if (pa->addr.p.tbl == NULL)
2190 				error = ENOMEM;
2191 		}
2192 
2193 	rule->overload_tbl = NULL;
2194 	if (rule->overload_tblname[0]) {
2195 		if ((rule->overload_tbl = pfr_attach_table(ruleset,
2196 		    rule->overload_tblname)) == NULL)
2197 			error = EINVAL;
2198 		else
2199 			rule->overload_tbl->pfrkt_flags |=
2200 			    PFR_TFLAG_ACTIVE;
2201 	}
2202 
2203 	pf_mv_kpool(&V_pf_pabuf, &rule->rpool.list);
2204 	if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
2205 	    (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
2206 	    (rule->rt > PF_NOPFROUTE)) &&
2207 	    (TAILQ_FIRST(&rule->rpool.list) == NULL))
2208 		error = EINVAL;
2209 
2210 	if (error) {
2211 		pf_free_rule(rule);
2212 		rule = NULL;
2213 		ERROUT(error);
2214 	}
2215 
2216 	rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
2217 	TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
2218 	    rule, entries);
2219 	ruleset->rules[rs_num].inactive.rcount++;
2220 
2221 	PF_RULES_WUNLOCK();
2222 	pf_hash_rule(rule);
2223 	if (RB_INSERT(pf_krule_global, ruleset->rules[rs_num].inactive.tree, rule) != NULL) {
2224 		PF_RULES_WLOCK();
2225 		TAILQ_REMOVE(ruleset->rules[rs_num].inactive.ptr, rule, entries);
2226 		ruleset->rules[rs_num].inactive.rcount--;
2227 		pf_free_rule(rule);
2228 		rule = NULL;
2229 		ERROUT(EEXIST);
2230 	}
2231 	PF_CONFIG_UNLOCK();
2232 
2233 	return (0);
2234 
2235 #undef ERROUT
2236 errout:
2237 	PF_RULES_WUNLOCK();
2238 	PF_CONFIG_UNLOCK();
2239 errout_unlocked:
2240 	pf_kkif_free(kif);
2241 	pf_krule_free(rule);
2242 	return (error);
2243 }
2244 
2245 static bool
2246 pf_label_match(const struct pf_krule *rule, const char *label)
2247 {
2248 	int i = 0;
2249 
2250 	while (*rule->label[i]) {
2251 		if (strcmp(rule->label[i], label) == 0)
2252 			return (true);
2253 		i++;
2254 	}
2255 
2256 	return (false);
2257 }
2258 
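/*
 * Unlink the state matching the given key and direction.  If the lookup
 * is ambiguous (more than one match) nothing is killed.  Returns the
 * number of states removed (0 or 1).
 */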
2259 static unsigned int
2260 pf_kill_matching_state(struct pf_state_key_cmp *key, int dir)
2261 {
2262 	struct pf_kstate *s;
2263 	int more = 0;
2264 
2265 	s = pf_find_state_all(key, dir, &more);
2266 	if (s == NULL)
2267 		return (0);
2268 
2269 	if (more) {
2270 		PF_STATE_UNLOCK(s);
2271 		return (0);
2272 	}
2273 
2274 	pf_unlink_state(s);
2275 	return (1);
2276 }
2277 
2278 static int
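/*
 * Walk one ID-hash row and unlink every state matching the filter in
 * psk.  Since pf_unlink_state() drops the row lock, the scan restarts
 * from the beginning of the row after each kill.  With psk_kill_match
 * set, the state on the other end of the NAT mapping is killed as well.
 * Returns the number of states killed.
 */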
2279 pf_killstates_row(struct pf_kstate_kill *psk, struct pf_idhash *ih)
2280 {
2281 	struct pf_kstate	*s;
2282 	struct pf_state_key	*sk;
2283 	struct pf_addr		*srcaddr, *dstaddr;
2284 	struct pf_state_key_cmp	 match_key;
2285 	int			 idx, killed = 0;
2286 	unsigned int		 dir;
2287 	u_int16_t		 srcport, dstport;
2288 	struct pfi_kkif		*kif;
2289 
2290 relock_DIOCKILLSTATES:
2291 	PF_HASHROW_LOCK(ih);
2292 	LIST_FOREACH(s, &ih->states, entry) {
2293 		/* For floating states look at the original kif. */
2294 		kif = s->kif == V_pfi_all ? s->orig_kif : s->kif;
2295 
2296 		sk = s->key[psk->psk_nat ? PF_SK_STACK : PF_SK_WIRE];
2297 		if (s->direction == PF_OUT) {
2298 			srcaddr = &sk->addr[1];
2299 			dstaddr = &sk->addr[0];
2300 			srcport = sk->port[1];
2301 			dstport = sk->port[0];
2302 		} else {
2303 			srcaddr = &sk->addr[0];
2304 			dstaddr = &sk->addr[1];
2305 			srcport = sk->port[0];
2306 			dstport = sk->port[1];
2307 		}
2308 
2309 		if (psk->psk_af && sk->af != psk->psk_af)
2310 			continue;
2311 
2312 		if (psk->psk_proto && psk->psk_proto != sk->proto)
2313 			continue;
2314 
2315 		if (! PF_MATCHA(psk->psk_src.neg, &psk->psk_src.addr.v.a.addr,
2316 		    &psk->psk_src.addr.v.a.mask, srcaddr, sk->af))
2317 			continue;
2318 
2319 		if (! PF_MATCHA(psk->psk_dst.neg, &psk->psk_dst.addr.v.a.addr,
2320 		    &psk->psk_dst.addr.v.a.mask, dstaddr, sk->af))
2321 			continue;
2322 
2323 		if (! PF_MATCHA(psk->psk_rt_addr.neg,
2324 		    &psk->psk_rt_addr.addr.v.a.addr,
2325 		    &psk->psk_rt_addr.addr.v.a.mask,
2326 		    &s->rt_addr, sk->af))
2327 			continue;
2328 
2329 		if (psk->psk_src.port_op != 0 &&
2330 		    ! pf_match_port(psk->psk_src.port_op,
2331 		    psk->psk_src.port[0], psk->psk_src.port[1], srcport))
2332 			continue;
2333 
2334 		if (psk->psk_dst.port_op != 0 &&
2335 		    ! pf_match_port(psk->psk_dst.port_op,
2336 		    psk->psk_dst.port[0], psk->psk_dst.port[1], dstport))
2337 			continue;
2338 
2339 		if (psk->psk_label[0] &&
2340 		    ! pf_label_match(s->rule.ptr, psk->psk_label))
2341 			continue;
2342 
2343 		if (psk->psk_ifname[0] && strcmp(psk->psk_ifname,
2344 		    kif->pfik_name))
2345 			continue;
2346 
2347 		if (psk->psk_kill_match) {
2348 			/* Create the key to find matching states, with lock
2349 			 * held. */
2350 
2351 			bzero(&match_key, sizeof(match_key));
2352 
2353 			if (s->direction == PF_OUT) {
2354 				dir = PF_IN;
2355 				idx = psk->psk_nat ? PF_SK_WIRE : PF_SK_STACK;
2356 			} else {
2357 				dir = PF_OUT;
2358 				idx = psk->psk_nat ? PF_SK_STACK : PF_SK_WIRE;
2359 			}
2360 
2361 			match_key.af = s->key[idx]->af;
2362 			match_key.proto = s->key[idx]->proto;
2363 			PF_ACPY(&match_key.addr[0],
2364 			    &s->key[idx]->addr[1], match_key.af);
2365 			match_key.port[0] = s->key[idx]->port[1];
2366 			PF_ACPY(&match_key.addr[1],
2367 			    &s->key[idx]->addr[0], match_key.af);
2368 			match_key.port[1] = s->key[idx]->port[0];
2369 		}
2370 
2371 		pf_unlink_state(s);
2372 		killed++;
2373 
2374 		if (psk->psk_kill_match)
2375 			killed += pf_kill_matching_state(&match_key, dir);
2376 
2377 		goto relock_DIOCKILLSTATES;
2378 	}
2379 	PF_HASHROW_UNLOCK(ih);
2380 
2381 	return (killed);
2382 }
2383 
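/*
 * Attach the pf hooks and mark the filter as running; EEXIST if it
 * already is.  The Ethernet hooks are only installed when Ethernet
 * rules are present.
 */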
2384 int
2385 pf_start(void)
2386 {
2387 	int error = 0;
2388 
2389 	sx_xlock(&V_pf_ioctl_lock);
2390 	if (V_pf_status.running)
2391 		error = EEXIST;
2392 	else {
2393 		hook_pf();
2394 		if (! TAILQ_EMPTY(V_pf_keth->active.rules))
2395 			hook_pf_eth();
2396 		V_pf_status.running = 1;
2397 		V_pf_status.since = time_second;
2398 		new_unrhdr64(&V_pf_stateid, time_second);
2399 
2400 		DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
2401 	}
2402 	sx_xunlock(&V_pf_ioctl_lock);
2403 
2404 	return (error);
2405 }
2406 
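/*
 * Detach the pf hooks and mark the filter as stopped; ENOENT if it was
 * not running.
 */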
2407 int
2408 pf_stop(void)
2409 {
2410 	int error = 0;
2411 
2412 	sx_xlock(&V_pf_ioctl_lock);
2413 	if (!V_pf_status.running)
2414 		error = ENOENT;
2415 	else {
2416 		V_pf_status.running = 0;
2417 		dehook_pf();
2418 		dehook_pf_eth();
2419 		V_pf_status.since = time_second;
2420 		DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
2421 	}
2422 	sx_xunlock(&V_pf_ioctl_lock);
2423 
2424 	return (error);
2425 }
2426 
2427 void
2428 pf_ioctl_clear_status(void)
2429 {
2430 	PF_RULES_WLOCK();
2431 	for (int i = 0; i < PFRES_MAX; i++)
2432 		counter_u64_zero(V_pf_status.counters[i]);
2433 	for (int i = 0; i < FCNT_MAX; i++)
2434 		pf_counter_u64_zero(&V_pf_status.fcounters[i]);
2435 	for (int i = 0; i < SCNT_MAX; i++)
2436 		counter_u64_zero(V_pf_status.scounters[i]);
2437 	for (int i = 0; i < KLCNT_MAX; i++)
2438 		counter_u64_zero(V_pf_status.lcounters[i]);
2439 	V_pf_status.since = time_second;
2440 	if (*V_pf_status.ifname)
2441 		pfi_update_status(V_pf_status.ifname, NULL);
2442 	PF_RULES_WUNLOCK();
2443 }
2444 
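/*
 * Set a state timeout on the default rule, optionally reporting the
 * previous value.  A zero purge interval (PFTM_INTERVAL) is bumped to
 * one second, and the purge thread is woken up when the interval
 * shrinks.
 */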
2445 int
2446 pf_ioctl_set_timeout(int timeout, int seconds, int *prev_seconds)
2447 {
2448 	uint32_t old;
2449 
2450 	if (timeout < 0 || timeout >= PFTM_MAX ||
2451 	    seconds < 0)
2452 		return (EINVAL);
2453 
2454 	PF_RULES_WLOCK();
2455 	old = V_pf_default_rule.timeout[timeout];
2456 	if (timeout == PFTM_INTERVAL && seconds == 0)
2457 		seconds = 1;
2458 	V_pf_default_rule.timeout[timeout] = seconds;
2459 	if (timeout == PFTM_INTERVAL && seconds < old)
2460 		wakeup(pf_purge_thread);
2461 
2462 	if (prev_seconds != NULL)
2463 		*prev_seconds = old;
2464 
2465 	PF_RULES_WUNLOCK();
2466 
2467 	return (0);
2468 }
2469 
2470 int
2471 pf_ioctl_get_timeout(int timeout, int *seconds)
2472 {
2473 	PF_RULES_RLOCK_TRACKER;
2474 
2475 	if (timeout < 0 || timeout >= PFTM_MAX)
2476 		return (EINVAL);
2477 
2478 	PF_RULES_RLOCK();
2479 	*seconds = V_pf_default_rule.timeout[timeout];
2480 	PF_RULES_RUNLOCK();
2481 
2482 	return (0);
2483 }
2484 
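/*
 * Cap one of the pf memory pools by limiting its backing UMA zone,
 * optionally reporting the previous administrative limit.
 */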
2485 int
2486 pf_ioctl_set_limit(int index, unsigned int limit, unsigned int *old_limit)
2487 {
2488 
2489 	PF_RULES_WLOCK();
2490 	if (index < 0 || index >= PF_LIMIT_MAX ||
2491 	    V_pf_limits[index].zone == NULL) {
2492 		PF_RULES_WUNLOCK();
2493 		return (EINVAL);
2494 	}
2495 	uma_zone_set_max(V_pf_limits[index].zone, limit);
2496 	if (old_limit != NULL)
2497 		*old_limit = V_pf_limits[index].limit;
2498 	V_pf_limits[index].limit = limit;
2499 	PF_RULES_WUNLOCK();
2500 
2501 	return (0);
2502 }
2503 
2504 int
2505 pf_ioctl_get_limit(int index, unsigned int *limit)
2506 {
2507 	PF_RULES_RLOCK_TRACKER;
2508 
2509 	if (index < 0 || index >= PF_LIMIT_MAX)
2510 		return (EINVAL);
2511 
2512 	PF_RULES_RLOCK();
2513 	*limit = V_pf_limits[index].limit;
2514 	PF_RULES_RUNLOCK();
2515 
2516 	return (0);
2517 }
2518 
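/*
 * Start a new pool-address transaction: empty the build buffer and hand
 * out a fresh ticket that subsequent DIOCADDADDR calls must present.
 */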
2519 int
2520 pf_ioctl_begin_addrs(uint32_t *ticket)
2521 {
2522 	PF_RULES_WLOCK();
2523 	pf_empty_kpool(&V_pf_pabuf);
2524 	*ticket = ++V_ticket_pabuf;
2525 	PF_RULES_WUNLOCK();
2526 
2527 	return (0);
2528 }
2529 
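/*
 * Validate one pool address and append it to the buffer started by
 * pf_ioctl_begin_addrs().  The caller's ticket must match the current
 * transaction; interface and dynamic-address state are attached here.
 */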
2530 int
2531 pf_ioctl_add_addr(struct pfioc_pooladdr *pp)
2532 {
2533 	struct pf_kpooladdr	*pa = NULL;
2534 	struct pfi_kkif		*kif = NULL;
2535 	int error;
2536 
2537 #ifndef INET
2538 	if (pp->af == AF_INET)
2539 		return (EAFNOSUPPORT);
2540 #endif /* INET */
2541 #ifndef INET6
2542 	if (pp->af == AF_INET6)
2543 		return (EAFNOSUPPORT);
2544 #endif /* INET6 */
2545 
2546 	if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
2547 	    pp->addr.addr.type != PF_ADDR_DYNIFTL &&
2548 	    pp->addr.addr.type != PF_ADDR_TABLE)
2549 		return (EINVAL);
2550 
2551 	if (pp->addr.addr.p.dyn != NULL)
2552 		return (EINVAL);
2553 
2554 	pa = malloc(sizeof(*pa), M_PFRULE, M_WAITOK);
2555 	error = pf_pooladdr_to_kpooladdr(&pp->addr, pa);
2556 	if (error != 0)
2557 		goto out;
2558 	if (pa->ifname[0])
2559 		kif = pf_kkif_create(M_WAITOK);
2560 	PF_RULES_WLOCK();
2561 	if (pp->ticket != V_ticket_pabuf) {
2562 		PF_RULES_WUNLOCK();
2563 		if (pa->ifname[0])
2564 			pf_kkif_free(kif);
2565 		error = EBUSY;
2566 		goto out;
2567 	}
2568 	if (pa->ifname[0]) {
2569 		pa->kif = pfi_kkif_attach(kif, pa->ifname);
2570 		kif = NULL;
2571 		pfi_kkif_ref(pa->kif);
2572 	} else
2573 		pa->kif = NULL;
2574 	if (pa->addr.type == PF_ADDR_DYNIFTL && ((error =
2575 	    pfi_dynaddr_setup(&pa->addr, pp->af)) != 0)) {
2576 		if (pa->ifname[0])
2577 			pfi_kkif_unref(pa->kif);
2578 		PF_RULES_WUNLOCK();
2579 		goto out;
2580 	}
2581 	TAILQ_INSERT_TAIL(&V_pf_pabuf, pa, entries);
2582 	PF_RULES_WUNLOCK();
2583 
2584 	return (0);
2585 
2586 out:
2587 	free(pa, M_PFRULE);
2588 	return (error);
2589 }
2590 
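/*
 * Report the number of addresses in the pool identified by anchor,
 * ticket, rule action and rule number.
 */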
2591 int
2592 pf_ioctl_get_addrs(struct pfioc_pooladdr *pp)
2593 {
2594 	struct pf_kpool		*pool;
2595 	struct pf_kpooladdr	*pa;
2596 
2597 	PF_RULES_RLOCK_TRACKER;
2598 
2599 	pp->anchor[sizeof(pp->anchor) - 1] = 0;
2600 	pp->nr = 0;
2601 
2602 	PF_RULES_RLOCK();
2603 	pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
2604 	    pp->r_num, 0, 1, 0);
2605 	if (pool == NULL) {
2606 		PF_RULES_RUNLOCK();
2607 		return (EBUSY);
2608 	}
2609 	TAILQ_FOREACH(pa, &pool->list, entries)
2610 		pp->nr++;
2611 	PF_RULES_RUNLOCK();
2612 
2613 	return (0);
2614 }
2615 
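/*
 * Copy the pp->nr-th address of the selected pool out to userspace.
 */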
2616 int
2617 pf_ioctl_get_addr(struct pfioc_pooladdr *pp)
2618 {
2619 	struct pf_kpool		*pool;
2620 	struct pf_kpooladdr	*pa;
2621 	u_int32_t		 nr = 0;
2622 
2623 	PF_RULES_RLOCK_TRACKER;
2624 
2625 	pp->anchor[sizeof(pp->anchor) - 1] = 0;
2626 
2627 	PF_RULES_RLOCK();
2628 	pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
2629 	    pp->r_num, 0, 1, 1);
2630 	if (pool == NULL) {
2631 		PF_RULES_RUNLOCK();
2632 		return (EBUSY);
2633 	}
2634 	pa = TAILQ_FIRST(&pool->list);
2635 	while ((pa != NULL) && (nr < pp->nr)) {
2636 		pa = TAILQ_NEXT(pa, entries);
2637 		nr++;
2638 	}
2639 	if (pa == NULL) {
2640 		PF_RULES_RUNLOCK();
2641 		return (EBUSY);
2642 	}
2643 	pf_kpooladdr_to_pooladdr(pa, &pp->addr);
2644 	pf_addr_copyout(&pp->addr.addr);
2645 	PF_RULES_RUNLOCK();
2646 
2647 	return (0);
2648 }
2649 
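/*
 * Top-level handler for ioctl(2) on /dev/pf.  Commands are vetted
 * against the securelevel and against read-only file descriptors before
 * being dispatched; table commands carrying PFR_FLAG_DUMMY are exempted.
 * A minimal userspace sketch (illustrative only, error handling
 * omitted):
 *
 *	int fd = open("/dev/pf", O_RDWR);
 *	struct pfioc_rule pr;
 *
 *	memset(&pr, 0, sizeof(pr));
 *	pr.rule.action = PF_PASS;
 *	if (ioctl(fd, DIOCGETRULES, &pr) == 0)
 *		printf("%u rules, ticket %u\n", pr.nr, pr.ticket);
 */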
2650 static int
2651 pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
2652 {
2653 	int			 error = 0;
2654 	PF_RULES_RLOCK_TRACKER;
2655 
2656 #define	ERROUT_IOCTL(target, x)					\
2657     do {								\
2658 	    error = (x);						\
2659 	    SDT_PROBE3(pf, ioctl, ioctl, error, cmd, error, __LINE__);	\
2660 	    goto target;						\
2661     } while (0)
2662 
2664 	/* XXX keep in sync with switch() below */
2665 	if (securelevel_gt(td->td_ucred, 2))
2666 		switch (cmd) {
2667 		case DIOCGETRULES:
2668 		case DIOCGETRULENV:
2669 		case DIOCGETADDRS:
2670 		case DIOCGETADDR:
2671 		case DIOCGETSTATE:
2672 		case DIOCGETSTATENV:
2673 		case DIOCSETSTATUSIF:
2674 		case DIOCGETSTATUSNV:
2675 		case DIOCCLRSTATUS:
2676 		case DIOCNATLOOK:
2677 		case DIOCSETDEBUG:
2678 #ifdef COMPAT_FREEBSD14
2679 		case DIOCGETSTATES:
2680 		case DIOCGETSTATESV2:
2681 #endif
2682 		case DIOCGETTIMEOUT:
2683 		case DIOCCLRRULECTRS:
2684 		case DIOCGETLIMIT:
2685 		case DIOCGETALTQSV0:
2686 		case DIOCGETALTQSV1:
2687 		case DIOCGETALTQV0:
2688 		case DIOCGETALTQV1:
2689 		case DIOCGETQSTATSV0:
2690 		case DIOCGETQSTATSV1:
2691 		case DIOCGETRULESETS:
2692 		case DIOCGETRULESET:
2693 		case DIOCRGETTABLES:
2694 		case DIOCRGETTSTATS:
2695 		case DIOCRCLRTSTATS:
2696 		case DIOCRCLRADDRS:
2697 		case DIOCRADDADDRS:
2698 		case DIOCRDELADDRS:
2699 		case DIOCRSETADDRS:
2700 		case DIOCRGETADDRS:
2701 		case DIOCRGETASTATS:
2702 		case DIOCRCLRASTATS:
2703 		case DIOCRTSTADDRS:
2704 		case DIOCOSFPGET:
2705 		case DIOCGETSRCNODES:
2706 		case DIOCCLRSRCNODES:
2707 		case DIOCGETSYNCOOKIES:
2708 		case DIOCIGETIFACES:
2709 		case DIOCGIFSPEEDV0:
2710 		case DIOCGIFSPEEDV1:
2711 		case DIOCSETIFFLAG:
2712 		case DIOCCLRIFFLAG:
2713 		case DIOCGETETHRULES:
2714 		case DIOCGETETHRULE:
2715 		case DIOCGETETHRULESETS:
2716 		case DIOCGETETHRULESET:
2717 			break;
2718 		case DIOCRCLRTABLES:
2719 		case DIOCRADDTABLES:
2720 		case DIOCRDELTABLES:
2721 		case DIOCRSETTFLAGS:
2722 			if (((struct pfioc_table *)addr)->pfrio_flags &
2723 			    PFR_FLAG_DUMMY)
2724 				break; /* dummy operation ok */
2725 			return (EPERM);
2726 		default:
2727 			return (EPERM);
2728 		}
2729 
2730 	if (!(flags & FWRITE))
2731 		switch (cmd) {
2732 		case DIOCGETRULES:
2733 		case DIOCGETADDRS:
2734 		case DIOCGETADDR:
2735 		case DIOCGETSTATE:
2736 		case DIOCGETSTATENV:
2737 		case DIOCGETSTATUSNV:
2738 #ifdef COMPAT_FREEBSD14
2739 		case DIOCGETSTATES:
2740 		case DIOCGETSTATESV2:
2741 #endif
2742 		case DIOCGETTIMEOUT:
2743 		case DIOCGETLIMIT:
2744 		case DIOCGETALTQSV0:
2745 		case DIOCGETALTQSV1:
2746 		case DIOCGETALTQV0:
2747 		case DIOCGETALTQV1:
2748 		case DIOCGETQSTATSV0:
2749 		case DIOCGETQSTATSV1:
2750 		case DIOCGETRULESETS:
2751 		case DIOCGETRULESET:
2752 		case DIOCNATLOOK:
2753 		case DIOCRGETTABLES:
2754 		case DIOCRGETTSTATS:
2755 		case DIOCRGETADDRS:
2756 		case DIOCRGETASTATS:
2757 		case DIOCRTSTADDRS:
2758 		case DIOCOSFPGET:
2759 		case DIOCGETSRCNODES:
2760 		case DIOCGETSYNCOOKIES:
2761 		case DIOCIGETIFACES:
2762 		case DIOCGIFSPEEDV1:
2763 		case DIOCGIFSPEEDV0:
2764 		case DIOCGETRULENV:
2765 		case DIOCGETETHRULES:
2766 		case DIOCGETETHRULE:
2767 		case DIOCGETETHRULESETS:
2768 		case DIOCGETETHRULESET:
2769 			break;
2770 		case DIOCRCLRTABLES:
2771 		case DIOCRADDTABLES:
2772 		case DIOCRDELTABLES:
2773 		case DIOCRCLRTSTATS:
2774 		case DIOCRCLRADDRS:
2775 		case DIOCRADDADDRS:
2776 		case DIOCRDELADDRS:
2777 		case DIOCRSETADDRS:
2778 		case DIOCRSETTFLAGS:
2779 			if (((struct pfioc_table *)addr)->pfrio_flags &
2780 			    PFR_FLAG_DUMMY) {
2781 				flags |= FWRITE; /* need write lock for dummy */
2782 				break; /* dummy operation ok */
2783 			}
2784 			return (EACCES);
2785 		default:
2786 			return (EACCES);
2787 		}
2788 
2789 	CURVNET_SET(TD_TO_VNET(td));
2790 
2791 	switch (cmd) {
2792 #ifdef COMPAT_FREEBSD14
2793 	case DIOCSTART:
2794 		error = pf_start();
2795 		break;
2796 
2797 	case DIOCSTOP:
2798 		error = pf_stop();
2799 		break;
2800 #endif
2801 
2802 	case DIOCGETETHRULES: {
2803 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
2804 		nvlist_t		*nvl;
2805 		void			*packed;
2806 		struct pf_keth_rule	*tail;
2807 		struct pf_keth_ruleset	*rs;
2808 		u_int32_t		 ticket, nr;
2809 		const char		*anchor = "";
2810 
2811 		nvl = NULL;
2812 		packed = NULL;
2813 
2814 #define	ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULES_error, x)
2815 
2816 		if (nv->len > pf_ioctl_maxcount)
2817 			ERROUT(ENOMEM);
2818 
2819 		/* Copy the request in */
2820 		packed = malloc(nv->len, M_NVLIST, M_WAITOK);
2821 		if (packed == NULL)
2822 			ERROUT(ENOMEM);
2823 
2824 		error = copyin(nv->data, packed, nv->len);
2825 		if (error)
2826 			ERROUT(error);
2827 
2828 		nvl = nvlist_unpack(packed, nv->len, 0);
2829 		if (nvl == NULL)
2830 			ERROUT(EBADMSG);
2831 
2832 		if (! nvlist_exists_string(nvl, "anchor"))
2833 			ERROUT(EBADMSG);
2834 
2835 		anchor = nvlist_get_string(nvl, "anchor");
2836 
2837 		rs = pf_find_keth_ruleset(anchor);
2838 
2839 		nvlist_destroy(nvl);
2840 		nvl = NULL;
2841 		free(packed, M_NVLIST);
2842 		packed = NULL;
2843 
2844 		if (rs == NULL)
2845 			ERROUT(ENOENT);
2846 
2847 		/* Reply */
2848 		nvl = nvlist_create(0);
2849 		if (nvl == NULL)
2850 			ERROUT(ENOMEM);
2851 
2852 		PF_RULES_RLOCK();
2853 
2854 		ticket = rs->active.ticket;
2855 		tail = TAILQ_LAST(rs->active.rules, pf_keth_ruleq);
2856 		if (tail)
2857 			nr = tail->nr + 1;
2858 		else
2859 			nr = 0;
2860 
2861 		PF_RULES_RUNLOCK();
2862 
2863 		nvlist_add_number(nvl, "ticket", ticket);
2864 		nvlist_add_number(nvl, "nr", nr);
2865 
2866 		packed = nvlist_pack(nvl, &nv->len);
2867 		if (packed == NULL)
2868 			ERROUT(ENOMEM);
2869 
2870 		if (nv->size == 0)
2871 			ERROUT(0);
2872 		else if (nv->size < nv->len)
2873 			ERROUT(ENOSPC);
2874 
2875 		error = copyout(packed, nv->data, nv->len);
2876 
2877 #undef ERROUT
2878 DIOCGETETHRULES_error:
2879 		free(packed, M_NVLIST);
2880 		nvlist_destroy(nvl);
2881 		break;
2882 	}
2883 
2884 	case DIOCGETETHRULE: {
2885 		struct epoch_tracker	 et;
2886 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
2887 		nvlist_t		*nvl = NULL;
2888 		void			*nvlpacked = NULL;
2889 		struct pf_keth_rule	*rule = NULL;
2890 		struct pf_keth_ruleset	*rs;
2891 		u_int32_t		 ticket, nr;
2892 		bool			 clear = false;
2893 		const char		*anchor;
2894 
2895 #define ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULE_error, x)
2896 
2897 		if (nv->len > pf_ioctl_maxcount)
2898 			ERROUT(ENOMEM);
2899 
2900 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
2901 		if (nvlpacked == NULL)
2902 			ERROUT(ENOMEM);
2903 
2904 		error = copyin(nv->data, nvlpacked, nv->len);
2905 		if (error)
2906 			ERROUT(error);
2907 
2908 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
2909 		if (nvl == NULL)
2910 			ERROUT(EBADMSG);
2911 		if (! nvlist_exists_number(nvl, "ticket"))
2912 			ERROUT(EBADMSG);
2913 		ticket = nvlist_get_number(nvl, "ticket");
2914 		if (! nvlist_exists_string(nvl, "anchor"))
2915 			ERROUT(EBADMSG);
2916 		anchor = nvlist_get_string(nvl, "anchor");
2917 
2918 		if (nvlist_exists_bool(nvl, "clear"))
2919 			clear = nvlist_get_bool(nvl, "clear");
2920 
2921 		if (clear && !(flags & FWRITE))
2922 			ERROUT(EACCES);
2923 
2924 		if (! nvlist_exists_number(nvl, "nr"))
2925 			ERROUT(EBADMSG);
2926 		nr = nvlist_get_number(nvl, "nr");
2927 
2928 		PF_RULES_RLOCK();
2929 		rs = pf_find_keth_ruleset(anchor);
2930 		if (rs == NULL) {
2931 			PF_RULES_RUNLOCK();
2932 			ERROUT(ENOENT);
2933 		}
2934 		if (ticket != rs->active.ticket) {
2935 			PF_RULES_RUNLOCK();
2936 			ERROUT(EBUSY);
2937 		}
2938 
2939 		nvlist_destroy(nvl);
2940 		nvl = NULL;
2941 		free(nvlpacked, M_NVLIST);
2942 		nvlpacked = NULL;
2943 
2944 		rule = TAILQ_FIRST(rs->active.rules);
2945 		while ((rule != NULL) && (rule->nr != nr))
2946 			rule = TAILQ_NEXT(rule, entries);
2947 		if (rule == NULL) {
2948 			PF_RULES_RUNLOCK();
2949 			ERROUT(ENOENT);
2950 		}
2951 		/* Make sure rule can't go away. */
2952 		NET_EPOCH_ENTER(et);
2953 		PF_RULES_RUNLOCK();
2954 		nvl = pf_keth_rule_to_nveth_rule(rule);
2955 		if (nvl == NULL || pf_keth_anchor_nvcopyout(rs, rule, nvl)) {
2956 			NET_EPOCH_EXIT(et);
2957 			ERROUT(nvl == NULL ? ENOMEM : EBUSY);
2958 		}
2959 		NET_EPOCH_EXIT(et);
2960 
2961 		nvlpacked = nvlist_pack(nvl, &nv->len);
2962 		if (nvlpacked == NULL)
2963 			ERROUT(ENOMEM);
2964 
2965 		if (nv->size == 0)
2966 			ERROUT(0);
2967 		else if (nv->size < nv->len)
2968 			ERROUT(ENOSPC);
2969 
2970 		error = copyout(nvlpacked, nv->data, nv->len);
2971 		if (error == 0 && clear) {
2972 			counter_u64_zero(rule->evaluations);
2973 			for (int i = 0; i < 2; i++) {
2974 				counter_u64_zero(rule->packets[i]);
2975 				counter_u64_zero(rule->bytes[i]);
2976 			}
2977 		}
2978 
2979 #undef ERROUT
2980 DIOCGETETHRULE_error:
2981 		free(nvlpacked, M_NVLIST);
2982 		nvlist_destroy(nvl);
2983 		break;
2984 	}
2985 
2986 	case DIOCADDETHRULE: {
2987 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
2988 		nvlist_t		*nvl = NULL;
2989 		void			*nvlpacked = NULL;
2990 		struct pf_keth_rule	*rule = NULL, *tail = NULL;
2991 		struct pf_keth_ruleset	*ruleset = NULL;
2992 		struct pfi_kkif		*kif = NULL, *bridge_to_kif = NULL;
2993 		const char		*anchor = "", *anchor_call = "";
2994 
2995 #define ERROUT(x)	ERROUT_IOCTL(DIOCADDETHRULE_error, x)
2996 
2997 		if (nv->len > pf_ioctl_maxcount)
2998 			ERROUT(ENOMEM);
2999 
3000 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3001 		if (nvlpacked == NULL)
3002 			ERROUT(ENOMEM);
3003 
3004 		error = copyin(nv->data, nvlpacked, nv->len);
3005 		if (error)
3006 			ERROUT(error);
3007 
3008 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3009 		if (nvl == NULL)
3010 			ERROUT(EBADMSG);
3011 
3012 		if (! nvlist_exists_number(nvl, "ticket"))
3013 			ERROUT(EBADMSG);
3014 
3015 		if (nvlist_exists_string(nvl, "anchor"))
3016 			anchor = nvlist_get_string(nvl, "anchor");
3017 		if (nvlist_exists_string(nvl, "anchor_call"))
3018 			anchor_call = nvlist_get_string(nvl, "anchor_call");
3019 
3020 		ruleset = pf_find_keth_ruleset(anchor);
3021 		if (ruleset == NULL)
3022 			ERROUT(EINVAL);
3023 
3024 		if (nvlist_get_number(nvl, "ticket") !=
3025 		    ruleset->inactive.ticket) {
3026 			DPFPRINTF(PF_DEBUG_MISC,
3027 			    ("ticket: %d != %d\n",
3028 			    (u_int32_t)nvlist_get_number(nvl, "ticket"),
3029 			    ruleset->inactive.ticket));
3030 			ERROUT(EBUSY);
3031 		}
3032 
3033 		rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK);
3034 		if (rule == NULL)
3035 			ERROUT(ENOMEM);
3036 		rule->timestamp = NULL;
3037 
3038 		error = pf_nveth_rule_to_keth_rule(nvl, rule);
3039 		if (error != 0)
3040 			ERROUT(error);
3041 
3042 		if (rule->ifname[0])
3043 			kif = pf_kkif_create(M_WAITOK);
3044 		if (rule->bridge_to_name[0])
3045 			bridge_to_kif = pf_kkif_create(M_WAITOK);
3046 		rule->evaluations = counter_u64_alloc(M_WAITOK);
3047 		for (int i = 0; i < 2; i++) {
3048 			rule->packets[i] = counter_u64_alloc(M_WAITOK);
3049 			rule->bytes[i] = counter_u64_alloc(M_WAITOK);
3050 		}
3051 		rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
3052 		    M_WAITOK | M_ZERO);
3053 
3054 		PF_RULES_WLOCK();
3055 
3056 		if (rule->ifname[0]) {
3057 			rule->kif = pfi_kkif_attach(kif, rule->ifname);
3058 			pfi_kkif_ref(rule->kif);
3059 		} else
3060 			rule->kif = NULL;
3061 		if (rule->bridge_to_name[0]) {
3062 			rule->bridge_to = pfi_kkif_attach(bridge_to_kif,
3063 			    rule->bridge_to_name);
3064 			pfi_kkif_ref(rule->bridge_to);
3065 		} else
3066 			rule->bridge_to = NULL;
3067 
3068 #ifdef ALTQ
3069 		/* set queue ID */
3070 		if (rule->qname[0] != 0) {
3071 			if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
3072 				error = EBUSY;
3073 		}
3074 #endif
3077 		if (rule->tagname[0])
3078 			if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
3079 				error = EBUSY;
3080 		if (rule->match_tagname[0])
3081 			if ((rule->match_tag = pf_tagname2tag(
3082 			    rule->match_tagname)) == 0)
3083 				error = EBUSY;
3084 
3085 		if (error == 0 && rule->ipdst.addr.type == PF_ADDR_TABLE)
3086 			error = pf_eth_addr_setup(ruleset, &rule->ipdst.addr);
3087 		if (error == 0 && rule->ipsrc.addr.type == PF_ADDR_TABLE)
3088 			error = pf_eth_addr_setup(ruleset, &rule->ipsrc.addr);
3089 
3090 		if (error) {
3091 			pf_free_eth_rule(rule);
3092 			PF_RULES_WUNLOCK();
3093 			ERROUT(error);
3094 		}
3095 
3096 		if (pf_keth_anchor_setup(rule, ruleset, anchor_call)) {
3097 			pf_free_eth_rule(rule);
3098 			PF_RULES_WUNLOCK();
3099 			ERROUT(EINVAL);
3100 		}
3101 
3102 		tail = TAILQ_LAST(ruleset->inactive.rules, pf_keth_ruleq);
3103 		if (tail)
3104 			rule->nr = tail->nr + 1;
3105 		else
3106 			rule->nr = 0;
3107 
3108 		TAILQ_INSERT_TAIL(ruleset->inactive.rules, rule, entries);
3109 
3110 		PF_RULES_WUNLOCK();
3111 
3112 #undef ERROUT
3113 DIOCADDETHRULE_error:
3114 		nvlist_destroy(nvl);
3115 		free(nvlpacked, M_NVLIST);
3116 		break;
3117 	}
3118 
3119 	case DIOCGETETHRULESETS: {
3120 		struct epoch_tracker	 et;
3121 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3122 		nvlist_t		*nvl = NULL;
3123 		void			*nvlpacked = NULL;
3124 		struct pf_keth_ruleset	*ruleset;
3125 		struct pf_keth_anchor	*anchor;
3126 		int			 nr = 0;
3127 
3128 #define ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULESETS_error, x)
3129 
3130 		if (nv->len > pf_ioctl_maxcount)
3131 			ERROUT(ENOMEM);
3132 
3133 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3134 		if (nvlpacked == NULL)
3135 			ERROUT(ENOMEM);
3136 
3137 		error = copyin(nv->data, nvlpacked, nv->len);
3138 		if (error)
3139 			ERROUT(error);
3140 
3141 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3142 		if (nvl == NULL)
3143 			ERROUT(EBADMSG);
3144 		if (! nvlist_exists_string(nvl, "path"))
3145 			ERROUT(EBADMSG);
3146 
3147 		NET_EPOCH_ENTER(et);
3148 
3149 		if ((ruleset = pf_find_keth_ruleset(
3150 		    nvlist_get_string(nvl, "path"))) == NULL) {
3151 			NET_EPOCH_EXIT(et);
3152 			ERROUT(ENOENT);
3153 		}
3154 
3155 		if (ruleset->anchor == NULL) {
3156 			RB_FOREACH(anchor, pf_keth_anchor_global, &V_pf_keth_anchors)
3157 				if (anchor->parent == NULL)
3158 					nr++;
3159 		} else {
3160 			RB_FOREACH(anchor, pf_keth_anchor_node,
3161 			    &ruleset->anchor->children)
3162 				nr++;
3163 		}
3164 
3165 		NET_EPOCH_EXIT(et);
3166 
3167 		nvlist_destroy(nvl);
3168 		nvl = NULL;
3169 		free(nvlpacked, M_NVLIST);
3170 		nvlpacked = NULL;
3171 
3172 		nvl = nvlist_create(0);
3173 		if (nvl == NULL)
3174 			ERROUT(ENOMEM);
3175 
3176 		nvlist_add_number(nvl, "nr", nr);
3177 
3178 		nvlpacked = nvlist_pack(nvl, &nv->len);
3179 		if (nvlpacked == NULL)
3180 			ERROUT(ENOMEM);
3181 
3182 		if (nv->size == 0)
3183 			ERROUT(0);
3184 		else if (nv->size < nv->len)
3185 			ERROUT(ENOSPC);
3186 
3187 		error = copyout(nvlpacked, nv->data, nv->len);
3188 
3189 #undef ERROUT
3190 DIOCGETETHRULESETS_error:
3191 		free(nvlpacked, M_NVLIST);
3192 		nvlist_destroy(nvl);
3193 		break;
3194 	}
3195 
3196 	case DIOCGETETHRULESET: {
3197 		struct epoch_tracker	 et;
3198 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3199 		nvlist_t		*nvl = NULL;
3200 		void			*nvlpacked = NULL;
3201 		struct pf_keth_ruleset	*ruleset;
3202 		struct pf_keth_anchor	*anchor;
3203 		int			 nr = 0, req_nr = 0;
3204 		bool			 found = false;
3205 
3206 #define ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULESET_error, x)
3207 
3208 		if (nv->len > pf_ioctl_maxcount)
3209 			ERROUT(ENOMEM);
3210 
3211 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3212 		if (nvlpacked == NULL)
3213 			ERROUT(ENOMEM);
3214 
3215 		error = copyin(nv->data, nvlpacked, nv->len);
3216 		if (error)
3217 			ERROUT(error);
3218 
3219 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3220 		if (nvl == NULL)
3221 			ERROUT(EBADMSG);
3222 		if (! nvlist_exists_string(nvl, "path"))
3223 			ERROUT(EBADMSG);
3224 		if (! nvlist_exists_number(nvl, "nr"))
3225 			ERROUT(EBADMSG);
3226 
3227 		req_nr = nvlist_get_number(nvl, "nr");
3228 
3229 		NET_EPOCH_ENTER(et);
3230 
3231 		if ((ruleset = pf_find_keth_ruleset(
3232 		    nvlist_get_string(nvl, "path"))) == NULL) {
3233 			NET_EPOCH_EXIT(et);
3234 			ERROUT(ENOENT);
3235 		}
3236 
3237 		nvlist_destroy(nvl);
3238 		nvl = NULL;
3239 		free(nvlpacked, M_NVLIST);
3240 		nvlpacked = NULL;
3241 
3242 		nvl = nvlist_create(0);
3243 		if (nvl == NULL) {
3244 			NET_EPOCH_EXIT(et);
3245 			ERROUT(ENOMEM);
3246 		}
3247 
3248 		if (ruleset->anchor == NULL) {
3249 			RB_FOREACH(anchor, pf_keth_anchor_global,
3250 			    &V_pf_keth_anchors) {
3251 				if (anchor->parent == NULL && nr++ == req_nr) {
3252 					found = true;
3253 					break;
3254 				}
3255 			}
3256 		} else {
3257 			RB_FOREACH(anchor, pf_keth_anchor_node,
3258 			     &ruleset->anchor->children) {
3259 				if (nr++ == req_nr) {
3260 					found = true;
3261 					break;
3262 				}
3263 			}
3264 		}
3265 
3266 		NET_EPOCH_EXIT(et);
3267 		if (found) {
3268 			nvlist_add_number(nvl, "nr", nr);
3269 			nvlist_add_string(nvl, "name", anchor->name);
3270 			if (ruleset->anchor)
3271 				nvlist_add_string(nvl, "path",
3272 				    ruleset->anchor->path);
3273 			else
3274 				nvlist_add_string(nvl, "path", "");
3275 		} else {
3276 			ERROUT(EBUSY);
3277 		}
3278 
3279 		nvlpacked = nvlist_pack(nvl, &nv->len);
3280 		if (nvlpacked == NULL)
3281 			ERROUT(ENOMEM);
3282 
3283 		if (nv->size == 0)
3284 			ERROUT(0);
3285 		else if (nv->size < nv->len)
3286 			ERROUT(ENOSPC);
3287 
3288 		error = copyout(nvlpacked, nv->data, nv->len);
3289 
3290 #undef ERROUT
3291 DIOCGETETHRULESET_error:
3292 		free(nvlpacked, M_NVLIST);
3293 		nvlist_destroy(nvl);
3294 		break;
3295 	}
3296 
3297 	case DIOCADDRULENV: {
3298 		struct pfioc_nv	*nv = (struct pfioc_nv *)addr;
3299 		nvlist_t	*nvl = NULL;
3300 		void		*nvlpacked = NULL;
3301 		struct pf_krule	*rule = NULL;
3302 		const char	*anchor = "", *anchor_call = "";
3303 		uint32_t	 ticket = 0, pool_ticket = 0;
3304 
3305 #define	ERROUT(x)	ERROUT_IOCTL(DIOCADDRULENV_error, x)
3306 
3307 		if (nv->len > pf_ioctl_maxcount)
3308 			ERROUT(ENOMEM);
3309 
3310 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3311 		error = copyin(nv->data, nvlpacked, nv->len);
3312 		if (error)
3313 			ERROUT(error);
3314 
3315 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3316 		if (nvl == NULL)
3317 			ERROUT(EBADMSG);
3318 
3319 		if (! nvlist_exists_number(nvl, "ticket"))
3320 			ERROUT(EINVAL);
3321 		ticket = nvlist_get_number(nvl, "ticket");
3322 
3323 		if (! nvlist_exists_number(nvl, "pool_ticket"))
3324 			ERROUT(EINVAL);
3325 		pool_ticket = nvlist_get_number(nvl, "pool_ticket");
3326 
3327 		if (! nvlist_exists_nvlist(nvl, "rule"))
3328 			ERROUT(EINVAL);
3329 
3330 		rule = pf_krule_alloc();
3331 		error = pf_nvrule_to_krule(nvlist_get_nvlist(nvl, "rule"),
3332 		    rule);
3333 		if (error)
3334 			ERROUT(error);
3335 
3336 		if (nvlist_exists_string(nvl, "anchor"))
3337 			anchor = nvlist_get_string(nvl, "anchor");
3338 		if (nvlist_exists_string(nvl, "anchor_call"))
3339 			anchor_call = nvlist_get_string(nvl, "anchor_call");
3340 
3341 		if ((error = nvlist_error(nvl)))
3342 			ERROUT(error);
3343 
3344 		/* Frees rule on error */
3345 		error = pf_ioctl_addrule(rule, ticket, pool_ticket, anchor,
3346 		    anchor_call, td->td_ucred->cr_ruid,
3347 		    td->td_proc ? td->td_proc->p_pid : 0);
3348 
3349 		nvlist_destroy(nvl);
3350 		free(nvlpacked, M_NVLIST);
3351 		break;
3352 #undef ERROUT
3353 DIOCADDRULENV_error:
3354 		pf_krule_free(rule);
3355 		nvlist_destroy(nvl);
3356 		free(nvlpacked, M_NVLIST);
3357 
3358 		break;
3359 	}
3360 	case DIOCADDRULE: {
3361 		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
3362 		struct pf_krule		*rule;
3363 
3364 		rule = pf_krule_alloc();
3365 		error = pf_rule_to_krule(&pr->rule, rule);
3366 		if (error != 0) {
3367 			pf_krule_free(rule);
3368 			break;
3369 		}
3370 
3371 		pr->anchor[sizeof(pr->anchor) - 1] = 0;
3372 
3373 		/* Frees rule on error */
3374 		error = pf_ioctl_addrule(rule, pr->ticket, pr->pool_ticket,
3375 		    pr->anchor, pr->anchor_call, td->td_ucred->cr_ruid,
3376 		    td->td_proc ? td->td_proc->p_pid : 0);
3377 		break;
3378 	}
3379 
3380 	case DIOCGETRULES: {
3381 		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
3382 
3383 		pr->anchor[sizeof(pr->anchor) - 1] = 0;
3384 
3385 		error = pf_ioctl_getrules(pr);
3386 
3387 		break;
3388 	}
3389 
3390 	case DIOCGETRULENV: {
3391 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3392 		nvlist_t		*nvrule = NULL;
3393 		nvlist_t		*nvl = NULL;
3394 		struct pf_kruleset	*ruleset;
3395 		struct pf_krule		*rule;
3396 		void			*nvlpacked = NULL;
3397 		int			 rs_num, nr;
3398 		bool			 clear_counter = false;
3399 
3400 #define	ERROUT(x)	ERROUT_IOCTL(DIOCGETRULENV_error, x)
3401 
3402 		if (nv->len > pf_ioctl_maxcount)
3403 			ERROUT(ENOMEM);
3404 
3405 		/* Copy the request in */
3406 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3407 		if (nvlpacked == NULL)
3408 			ERROUT(ENOMEM);
3409 
3410 		error = copyin(nv->data, nvlpacked, nv->len);
3411 		if (error)
3412 			ERROUT(error);
3413 
3414 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3415 		if (nvl == NULL)
3416 			ERROUT(EBADMSG);
3417 
3418 		if (! nvlist_exists_string(nvl, "anchor"))
3419 			ERROUT(EBADMSG);
3420 		if (! nvlist_exists_number(nvl, "ruleset"))
3421 			ERROUT(EBADMSG);
3422 		if (! nvlist_exists_number(nvl, "ticket"))
3423 			ERROUT(EBADMSG);
3424 		if (! nvlist_exists_number(nvl, "nr"))
3425 			ERROUT(EBADMSG);
3426 
3427 		if (nvlist_exists_bool(nvl, "clear_counter"))
3428 			clear_counter = nvlist_get_bool(nvl, "clear_counter");
3429 
3430 		if (clear_counter && !(flags & FWRITE))
3431 			ERROUT(EACCES);
3432 
3433 		nr = nvlist_get_number(nvl, "nr");
3434 
3435 		PF_RULES_WLOCK();
3436 		ruleset = pf_find_kruleset(nvlist_get_string(nvl, "anchor"));
3437 		if (ruleset == NULL) {
3438 			PF_RULES_WUNLOCK();
3439 			ERROUT(ENOENT);
3440 		}
3441 
3442 		rs_num = pf_get_ruleset_number(nvlist_get_number(nvl, "ruleset"));
3443 		if (rs_num >= PF_RULESET_MAX) {
3444 			PF_RULES_WUNLOCK();
3445 			ERROUT(EINVAL);
3446 		}
3447 
3448 		if (nvlist_get_number(nvl, "ticket") !=
3449 		    ruleset->rules[rs_num].active.ticket) {
3450 			PF_RULES_WUNLOCK();
3451 			ERROUT(EBUSY);
3452 		}
3453 
3454 		if ((error = nvlist_error(nvl))) {
3455 			PF_RULES_WUNLOCK();
3456 			ERROUT(error);
3457 		}
3458 
3459 		rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
3460 		while ((rule != NULL) && (rule->nr != nr))
3461 			rule = TAILQ_NEXT(rule, entries);
3462 		if (rule == NULL) {
3463 			PF_RULES_WUNLOCK();
3464 			ERROUT(EBUSY);
3465 		}
3466 
3467 		nvrule = pf_krule_to_nvrule(rule);
3468 
3469 		nvlist_destroy(nvl);
3470 		nvl = nvlist_create(0);
3471 		if (nvl == NULL) {
3472 			PF_RULES_WUNLOCK();
3473 			ERROUT(ENOMEM);
3474 		}
3475 		nvlist_add_number(nvl, "nr", nr);
3476 		nvlist_add_nvlist(nvl, "rule", nvrule);
3477 		nvlist_destroy(nvrule);
3478 		nvrule = NULL;
3479 		if (pf_kanchor_nvcopyout(ruleset, rule, nvl)) {
3480 			PF_RULES_WUNLOCK();
3481 			ERROUT(EBUSY);
3482 		}
3483 
3484 		free(nvlpacked, M_NVLIST);
3485 		nvlpacked = nvlist_pack(nvl, &nv->len);
3486 		if (nvlpacked == NULL) {
3487 			PF_RULES_WUNLOCK();
3488 			ERROUT(ENOMEM);
3489 		}
3490 
3491 		if (nv->size == 0) {
3492 			PF_RULES_WUNLOCK();
3493 			ERROUT(0);
3494 		} else if (nv->size < nv->len) {
3496 			PF_RULES_WUNLOCK();
3497 			ERROUT(ENOSPC);
3498 		}
3499 
3500 		if (clear_counter)
3501 			pf_krule_clear_counters(rule);
3502 
3503 		PF_RULES_WUNLOCK();
3504 
3505 		error = copyout(nvlpacked, nv->data, nv->len);
3506 
3507 #undef ERROUT
3508 DIOCGETRULENV_error:
3509 		free(nvlpacked, M_NVLIST);
3510 		nvlist_destroy(nvrule);
3511 		nvlist_destroy(nvl);
3512 
3513 		break;
3514 	}
3515 
3516 	case DIOCCHANGERULE: {
3517 		struct pfioc_rule	*pcr = (struct pfioc_rule *)addr;
3518 		struct pf_kruleset	*ruleset;
3519 		struct pf_krule		*oldrule = NULL, *newrule = NULL;
3520 		struct pfi_kkif		*kif = NULL;
3521 		struct pf_kpooladdr	*pa;
3522 		u_int32_t		 nr = 0;
3523 		int			 rs_num;
3524 
3525 		pcr->anchor[sizeof(pcr->anchor) - 1] = 0;
3526 
3527 		if (pcr->action < PF_CHANGE_ADD_HEAD ||
3528 		    pcr->action > PF_CHANGE_GET_TICKET) {
3529 			error = EINVAL;
3530 			break;
3531 		}
3532 		if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
3533 			error = EINVAL;
3534 			break;
3535 		}
3536 
3537 		if (pcr->action != PF_CHANGE_REMOVE) {
3538 			newrule = pf_krule_alloc();
3539 			error = pf_rule_to_krule(&pcr->rule, newrule);
3540 			if (error != 0) {
3541 				pf_krule_free(newrule);
3542 				break;
3543 			}
3544 
3545 			if (newrule->ifname[0])
3546 				kif = pf_kkif_create(M_WAITOK);
3547 			pf_counter_u64_init(&newrule->evaluations, M_WAITOK);
3548 			for (int i = 0; i < 2; i++) {
3549 				pf_counter_u64_init(&newrule->packets[i], M_WAITOK);
3550 				pf_counter_u64_init(&newrule->bytes[i], M_WAITOK);
3551 			}
3552 			newrule->states_cur = counter_u64_alloc(M_WAITOK);
3553 			newrule->states_tot = counter_u64_alloc(M_WAITOK);
3554 			newrule->src_nodes = counter_u64_alloc(M_WAITOK);
3555 			newrule->cuid = td->td_ucred->cr_ruid;
3556 			newrule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
3557 			TAILQ_INIT(&newrule->rpool.list);
3558 		}
3559 #define	ERROUT(x)	ERROUT_IOCTL(DIOCCHANGERULE_error, x)
3560 
3561 		PF_CONFIG_LOCK();
3562 		PF_RULES_WLOCK();
3563 #ifdef PF_WANT_32_TO_64_COUNTER
3564 		if (newrule != NULL) {
3565 			LIST_INSERT_HEAD(&V_pf_allrulelist, newrule, allrulelist);
3566 			newrule->allrulelinked = true;
3567 			V_pf_allrulecount++;
3568 		}
3569 #endif
3570 
3571 		if (!(pcr->action == PF_CHANGE_REMOVE ||
3572 		    pcr->action == PF_CHANGE_GET_TICKET) &&
3573 		    pcr->pool_ticket != V_ticket_pabuf)
3574 			ERROUT(EBUSY);
3575 
3576 		ruleset = pf_find_kruleset(pcr->anchor);
3577 		if (ruleset == NULL)
3578 			ERROUT(EINVAL);
3579 
3580 		rs_num = pf_get_ruleset_number(pcr->rule.action);
3581 		if (rs_num >= PF_RULESET_MAX)
3582 			ERROUT(EINVAL);
3583 
3584 		/*
3585 		 * XXXMJG: there is no guarantee that the ruleset was
3586 		 * created by the usual route of calling DIOCXBEGIN.
3587 		 * As a result it is possible the rule tree will not
3588 		 * be allocated yet. Hack around it by doing it here.
3589 		 * Note it is fine to let the tree persist in case of
3590 		 * error as it will be freed down the road on future
3591 		 * updates (if need be).
3592 		 */
3593 		if (ruleset->rules[rs_num].active.tree == NULL) {
3594 			ruleset->rules[rs_num].active.tree = pf_rule_tree_alloc(M_NOWAIT);
3595 			if (ruleset->rules[rs_num].active.tree == NULL) {
3596 				ERROUT(ENOMEM);
3597 			}
3598 		}
3599 
3600 		if (pcr->action == PF_CHANGE_GET_TICKET) {
3601 			pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
3602 			ERROUT(0);
3603 		} else if (pcr->ticket !=
3604 			    ruleset->rules[rs_num].active.ticket)
3605 				ERROUT(EINVAL);
3606 
3607 		if (pcr->action != PF_CHANGE_REMOVE) {
3608 			if (newrule->ifname[0]) {
3609 				newrule->kif = pfi_kkif_attach(kif,
3610 				    newrule->ifname);
3611 				kif = NULL;
3612 				pfi_kkif_ref(newrule->kif);
3613 			} else
3614 				newrule->kif = NULL;
3615 
3616 			if (newrule->rtableid > 0 &&
3617 			    newrule->rtableid >= rt_numfibs)
3618 				error = EBUSY;
3619 
3620 #ifdef ALTQ
3621 			/* set queue IDs */
3622 			if (newrule->qname[0] != 0) {
3623 				if ((newrule->qid =
3624 				    pf_qname2qid(newrule->qname)) == 0)
3625 					error = EBUSY;
3626 				else if (newrule->pqname[0] != 0) {
3627 					if ((newrule->pqid =
3628 					    pf_qname2qid(newrule->pqname)) == 0)
3629 						error = EBUSY;
3630 				} else
3631 					newrule->pqid = newrule->qid;
3632 			}
3633 #endif /* ALTQ */
3634 			if (newrule->tagname[0])
3635 				if ((newrule->tag =
3636 				    pf_tagname2tag(newrule->tagname)) == 0)
3637 					error = EBUSY;
3638 			if (newrule->match_tagname[0])
3639 				if ((newrule->match_tag = pf_tagname2tag(
3640 				    newrule->match_tagname)) == 0)
3641 					error = EBUSY;
3642 			if (newrule->rt && !newrule->direction)
3643 				error = EINVAL;
3644 			if (!newrule->log)
3645 				newrule->logif = 0;
3646 			if (newrule->logif >= PFLOGIFS_MAX)
3647 				error = EINVAL;
3648 			if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af))
3649 				error = ENOMEM;
3650 			if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af))
3651 				error = ENOMEM;
3652 			if (pf_kanchor_setup(newrule, ruleset, pcr->anchor_call))
3653 				error = EINVAL;
3654 			TAILQ_FOREACH(pa, &V_pf_pabuf, entries)
3655 				if (pa->addr.type == PF_ADDR_TABLE) {
3656 					pa->addr.p.tbl =
3657 					    pfr_attach_table(ruleset,
3658 					    pa->addr.v.tblname);
3659 					if (pa->addr.p.tbl == NULL)
3660 						error = ENOMEM;
3661 				}
3662 
3663 			newrule->overload_tbl = NULL;
3664 			if (newrule->overload_tblname[0]) {
3665 				if ((newrule->overload_tbl = pfr_attach_table(
3666 				    ruleset, newrule->overload_tblname)) ==
3667 				    NULL)
3668 					error = EINVAL;
3669 				else
3670 					newrule->overload_tbl->pfrkt_flags |=
3671 					    PFR_TFLAG_ACTIVE;
3672 			}
3673 
3674 			pf_mv_kpool(&V_pf_pabuf, &newrule->rpool.list);
3675 			if (((((newrule->action == PF_NAT) ||
3676 			    (newrule->action == PF_RDR) ||
3677 			    (newrule->action == PF_BINAT) ||
3678 			    (newrule->rt > PF_NOPFROUTE)) &&
3679 			    !newrule->anchor)) &&
3680 			    (TAILQ_FIRST(&newrule->rpool.list) == NULL))
3681 				error = EINVAL;
3682 
3683 			if (error) {
3684 				pf_free_rule(newrule);
3685 				PF_RULES_WUNLOCK();
3686 				PF_CONFIG_UNLOCK();
3687 				break;
3688 			}
3689 
3690 			newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
3691 		}
3692 		pf_empty_kpool(&V_pf_pabuf);
3693 
3694 		if (pcr->action == PF_CHANGE_ADD_HEAD)
3695 			oldrule = TAILQ_FIRST(
3696 			    ruleset->rules[rs_num].active.ptr);
3697 		else if (pcr->action == PF_CHANGE_ADD_TAIL)
3698 			oldrule = TAILQ_LAST(
3699 			    ruleset->rules[rs_num].active.ptr, pf_krulequeue);
3700 		else {
3701 			oldrule = TAILQ_FIRST(
3702 			    ruleset->rules[rs_num].active.ptr);
3703 			while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
3704 				oldrule = TAILQ_NEXT(oldrule, entries);
3705 			if (oldrule == NULL) {
3706 				if (newrule != NULL)
3707 					pf_free_rule(newrule);
3708 				PF_RULES_WUNLOCK();
3709 				PF_CONFIG_UNLOCK();
3710 				error = EINVAL;
3711 				break;
3712 			}
3713 		}
3714 
3715 		if (pcr->action == PF_CHANGE_REMOVE) {
3716 			pf_unlink_rule(ruleset->rules[rs_num].active.ptr,
3717 			    oldrule);
3718 			RB_REMOVE(pf_krule_global,
3719 			    ruleset->rules[rs_num].active.tree, oldrule);
3720 			ruleset->rules[rs_num].active.rcount--;
3721 		} else {
3722 			pf_hash_rule(newrule);
3723 			if (RB_INSERT(pf_krule_global,
3724 			    ruleset->rules[rs_num].active.tree, newrule) != NULL) {
3725 				pf_free_rule(newrule);
3726 				PF_RULES_WUNLOCK();
3727 				PF_CONFIG_UNLOCK();
3728 				error = EEXIST;
3729 				break;
3730 			}
3731 
3732 			if (oldrule == NULL)
3733 				TAILQ_INSERT_TAIL(
3734 				    ruleset->rules[rs_num].active.ptr,
3735 				    newrule, entries);
3736 			else if (pcr->action == PF_CHANGE_ADD_HEAD ||
3737 			    pcr->action == PF_CHANGE_ADD_BEFORE)
3738 				TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
3739 			else
3740 				TAILQ_INSERT_AFTER(
3741 				    ruleset->rules[rs_num].active.ptr,
3742 				    oldrule, newrule, entries);
3743 			ruleset->rules[rs_num].active.rcount++;
3744 		}
3745 
3746 		nr = 0;
3747 		TAILQ_FOREACH(oldrule,
3748 		    ruleset->rules[rs_num].active.ptr, entries)
3749 			oldrule->nr = nr++;
3750 
3751 		ruleset->rules[rs_num].active.ticket++;
3752 
3753 		pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
3754 		pf_remove_if_empty_kruleset(ruleset);
3755 
3756 		PF_RULES_WUNLOCK();
3757 		PF_CONFIG_UNLOCK();
3758 		break;
3759 
3760 #undef ERROUT
3761 DIOCCHANGERULE_error:
3762 		PF_RULES_WUNLOCK();
3763 		PF_CONFIG_UNLOCK();
3764 		pf_krule_free(newrule);
3765 		pf_kkif_free(kif);
3766 		break;
3767 	}
3768 
3769 	case DIOCCLRSTATESNV: {
3770 		error = pf_clearstates_nv((struct pfioc_nv *)addr);
3771 		break;
3772 	}
3773 
3774 	case DIOCKILLSTATESNV: {
3775 		error = pf_killstates_nv((struct pfioc_nv *)addr);
3776 		break;
3777 	}
3778 
3779 	case DIOCADDSTATE: {
3780 		struct pfioc_state		*ps = (struct pfioc_state *)addr;
3781 		struct pfsync_state_1301	*sp = &ps->state;
3782 
3783 		if (sp->timeout >= PFTM_MAX) {
3784 			error = EINVAL;
3785 			break;
3786 		}
3787 		if (V_pfsync_state_import_ptr != NULL) {
3788 			PF_RULES_RLOCK();
3789 			error = V_pfsync_state_import_ptr(
3790 			    (union pfsync_state_union *)sp, PFSYNC_SI_IOCTL,
3791 			    PFSYNC_MSG_VERSION_1301);
3792 			PF_RULES_RUNLOCK();
3793 		} else
3794 			error = EOPNOTSUPP;
3795 		break;
3796 	}
3797 
3798 	case DIOCGETSTATE: {
3799 		struct pfioc_state	*ps = (struct pfioc_state *)addr;
3800 		struct pf_kstate	*s;
3801 
3802 		s = pf_find_state_byid(ps->state.id, ps->state.creatorid);
3803 		if (s == NULL) {
3804 			error = ENOENT;
3805 			break;
3806 		}
3807 
3808 		pfsync_state_export((union pfsync_state_union*)&ps->state,
3809 		    s, PFSYNC_MSG_VERSION_1301);
3810 		PF_STATE_UNLOCK(s);
3811 		break;
3812 	}
3813 
3814 	case DIOCGETSTATENV: {
3815 		error = pf_getstate((struct pfioc_nv *)addr);
3816 		break;
3817 	}
3818 
3819 #ifdef COMPAT_FREEBSD14
3820 	case DIOCGETSTATES: {
3821 		struct pfioc_states	*ps = (struct pfioc_states *)addr;
3822 		struct pf_kstate	*s;
3823 		struct pfsync_state_1301	*pstore, *p;
3824 		int			 i, nr;
3825 		size_t			 slice_count = 16, count;
3826 		void			*out;
3827 
3828 		if (ps->ps_len <= 0) {
3829 			nr = uma_zone_get_cur(V_pf_state_z);
3830 			ps->ps_len = sizeof(struct pfsync_state_1301) * nr;
3831 			break;
3832 		}
3833 
3834 		out = ps->ps_states;
3835 		pstore = mallocarray(slice_count,
3836 		    sizeof(struct pfsync_state_1301), M_TEMP, M_WAITOK | M_ZERO);
3837 		nr = 0;
3838 
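		/*
		 * Export the states one hash row at a time.  Each row is
		 * counted under its lock first; if it outgrows the temporary
		 * slice, the slice is reallocated and the row rescanned.
		 */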
3839 		for (i = 0; i <= V_pf_hashmask; i++) {
3840 			struct pf_idhash *ih = &V_pf_idhash[i];
3841 
3842 DIOCGETSTATES_retry:
3843 			p = pstore;
3844 
3845 			if (LIST_EMPTY(&ih->states))
3846 				continue;
3847 
3848 			PF_HASHROW_LOCK(ih);
3849 			count = 0;
3850 			LIST_FOREACH(s, &ih->states, entry) {
3851 				if (s->timeout == PFTM_UNLINKED)
3852 					continue;
3853 				count++;
3854 			}
3855 
3856 			if (count > slice_count) {
3857 				PF_HASHROW_UNLOCK(ih);
3858 				free(pstore, M_TEMP);
3859 				slice_count = count * 2;
3860 				pstore = mallocarray(slice_count,
3861 				    sizeof(struct pfsync_state_1301), M_TEMP,
3862 				    M_WAITOK | M_ZERO);
3863 				goto DIOCGETSTATES_retry;
3864 			}
3865 
3866 			if ((nr + count) * sizeof(*p) > ps->ps_len) {
3867 				PF_HASHROW_UNLOCK(ih);
3868 				goto DIOCGETSTATES_full;
3869 			}
3870 
3871 			LIST_FOREACH(s, &ih->states, entry) {
3872 				if (s->timeout == PFTM_UNLINKED)
3873 					continue;
3874 
3875 				pfsync_state_export((union pfsync_state_union*)p,
3876 				    s, PFSYNC_MSG_VERSION_1301);
3877 				p++;
3878 				nr++;
3879 			}
3880 			PF_HASHROW_UNLOCK(ih);
3881 			error = copyout(pstore, out,
3882 			    sizeof(struct pfsync_state_1301) * count);
3883 			if (error)
3884 				break;
3885 			out = ps->ps_states + nr;
3886 		}
3887 DIOCGETSTATES_full:
3888 		ps->ps_len = sizeof(struct pfsync_state_1301) * nr;
3889 		free(pstore, M_TEMP);
3890 
3891 		break;
3892 	}
3893 
3894 	case DIOCGETSTATESV2: {
3895 		struct pfioc_states_v2	*ps = (struct pfioc_states_v2 *)addr;
3896 		struct pf_kstate	*s;
3897 		struct pf_state_export	*pstore, *p;
3898 		int i, nr;
3899 		size_t slice_count = 16, count;
3900 		void *out;
3901 
3902 		if (ps->ps_req_version > PF_STATE_VERSION) {
3903 			error = ENOTSUP;
3904 			break;
3905 		}
3906 
3907 		if (ps->ps_len <= 0) {
3908 			nr = uma_zone_get_cur(V_pf_state_z);
3909 			ps->ps_len = sizeof(struct pf_state_export) * nr;
3910 			break;
3911 		}
3912 
3913 		out = ps->ps_states;
3914 		pstore = mallocarray(slice_count,
3915 		    sizeof(struct pf_state_export), M_TEMP, M_WAITOK | M_ZERO);
3916 		nr = 0;
3917 
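		/* Same slice-and-retry export scheme as DIOCGETSTATES above. */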
3918 		for (i = 0; i <= V_pf_hashmask; i++) {
3919 			struct pf_idhash *ih = &V_pf_idhash[i];
3920 
3921 DIOCGETSTATESV2_retry:
3922 			p = pstore;
3923 
3924 			if (LIST_EMPTY(&ih->states))
3925 				continue;
3926 
3927 			PF_HASHROW_LOCK(ih);
3928 			count = 0;
3929 			LIST_FOREACH(s, &ih->states, entry) {
3930 				if (s->timeout == PFTM_UNLINKED)
3931 					continue;
3932 				count++;
3933 			}
3934 
3935 			if (count > slice_count) {
3936 				PF_HASHROW_UNLOCK(ih);
3937 				free(pstore, M_TEMP);
3938 				slice_count = count * 2;
3939 				pstore = mallocarray(slice_count,
3940 				    sizeof(struct pf_state_export), M_TEMP,
3941 				    M_WAITOK | M_ZERO);
3942 				goto DIOCGETSTATESV2_retry;
3943 			}
3944 
3945 			if ((nr + count) * sizeof(*p) > ps->ps_len) {
3946 				PF_HASHROW_UNLOCK(ih);
3947 				goto DIOCGETSTATESV2_full;
3948 			}
3949 
3950 			LIST_FOREACH(s, &ih->states, entry) {
3951 				if (s->timeout == PFTM_UNLINKED)
3952 					continue;
3953 
3954 				pf_state_export(p, s);
3955 				p++;
3956 				nr++;
3957 			}
3958 			PF_HASHROW_UNLOCK(ih);
3959 			error = copyout(pstore, out,
3960 			    sizeof(struct pf_state_export) * count);
3961 			if (error)
3962 				break;
3963 			out = ps->ps_states + nr;
3964 		}
3965 DIOCGETSTATESV2_full:
3966 		ps->ps_len = nr * sizeof(struct pf_state_export);
3967 		free(pstore, M_TEMP);
3968 
3969 		break;
3970 	}
3971 #endif
3972 	case DIOCGETSTATUSNV: {
3973 		error = pf_getstatus((struct pfioc_nv *)addr);
3974 		break;
3975 	}
3976 
3977 	case DIOCSETSTATUSIF: {
3978 		struct pfioc_if	*pi = (struct pfioc_if *)addr;
3979 
3980 		if (pi->ifname[0] == 0) {
3981 			bzero(V_pf_status.ifname, IFNAMSIZ);
3982 			break;
3983 		}
3984 		PF_RULES_WLOCK();
3985 		error = pf_user_strcpy(V_pf_status.ifname, pi->ifname, IFNAMSIZ);
3986 		PF_RULES_WUNLOCK();
3987 		break;
3988 	}
3989 
3990 	case DIOCCLRSTATUS: {
3991 		pf_ioctl_clear_status();
3992 		break;
3993 	}
3994 
3995 	case DIOCNATLOOK: {
3996 		struct pfioc_natlook	*pnl = (struct pfioc_natlook *)addr;
3997 		struct pf_state_key	*sk;
3998 		struct pf_kstate	*state;
3999 		struct pf_state_key_cmp	 key;
4000 		int			 m = 0, direction = pnl->direction;
4001 		int			 sidx, didx;
4002 
4003 		/* NATLOOK src and dst are reversed, so reverse sidx/didx */
4004 		sidx = (direction == PF_IN) ? 1 : 0;
4005 		didx = (direction == PF_IN) ? 0 : 1;
4006 
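		/*
		 * Minimal userland sketch (illustrative only; pf_fd is an
		 * assumed descriptor for /dev/pf): recover the translated
		 * endpoints of an established TCP connection.
		 *
		 *	struct pfioc_natlook pnl = {
		 *		.af = AF_INET,
		 *		.proto = IPPROTO_TCP,
		 *		.direction = PF_IN,
		 *	};
		 *	pnl.saddr = ...; pnl.sport = ...;
		 *	pnl.daddr = ...; pnl.dport = ...;
		 *	if (ioctl(pf_fd, DIOCNATLOOK, &pnl) == 0)
		 *		... use pnl.rsaddr/rsport, pnl.rdaddr/rdport ...
		 */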
4007 		if (!pnl->proto ||
4008 		    PF_AZERO(&pnl->saddr, pnl->af) ||
4009 		    PF_AZERO(&pnl->daddr, pnl->af) ||
4010 		    ((pnl->proto == IPPROTO_TCP ||
4011 		    pnl->proto == IPPROTO_UDP) &&
4012 		    (!pnl->dport || !pnl->sport)))
4013 			error = EINVAL;
4014 		else {
4015 			bzero(&key, sizeof(key));
4016 			key.af = pnl->af;
4017 			key.proto = pnl->proto;
4018 			PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af);
4019 			key.port[sidx] = pnl->sport;
4020 			PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af);
4021 			key.port[didx] = pnl->dport;
4022 
4023 			state = pf_find_state_all(&key, direction, &m);
4024 			if (state == NULL) {
4025 				error = ENOENT;
4026 			} else {
4027 				if (m > 1) {
4028 					PF_STATE_UNLOCK(state);
4029 					error = E2BIG;	/* more than one state */
4030 				} else {
4031 					sk = state->key[sidx];
4032 					PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af);
4033 					pnl->rsport = sk->port[sidx];
4034 					PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af);
4035 					pnl->rdport = sk->port[didx];
4036 					PF_STATE_UNLOCK(state);
4037 				}
4038 			}
4039 		}
4040 		break;
4041 	}
4042 
4043 	case DIOCSETTIMEOUT: {
4044 		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;
4045 
4046 		error = pf_ioctl_set_timeout(pt->timeout, pt->seconds,
4047 		    &pt->seconds);
4048 		break;
4049 	}
4050 
4051 	case DIOCGETTIMEOUT: {
4052 		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;
4053 
4054 		error = pf_ioctl_get_timeout(pt->timeout, &pt->seconds);
4055 		break;
4056 	}
4057 
4058 	case DIOCGETLIMIT: {
4059 		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;
4060 
4061 		error = pf_ioctl_get_limit(pl->index, &pl->limit);
4062 		break;
4063 	}
4064 
4065 	case DIOCSETLIMIT: {
4066 		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;
4067 		unsigned int old_limit;
4068 
4069 		error = pf_ioctl_set_limit(pl->index, pl->limit, &old_limit);
4070 		pl->limit = old_limit;
4071 		break;
4072 	}
4073 
4074 	case DIOCSETDEBUG: {
4075 		u_int32_t	*level = (u_int32_t *)addr;
4076 
4077 		PF_RULES_WLOCK();
4078 		V_pf_status.debug = *level;
4079 		PF_RULES_WUNLOCK();
4080 		break;
4081 	}
4082 
4083 	case DIOCCLRRULECTRS: {
4084 		/* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
4085 		struct pf_kruleset	*ruleset = &pf_main_ruleset;
4086 		struct pf_krule		*rule;
4087 
4088 		PF_RULES_WLOCK();
4089 		TAILQ_FOREACH(rule,
4090 		    ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
4091 			pf_counter_u64_zero(&rule->evaluations);
4092 			for (int i = 0; i < 2; i++) {
4093 				pf_counter_u64_zero(&rule->packets[i]);
4094 				pf_counter_u64_zero(&rule->bytes[i]);
4095 			}
4096 		}
4097 		PF_RULES_WUNLOCK();
4098 		break;
4099 	}
4100 
4101 	case DIOCGIFSPEEDV0:
4102 	case DIOCGIFSPEEDV1: {
4103 		struct pf_ifspeed_v1	*psp = (struct pf_ifspeed_v1 *)addr;
4104 		struct pf_ifspeed_v1	ps;
4105 		struct ifnet		*ifp;
4106 
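		/*
		 * Both versions return the 32-bit baudrate (clamped to
		 * UINT_MAX); V1 callers additionally get the full 64-bit
		 * if_baudrate value.
		 */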
4107 		if (psp->ifname[0] == '\0') {
4108 			error = EINVAL;
4109 			break;
4110 		}
4111 
4112 		error = pf_user_strcpy(ps.ifname, psp->ifname, IFNAMSIZ);
4113 		if (error != 0)
4114 			break;
4115 		ifp = ifunit(ps.ifname);
4116 		if (ifp != NULL) {
4117 			psp->baudrate32 =
4118 			    (u_int32_t)uqmin(ifp->if_baudrate, UINT_MAX);
4119 			if (cmd == DIOCGIFSPEEDV1)
4120 				psp->baudrate = ifp->if_baudrate;
4121 		} else {
4122 			error = EINVAL;
4123 		}
4124 		break;
4125 	}
4126 
4127 #ifdef ALTQ
4128 	case DIOCSTARTALTQ: {
4129 		struct pf_altq		*altq;
4130 
4131 		PF_RULES_WLOCK();
4132 		/* enable all altq interfaces on active list */
4133 		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
4134 			if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
4135 				error = pf_enable_altq(altq);
4136 				if (error != 0)
4137 					break;
4138 			}
4139 		}
4140 		if (error == 0)
4141 			V_pf_altq_running = 1;
4142 		PF_RULES_WUNLOCK();
4143 		DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
4144 		break;
4145 	}
4146 
4147 	case DIOCSTOPALTQ: {
4148 		struct pf_altq		*altq;
4149 
4150 		PF_RULES_WLOCK();
4151 		/* disable all altq interfaces on active list */
4152 		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
4153 			if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
4154 				error = pf_disable_altq(altq);
4155 				if (error != 0)
4156 					break;
4157 			}
4158 		}
4159 		if (error == 0)
4160 			V_pf_altq_running = 0;
4161 		PF_RULES_WUNLOCK();
4162 		DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
4163 		break;
4164 	}
4165 
4166 	case DIOCADDALTQV0:
4167 	case DIOCADDALTQV1: {
4168 		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
4169 		struct pf_altq		*altq, *a;
4170 		struct ifnet		*ifp;
4171 
4172 		altq = malloc(sizeof(*altq), M_PFALTQ, M_WAITOK | M_ZERO);
4173 		error = pf_import_kaltq(pa, altq, IOCPARM_LEN(cmd));
4174 		if (error)
4175 			break;
4176 		altq->local_flags = 0;
4177 
4178 		PF_RULES_WLOCK();
4179 		if (pa->ticket != V_ticket_altqs_inactive) {
4180 			PF_RULES_WUNLOCK();
4181 			free(altq, M_PFALTQ);
4182 			error = EBUSY;
4183 			break;
4184 		}
4185 
4186 		/*
4187 		 * if this is for a queue, find the discipline and
4188 		 * copy the necessary fields
4189 		 */
4190 		if (altq->qname[0] != 0) {
4191 			if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
4192 				PF_RULES_WUNLOCK();
4193 				error = EBUSY;
4194 				free(altq, M_PFALTQ);
4195 				break;
4196 			}
4197 			altq->altq_disc = NULL;
4198 			TAILQ_FOREACH(a, V_pf_altq_ifs_inactive, entries) {
4199 				if (strncmp(a->ifname, altq->ifname,
4200 				    IFNAMSIZ) == 0) {
4201 					altq->altq_disc = a->altq_disc;
4202 					break;
4203 				}
4204 			}
4205 		}
4206 
4207 		if ((ifp = ifunit(altq->ifname)) == NULL)
4208 			altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
4209 		else
4210 			error = altq_add(ifp, altq);
4211 
4212 		if (error) {
4213 			PF_RULES_WUNLOCK();
4214 			free(altq, M_PFALTQ);
4215 			break;
4216 		}
4217 
4218 		if (altq->qname[0] != 0)
4219 			TAILQ_INSERT_TAIL(V_pf_altqs_inactive, altq, entries);
4220 		else
4221 			TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, altq, entries);
4222 		/* version error check done on import above */
4223 		pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
4224 		PF_RULES_WUNLOCK();
4225 		break;
4226 	}
4227 
4228 	case DIOCGETALTQSV0:
4229 	case DIOCGETALTQSV1: {
4230 		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
4231 		struct pf_altq		*altq;
4232 
4233 		PF_RULES_RLOCK();
4234 		pa->nr = 0;
4235 		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries)
4236 			pa->nr++;
4237 		TAILQ_FOREACH(altq, V_pf_altqs_active, entries)
4238 			pa->nr++;
4239 		pa->ticket = V_ticket_altqs_active;
4240 		PF_RULES_RUNLOCK();
4241 		break;
4242 	}
4243 
4244 	case DIOCGETALTQV0:
4245 	case DIOCGETALTQV1: {
4246 		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
4247 		struct pf_altq		*altq;
4248 
4249 		PF_RULES_RLOCK();
4250 		if (pa->ticket != V_ticket_altqs_active) {
4251 			PF_RULES_RUNLOCK();
4252 			error = EBUSY;
4253 			break;
4254 		}
4255 		altq = pf_altq_get_nth_active(pa->nr);
4256 		if (altq == NULL) {
4257 			PF_RULES_RUNLOCK();
4258 			error = EBUSY;
4259 			break;
4260 		}
4261 		pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
4262 		PF_RULES_RUNLOCK();
4263 		break;
4264 	}
4265 
4266 	case DIOCCHANGEALTQV0:
4267 	case DIOCCHANGEALTQV1:
4268 		/* CHANGEALTQ not supported yet! */
4269 		error = ENODEV;
4270 		break;
4271 
4272 	case DIOCGETQSTATSV0:
4273 	case DIOCGETQSTATSV1: {
4274 		struct pfioc_qstats_v1	*pq = (struct pfioc_qstats_v1 *)addr;
4275 		struct pf_altq		*altq;
4276 		int			 nbytes;
4277 		u_int32_t		 version;
4278 
4279 		PF_RULES_RLOCK();
4280 		if (pq->ticket != V_ticket_altqs_active) {
4281 			PF_RULES_RUNLOCK();
4282 			error = EBUSY;
4283 			break;
4284 		}
4285 		nbytes = pq->nbytes;
4286 		altq = pf_altq_get_nth_active(pq->nr);
4287 		if (altq == NULL) {
4288 			PF_RULES_RUNLOCK();
4289 			error = EBUSY;
4290 			break;
4291 		}
4292 
4293 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) != 0) {
4294 			PF_RULES_RUNLOCK();
4295 			error = ENXIO;
4296 			break;
4297 		}
4298 		PF_RULES_RUNLOCK();
4299 		if (cmd == DIOCGETQSTATSV0)
4300 			version = 0;  /* DIOCGETQSTATSV0 means stats struct v0 */
4301 		else
4302 			version = pq->version;
4303 		error = altq_getqstats(altq, pq->buf, &nbytes, version);
4304 		if (error == 0) {
4305 			pq->scheduler = altq->scheduler;
4306 			pq->nbytes = nbytes;
4307 		}
4308 		break;
4309 	}
4310 #endif /* ALTQ */
4311 
4312 	case DIOCBEGINADDRS: {
4313 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4314 
4315 		error = pf_ioctl_begin_addrs(&pp->ticket);
4316 		break;
4317 	}
4318 
4319 	case DIOCADDADDR: {
4320 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4321 
4322 		error = pf_ioctl_add_addr(pp);
4323 		break;
4324 	}
4325 
4326 	case DIOCGETADDRS: {
4327 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4328 
4329 		error = pf_ioctl_get_addrs(pp);
4330 		break;
4331 	}
4332 
4333 	case DIOCGETADDR: {
4334 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4335 
4336 		error = pf_ioctl_get_addr(pp);
4337 		break;
4338 	}
4339 
4340 	case DIOCCHANGEADDR: {
4341 		struct pfioc_pooladdr	*pca = (struct pfioc_pooladdr *)addr;
4342 		struct pf_kpool		*pool;
4343 		struct pf_kpooladdr	*oldpa = NULL, *newpa = NULL;
4344 		struct pf_kruleset	*ruleset;
4345 		struct pfi_kkif		*kif = NULL;
4346 
4347 		pca->anchor[sizeof(pca->anchor) - 1] = 0;
4348 
4349 		if (pca->action < PF_CHANGE_ADD_HEAD ||
4350 		    pca->action > PF_CHANGE_REMOVE) {
4351 			error = EINVAL;
4352 			break;
4353 		}
4354 		if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
4355 		    pca->addr.addr.type != PF_ADDR_DYNIFTL &&
4356 		    pca->addr.addr.type != PF_ADDR_TABLE) {
4357 			error = EINVAL;
4358 			break;
4359 		}
4360 		if (pca->addr.addr.p.dyn != NULL) {
4361 			error = EINVAL;
4362 			break;
4363 		}
4364 
4365 		if (pca->action != PF_CHANGE_REMOVE) {
4366 #ifndef INET
4367 			if (pca->af == AF_INET) {
4368 				error = EAFNOSUPPORT;
4369 				break;
4370 			}
4371 #endif /* INET */
4372 #ifndef INET6
4373 			if (pca->af == AF_INET6) {
4374 				error = EAFNOSUPPORT;
4375 				break;
4376 			}
4377 #endif /* INET6 */
4378 			newpa = malloc(sizeof(*newpa), M_PFRULE, M_WAITOK);
4379 			bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
4380 			if (newpa->ifname[0])
4381 				kif = pf_kkif_create(M_WAITOK);
4382 			newpa->kif = NULL;
4383 		}
4384 #define	ERROUT(x)	ERROUT_IOCTL(DIOCCHANGEADDR_error, x)
4385 		PF_RULES_WLOCK();
4386 		ruleset = pf_find_kruleset(pca->anchor);
4387 		if (ruleset == NULL)
4388 			ERROUT(EBUSY);
4389 
4390 		pool = pf_get_kpool(pca->anchor, pca->ticket, pca->r_action,
4391 		    pca->r_num, pca->r_last, 1, 1);
4392 		if (pool == NULL)
4393 			ERROUT(EBUSY);
4394 
4395 		if (pca->action != PF_CHANGE_REMOVE) {
4396 			if (newpa->ifname[0]) {
4397 				newpa->kif = pfi_kkif_attach(kif, newpa->ifname);
4398 				pfi_kkif_ref(newpa->kif);
4399 				kif = NULL;
4400 			}
4401 
4402 			switch (newpa->addr.type) {
4403 			case PF_ADDR_DYNIFTL:
4404 				error = pfi_dynaddr_setup(&newpa->addr,
4405 				    pca->af);
4406 				break;
4407 			case PF_ADDR_TABLE:
4408 				newpa->addr.p.tbl = pfr_attach_table(ruleset,
4409 				    newpa->addr.v.tblname);
4410 				if (newpa->addr.p.tbl == NULL)
4411 					error = ENOMEM;
4412 				break;
4413 			}
4414 			if (error)
4415 				goto DIOCCHANGEADDR_error;
4416 		}
4417 
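		/*
		 * Find the existing pool entry the change is anchored at:
		 * the head or tail of the list for ADD_HEAD/ADD_TAIL,
		 * otherwise the pca->nr'th entry.
		 */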
4418 		switch (pca->action) {
4419 		case PF_CHANGE_ADD_HEAD:
4420 			oldpa = TAILQ_FIRST(&pool->list);
4421 			break;
4422 		case PF_CHANGE_ADD_TAIL:
4423 			oldpa = TAILQ_LAST(&pool->list, pf_kpalist);
4424 			break;
4425 		default:
4426 			oldpa = TAILQ_FIRST(&pool->list);
4427 			for (int i = 0; oldpa && i < pca->nr; i++)
4428 				oldpa = TAILQ_NEXT(oldpa, entries);
4429 
4430 			if (oldpa == NULL)
4431 				ERROUT(EINVAL);
4432 		}
4433 
4434 		if (pca->action == PF_CHANGE_REMOVE) {
4435 			TAILQ_REMOVE(&pool->list, oldpa, entries);
4436 			switch (oldpa->addr.type) {
4437 			case PF_ADDR_DYNIFTL:
4438 				pfi_dynaddr_remove(oldpa->addr.p.dyn);
4439 				break;
4440 			case PF_ADDR_TABLE:
4441 				pfr_detach_table(oldpa->addr.p.tbl);
4442 				break;
4443 			}
4444 			if (oldpa->kif)
4445 				pfi_kkif_unref(oldpa->kif);
4446 			free(oldpa, M_PFRULE);
4447 		} else {
4448 			if (oldpa == NULL)
4449 				TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
4450 			else if (pca->action == PF_CHANGE_ADD_HEAD ||
4451 			    pca->action == PF_CHANGE_ADD_BEFORE)
4452 				TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
4453 			else
4454 				TAILQ_INSERT_AFTER(&pool->list, oldpa,
4455 				    newpa, entries);
4456 		}
4457 
4458 		pool->cur = TAILQ_FIRST(&pool->list);
4459 		PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, pca->af);
4460 		PF_RULES_WUNLOCK();
4461 		break;
4462 
4463 #undef ERROUT
4464 DIOCCHANGEADDR_error:
4465 		if (newpa != NULL) {
4466 			if (newpa->kif)
4467 				pfi_kkif_unref(newpa->kif);
4468 			free(newpa, M_PFRULE);
4469 		}
4470 		PF_RULES_WUNLOCK();
4471 		pf_kkif_free(kif);
4472 		break;
4473 	}
4474 
4475 	case DIOCGETRULESETS: {
4476 		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
4477 		struct pf_kruleset	*ruleset;
4478 		struct pf_kanchor	*anchor;
4479 
4480 		pr->path[sizeof(pr->path) - 1] = 0;
4481 
4482 		PF_RULES_RLOCK();
4483 		if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
4484 			PF_RULES_RUNLOCK();
4485 			error = ENOENT;
4486 			break;
4487 		}
4488 		pr->nr = 0;
4489 		if (ruleset->anchor == NULL) {
4490 			/* XXX kludge for pf_main_ruleset */
4491 			RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
4492 				if (anchor->parent == NULL)
4493 					pr->nr++;
4494 		} else {
4495 			RB_FOREACH(anchor, pf_kanchor_node,
4496 			    &ruleset->anchor->children)
4497 				pr->nr++;
4498 		}
4499 		PF_RULES_RUNLOCK();
4500 		break;
4501 	}
4502 
4503 	case DIOCGETRULESET: {
4504 		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
4505 		struct pf_kruleset	*ruleset;
4506 		struct pf_kanchor	*anchor;
4507 		u_int32_t		 nr = 0;
4508 
4509 		pr->path[sizeof(pr->path) - 1] = 0;
4510 
4511 		PF_RULES_RLOCK();
4512 		if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
4513 			PF_RULES_RUNLOCK();
4514 			error = ENOENT;
4515 			break;
4516 		}
4517 		pr->name[0] = 0;
4518 		if (ruleset->anchor == NULL) {
4519 			/* XXX kludge for pf_main_ruleset */
4520 			RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
4521 				if (anchor->parent == NULL && nr++ == pr->nr) {
4522 					strlcpy(pr->name, anchor->name,
4523 					    sizeof(pr->name));
4524 					break;
4525 				}
4526 		} else {
4527 			RB_FOREACH(anchor, pf_kanchor_node,
4528 			    &ruleset->anchor->children)
4529 				if (nr++ == pr->nr) {
4530 					strlcpy(pr->name, anchor->name,
4531 					    sizeof(pr->name));
4532 					break;
4533 				}
4534 		}
4535 		if (!pr->name[0])
4536 			error = EBUSY;
4537 		PF_RULES_RUNLOCK();
4538 		break;
4539 	}
4540 
4541 	case DIOCRCLRTABLES: {
4542 		struct pfioc_table *io = (struct pfioc_table *)addr;
4543 
4544 		if (io->pfrio_esize != 0) {
4545 			error = ENODEV;
4546 			break;
4547 		}
4548 		PF_RULES_WLOCK();
4549 		error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
4550 		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
4551 		PF_RULES_WUNLOCK();
4552 		break;
4553 	}
4554 
4555 	case DIOCRADDTABLES: {
4556 		struct pfioc_table *io = (struct pfioc_table *)addr;
4557 		struct pfr_table *pfrts;
4558 		size_t totlen;
4559 
4560 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4561 			error = ENODEV;
4562 			break;
4563 		}
4564 
4565 		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4566 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4567 			error = ENOMEM;
4568 			break;
4569 		}
4570 
4571 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4572 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4573 		    M_TEMP, M_WAITOK);
4574 		error = copyin(io->pfrio_buffer, pfrts, totlen);
4575 		if (error) {
4576 			free(pfrts, M_TEMP);
4577 			break;
4578 		}
4579 		PF_RULES_WLOCK();
4580 		error = pfr_add_tables(pfrts, io->pfrio_size,
4581 		    &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4582 		PF_RULES_WUNLOCK();
4583 		free(pfrts, M_TEMP);
4584 		break;
4585 	}
4586 
4587 	case DIOCRDELTABLES: {
4588 		struct pfioc_table *io = (struct pfioc_table *)addr;
4589 		struct pfr_table *pfrts;
4590 		size_t totlen;
4591 
4592 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4593 			error = ENODEV;
4594 			break;
4595 		}
4596 
4597 		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4598 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4599 			error = ENOMEM;
4600 			break;
4601 		}
4602 
4603 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4604 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4605 		    M_TEMP, M_WAITOK);
4606 		error = copyin(io->pfrio_buffer, pfrts, totlen);
4607 		if (error) {
4608 			free(pfrts, M_TEMP);
4609 			break;
4610 		}
4611 		PF_RULES_WLOCK();
4612 		error = pfr_del_tables(pfrts, io->pfrio_size,
4613 		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4614 		PF_RULES_WUNLOCK();
4615 		free(pfrts, M_TEMP);
4616 		break;
4617 	}
4618 
4619 	case DIOCRGETTABLES: {
4620 		struct pfioc_table *io = (struct pfioc_table *)addr;
4621 		struct pfr_table *pfrts;
4622 		size_t totlen;
4623 		int n;
4624 
4625 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4626 			error = ENODEV;
4627 			break;
4628 		}
4629 		PF_RULES_RLOCK();
4630 		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4631 		if (n < 0) {
4632 			PF_RULES_RUNLOCK();
4633 			error = EINVAL;
4634 			break;
4635 		}
4636 		io->pfrio_size = min(io->pfrio_size, n);
4637 
4638 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4639 
4640 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4641 		    M_TEMP, M_NOWAIT | M_ZERO);
4642 		if (pfrts == NULL) {
4643 			error = ENOMEM;
4644 			PF_RULES_RUNLOCK();
4645 			break;
4646 		}
4647 		error = pfr_get_tables(&io->pfrio_table, pfrts,
4648 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4649 		PF_RULES_RUNLOCK();
4650 		if (error == 0)
4651 			error = copyout(pfrts, io->pfrio_buffer, totlen);
4652 		free(pfrts, M_TEMP);
4653 		break;
4654 	}
4655 
4656 	case DIOCRGETTSTATS: {
4657 		struct pfioc_table *io = (struct pfioc_table *)addr;
4658 		struct pfr_tstats *pfrtstats;
4659 		size_t totlen;
4660 		int n;
4661 
4662 		if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
4663 			error = ENODEV;
4664 			break;
4665 		}
4666 		PF_TABLE_STATS_LOCK();
4667 		PF_RULES_RLOCK();
4668 		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4669 		if (n < 0) {
4670 			PF_RULES_RUNLOCK();
4671 			PF_TABLE_STATS_UNLOCK();
4672 			error = EINVAL;
4673 			break;
4674 		}
4675 		io->pfrio_size = min(io->pfrio_size, n);
4676 
4677 		totlen = io->pfrio_size * sizeof(struct pfr_tstats);
4678 		pfrtstats = mallocarray(io->pfrio_size,
4679 		    sizeof(struct pfr_tstats), M_TEMP, M_NOWAIT | M_ZERO);
4680 		if (pfrtstats == NULL) {
4681 			error = ENOMEM;
4682 			PF_RULES_RUNLOCK();
4683 			PF_TABLE_STATS_UNLOCK();
4684 			break;
4685 		}
4686 		error = pfr_get_tstats(&io->pfrio_table, pfrtstats,
4687 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4688 		PF_RULES_RUNLOCK();
4689 		PF_TABLE_STATS_UNLOCK();
4690 		if (error == 0)
4691 			error = copyout(pfrtstats, io->pfrio_buffer, totlen);
4692 		free(pfrtstats, M_TEMP);
4693 		break;
4694 	}
4695 
4696 	case DIOCRCLRTSTATS: {
4697 		struct pfioc_table *io = (struct pfioc_table *)addr;
4698 		struct pfr_table *pfrts;
4699 		size_t totlen;
4700 
4701 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4702 			error = ENODEV;
4703 			break;
4704 		}
4705 
4706 		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4707 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4708 			/* We used to count tables and use the minimum required
4709 			 * size, so we didn't fail on overly large requests.
4710 			 * Keep doing so. */
4711 			io->pfrio_size = pf_ioctl_maxcount;
4712 			break;
4713 		}
4714 
4715 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4716 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4717 		    M_TEMP, M_WAITOK);
4718 		error = copyin(io->pfrio_buffer, pfrts, totlen);
4719 		if (error) {
4720 			free(pfrts, M_TEMP);
4721 			break;
4722 		}
4723 
4724 		PF_TABLE_STATS_LOCK();
4725 		PF_RULES_RLOCK();
4726 		error = pfr_clr_tstats(pfrts, io->pfrio_size,
4727 		    &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4728 		PF_RULES_RUNLOCK();
4729 		PF_TABLE_STATS_UNLOCK();
4730 		free(pfrts, M_TEMP);
4731 		break;
4732 	}
4733 
4734 	case DIOCRSETTFLAGS: {
4735 		struct pfioc_table *io = (struct pfioc_table *)addr;
4736 		struct pfr_table *pfrts;
4737 		size_t totlen;
4738 		int n;
4739 
4740 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4741 			error = ENODEV;
4742 			break;
4743 		}
4744 
4745 		PF_RULES_RLOCK();
4746 		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4747 		if (n < 0) {
4748 			PF_RULES_RUNLOCK();
4749 			error = EINVAL;
4750 			break;
4751 		}
4752 
4753 		io->pfrio_size = min(io->pfrio_size, n);
4754 		PF_RULES_RUNLOCK();
4755 
4756 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4757 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4758 		    M_TEMP, M_WAITOK);
4759 		error = copyin(io->pfrio_buffer, pfrts, totlen);
4760 		if (error) {
4761 			free(pfrts, M_TEMP);
4762 			break;
4763 		}
4764 		PF_RULES_WLOCK();
4765 		error = pfr_set_tflags(pfrts, io->pfrio_size,
4766 		    io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
4767 		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4768 		PF_RULES_WUNLOCK();
4769 		free(pfrts, M_TEMP);
4770 		break;
4771 	}
4772 
4773 	case DIOCRCLRADDRS: {
4774 		struct pfioc_table *io = (struct pfioc_table *)addr;
4775 
4776 		if (io->pfrio_esize != 0) {
4777 			error = ENODEV;
4778 			break;
4779 		}
4780 		PF_RULES_WLOCK();
4781 		error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
4782 		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
4783 		PF_RULES_WUNLOCK();
4784 		break;
4785 	}
4786 
4787 	case DIOCRADDADDRS: {
4788 		struct pfioc_table *io = (struct pfioc_table *)addr;
4789 		struct pfr_addr *pfras;
4790 		size_t totlen;
4791 
4792 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4793 			error = ENODEV;
4794 			break;
4795 		}
4796 		if (io->pfrio_size < 0 ||
4797 		    io->pfrio_size > pf_ioctl_maxcount ||
4798 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4799 			error = EINVAL;
4800 			break;
4801 		}
4802 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
4803 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4804 		    M_TEMP, M_WAITOK);
4805 		error = copyin(io->pfrio_buffer, pfras, totlen);
4806 		if (error) {
4807 			free(pfras, M_TEMP);
4808 			break;
4809 		}
4810 		PF_RULES_WLOCK();
4811 		error = pfr_add_addrs(&io->pfrio_table, pfras,
4812 		    io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
4813 		    PFR_FLAG_USERIOCTL);
4814 		PF_RULES_WUNLOCK();
4815 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
4816 			error = copyout(pfras, io->pfrio_buffer, totlen);
4817 		free(pfras, M_TEMP);
4818 		break;
4819 	}
4820 
4821 	case DIOCRDELADDRS: {
4822 		struct pfioc_table *io = (struct pfioc_table *)addr;
4823 		struct pfr_addr *pfras;
4824 		size_t totlen;
4825 
4826 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4827 			error = ENODEV;
4828 			break;
4829 		}
4830 		if (io->pfrio_size < 0 ||
4831 		    io->pfrio_size > pf_ioctl_maxcount ||
4832 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4833 			error = EINVAL;
4834 			break;
4835 		}
4836 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
4837 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4838 		    M_TEMP, M_WAITOK);
4839 		error = copyin(io->pfrio_buffer, pfras, totlen);
4840 		if (error) {
4841 			free(pfras, M_TEMP);
4842 			break;
4843 		}
4844 		PF_RULES_WLOCK();
4845 		error = pfr_del_addrs(&io->pfrio_table, pfras,
4846 		    io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
4847 		    PFR_FLAG_USERIOCTL);
4848 		PF_RULES_WUNLOCK();
4849 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
4850 			error = copyout(pfras, io->pfrio_buffer, totlen);
4851 		free(pfras, M_TEMP);
4852 		break;
4853 	}
4854 
4855 	case DIOCRSETADDRS: {
4856 		struct pfioc_table *io = (struct pfioc_table *)addr;
4857 		struct pfr_addr *pfras;
4858 		size_t totlen, count;
4859 
4860 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4861 			error = ENODEV;
4862 			break;
4863 		}
4864 		if (io->pfrio_size < 0 || io->pfrio_size2 < 0) {
4865 			error = EINVAL;
4866 			break;
4867 		}
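		/*
		 * The feedback buffer (pfrio_size2) may be larger than the
		 * input, so size the temporary copy for the bigger of the
		 * two.
		 */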
4868 		count = max(io->pfrio_size, io->pfrio_size2);
4869 		if (count > pf_ioctl_maxcount ||
4870 		    WOULD_OVERFLOW(count, sizeof(struct pfr_addr))) {
4871 			error = EINVAL;
4872 			break;
4873 		}
4874 		totlen = count * sizeof(struct pfr_addr);
4875 		pfras = mallocarray(count, sizeof(struct pfr_addr), M_TEMP,
4876 		    M_WAITOK);
4877 		error = copyin(io->pfrio_buffer, pfras, totlen);
4878 		if (error) {
4879 			free(pfras, M_TEMP);
4880 			break;
4881 		}
4882 		PF_RULES_WLOCK();
4883 		error = pfr_set_addrs(&io->pfrio_table, pfras,
4884 		    io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
4885 		    &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
4886 		    PFR_FLAG_USERIOCTL, 0);
4887 		PF_RULES_WUNLOCK();
4888 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
4889 			error = copyout(pfras, io->pfrio_buffer, totlen);
4890 		free(pfras, M_TEMP);
4891 		break;
4892 	}
4893 
4894 	case DIOCRGETADDRS: {
4895 		struct pfioc_table *io = (struct pfioc_table *)addr;
4896 		struct pfr_addr *pfras;
4897 		size_t totlen;
4898 
4899 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4900 			error = ENODEV;
4901 			break;
4902 		}
4903 		if (io->pfrio_size < 0 ||
4904 		    io->pfrio_size > pf_ioctl_maxcount ||
4905 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4906 			error = EINVAL;
4907 			break;
4908 		}
4909 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
4910 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4911 		    M_TEMP, M_WAITOK | M_ZERO);
4912 		PF_RULES_RLOCK();
4913 		error = pfr_get_addrs(&io->pfrio_table, pfras,
4914 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4915 		PF_RULES_RUNLOCK();
4916 		if (error == 0)
4917 			error = copyout(pfras, io->pfrio_buffer, totlen);
4918 		free(pfras, M_TEMP);
4919 		break;
4920 	}
4921 
4922 	case DIOCRGETASTATS: {
4923 		struct pfioc_table *io = (struct pfioc_table *)addr;
4924 		struct pfr_astats *pfrastats;
4925 		size_t totlen;
4926 
4927 		if (io->pfrio_esize != sizeof(struct pfr_astats)) {
4928 			error = ENODEV;
4929 			break;
4930 		}
4931 		if (io->pfrio_size < 0 ||
4932 		    io->pfrio_size > pf_ioctl_maxcount ||
4933 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_astats))) {
4934 			error = EINVAL;
4935 			break;
4936 		}
4937 		totlen = io->pfrio_size * sizeof(struct pfr_astats);
4938 		pfrastats = mallocarray(io->pfrio_size,
4939 		    sizeof(struct pfr_astats), M_TEMP, M_WAITOK | M_ZERO);
4940 		PF_RULES_RLOCK();
4941 		error = pfr_get_astats(&io->pfrio_table, pfrastats,
4942 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4943 		PF_RULES_RUNLOCK();
4944 		if (error == 0)
4945 			error = copyout(pfrastats, io->pfrio_buffer, totlen);
4946 		free(pfrastats, M_TEMP);
4947 		break;
4948 	}
4949 
4950 	case DIOCRCLRASTATS: {
4951 		struct pfioc_table *io = (struct pfioc_table *)addr;
4952 		struct pfr_addr *pfras;
4953 		size_t totlen;
4954 
4955 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4956 			error = ENODEV;
4957 			break;
4958 		}
4959 		if (io->pfrio_size < 0 ||
4960 		    io->pfrio_size > pf_ioctl_maxcount ||
4961 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4962 			error = EINVAL;
4963 			break;
4964 		}
4965 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
4966 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4967 		    M_TEMP, M_WAITOK);
4968 		error = copyin(io->pfrio_buffer, pfras, totlen);
4969 		if (error) {
4970 			free(pfras, M_TEMP);
4971 			break;
4972 		}
4973 		PF_RULES_WLOCK();
4974 		error = pfr_clr_astats(&io->pfrio_table, pfras,
4975 		    io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
4976 		    PFR_FLAG_USERIOCTL);
4977 		PF_RULES_WUNLOCK();
4978 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
4979 			error = copyout(pfras, io->pfrio_buffer, totlen);
4980 		free(pfras, M_TEMP);
4981 		break;
4982 	}
4983 
4984 	case DIOCRTSTADDRS: {
4985 		struct pfioc_table *io = (struct pfioc_table *)addr;
4986 		struct pfr_addr *pfras;
4987 		size_t totlen;
4988 
4989 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4990 			error = ENODEV;
4991 			break;
4992 		}
4993 		if (io->pfrio_size < 0 ||
4994 		    io->pfrio_size > pf_ioctl_maxcount ||
4995 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4996 			error = EINVAL;
4997 			break;
4998 		}
4999 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
5000 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
5001 		    M_TEMP, M_WAITOK);
5002 		error = copyin(io->pfrio_buffer, pfras, totlen);
5003 		if (error) {
5004 			free(pfras, M_TEMP);
5005 			break;
5006 		}
5007 		PF_RULES_RLOCK();
5008 		error = pfr_tst_addrs(&io->pfrio_table, pfras,
5009 		    io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
5010 		    PFR_FLAG_USERIOCTL);
5011 		PF_RULES_RUNLOCK();
5012 		if (error == 0)
5013 			error = copyout(pfras, io->pfrio_buffer, totlen);
5014 		free(pfras, M_TEMP);
5015 		break;
5016 	}
5017 
5018 	case DIOCRINADEFINE: {
5019 		struct pfioc_table *io = (struct pfioc_table *)addr;
5020 		struct pfr_addr *pfras;
5021 		size_t totlen;
5022 
5023 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
5024 			error = ENODEV;
5025 			break;
5026 		}
5027 		if (io->pfrio_size < 0 ||
5028 		    io->pfrio_size > pf_ioctl_maxcount ||
5029 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
5030 			error = EINVAL;
5031 			break;
5032 		}
5033 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
5034 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
5035 		    M_TEMP, M_WAITOK);
5036 		error = copyin(io->pfrio_buffer, pfras, totlen);
5037 		if (error) {
5038 			free(pfras, M_TEMP);
5039 			break;
5040 		}
5041 		PF_RULES_WLOCK();
5042 		error = pfr_ina_define(&io->pfrio_table, pfras,
5043 		    io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
5044 		    io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
5045 		PF_RULES_WUNLOCK();
5046 		free(pfras, M_TEMP);
5047 		break;
5048 	}
5049 
5050 	case DIOCOSFPADD: {
5051 		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
5052 		PF_RULES_WLOCK();
5053 		error = pf_osfp_add(io);
5054 		PF_RULES_WUNLOCK();
5055 		break;
5056 	}
5057 
5058 	case DIOCOSFPGET: {
5059 		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
5060 		PF_RULES_RLOCK();
5061 		error = pf_osfp_get(io);
5062 		PF_RULES_RUNLOCK();
5063 		break;
5064 	}
5065 
5066 	case DIOCXBEGIN: {
5067 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
5068 		struct pfioc_trans_e	*ioes, *ioe;
5069 		size_t			 totlen;
5070 		int			 i;
5071 
5072 		if (io->esize != sizeof(*ioe)) {
5073 			error = ENODEV;
5074 			break;
5075 		}
5076 		if (io->size < 0 ||
5077 		    io->size > pf_ioctl_maxcount ||
5078 		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
5079 			error = EINVAL;
5080 			break;
5081 		}
5082 		totlen = sizeof(struct pfioc_trans_e) * io->size;
5083 		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
5084 		    M_TEMP, M_WAITOK);
5085 		error = copyin(io->array, ioes, totlen);
5086 		if (error) {
5087 			free(ioes, M_TEMP);
5088 			break;
5089 		}
5090 		/* Ensure there are no more Ethernet rules to clean up. */
5091 		NET_EPOCH_DRAIN_CALLBACKS();
5092 		PF_RULES_WLOCK();
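		/*
		 * Open an inactive ruleset for every element of the
		 * transaction and hand the resulting tickets back to
		 * userland; any failure aborts the whole begin.
		 */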
5093 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5094 			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
5095 			switch (ioe->rs_num) {
5096 			case PF_RULESET_ETH:
5097 				if ((error = pf_begin_eth(&ioe->ticket, ioe->anchor))) {
5098 					PF_RULES_WUNLOCK();
5099 					free(ioes, M_TEMP);
5100 					goto fail;
5101 				}
5102 				break;
5103 #ifdef ALTQ
5104 			case PF_RULESET_ALTQ:
5105 				if (ioe->anchor[0]) {
5106 					PF_RULES_WUNLOCK();
5107 					free(ioes, M_TEMP);
5108 					error = EINVAL;
5109 					goto fail;
5110 				}
5111 				if ((error = pf_begin_altq(&ioe->ticket))) {
5112 					PF_RULES_WUNLOCK();
5113 					free(ioes, M_TEMP);
5114 					goto fail;
5115 				}
5116 				break;
5117 #endif /* ALTQ */
5118 			case PF_RULESET_TABLE:
5119 			    {
5120 				struct pfr_table table;
5121 
5122 				bzero(&table, sizeof(table));
5123 				strlcpy(table.pfrt_anchor, ioe->anchor,
5124 				    sizeof(table.pfrt_anchor));
5125 				if ((error = pfr_ina_begin(&table,
5126 				    &ioe->ticket, NULL, 0))) {
5127 					PF_RULES_WUNLOCK();
5128 					free(ioes, M_TEMP);
5129 					goto fail;
5130 				}
5131 				break;
5132 			    }
5133 			default:
5134 				if ((error = pf_begin_rules(&ioe->ticket,
5135 				    ioe->rs_num, ioe->anchor))) {
5136 					PF_RULES_WUNLOCK();
5137 					free(ioes, M_TEMP);
5138 					goto fail;
5139 				}
5140 				break;
5141 			}
5142 		}
5143 		PF_RULES_WUNLOCK();
5144 		error = copyout(ioes, io->array, totlen);
5145 		free(ioes, M_TEMP);
5146 		break;
5147 	}
5148 
5149 	case DIOCXROLLBACK: {
5150 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
5151 		struct pfioc_trans_e	*ioe, *ioes;
5152 		size_t			 totlen;
5153 		int			 i;
5154 
5155 		if (io->esize != sizeof(*ioe)) {
5156 			error = ENODEV;
5157 			break;
5158 		}
5159 		if (io->size < 0 ||
5160 		    io->size > pf_ioctl_maxcount ||
5161 		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
5162 			error = EINVAL;
5163 			break;
5164 		}
5165 		totlen = sizeof(struct pfioc_trans_e) * io->size;
5166 		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
5167 		    M_TEMP, M_WAITOK);
5168 		error = copyin(io->array, ioes, totlen);
5169 		if (error) {
5170 			free(ioes, M_TEMP);
5171 			break;
5172 		}
5173 		PF_RULES_WLOCK();
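		/*
		 * Tear down the inactive ruleset for every element of the
		 * transaction.  Failures here are "really bad": part of the
		 * transaction may already have been rolled back.
		 */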
5174 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5175 			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
5176 			switch (ioe->rs_num) {
5177 			case PF_RULESET_ETH:
5178 				if ((error = pf_rollback_eth(ioe->ticket,
5179 				    ioe->anchor))) {
5180 					PF_RULES_WUNLOCK();
5181 					free(ioes, M_TEMP);
5182 					goto fail; /* really bad */
5183 				}
5184 				break;
5185 #ifdef ALTQ
5186 			case PF_RULESET_ALTQ:
5187 				if (ioe->anchor[0]) {
5188 					PF_RULES_WUNLOCK();
5189 					free(ioes, M_TEMP);
5190 					error = EINVAL;
5191 					goto fail;
5192 				}
5193 				if ((error = pf_rollback_altq(ioe->ticket))) {
5194 					PF_RULES_WUNLOCK();
5195 					free(ioes, M_TEMP);
5196 					goto fail; /* really bad */
5197 				}
5198 				break;
5199 #endif /* ALTQ */
5200 			case PF_RULESET_TABLE:
5201 			    {
5202 				struct pfr_table table;
5203 
5204 				bzero(&table, sizeof(table));
5205 				strlcpy(table.pfrt_anchor, ioe->anchor,
5206 				    sizeof(table.pfrt_anchor));
5207 				if ((error = pfr_ina_rollback(&table,
5208 				    ioe->ticket, NULL, 0))) {
5209 					PF_RULES_WUNLOCK();
5210 					free(ioes, M_TEMP);
5211 					goto fail; /* really bad */
5212 				}
5213 				break;
5214 			    }
5215 			default:
5216 				if ((error = pf_rollback_rules(ioe->ticket,
5217 				    ioe->rs_num, ioe->anchor))) {
5218 					PF_RULES_WUNLOCK();
5219 					free(ioes, M_TEMP);
5220 					goto fail; /* really bad */
5221 				}
5222 				break;
5223 			}
5224 		}
5225 		PF_RULES_WUNLOCK();
5226 		free(ioes, M_TEMP);
5227 		break;
5228 	}
5229 
5230 	case DIOCXCOMMIT: {
5231 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
5232 		struct pfioc_trans_e	*ioe, *ioes;
5233 		struct pf_kruleset	*rs;
5234 		struct pf_keth_ruleset	*ers;
5235 		size_t			 totlen;
5236 		int			 i;
5237 
5238 		if (io->esize != sizeof(*ioe)) {
5239 			error = ENODEV;
5240 			break;
5241 		}
5242 
5243 		if (io->size < 0 ||
5244 		    io->size > pf_ioctl_maxcount ||
5245 		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
5246 			error = EINVAL;
5247 			break;
5248 		}
5249 
5250 		totlen = sizeof(struct pfioc_trans_e) * io->size;
5251 		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
5252 		    M_TEMP, M_WAITOK);
5253 		error = copyin(io->array, ioes, totlen);
5254 		if (error) {
5255 			free(ioes, M_TEMP);
5256 			break;
5257 		}
5258 		PF_RULES_WLOCK();
5259 		/* First pass: make sure every element will succeed. */
5260 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5261 			ioe->anchor[sizeof(ioe->anchor) - 1] = 0;
5262 			switch (ioe->rs_num) {
5263 			case PF_RULESET_ETH:
5264 				ers = pf_find_keth_ruleset(ioe->anchor);
5265 				if (ers == NULL || ioe->ticket == 0 ||
5266 				    ioe->ticket != ers->inactive.ticket) {
5267 					PF_RULES_WUNLOCK();
5268 					free(ioes, M_TEMP);
5269 					error = EINVAL;
5270 					goto fail;
5271 				}
5272 				break;
5273 #ifdef ALTQ
5274 			case PF_RULESET_ALTQ:
5275 				if (ioe->anchor[0]) {
5276 					PF_RULES_WUNLOCK();
5277 					free(ioes, M_TEMP);
5278 					error = EINVAL;
5279 					goto fail;
5280 				}
5281 				if (!V_altqs_inactive_open || ioe->ticket !=
5282 				    V_ticket_altqs_inactive) {
5283 					PF_RULES_WUNLOCK();
5284 					free(ioes, M_TEMP);
5285 					error = EBUSY;
5286 					goto fail;
5287 				}
5288 				break;
5289 #endif /* ALTQ */
5290 			case PF_RULESET_TABLE:
5291 				rs = pf_find_kruleset(ioe->anchor);
5292 				if (rs == NULL || !rs->topen || ioe->ticket !=
5293 				    rs->tticket) {
5294 					PF_RULES_WUNLOCK();
5295 					free(ioes, M_TEMP);
5296 					error = EBUSY;
5297 					goto fail;
5298 				}
5299 				break;
5300 			default:
5301 				if (ioe->rs_num < 0 || ioe->rs_num >=
5302 				    PF_RULESET_MAX) {
5303 					PF_RULES_WUNLOCK();
5304 					free(ioes, M_TEMP);
5305 					error = EINVAL;
5306 					goto fail;
5307 				}
5308 				rs = pf_find_kruleset(ioe->anchor);
5309 				if (rs == NULL ||
5310 				    !rs->rules[ioe->rs_num].inactive.open ||
5311 				    rs->rules[ioe->rs_num].inactive.ticket !=
5312 				    ioe->ticket) {
5313 					PF_RULES_WUNLOCK();
5314 					free(ioes, M_TEMP);
5315 					error = EBUSY;
5316 					goto fail;
5317 				}
5318 				break;
5319 			}
5320 		}
5321 		/* Second pass: do the commit - no errors should happen here. */
5322 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5323 			switch (ioe->rs_num) {
5324 			case PF_RULESET_ETH:
5325 				if ((error = pf_commit_eth(ioe->ticket, ioe->anchor))) {
5326 					PF_RULES_WUNLOCK();
5327 					free(ioes, M_TEMP);
5328 					goto fail; /* really bad */
5329 				}
5330 				break;
5331 #ifdef ALTQ
5332 			case PF_RULESET_ALTQ:
5333 				if ((error = pf_commit_altq(ioe->ticket))) {
5334 					PF_RULES_WUNLOCK();
5335 					free(ioes, M_TEMP);
5336 					goto fail; /* really bad */
5337 				}
5338 				break;
5339 #endif /* ALTQ */
5340 			case PF_RULESET_TABLE:
5341 			    {
5342 				struct pfr_table table;
5343 
5344 				bzero(&table, sizeof(table));
5345 				(void)strlcpy(table.pfrt_anchor, ioe->anchor,
5346 				    sizeof(table.pfrt_anchor));
5347 				if ((error = pfr_ina_commit(&table,
5348 				    ioe->ticket, NULL, NULL, 0))) {
5349 					PF_RULES_WUNLOCK();
5350 					free(ioes, M_TEMP);
5351 					goto fail; /* really bad */
5352 				}
5353 				break;
5354 			    }
5355 			default:
5356 				if ((error = pf_commit_rules(ioe->ticket,
5357 				    ioe->rs_num, ioe->anchor))) {
5358 					PF_RULES_WUNLOCK();
5359 					free(ioes, M_TEMP);
5360 					goto fail; /* really bad */
5361 				}
5362 				break;
5363 			}
5364 		}
5365 		PF_RULES_WUNLOCK();
5366 
5367 		/* Only hook into Ethernet traffic if we've got rules for it. */
5368 		if (!TAILQ_EMPTY(V_pf_keth->active.rules))
5369 			hook_pf_eth();
5370 		else
5371 			dehook_pf_eth();
5372 
5373 		free(ioes, M_TEMP);
5374 		break;
5375 	}
5376 
5377 	case DIOCGETSRCNODES: {
5378 		struct pfioc_src_nodes	*psn = (struct pfioc_src_nodes *)addr;
5379 		struct pf_srchash	*sh;
5380 		struct pf_ksrc_node	*n;
5381 		struct pf_src_node	*p, *pstore;
5382 		uint32_t		 i, nr = 0;
5383 
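		/*
		 * First pass: count the source nodes so the caller can size
		 * its buffer.  The second pass below copies out at most
		 * psn_len bytes worth of entries.
		 */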
5384 		for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask;
5385 				i++, sh++) {
5386 			PF_HASHROW_LOCK(sh);
5387 			LIST_FOREACH(n, &sh->nodes, entry)
5388 				nr++;
5389 			PF_HASHROW_UNLOCK(sh);
5390 		}
5391 
5392 		psn->psn_len = min(psn->psn_len,
5393 		    sizeof(struct pf_src_node) * nr);
5394 
5395 		if (psn->psn_len == 0) {
5396 			psn->psn_len = sizeof(struct pf_src_node) * nr;
5397 			break;
5398 		}
5399 
5400 		nr = 0;
5401 
5402 		p = pstore = malloc(psn->psn_len, M_TEMP, M_WAITOK | M_ZERO);
5403 		for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask;
5404 		    i++, sh++) {
5405 		    PF_HASHROW_LOCK(sh);
5406 		    LIST_FOREACH(n, &sh->nodes, entry) {
5408 			if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
5409 				break;
5410 
5411 			pf_src_node_copy(n, p);
5412 
5413 			p++;
5414 			nr++;
5415 		    }
5416 		    PF_HASHROW_UNLOCK(sh);
5417 		}
5418 		error = copyout(pstore, psn->psn_src_nodes,
5419 		    sizeof(struct pf_src_node) * nr);
5420 		if (error) {
5421 			free(pstore, M_TEMP);
5422 			break;
5423 		}
5424 		psn->psn_len = sizeof(struct pf_src_node) * nr;
5425 		free(pstore, M_TEMP);
5426 		break;
5427 	}
5428 
5429 	case DIOCCLRSRCNODES: {
5430 		pf_clear_srcnodes(NULL);
5431 		pf_purge_expired_src_nodes();
5432 		break;
5433 	}
5434 
5435 	case DIOCKILLSRCNODES:
5436 		pf_kill_srcnodes((struct pfioc_src_node_kill *)addr);
5437 		break;
5438 
5439 #ifdef COMPAT_FREEBSD13
5440 	case DIOCKEEPCOUNTERS_FREEBSD13:
5441 #endif
5442 	case DIOCKEEPCOUNTERS:
5443 		error = pf_keepcounters((struct pfioc_nv *)addr);
5444 		break;
5445 
5446 	case DIOCGETSYNCOOKIES:
5447 		error = pf_get_syncookies((struct pfioc_nv *)addr);
5448 		break;
5449 
5450 	case DIOCSETSYNCOOKIES:
5451 		error = pf_set_syncookies((struct pfioc_nv *)addr);
5452 		break;
5453 
5454 	case DIOCSETHOSTID: {
5455 		u_int32_t	*hostid = (u_int32_t *)addr;
5456 
5457 		PF_RULES_WLOCK();
5458 		if (*hostid == 0)
5459 			V_pf_status.hostid = arc4random();
5460 		else
5461 			V_pf_status.hostid = *hostid;
5462 		PF_RULES_WUNLOCK();
5463 		break;
5464 	}
5465 
5466 	case DIOCOSFPFLUSH:
5467 		PF_RULES_WLOCK();
5468 		pf_osfp_flush();
5469 		PF_RULES_WUNLOCK();
5470 		break;
5471 
5472 	case DIOCIGETIFACES: {
5473 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
5474 		struct pfi_kif *ifstore;
5475 		size_t bufsiz;
5476 
5477 		if (io->pfiio_esize != sizeof(struct pfi_kif)) {
5478 			error = ENODEV;
5479 			break;
5480 		}
5481 
5482 		if (io->pfiio_size < 0 ||
5483 		    io->pfiio_size > pf_ioctl_maxcount ||
5484 		    WOULD_OVERFLOW(io->pfiio_size, sizeof(struct pfi_kif))) {
5485 			error = EINVAL;
5486 			break;
5487 		}
5488 
5489 		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
5490 
5491 		bufsiz = io->pfiio_size * sizeof(struct pfi_kif);
5492 		ifstore = mallocarray(io->pfiio_size, sizeof(struct pfi_kif),
5493 		    M_TEMP, M_WAITOK | M_ZERO);
5494 
5495 		PF_RULES_RLOCK();
5496 		pfi_get_ifaces(io->pfiio_name, ifstore, &io->pfiio_size);
5497 		PF_RULES_RUNLOCK();
5498 		error = copyout(ifstore, io->pfiio_buffer, bufsiz);
5499 		free(ifstore, M_TEMP);
5500 		break;
5501 	}
5502 
5503 	case DIOCSETIFFLAG: {
5504 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
5505 
5506 		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
5507 
5508 		PF_RULES_WLOCK();
5509 		error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
5510 		PF_RULES_WUNLOCK();
5511 		break;
5512 	}
5513 
5514 	case DIOCCLRIFFLAG: {
5515 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
5516 
5517 		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
5518 
5519 		PF_RULES_WLOCK();
5520 		error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
5521 		PF_RULES_WUNLOCK();
5522 		break;
5523 	}
5524 
5525 	case DIOCSETREASS: {
5526 		u_int32_t	*reass = (u_int32_t *)addr;
5527 
5528 		V_pf_status.reass = *reass & (PF_REASS_ENABLED|PF_REASS_NODF);
5529 		/* Clearing the DF flag without reassembly enabled is not a
5530 		 * valid combination. Disable reassembly in that case. */
5531 		if (!(V_pf_status.reass & PF_REASS_ENABLED))
5532 			V_pf_status.reass = 0;
5533 		break;
5534 	}
5535 
5536 	default:
5537 		error = ENODEV;
5538 		break;
5539 	}
5540 fail:
5541 	CURVNET_RESTORE();
5542 
5543 #undef ERROUT_IOCTL
5544 
5545 	return (error);
5546 }
5547 
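/*
 * Export a kernel state into the pfsync wire/ioctl representation,
 * converting most multi-byte fields to network byte order.  The layout of
 * the version-specific fields depends on msg_version (the 13.01 or 14.00
 * pfsync message format).
 */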
5548 void
5549 pfsync_state_export(union pfsync_state_union *sp, struct pf_kstate *st, int msg_version)
5550 {
5551 	bzero(sp, sizeof(union pfsync_state_union));
5552 
5553 	/* copy from state key */
5554 	sp->pfs_1301.key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
5555 	sp->pfs_1301.key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
5556 	sp->pfs_1301.key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
5557 	sp->pfs_1301.key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
5558 	sp->pfs_1301.key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
5559 	sp->pfs_1301.key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
5560 	sp->pfs_1301.key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
5561 	sp->pfs_1301.key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
5562 	sp->pfs_1301.proto = st->key[PF_SK_WIRE]->proto;
5563 	sp->pfs_1301.af = st->key[PF_SK_WIRE]->af;
5564 
5565 	/* copy from state */
5566 	strlcpy(sp->pfs_1301.ifname, st->kif->pfik_name, sizeof(sp->pfs_1301.ifname));
5567 	bcopy(&st->rt_addr, &sp->pfs_1301.rt_addr, sizeof(sp->pfs_1301.rt_addr));
5568 	sp->pfs_1301.creation = htonl(time_uptime - (st->creation / 1000));
5569 	sp->pfs_1301.expire = pf_state_expires(st);
5570 	if (sp->pfs_1301.expire <= time_uptime)
5571 		sp->pfs_1301.expire = htonl(0);
5572 	else
5573 		sp->pfs_1301.expire = htonl(sp->pfs_1301.expire - time_uptime);
5574 
5575 	sp->pfs_1301.direction = st->direction;
5576 	sp->pfs_1301.log = st->act.log;
5577 	sp->pfs_1301.timeout = st->timeout;
5578 
5579 	switch (msg_version) {
5580 		case PFSYNC_MSG_VERSION_1301:
5581 			sp->pfs_1301.state_flags = st->state_flags;
5582 			break;
5583 		case PFSYNC_MSG_VERSION_1400:
5584 			sp->pfs_1400.state_flags = htons(st->state_flags);
5585 			sp->pfs_1400.qid = htons(st->act.qid);
5586 			sp->pfs_1400.pqid = htons(st->act.pqid);
5587 			sp->pfs_1400.dnpipe = htons(st->act.dnpipe);
5588 			sp->pfs_1400.dnrpipe = htons(st->act.dnrpipe);
5589 			sp->pfs_1400.rtableid = htonl(st->act.rtableid);
5590 			sp->pfs_1400.min_ttl = st->act.min_ttl;
5591 			sp->pfs_1400.set_tos = st->act.set_tos;
5592 			sp->pfs_1400.max_mss = htons(st->act.max_mss);
5593 			sp->pfs_1400.set_prio[0] = st->act.set_prio[0];
5594 			sp->pfs_1400.set_prio[1] = st->act.set_prio[1];
5595 			sp->pfs_1400.rt = st->rt;
5596 			if (st->rt_kif)
5597 				strlcpy(sp->pfs_1400.rt_ifname,
5598 				    st->rt_kif->pfik_name,
5599 				    sizeof(sp->pfs_1400.rt_ifname));
5600 			break;
5601 		default:
5602 			panic("%s: Unsupported pfsync_msg_version %d",
5603 			    __func__, msg_version);
5604 	}
5605 
5606 	if (st->src_node)
5607 		sp->pfs_1301.sync_flags |= PFSYNC_FLAG_SRCNODE;
5608 	if (st->nat_src_node)
5609 		sp->pfs_1301.sync_flags |= PFSYNC_FLAG_NATSRCNODE;
5610 
5611 	sp->pfs_1301.id = st->id;
5612 	sp->pfs_1301.creatorid = st->creatorid;
5613 	pf_state_peer_hton(&st->src, &sp->pfs_1301.src);
5614 	pf_state_peer_hton(&st->dst, &sp->pfs_1301.dst);
5615 
5616 	if (st->rule.ptr == NULL)
5617 		sp->pfs_1301.rule = htonl(-1);
5618 	else
5619 		sp->pfs_1301.rule = htonl(st->rule.ptr->nr);
5620 	if (st->anchor.ptr == NULL)
5621 		sp->pfs_1301.anchor = htonl(-1);
5622 	else
5623 		sp->pfs_1301.anchor = htonl(st->anchor.ptr->nr);
5624 	if (st->nat_rule.ptr == NULL)
5625 		sp->pfs_1301.nat_rule = htonl(-1);
5626 	else
5627 		sp->pfs_1301.nat_rule = htonl(st->nat_rule.ptr->nr);
5628 
5629 	pf_state_counter_hton(st->packets[0], sp->pfs_1301.packets[0]);
5630 	pf_state_counter_hton(st->packets[1], sp->pfs_1301.packets[1]);
5631 	pf_state_counter_hton(st->bytes[0], sp->pfs_1301.bytes[0]);
5632 	pf_state_counter_hton(st->bytes[1], sp->pfs_1301.bytes[1]);
5633 }
5634 
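/*
 * Export a kernel state in the native pf_state_export format used by
 * DIOCGETSTATESV2.  As above, multi-byte fields are converted to network
 * byte order.
 */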
5635 void
5636 pf_state_export(struct pf_state_export *sp, struct pf_kstate *st)
5637 {
5638 	bzero(sp, sizeof(*sp));
5639 
5640 	sp->version = PF_STATE_VERSION;
5641 
5642 	/* copy from state key */
5643 	sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
5644 	sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
5645 	sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
5646 	sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
5647 	sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
5648 	sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
5649 	sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
5650 	sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
5651 	sp->proto = st->key[PF_SK_WIRE]->proto;
5652 	sp->af = st->key[PF_SK_WIRE]->af;
5653 
5654 	/* copy from state */
5655 	strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
5656 	strlcpy(sp->orig_ifname, st->orig_kif->pfik_name,
5657 	    sizeof(sp->orig_ifname));
5658 	bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr));
5659 	sp->creation = htonl(time_uptime - (st->creation / 1000));
5660 	sp->expire = pf_state_expires(st);
5661 	if (sp->expire <= time_uptime)
5662 		sp->expire = htonl(0);
5663 	else
5664 		sp->expire = htonl(sp->expire - time_uptime);
5665 
5666 	sp->direction = st->direction;
5667 	sp->log = st->act.log;
5668 	sp->timeout = st->timeout;
5669 	/* 8 bits for the old libpfctl, 16 bits for the new libpfctl */
5670 	sp->state_flags_compat = st->state_flags;
5671 	sp->state_flags = htons(st->state_flags);
5672 	if (st->src_node)
5673 		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
5674 	if (st->nat_src_node)
5675 		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;
5676 
5677 	sp->id = st->id;
5678 	sp->creatorid = st->creatorid;
5679 	pf_state_peer_hton(&st->src, &sp->src);
5680 	pf_state_peer_hton(&st->dst, &sp->dst);
5681 
5682 	if (st->rule.ptr == NULL)
5683 		sp->rule = htonl(-1);
5684 	else
5685 		sp->rule = htonl(st->rule.ptr->nr);
5686 	if (st->anchor.ptr == NULL)
5687 		sp->anchor = htonl(-1);
5688 	else
5689 		sp->anchor = htonl(st->anchor.ptr->nr);
5690 	if (st->nat_rule.ptr == NULL)
5691 		sp->nat_rule = htonl(-1);
5692 	else
5693 		sp->nat_rule = htonl(st->nat_rule.ptr->nr);
5694 
5695 	sp->packets[0] = st->packets[0];
5696 	sp->packets[1] = st->packets[1];
5697 	sp->bytes[0] = st->bytes[0];
5698 	sp->bytes[1] = st->bytes[1];
5699 
5700 	sp->qid = htons(st->act.qid);
5701 	sp->pqid = htons(st->act.pqid);
5702 	sp->dnpipe = htons(st->act.dnpipe);
5703 	sp->dnrpipe = htons(st->act.dnrpipe);
5704 	sp->rtableid = htonl(st->act.rtableid);
5705 	sp->min_ttl = st->act.min_ttl;
5706 	sp->set_tos = st->act.set_tos;
5707 	sp->max_mss = htons(st->act.max_mss);
5708 	sp->rt = st->rt;
5709 	if (st->rt_kif)
5710 		strlcpy(sp->rt_ifname, st->rt_kif->pfik_name,
5711 		    sizeof(sp->rt_ifname));
5712 	sp->set_prio[0] = st->act.set_prio[0];
5713 	sp->set_prio[1] = st->act.set_prio[1];
5715 }
5716 
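/*
 * Replace the kernel-only table pointer in an address wrap with the
 * address count userland expects: the active table's size, or -1 if the
 * table is not active.
 */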
5717 static void
5718 pf_tbladdr_copyout(struct pf_addr_wrap *aw)
5719 {
5720 	struct pfr_ktable *kt;
5721 
5722 	KASSERT(aw->type == PF_ADDR_TABLE, ("%s: type %u", __func__, aw->type));
5723 
5724 	kt = aw->p.tbl;
5725 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
5726 		kt = kt->pfrkt_root;
5727 	aw->p.tbl = NULL;
5728 	aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
5729 		kt->pfrkt_cnt : -1;
5730 }
5731 
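/*
 * Pack a parallel set of counter values, names and ids into a nested
 * nvlist under the given name, for export via pf_getstatus() below.
 */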
5732 static int
5733 pf_add_status_counters(nvlist_t *nvl, const char *name, counter_u64_t *counters,
5734     size_t number, char **names)
5735 {
5736 	nvlist_t        *nvc;
5737 
5738 	nvc = nvlist_create(0);
5739 	if (nvc == NULL)
5740 		return (ENOMEM);
5741 
5742 	for (int i = 0; i < number; i++) {
5743 		nvlist_append_number_array(nvc, "counters",
5744 		    counter_u64_fetch(counters[i]));
5745 		nvlist_append_string_array(nvc, "names",
5746 		    names[i]);
5747 		nvlist_append_number_array(nvc, "ids",
5748 		    i);
5749 	}
5750 	nvlist_add_nvlist(nvl, name, nvc);
5751 	nvlist_destroy(nvc);
5752 
5753 	return (0);
5754 }
5755 
5756 static int
5757 pf_getstatus(struct pfioc_nv *nv)
5758 {
5759 	nvlist_t        *nvl = NULL, *nvc = NULL;
5760 	void            *nvlpacked = NULL;
5761 	int              error;
5762 	struct pf_status s;
5763 	char *pf_reasons[PFRES_MAX+1] = PFRES_NAMES;
5764 	char *pf_lcounter[KLCNT_MAX+1] = KLCNT_NAMES;
5765 	char *pf_fcounter[FCNT_MAX+1] = FCNT_NAMES;
5766 	PF_RULES_RLOCK_TRACKER;
5767 
5768 #define ERROUT(x)      ERROUT_FUNCTION(errout, x)
5769 
5770 	PF_RULES_RLOCK();
5771 
5772 	nvl = nvlist_create(0);
5773 	if (nvl == NULL)
5774 		ERROUT(ENOMEM);
5775 
5776 	nvlist_add_bool(nvl, "running", V_pf_status.running);
5777 	nvlist_add_number(nvl, "since", V_pf_status.since);
5778 	nvlist_add_number(nvl, "debug", V_pf_status.debug);
5779 	nvlist_add_number(nvl, "hostid", V_pf_status.hostid);
5780 	nvlist_add_number(nvl, "states", V_pf_status.states);
5781 	nvlist_add_number(nvl, "src_nodes", V_pf_status.src_nodes);
5782 	nvlist_add_number(nvl, "reass", V_pf_status.reass);
5783 	nvlist_add_bool(nvl, "syncookies_active",
5784 	    V_pf_status.syncookies_active);
5785 	nvlist_add_number(nvl, "halfopen_states", V_pf_status.states_halfopen);
5786 
5787 	/* counters */
5788 	error = pf_add_status_counters(nvl, "counters", V_pf_status.counters,
5789 	    PFRES_MAX, pf_reasons);
5790 	if (error != 0)
5791 		ERROUT(error);
5792 
5793 	/* lcounters */
5794 	error = pf_add_status_counters(nvl, "lcounters", V_pf_status.lcounters,
5795 	    KLCNT_MAX, pf_lcounter);
5796 	if (error != 0)
5797 		ERROUT(error);
5798 
5799 	/* fcounters */
5800 	nvc = nvlist_create(0);
5801 	if (nvc == NULL)
5802 		ERROUT(ENOMEM);
5803 
5804 	for (int i = 0; i < FCNT_MAX; i++) {
5805 		nvlist_append_number_array(nvc, "counters",
5806 		    pf_counter_u64_fetch(&V_pf_status.fcounters[i]));
5807 		nvlist_append_string_array(nvc, "names",
5808 		    pf_fcounter[i]);
5809 		nvlist_append_number_array(nvc, "ids",
5810 		    i);
5811 	}
5812 	nvlist_add_nvlist(nvl, "fcounters", nvc);
5813 	nvlist_destroy(nvc);
5814 	nvc = NULL;
5815 
5816 	/* scounters */
5817 	error = pf_add_status_counters(nvl, "scounters", V_pf_status.scounters,
5818 	    SCNT_MAX, pf_fcounter);
5819 	if (error != 0)
5820 		ERROUT(error);
5821 
5822 	nvlist_add_string(nvl, "ifname", V_pf_status.ifname);
5823 	nvlist_add_binary(nvl, "chksum", V_pf_status.pf_chksum,
5824 	    PF_MD5_DIGEST_LENGTH);
5825 
5826 	pfi_update_status(V_pf_status.ifname, &s);
5827 
5828 	/* pcounters / bcounters */
5829 	for (int i = 0; i < 2; i++) {
5830 		for (int j = 0; j < 2; j++) {
5831 			for (int k = 0; k < 2; k++) {
5832 				nvlist_append_number_array(nvl, "pcounters",
5833 				    s.pcounters[i][j][k]);
5834 			}
5835 			nvlist_append_number_array(nvl, "bcounters",
5836 			    s.bcounters[i][j]);
5837 		}
5838 	}
5839 
5840 	nvlpacked = nvlist_pack(nvl, &nv->len);
5841 	if (nvlpacked == NULL)
5842 		ERROUT(ENOMEM);
5843 
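	/*
	 * A caller passing size == 0 is only probing for the required
	 * buffer length; report success with nv->len already set by
	 * nvlist_pack() above.
	 */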
5844 	if (nv->size == 0)
5845 		ERROUT(0);
5846 	else if (nv->size < nv->len)
5847 		ERROUT(ENOSPC);
5848 
5849 	PF_RULES_RUNLOCK();
5850 	error = copyout(nvlpacked, nv->data, nv->len);
5851 	goto done;
5852 
5853 #undef ERROUT
5854 errout:
5855 	PF_RULES_RUNLOCK();
5856 done:
5857 	free(nvlpacked, M_NVLIST);
5858 	nvlist_destroy(nvc);
5859 	nvlist_destroy(nvl);
5860 
5861 	return (error);
5862 }
5863 
5864 /*
5865  * XXX - Check for version mismatch!!!
5866  */
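/*
 * Expire all states.  PFSTATE_NOSYNC suppresses the per-state pfsync
 * delete messages, and since pf_unlink_state() drops the hash row
 * lock, every row is rescanned from the top after each unlink.
 */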
5867 static void
5868 pf_clear_all_states(void)
5869 {
5870 	struct epoch_tracker	 et;
5871 	struct pf_kstate	*s;
5872 	u_int i;
5873 
5874 	NET_EPOCH_ENTER(et);
5875 	for (i = 0; i <= V_pf_hashmask; i++) {
5876 		struct pf_idhash *ih = &V_pf_idhash[i];
5877 relock:
5878 		PF_HASHROW_LOCK(ih);
5879 		LIST_FOREACH(s, &ih->states, entry) {
5880 			s->timeout = PFTM_PURGE;
5881 			/* Don't send out individual delete messages. */
5882 			s->state_flags |= PFSTATE_NOSYNC;
5883 			pf_unlink_state(s);
5884 			goto relock;
5885 		}
5886 		PF_HASHROW_UNLOCK(ih);
5887 	}
5888 	NET_EPOCH_EXIT(et);
5889 }
5890 
5891 static int
5892 pf_clear_tables(void)
5893 {
5894 	struct pfioc_table io;
5895 	int error;
5896 
5897 	bzero(&io, sizeof(io));
5898 	io.pfrio_flags |= PFR_FLAG_ALLRSETS;
5899 
5900 	error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel,
5901 	    io.pfrio_flags);
5902 
5903 	return (error);
5904 }
5905 
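/*
 * Detach states from the given source node (from all source nodes
 * when n is NULL) and mark the node(s) as expired so the purge logic
 * reclaims them.
 */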
5906 static void
5907 pf_clear_srcnodes(struct pf_ksrc_node *n)
5908 {
5909 	struct pf_kstate *s;
5910 	int i;
5911 
5912 	for (i = 0; i <= V_pf_hashmask; i++) {
5913 		struct pf_idhash *ih = &V_pf_idhash[i];
5914 
5915 		PF_HASHROW_LOCK(ih);
5916 		LIST_FOREACH(s, &ih->states, entry) {
5917 			if (n == NULL || n == s->src_node)
5918 				s->src_node = NULL;
5919 			if (n == NULL || n == s->nat_src_node)
5920 				s->nat_src_node = NULL;
5921 		}
5922 		PF_HASHROW_UNLOCK(ih);
5923 	}
5924 
5925 	if (n == NULL) {
5926 		struct pf_srchash *sh;
5927 
5928 		for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask;
5929 		    i++, sh++) {
5930 			PF_HASHROW_LOCK(sh);
5931 			LIST_FOREACH(n, &sh->nodes, entry) {
5932 				n->expire = 1;
5933 				n->states = 0;
5934 			}
5935 			PF_HASHROW_UNLOCK(sh);
5936 		}
5937 	} else {
5938 		/* XXX: hash slot should already be locked here. */
5939 		n->expire = 1;
5940 		n->states = 0;
5941 	}
5942 }
5943 
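/*
 * DIOCKILLSRCNODES: a first pass unlinks every source node matching
 * the src/dst filter onto a private kill list, a second pass clears
 * the state pointers to the unlinked nodes, and the list is then
 * freed.
 */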
5944 static void
5945 pf_kill_srcnodes(struct pfioc_src_node_kill *psnk)
5946 {
5947 	struct pf_ksrc_node_list	 kill;
5948 
5949 	LIST_INIT(&kill);
5950 	for (int i = 0; i <= V_pf_srchashmask; i++) {
5951 		struct pf_srchash *sh = &V_pf_srchash[i];
5952 		struct pf_ksrc_node *sn, *tmp;
5953 
5954 		PF_HASHROW_LOCK(sh);
5955 		LIST_FOREACH_SAFE(sn, &sh->nodes, entry, tmp)
5956 			if (PF_MATCHA(psnk->psnk_src.neg,
5957 			      &psnk->psnk_src.addr.v.a.addr,
5958 			      &psnk->psnk_src.addr.v.a.mask,
5959 			      &sn->addr, sn->af) &&
5960 			    PF_MATCHA(psnk->psnk_dst.neg,
5961 			      &psnk->psnk_dst.addr.v.a.addr,
5962 			      &psnk->psnk_dst.addr.v.a.mask,
5963 			      &sn->raddr, sn->af)) {
5964 				pf_unlink_src_node(sn);
5965 				LIST_INSERT_HEAD(&kill, sn, entry);
5966 				sn->expire = 1;
5967 			}
5968 		PF_HASHROW_UNLOCK(sh);
5969 	}
5970 
5971 	for (int i = 0; i <= V_pf_hashmask; i++) {
5972 		struct pf_idhash *ih = &V_pf_idhash[i];
5973 		struct pf_kstate *s;
5974 
5975 		PF_HASHROW_LOCK(ih);
5976 		LIST_FOREACH(s, &ih->states, entry) {
5977 			if (s->src_node && s->src_node->expire == 1)
5978 				s->src_node = NULL;
5979 			if (s->nat_src_node && s->nat_src_node->expire == 1)
5980 				s->nat_src_node = NULL;
5981 		}
5982 		PF_HASHROW_UNLOCK(ih);
5983 	}
5984 
5985 	psnk->psnk_killed = pf_free_src_nodes(&kill);
5986 }
5987 
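/*
 * DIOCKEEPCOUNTERS: let rule counters survive a ruleset reload.  A
 * userspace caller packs a single boolean, roughly (a sketch, error
 * handling omitted):
 *
 *	nvlist_t *nvl = nvlist_create(0);
 *	nvlist_add_bool(nvl, "keep_counters", true);
 *	nv.data = nvlist_pack(nvl, &nv.len);
 *	nv.size = nv.len;
 *	ioctl(dev, DIOCKEEPCOUNTERS, &nv);
 */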
5988 static int
5989 pf_keepcounters(struct pfioc_nv *nv)
5990 {
5991 	nvlist_t	*nvl = NULL;
5992 	void		*nvlpacked = NULL;
5993 	int		 error = 0;
5994 
5995 #define	ERROUT(x)	ERROUT_FUNCTION(on_error, x)
5996 
5997 	if (nv->len > pf_ioctl_maxcount)
5998 		ERROUT(ENOMEM);
5999 
6000 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
6001 	if (nvlpacked == NULL)
6002 		ERROUT(ENOMEM);
6003 
6004 	error = copyin(nv->data, nvlpacked, nv->len);
6005 	if (error)
6006 		ERROUT(error);
6007 
6008 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
6009 	if (nvl == NULL)
6010 		ERROUT(EBADMSG);
6011 
6012 	if (!nvlist_exists_bool(nvl, "keep_counters"))
6013 		ERROUT(EBADMSG);
6014 
6015 	V_pf_status.keep_counters = nvlist_get_bool(nvl, "keep_counters");
6016 
#undef ERROUT
6017 on_error:
6018 	nvlist_destroy(nvl);
6019 	free(nvlpacked, M_NVLIST);
6020 	return (error);
6021 }
6022 
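/*
 * Unlink every state on the interface named in the kill request (all
 * states when no name is given).  pfsync is notified once through the
 * bulk clear hook instead of with per-state delete messages.
 */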
6023 unsigned int
6024 pf_clear_states(const struct pf_kstate_kill *kill)
6025 {
6026 	struct pf_state_key_cmp	 match_key;
6027 	struct pf_kstate	*s;
6028 	struct pfi_kkif	*kif;
6029 	int		 idx;
6030 	unsigned int	 killed = 0, dir;
6031 
6032 	NET_EPOCH_ASSERT();
6033 
6034 	for (unsigned int i = 0; i <= V_pf_hashmask; i++) {
6035 		struct pf_idhash *ih = &V_pf_idhash[i];
6036 
6037 relock_DIOCCLRSTATES:
6038 		PF_HASHROW_LOCK(ih);
6039 		LIST_FOREACH(s, &ih->states, entry) {
6040 			/* For floating states look at the original kif. */
6041 			kif = s->kif == V_pfi_all ? s->orig_kif : s->kif;
6042 
6043 			if (kill->psk_ifname[0] &&
6044 			    strcmp(kill->psk_ifname,
6045 			    kif->pfik_name))
6046 				continue;
6047 
6048 			if (kill->psk_kill_match) {
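			/*
			 * Build the reversed key of this state so that
			 * its counterpart, e.g. the state a NAT peer
			 * created for the same flow, is killed as well.
			 */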
6049 				bzero(&match_key, sizeof(match_key));
6050 
6051 				if (s->direction == PF_OUT) {
6052 					dir = PF_IN;
6053 					idx = PF_SK_STACK;
6054 				} else {
6055 					dir = PF_OUT;
6056 					idx = PF_SK_WIRE;
6057 				}
6058 
6059 				match_key.af = s->key[idx]->af;
6060 				match_key.proto = s->key[idx]->proto;
6061 				PF_ACPY(&match_key.addr[0],
6062 				    &s->key[idx]->addr[1], match_key.af);
6063 				match_key.port[0] = s->key[idx]->port[1];
6064 				PF_ACPY(&match_key.addr[1],
6065 				    &s->key[idx]->addr[0], match_key.af);
6066 				match_key.port[1] = s->key[idx]->port[0];
6067 			}
6068 
6069 			/* Don't send out individual delete messages. */
6073 			s->state_flags |= PFSTATE_NOSYNC;
6074 			pf_unlink_state(s);
6075 			killed++;
6076 
6077 			if (kill->psk_kill_match)
6078 				killed += pf_kill_matching_state(&match_key,
6079 				    dir);
6080 
6081 			goto relock_DIOCCLRSTATES;
6082 		}
6083 		PF_HASHROW_UNLOCK(ih);
6084 	}
6085 
6086 	if (V_pfsync_clear_states_ptr != NULL)
6087 		V_pfsync_clear_states_ptr(V_pf_status.hostid, kill->psk_ifname);
6088 
6089 	return (killed);
6090 }
6091 
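/*
 * Kill the states selected by the request: a single state when an
 * (id, creatorid) pair is given, otherwise every state matching the
 * filter, hash row by hash row.
 */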
6092 void
6093 pf_killstates(struct pf_kstate_kill *kill, unsigned int *killed)
6094 {
6095 	struct pf_kstate	*s;
6096 
6097 	NET_EPOCH_ASSERT();
6098 	if (kill->psk_pfcmp.id) {
6099 		if (kill->psk_pfcmp.creatorid == 0)
6100 			kill->psk_pfcmp.creatorid = V_pf_status.hostid;
6101 		if ((s = pf_find_state_byid(kill->psk_pfcmp.id,
6102 		    kill->psk_pfcmp.creatorid))) {
6103 			pf_unlink_state(s);
6104 			*killed = 1;
6105 		}
6106 		return;
6107 	}
6108 
6109 	for (unsigned int i = 0; i <= V_pf_hashmask; i++)
6110 		*killed += pf_killstates_row(kill, &V_pf_idhash[i]);
6111 }
6112 
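/*
 * DIOCKILLSTATESNV: unpack the kill request from an nvlist, run it
 * under the net epoch and hand the kill count back as a packed nvlist.
 */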
6113 static int
6114 pf_killstates_nv(struct pfioc_nv *nv)
6115 {
6116 	struct pf_kstate_kill	 kill;
6117 	struct epoch_tracker	 et;
6118 	nvlist_t		*nvl = NULL;
6119 	void			*nvlpacked = NULL;
6120 	int			 error = 0;
6121 	unsigned int		 killed = 0;
6122 
6123 #define ERROUT(x)	ERROUT_FUNCTION(on_error, x)
6124 
6125 	if (nv->len > pf_ioctl_maxcount)
6126 		ERROUT(ENOMEM);
6127 
6128 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
6129 	if (nvlpacked == NULL)
6130 		ERROUT(ENOMEM);
6131 
6132 	error = copyin(nv->data, nvlpacked, nv->len);
6133 	if (error)
6134 		ERROUT(error);
6135 
6136 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
6137 	if (nvl == NULL)
6138 		ERROUT(EBADMSG);
6139 
6140 	error = pf_nvstate_kill_to_kstate_kill(nvl, &kill);
6141 	if (error)
6142 		ERROUT(error);
6143 
6144 	NET_EPOCH_ENTER(et);
6145 	pf_killstates(&kill, &killed);
6146 	NET_EPOCH_EXIT(et);
6147 
6148 	free(nvlpacked, M_NVLIST);
6149 	nvlpacked = NULL;
6150 	nvlist_destroy(nvl);
6151 	nvl = nvlist_create(0);
6152 	if (nvl == NULL)
6153 		ERROUT(ENOMEM);
6154 
6155 	nvlist_add_number(nvl, "killed", killed);
6156 
6157 	nvlpacked = nvlist_pack(nvl, &nv->len);
6158 	if (nvlpacked == NULL)
6159 		ERROUT(ENOMEM);
6160 
6161 	if (nv->size == 0)
6162 		ERROUT(0);
6163 	else if (nv->size < nv->len)
6164 		ERROUT(ENOSPC);
6165 
6166 	error = copyout(nvlpacked, nv->data, nv->len);
6167 
#undef ERROUT
6168 on_error:
6169 	nvlist_destroy(nvl);
6170 	free(nvlpacked, M_NVLIST);
6171 	return (error);
6172 }
6173 
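/*
 * DIOCCLRSTATESNV: same nvlist plumbing as pf_killstates_nv(), but
 * the request is carried out by pf_clear_states().
 */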
6174 static int
6175 pf_clearstates_nv(struct pfioc_nv *nv)
6176 {
6177 	struct pf_kstate_kill	 kill;
6178 	struct epoch_tracker	 et;
6179 	nvlist_t		*nvl = NULL;
6180 	void			*nvlpacked = NULL;
6181 	int			 error = 0;
6182 	unsigned int		 killed;
6183 
6184 #define ERROUT(x)	ERROUT_FUNCTION(on_error, x)
6185 
6186 	if (nv->len > pf_ioctl_maxcount)
6187 		ERROUT(ENOMEM);
6188 
6189 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
6190 	if (nvlpacked == NULL)
6191 		ERROUT(ENOMEM);
6192 
6193 	error = copyin(nv->data, nvlpacked, nv->len);
6194 	if (error)
6195 		ERROUT(error);
6196 
6197 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
6198 	if (nvl == NULL)
6199 		ERROUT(EBADMSG);
6200 
6201 	error = pf_nvstate_kill_to_kstate_kill(nvl, &kill);
6202 	if (error)
6203 		ERROUT(error);
6204 
6205 	NET_EPOCH_ENTER(et);
6206 	killed = pf_clear_states(&kill);
6207 	NET_EPOCH_EXIT(et);
6208 
6209 	free(nvlpacked, M_NVLIST);
6210 	nvlpacked = NULL;
6211 	nvlist_destroy(nvl);
6212 	nvl = nvlist_create(0);
6213 	if (nvl == NULL)
6214 		ERROUT(ENOMEM);
6215 
6216 	nvlist_add_number(nvl, "killed", killed);
6217 
6218 	nvlpacked = nvlist_pack(nvl, &nv->len);
6219 	if (nvlpacked == NULL)
6220 		ERROUT(ENOMEM);
6221 
6222 	if (nv->size == 0)
6223 		ERROUT(0);
6224 	else if (nv->size < nv->len)
6225 		ERROUT(ENOSPC);
6226 
6227 	error = copyout(nvlpacked, nv->data, nv->len);
6228 
6229 #undef ERROUT
6230 on_error:
6231 	nvlist_destroy(nvl);
6232 	free(nvlpacked, M_NVLIST);
6233 	return (error);
6234 }
6235 
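/*
 * DIOCGETSTATENV: look up a single state by (id, creatorid) and
 * return it as a packed nvlist.
 */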
6236 static int
6237 pf_getstate(struct pfioc_nv *nv)
6238 {
6239 	nvlist_t		*nvl = NULL, *nvls;
6240 	void			*nvlpacked = NULL;
6241 	struct pf_kstate	*s = NULL;
6242 	int			 error = 0;
6243 	uint64_t		 id, creatorid;
6244 
6245 #define ERROUT(x)	ERROUT_FUNCTION(errout, x)
6246 
6247 	if (nv->len > pf_ioctl_maxcount)
6248 		ERROUT(ENOMEM);
6249 
6250 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
6251 	if (nvlpacked == NULL)
6252 		ERROUT(ENOMEM);
6253 
6254 	error = copyin(nv->data, nvlpacked, nv->len);
6255 	if (error)
6256 		ERROUT(error);
6257 
6258 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
6259 	if (nvl == NULL)
6260 		ERROUT(EBADMSG);
6261 
6262 	PFNV_CHK(pf_nvuint64(nvl, "id", &id));
6263 	PFNV_CHK(pf_nvuint64(nvl, "creatorid", &creatorid));
6264 
6265 	s = pf_find_state_byid(id, creatorid);
6266 	if (s == NULL)
6267 		ERROUT(ENOENT);
6268 
6269 	free(nvlpacked, M_NVLIST);
6270 	nvlpacked = NULL;
6271 	nvlist_destroy(nvl);
6272 	nvl = nvlist_create(0);
6273 	if (nvl == NULL)
6274 		ERROUT(ENOMEM);
6275 
6276 	nvls = pf_state_to_nvstate(s);
6277 	if (nvls == NULL)
6278 		ERROUT(ENOMEM);
6279 
6280 	nvlist_add_nvlist(nvl, "state", nvls);
6281 	nvlist_destroy(nvls);
6282 
6283 	nvlpacked = nvlist_pack(nvl, &nv->len);
6284 	if (nvlpacked == NULL)
6285 		ERROUT(ENOMEM);
6286 
6287 	if (nv->size == 0)
6288 		ERROUT(0);
6289 	else if (nv->size < nv->len)
6290 		ERROUT(ENOSPC);
6291 
6292 	error = copyout(nvlpacked, nv->data, nv->len);
6293 
6294 #undef ERROUT
6295 errout:
6296 	if (s != NULL)
6297 		PF_STATE_UNLOCK(s);
6298 	free(nvlpacked, M_NVLIST);
6299 	nvlist_destroy(nvl);
6300 	return (error);
6301 }
6302 
6303 /*
6304  * XXX - Check for version mismatch!!!
6305  */
6306 
6307 /*
6308  * Duplicate pfctl -Fa operation to get rid of as much as we can.
6309  */
6310 static int
6311 shutdown_pf(void)
6312 {
6313 	int error = 0;
6314 	u_int32_t t[5];
6315 	char nn = '\0';
6316 	struct pf_kanchor *anchor;
6317 	struct pf_keth_anchor *eth_anchor;
6318 	int rs_num;
6319 
6320 	do {
6321 		/* Unlink rules of all user defined anchors */
6322 		RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors) {
6323 			/* Wildcard-based anchors may lack an explicit anchor
6324 			 * rule, or they may have been left without rules.
6325 			 * Either way anchor.refcnt ends up 0, which the rest
6326 			 * of the logic does not expect. */
6327 			if (anchor->refcnt == 0)
6328 				anchor->refcnt = 1;
6329 			for (rs_num = 0; rs_num < PF_RULESET_MAX; ++rs_num) {
6330 				if ((error = pf_begin_rules(&t[rs_num], rs_num,
6331 				    anchor->path)) != 0) {
6332 					DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: "
6333 					    "anchor.path=%s rs_num=%d\n",
6334 					    anchor->path, rs_num));
6335 					goto error;	/* XXX: rollback? */
6336 				}
6337 			}
6338 			for (rs_num = 0; rs_num < PF_RULESET_MAX; ++rs_num) {
6339 				error = pf_commit_rules(t[rs_num], rs_num,
6340 				    anchor->path);
6341 				MPASS(error == 0);
6342 			}
6343 		}
6344 
6345 		/* Unlink rules of all user defined ether anchors */
6346 		RB_FOREACH(eth_anchor, pf_keth_anchor_global,
6347 		    &V_pf_keth_anchors) {
6348 			/* Wildcard-based anchors may lack an explicit anchor
6349 			 * rule, or they may have been left without rules.
6350 			 * Either way anchor.refcnt ends up 0, which the rest
6351 			 * of the logic does not expect. */
6352 			if (eth_anchor->refcnt == 0)
6353 				eth_anchor->refcnt = 1;
6354 			if ((error = pf_begin_eth(&t[0], eth_anchor->path))
6355 			    != 0) {
6356 				DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: eth "
6357 				    "anchor.path=%s\n", eth_anchor->path));
6358 				goto error;
6359 			}
6360 			error = pf_commit_eth(t[0], eth_anchor->path);
6361 			MPASS(error == 0);
6362 		}
6363 
6364 		if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn))
6365 		    != 0) {
6366 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n"));
6367 			break;
6368 		}
6369 		if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn))
6370 		    != 0) {
6371 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n"));
6372 			break;		/* XXX: rollback? */
6373 		}
6374 		if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn))
6375 		    != 0) {
6376 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n"));
6377 			break;		/* XXX: rollback? */
6378 		}
6379 		if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn))
6380 		    != 0) {
6381 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n"));
6382 			break;		/* XXX: rollback? */
6383 		}
6384 		if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn))
6385 		    != 0) {
6386 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n"));
6387 			break;		/* XXX: rollback? */
6388 		}
6389 
6390 		error = pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn);
6391 		MPASS(error == 0);
6392 		error = pf_commit_rules(t[1], PF_RULESET_FILTER, &nn);
6393 		MPASS(error == 0);
6394 		error = pf_commit_rules(t[2], PF_RULESET_NAT, &nn);
6395 		MPASS(error == 0);
6396 		error = pf_commit_rules(t[3], PF_RULESET_BINAT, &nn);
6397 		MPASS(error == 0);
6398 		error = pf_commit_rules(t[4], PF_RULESET_RDR, &nn);
6399 		MPASS(error == 0);
6400 
6401 		if ((error = pf_clear_tables()) != 0)
6402 			break;
6403 
6404 		if ((error = pf_begin_eth(&t[0], &nn)) != 0) {
6405 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: eth\n"));
6406 			break;
6407 		}
6408 		error = pf_commit_eth(t[0], &nn);
6409 		MPASS(error == 0);
6410 
6411 #ifdef ALTQ
6412 		if ((error = pf_begin_altq(&t[0])) != 0) {
6413 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n"));
6414 			break;
6415 		}
6416 		pf_commit_altq(t[0]);
6417 #endif
6418 
6419 		pf_clear_all_states();
6420 
6421 		pf_clear_srcnodes(NULL);
6422 
6423 		/* status does not use malloc'd memory, so no cleanup is needed */
6424 		/* fingerprints and interfaces have their own cleanup code */
6425 	} while (0);
6426 
6427 error:
6428 	return (error);
6429 }
6430 
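/*
 * Translate a pf verdict into a pfil(9) return value: a passed packet
 * whose mbuf was taken away (e.g. queued or rerouted by pf) counts as
 * consumed, and anything other than PF_PASS frees the mbuf and drops.
 */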
6431 static pfil_return_t
6432 pf_check_return(int chk, struct mbuf **m)
6433 {
6434 
6435 	switch (chk) {
6436 	case PF_PASS:
6437 		if (*m == NULL)
6438 			return (PFIL_CONSUMED);
6439 		else
6440 			return (PFIL_PASS);
6441 		break;
6442 	default:
6443 		if (*m != NULL) {
6444 			m_freem(*m);
6445 			*m = NULL;
6446 		}
6447 		return (PFIL_DROPPED);
6448 	}
6449 }
6450 
6451 static pfil_return_t
6452 pf_eth_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
6453     void *ruleset __unused, struct inpcb *inp)
6454 {
6455 	int chk;
6456 
6457 	chk = pf_test_eth(PF_IN, flags, ifp, m, inp);
6458 
6459 	return (pf_check_return(chk, m));
6460 }
6461 
6462 static pfil_return_t
6463 pf_eth_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
6464     void *ruleset __unused, struct inpcb *inp)
6465 {
6466 	int chk;
6467 
6468 	chk = pf_test_eth(PF_OUT, flags, ifp, m, inp);
6469 
6470 	return (pf_check_return(chk, m));
6471 }
6472 
6473 #ifdef INET
6474 static pfil_return_t
6475 pf_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
6476     void *ruleset __unused, struct inpcb *inp)
6477 {
6478 	int chk;
6479 
6480 	chk = pf_test(PF_IN, flags, ifp, m, inp, NULL);
6481 
6482 	return (pf_check_return(chk, m));
6483 }
6484 
6485 static pfil_return_t
6486 pf_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
6487     void *ruleset __unused,  struct inpcb *inp)
6488 {
6489 	int chk;
6490 
6491 	chk = pf_test(PF_OUT, flags, ifp, m, inp, NULL);
6492 
6493 	return (pf_check_return(chk, m));
6494 }
6495 #endif
6496 
6497 #ifdef INET6
6498 static pfil_return_t
6499 pf_check6_in(struct mbuf **m, struct ifnet *ifp, int flags,
6500     void *ruleset __unused,  struct inpcb *inp)
6501 {
6502 	int chk;
6503 
6504 	/*
6505 	 * For loopback traffic IPv6 uses the real interface in order
6506 	 * to support scoped addresses.  To support stateful filtering
6507 	 * we change this to lo0, as is already the case for IPv4.
6508 	 */
6509 	CURVNET_SET(ifp->if_vnet);
6510 	chk = pf_test6(PF_IN, flags, (*m)->m_flags & M_LOOP ? V_loif : ifp,
6511 	    m, inp, NULL);
6512 	CURVNET_RESTORE();
6513 
6514 	return (pf_check_return(chk, m));
6515 }
6516 
6517 static pfil_return_t
6518 pf_check6_out(struct mbuf **m, struct ifnet *ifp, int flags,
6519     void *ruleset __unused,  struct inpcb *inp)
6520 {
6521 	int chk;
6522 
6523 	CURVNET_SET(ifp->if_vnet);
6524 	chk = pf_test6(PF_OUT, flags, ifp, m, inp, NULL);
6525 	CURVNET_RESTORE();
6526 
6527 	return (pf_check_return(chk, m));
6528 }
6529 #endif /* INET6 */
6530 
6531 VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_in_hook);
6532 VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_out_hook);
6533 #define	V_pf_eth_in_hook	VNET(pf_eth_in_hook)
6534 #define	V_pf_eth_out_hook	VNET(pf_eth_out_hook)
6535 
6536 #ifdef INET
6537 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_in_hook);
6538 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_out_hook);
6539 #define	V_pf_ip4_in_hook	VNET(pf_ip4_in_hook)
6540 #define	V_pf_ip4_out_hook	VNET(pf_ip4_out_hook)
6541 #endif
6542 #ifdef INET6
6543 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_in_hook);
6544 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_out_hook);
6545 #define	V_pf_ip6_in_hook	VNET(pf_ip6_in_hook)
6546 #define	V_pf_ip6_out_hook	VNET(pf_ip6_out_hook)
6547 #endif
6548 
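/*
 * Hooking up to pfil(9) is a two-step affair, both here and in
 * hook_pf() below: pfil_add_hook() creates the hook from the
 * pfil_hook_args template and pfil_link() attaches it to the
 * per-vnet head for the direction in question.
 */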
6549 static void
6550 hook_pf_eth(void)
6551 {
6552 	struct pfil_hook_args pha = {
6553 		.pa_version = PFIL_VERSION,
6554 		.pa_modname = "pf",
6555 		.pa_type = PFIL_TYPE_ETHERNET,
6556 	};
6557 	struct pfil_link_args pla = {
6558 		.pa_version = PFIL_VERSION,
6559 	};
6560 	int ret __diagused;
6561 
6562 	if (atomic_load_bool(&V_pf_pfil_eth_hooked))
6563 		return;
6564 
6565 	pha.pa_mbuf_chk = pf_eth_check_in;
6566 	pha.pa_flags = PFIL_IN;
6567 	pha.pa_rulname = "eth-in";
6568 	V_pf_eth_in_hook = pfil_add_hook(&pha);
6569 	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
6570 	pla.pa_head = V_link_pfil_head;
6571 	pla.pa_hook = V_pf_eth_in_hook;
6572 	ret = pfil_link(&pla);
6573 	MPASS(ret == 0);
6574 	pha.pa_mbuf_chk = pf_eth_check_out;
6575 	pha.pa_flags = PFIL_OUT;
6576 	pha.pa_rulname = "eth-out";
6577 	V_pf_eth_out_hook = pfil_add_hook(&pha);
6578 	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6579 	pla.pa_head = V_link_pfil_head;
6580 	pla.pa_hook = V_pf_eth_out_hook;
6581 	ret = pfil_link(&pla);
6582 	MPASS(ret == 0);
6583 
6584 	atomic_store_bool(&V_pf_pfil_eth_hooked, true);
6585 }
6586 
6587 static void
6588 hook_pf(void)
6589 {
6590 	struct pfil_hook_args pha = {
6591 		.pa_version = PFIL_VERSION,
6592 		.pa_modname = "pf",
6593 	};
6594 	struct pfil_link_args pla = {
6595 		.pa_version = PFIL_VERSION,
6596 	};
6597 	int ret __diagused;
6598 
6599 	if (atomic_load_bool(&V_pf_pfil_hooked))
6600 		return;
6601 
6602 #ifdef INET
6603 	pha.pa_type = PFIL_TYPE_IP4;
6604 	pha.pa_mbuf_chk = pf_check_in;
6605 	pha.pa_flags = PFIL_IN;
6606 	pha.pa_rulname = "default-in";
6607 	V_pf_ip4_in_hook = pfil_add_hook(&pha);
6608 	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
6609 	pla.pa_head = V_inet_pfil_head;
6610 	pla.pa_hook = V_pf_ip4_in_hook;
6611 	ret = pfil_link(&pla);
6612 	MPASS(ret == 0);
6613 	pha.pa_mbuf_chk = pf_check_out;
6614 	pha.pa_flags = PFIL_OUT;
6615 	pha.pa_rulname = "default-out";
6616 	V_pf_ip4_out_hook = pfil_add_hook(&pha);
6617 	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6618 	pla.pa_head = V_inet_pfil_head;
6619 	pla.pa_hook = V_pf_ip4_out_hook;
6620 	ret = pfil_link(&pla);
6621 	MPASS(ret == 0);
6622 	if (V_pf_filter_local) {
6623 		pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6624 		pla.pa_head = V_inet_local_pfil_head;
6625 		pla.pa_hook = V_pf_ip4_out_hook;
6626 		ret = pfil_link(&pla);
6627 		MPASS(ret == 0);
6628 	}
6629 #endif
6630 #ifdef INET6
6631 	pha.pa_type = PFIL_TYPE_IP6;
6632 	pha.pa_mbuf_chk = pf_check6_in;
6633 	pha.pa_flags = PFIL_IN;
6634 	pha.pa_rulname = "default-in6";
6635 	V_pf_ip6_in_hook = pfil_add_hook(&pha);
6636 	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
6637 	pla.pa_head = V_inet6_pfil_head;
6638 	pla.pa_hook = V_pf_ip6_in_hook;
6639 	ret = pfil_link(&pla);
6640 	MPASS(ret == 0);
6641 	pha.pa_mbuf_chk = pf_check6_out;
6642 	pha.pa_rulname = "default-out6";
6643 	pha.pa_flags = PFIL_OUT;
6644 	V_pf_ip6_out_hook = pfil_add_hook(&pha);
6645 	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6646 	pla.pa_head = V_inet6_pfil_head;
6647 	pla.pa_hook = V_pf_ip6_out_hook;
6648 	ret = pfil_link(&pla);
6649 	MPASS(ret == 0);
6650 	if (V_pf_filter_local) {
6651 		pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6652 		pla.pa_head = V_inet6_local_pfil_head;
6653 		pla.pa_hook = V_pf_ip6_out_hook;
6654 		ret = pfil_link(&pla);
6655 		MPASS(ret == 0);
6656 	}
6657 #endif
6658 
6659 	atomic_store_bool(&V_pf_pfil_hooked, true);
6660 }
6661 
6662 static void
6663 dehook_pf_eth(void)
6664 {
6665 
6666 	if (!atomic_load_bool(&V_pf_pfil_eth_hooked))
6667 		return;
6668 
6669 	pfil_remove_hook(V_pf_eth_in_hook);
6670 	pfil_remove_hook(V_pf_eth_out_hook);
6671 
6672 	atomic_store_bool(&V_pf_pfil_eth_hooked, false);
6673 }
6674 
6675 static void
6676 dehook_pf(void)
6677 {
6678 
6679 	if (!atomic_load_bool(&V_pf_pfil_hooked))
6680 		return;
6681 
6682 #ifdef INET
6683 	pfil_remove_hook(V_pf_ip4_in_hook);
6684 	pfil_remove_hook(V_pf_ip4_out_hook);
6685 #endif
6686 #ifdef INET6
6687 	pfil_remove_hook(V_pf_ip6_in_hook);
6688 	pfil_remove_hook(V_pf_ip6_out_hook);
6689 #endif
6690 
6691 	atomic_store_bool(&V_pf_pfil_hooked, false);
6692 }
6693 
6694 static void
6695 pf_load_vnet(void)
6696 {
6697 	V_pf_tag_z = uma_zcreate("pf tags", sizeof(struct pf_tagname),
6698 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
6699 
6700 	rm_init_flags(&V_pf_rules_lock, "pf rulesets", RM_RECURSE);
6701 	sx_init(&V_pf_ioctl_lock, "pf ioctl");
6702 
6703 	pf_init_tagset(&V_pf_tags, &pf_rule_tag_hashsize,
6704 	    PF_RULE_TAG_HASH_SIZE_DEFAULT);
6705 #ifdef ALTQ
6706 	pf_init_tagset(&V_pf_qids, &pf_queue_tag_hashsize,
6707 	    PF_QUEUE_TAG_HASH_SIZE_DEFAULT);
6708 #endif
6709 
6710 	V_pf_keth = &V_pf_main_keth_anchor.ruleset;
6711 
6712 	pfattach_vnet();
6713 	V_pf_vnet_active = 1;
6714 }
6715 
6716 static int
6717 pf_load(void)
6718 {
6719 	int error;
6720 
6721 	sx_init(&pf_end_lock, "pf end thread");
6722 
6723 	pf_mtag_initialize();
6724 
6725 	pf_dev = make_dev(&pf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, PF_NAME);
6726 	if (pf_dev == NULL)
6727 		return (ENOMEM);
6728 
6729 	pf_end_threads = 0;
6730 	error = kproc_create(pf_purge_thread, NULL, &pf_purge_proc, 0, 0, "pf purge");
6731 	if (error != 0)
6732 		return (error);
6733 
6734 	pfi_initialize();
6735 
6736 	return (0);
6737 }
6738 
6739 static void
6740 pf_unload_vnet(void)
6741 {
6742 	int ret __diagused;
6743 
6744 	V_pf_vnet_active = 0;
6745 	V_pf_status.running = 0;
6746 	dehook_pf();
6747 	dehook_pf_eth();
6748 
6749 	PF_RULES_WLOCK();
6750 	pf_syncookies_cleanup();
6751 	shutdown_pf();
6752 	PF_RULES_WUNLOCK();
6753 
6754 	/* Make sure we've cleaned up ethernet rules before we continue. */
6755 	NET_EPOCH_DRAIN_CALLBACKS();
6756 
6757 	ret = swi_remove(V_pf_swi_cookie);
6758 	MPASS(ret == 0);
6759 	ret = intr_event_destroy(V_pf_swi_ie);
6760 	MPASS(ret == 0);
6761 
6762 	pf_unload_vnet_purge();
6763 
6764 	pf_normalize_cleanup();
6765 	PF_RULES_WLOCK();
6766 	pfi_cleanup_vnet();
6767 	PF_RULES_WUNLOCK();
6768 	pfr_cleanup();
6769 	pf_osfp_flush();
6770 	pf_cleanup();
6771 	if (IS_DEFAULT_VNET(curvnet))
6772 		pf_mtag_cleanup();
6773 
6774 	pf_cleanup_tagset(&V_pf_tags);
6775 #ifdef ALTQ
6776 	pf_cleanup_tagset(&V_pf_qids);
6777 #endif
6778 	uma_zdestroy(V_pf_tag_z);
6779 
6780 #ifdef PF_WANT_32_TO_64_COUNTER
6781 	PF_RULES_WLOCK();
6782 	LIST_REMOVE(V_pf_kifmarker, pfik_allkiflist);
6783 
6784 	MPASS(LIST_EMPTY(&V_pf_allkiflist));
6785 	MPASS(V_pf_allkifcount == 0);
6786 
6787 	LIST_REMOVE(&V_pf_default_rule, allrulelist);
6788 	V_pf_allrulecount--;
6789 	LIST_REMOVE(V_pf_rulemarker, allrulelist);
6790 
6791 	MPASS(LIST_EMPTY(&V_pf_allrulelist));
6792 	MPASS(V_pf_allrulecount == 0);
6793 
6794 	PF_RULES_WUNLOCK();
6795 
6796 	free(V_pf_kifmarker, PFI_MTYPE);
6797 	free(V_pf_rulemarker, M_PFRULE);
6798 #endif
6799 
6800 	/* Free counters last as we updated them during shutdown. */
6801 	pf_counter_u64_deinit(&V_pf_default_rule.evaluations);
6802 	for (int i = 0; i < 2; i++) {
6803 		pf_counter_u64_deinit(&V_pf_default_rule.packets[i]);
6804 		pf_counter_u64_deinit(&V_pf_default_rule.bytes[i]);
6805 	}
6806 	counter_u64_free(V_pf_default_rule.states_cur);
6807 	counter_u64_free(V_pf_default_rule.states_tot);
6808 	counter_u64_free(V_pf_default_rule.src_nodes);
6809 	uma_zfree_pcpu(pf_timestamp_pcpu_zone, V_pf_default_rule.timestamp);
6810 
6811 	for (int i = 0; i < PFRES_MAX; i++)
6812 		counter_u64_free(V_pf_status.counters[i]);
6813 	for (int i = 0; i < KLCNT_MAX; i++)
6814 		counter_u64_free(V_pf_status.lcounters[i]);
6815 	for (int i = 0; i < FCNT_MAX; i++)
6816 		pf_counter_u64_deinit(&V_pf_status.fcounters[i]);
6817 	for (int i = 0; i < SCNT_MAX; i++)
6818 		counter_u64_free(V_pf_status.scounters[i]);
6819 
6820 	rm_destroy(&V_pf_rules_lock);
6821 	sx_destroy(&V_pf_ioctl_lock);
6822 }
6823 
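/*
 * Module teardown.  pf_end_threads is raised to 1 here and bumped
 * again by the purge thread as it exits, which is what the sx_sleep()
 * loop waits for.
 */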
6824 static void
6825 pf_unload(void)
6826 {
6827 
6828 	sx_xlock(&pf_end_lock);
6829 	pf_end_threads = 1;
6830 	while (pf_end_threads < 2) {
6831 		wakeup_one(pf_purge_thread);
6832 		sx_sleep(pf_purge_proc, &pf_end_lock, 0, "pftmo", 0);
6833 	}
6834 	sx_xunlock(&pf_end_lock);
6835 
6836 	pf_nl_unregister();
6837 
6838 	if (pf_dev != NULL)
6839 		destroy_dev(pf_dev);
6840 
6841 	pfi_cleanup();
6842 
6843 	sx_destroy(&pf_end_lock);
6844 }
6845 
6846 static void
6847 vnet_pf_init(void *unused __unused)
6848 {
6849 
6850 	pf_load_vnet();
6851 }
6852 VNET_SYSINIT(vnet_pf_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
6853     vnet_pf_init, NULL);
6854 
6855 static void
6856 vnet_pf_uninit(const void *unused __unused)
6857 {
6858 
6859 	pf_unload_vnet();
6860 }
6861 SYSUNINIT(pf_unload, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND, pf_unload, NULL);
6862 VNET_SYSUNINIT(vnet_pf_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
6863     vnet_pf_uninit, NULL);
6864 
6865 static int
6866 pf_modevent(module_t mod, int type, void *data)
6867 {
6868 	int error = 0;
6869 
6870 	switch(type) {
6871 	case MOD_LOAD:
6872 		error = pf_load();
6873 		pf_nl_register();
6874 		break;
6875 	case MOD_UNLOAD:
6876 		/* Handled in SYSUNINIT(pf_unload) to ensure it runs after
6877 		 * the vnet_pf_uninit() calls. */
6878 		break;
6879 	default:
6880 		error = EINVAL;
6881 		break;
6882 	}
6883 
6884 	return (error);
6885 }
6886 
6887 static moduledata_t pf_mod = {
6888 	"pf",
6889 	pf_modevent,
6890 	0
6891 };
6892 
6893 DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND);
6894 MODULE_DEPEND(pf, netlink, 1, 1, 1);
6895 MODULE_VERSION(pf, PF_MODVER);
6896