xref: /freebsd/sys/netpfil/pf/pf_ioctl.c (revision f1ddb6fb8c4d051a205dae3a848776c9d56f86ff)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2001 Daniel Hartmeier
5  * Copyright (c) 2002,2003 Henning Brauer
6  * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  *
13  *    - Redistributions of source code must retain the above copyright
14  *      notice, this list of conditions and the following disclaimer.
15  *    - Redistributions in binary form must reproduce the above
16  *      copyright notice, this list of conditions and the following
17  *      disclaimer in the documentation and/or other materials provided
18  *      with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
30  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  *
33  * Effort sponsored in part by the Defense Advanced Research Projects
34  * Agency (DARPA) and Air Force Research Laboratory, Air Force
35  * Materiel Command, USAF, under agreement number F30602-01-2-0537.
36  *
37  *	$OpenBSD: pf_ioctl.c,v 1.213 2009/02/15 21:46:12 mbalmer Exp $
38  */
39 
40 #include <sys/cdefs.h>
41 #include "opt_inet.h"
42 #include "opt_inet6.h"
43 #include "opt_bpf.h"
44 #include "opt_pf.h"
45 
46 #include <sys/param.h>
47 #include <sys/_bitset.h>
48 #include <sys/bitset.h>
49 #include <sys/bus.h>
50 #include <sys/conf.h>
51 #include <sys/endian.h>
52 #include <sys/fcntl.h>
53 #include <sys/filio.h>
54 #include <sys/hash.h>
55 #include <sys/interrupt.h>
56 #include <sys/jail.h>
57 #include <sys/kernel.h>
58 #include <sys/kthread.h>
59 #include <sys/lock.h>
60 #include <sys/mbuf.h>
61 #include <sys/module.h>
62 #include <sys/nv.h>
63 #include <sys/proc.h>
64 #include <sys/sdt.h>
65 #include <sys/smp.h>
66 #include <sys/socket.h>
67 #include <sys/sysctl.h>
68 #include <sys/md5.h>
69 #include <sys/ucred.h>
70 
71 #include <net/if.h>
72 #include <net/if_var.h>
73 #include <net/if_private.h>
74 #include <net/vnet.h>
75 #include <net/route.h>
76 #include <net/pfil.h>
77 #include <net/pfvar.h>
78 #include <net/if_pfsync.h>
79 #include <net/if_pflog.h>
80 
81 #include <netinet/in.h>
82 #include <netinet/ip.h>
83 #include <netinet/ip_var.h>
84 #include <netinet6/ip6_var.h>
85 #include <netinet/ip_icmp.h>
86 #include <netpfil/pf/pf_nl.h>
87 #include <netpfil/pf/pf_nv.h>
88 
89 #ifdef INET6
90 #include <netinet/ip6.h>
91 #endif /* INET6 */
92 
93 #ifdef ALTQ
94 #include <net/altq/altq.h>
95 #endif
96 
97 SDT_PROBE_DEFINE3(pf, ioctl, ioctl, error, "int", "int", "int");
98 SDT_PROBE_DEFINE3(pf, ioctl, function, error, "char *", "int", "int");
99 SDT_PROBE_DEFINE2(pf, ioctl, addrule, error, "int", "int");
100 SDT_PROBE_DEFINE2(pf, ioctl, nvchk, error, "int", "int");
101 
102 static struct pf_kpool	*pf_get_kpool(const char *, u_int32_t, u_int8_t,
103 			    u_int32_t, u_int8_t, u_int8_t, u_int8_t, int);
104 
105 static void		 pf_mv_kpool(struct pf_kpalist *, struct pf_kpalist *);
106 static void		 pf_empty_kpool(struct pf_kpalist *);
107 static int		 pfioctl(struct cdev *, u_long, caddr_t, int,
108 			    struct thread *);
109 static int		 pf_begin_eth(uint32_t *, const char *);
110 static void		 pf_rollback_eth_cb(struct epoch_context *);
111 static int		 pf_rollback_eth(uint32_t, const char *);
112 static int		 pf_commit_eth(uint32_t, const char *);
113 static void		 pf_free_eth_rule(struct pf_keth_rule *);
114 #ifdef ALTQ
115 static int		 pf_begin_altq(u_int32_t *);
116 static int		 pf_rollback_altq(u_int32_t);
117 static int		 pf_commit_altq(u_int32_t);
118 static int		 pf_enable_altq(struct pf_altq *);
119 static int		 pf_disable_altq(struct pf_altq *);
120 static uint16_t		 pf_qname2qid(const char *);
121 static void		 pf_qid_unref(uint16_t);
122 #endif /* ALTQ */
123 static int		 pf_begin_rules(u_int32_t *, int, const char *);
124 static int		 pf_rollback_rules(u_int32_t, int, char *);
125 static int		 pf_setup_pfsync_matching(struct pf_kruleset *);
126 static void		 pf_hash_rule_rolling(MD5_CTX *, struct pf_krule *);
127 static void		 pf_hash_rule(struct pf_krule *);
128 static void		 pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
129 static int		 pf_commit_rules(u_int32_t, int, char *);
130 static int		 pf_addr_setup(struct pf_kruleset *,
131 			    struct pf_addr_wrap *, sa_family_t);
132 static void		 pf_src_node_copy(const struct pf_ksrc_node *,
133 			    struct pf_src_node *);
134 #ifdef ALTQ
135 static int		 pf_export_kaltq(struct pf_altq *,
136 			    struct pfioc_altq_v1 *, size_t);
137 static int		 pf_import_kaltq(struct pfioc_altq_v1 *,
138 			    struct pf_altq *, size_t);
139 #endif /* ALTQ */
140 
141 VNET_DEFINE(struct pf_krule,	pf_default_rule);
142 
143 static __inline int             pf_krule_compare(struct pf_krule *,
144 				    struct pf_krule *);
145 
146 RB_GENERATE(pf_krule_global, pf_krule, entry_global, pf_krule_compare);
147 
148 #ifdef ALTQ
149 VNET_DEFINE_STATIC(int,		pf_altq_running);
150 #define	V_pf_altq_running	VNET(pf_altq_running)
151 #endif
152 
153 #define	TAGID_MAX	 50000
154 struct pf_tagname {
155 	TAILQ_ENTRY(pf_tagname)	namehash_entries;
156 	TAILQ_ENTRY(pf_tagname)	taghash_entries;
157 	char			name[PF_TAG_NAME_SIZE];
158 	uint16_t		tag;
159 	int			ref;
160 };
161 
162 struct pf_tagset {
163 	TAILQ_HEAD(, pf_tagname)	*namehash;
164 	TAILQ_HEAD(, pf_tagname)	*taghash;
165 	unsigned int			 mask;
166 	uint32_t			 seed;
167 	BITSET_DEFINE(, TAGID_MAX)	 avail;
168 };
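/*
 * Each pf_tagname hangs off both hash tables above: namehash is keyed by
 * a hash of the tag name (tagname2hashindex()) and serves name-to-tag
 * lookups in tagname2tag(), while taghash is keyed by the tag value
 * itself (tag2hashindex()) and serves tag-to-entry lookups in
 * tag_unref().  The "avail" bitset tracks which tag IDs in
 * [1..TAGID_MAX] are still free.
 */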
169 
170 VNET_DEFINE(struct pf_tagset, pf_tags);
171 #define	V_pf_tags	VNET(pf_tags)
172 static unsigned int	pf_rule_tag_hashsize;
173 #define	PF_RULE_TAG_HASH_SIZE_DEFAULT	128
174 SYSCTL_UINT(_net_pf, OID_AUTO, rule_tag_hashsize, CTLFLAG_RDTUN,
175     &pf_rule_tag_hashsize, PF_RULE_TAG_HASH_SIZE_DEFAULT,
176     "Size of pf(4) rule tag hashtable");
177 
178 #ifdef ALTQ
179 VNET_DEFINE(struct pf_tagset, pf_qids);
180 #define	V_pf_qids	VNET(pf_qids)
181 static unsigned int	pf_queue_tag_hashsize;
182 #define	PF_QUEUE_TAG_HASH_SIZE_DEFAULT	128
183 SYSCTL_UINT(_net_pf, OID_AUTO, queue_tag_hashsize, CTLFLAG_RDTUN,
184     &pf_queue_tag_hashsize, PF_QUEUE_TAG_HASH_SIZE_DEFAULT,
185     "Size of pf(4) queue tag hashtable");
186 #endif
187 VNET_DEFINE(uma_zone_t,	 pf_tag_z);
188 #define	V_pf_tag_z		 VNET(pf_tag_z)
189 static MALLOC_DEFINE(M_PFALTQ, "pf_altq", "pf(4) altq configuration db");
190 static MALLOC_DEFINE(M_PFRULE, "pf_rule", "pf(4) rules");
191 
192 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
193 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
194 #endif
195 
196 VNET_DEFINE_STATIC(bool, pf_filter_local) = false;
197 #define V_pf_filter_local	VNET(pf_filter_local)
198 SYSCTL_BOOL(_net_pf, OID_AUTO, filter_local, CTLFLAG_VNET | CTLFLAG_RW,
199     &VNET_NAME(pf_filter_local), false,
200     "Enable filtering for packets delivered to local network stack");
201 
202 #ifdef PF_DEFAULT_TO_DROP
203 VNET_DEFINE_STATIC(bool, default_to_drop) = true;
204 #else
205 VNET_DEFINE_STATIC(bool, default_to_drop);
206 #endif
207 #define	V_default_to_drop VNET(default_to_drop)
208 SYSCTL_BOOL(_net_pf, OID_AUTO, default_to_drop, CTLFLAG_RDTUN | CTLFLAG_VNET,
209     &VNET_NAME(default_to_drop), false,
210     "Make the default rule drop all packets.");
211 
212 static void		 pf_init_tagset(struct pf_tagset *, unsigned int *,
213 			    unsigned int);
214 static void		 pf_cleanup_tagset(struct pf_tagset *);
215 static uint16_t		 tagname2hashindex(const struct pf_tagset *, const char *);
216 static uint16_t		 tag2hashindex(const struct pf_tagset *, uint16_t);
217 static u_int16_t	 tagname2tag(struct pf_tagset *, const char *);
218 static u_int16_t	 pf_tagname2tag(const char *);
219 static void		 tag_unref(struct pf_tagset *, u_int16_t);
220 
221 #define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x
222 
223 struct cdev *pf_dev;
224 
225 /*
226  * XXX - These are new and need to be checked when moving to a new version
227  */
228 static void		 pf_clear_all_states(void);
229 static int		 pf_killstates_row(struct pf_kstate_kill *,
230 			    struct pf_idhash *);
231 static int		 pf_killstates_nv(struct pfioc_nv *);
232 static int		 pf_clearstates_nv(struct pfioc_nv *);
233 static int		 pf_getstate(struct pfioc_nv *);
234 static int		 pf_getstatus(struct pfioc_nv *);
235 static int		 pf_clear_tables(void);
236 static void		 pf_kill_srcnodes(struct pfioc_src_node_kill *);
237 static int		 pf_keepcounters(struct pfioc_nv *);
238 static void		 pf_tbladdr_copyout(struct pf_addr_wrap *);
239 
240 /*
241  * Wrapper functions for pfil(9) hooks
242  */
243 static pfil_return_t pf_eth_check_in(struct mbuf **m, struct ifnet *ifp,
244     int flags, void *ruleset __unused, struct inpcb *inp);
245 static pfil_return_t pf_eth_check_out(struct mbuf **m, struct ifnet *ifp,
246     int flags, void *ruleset __unused, struct inpcb *inp);
247 #ifdef INET
248 static pfil_return_t pf_check_in(struct mbuf **m, struct ifnet *ifp,
249     int flags, void *ruleset __unused, struct inpcb *inp);
250 static pfil_return_t pf_check_out(struct mbuf **m, struct ifnet *ifp,
251     int flags, void *ruleset __unused, struct inpcb *inp);
252 #endif
253 #ifdef INET6
254 static pfil_return_t pf_check6_in(struct mbuf **m, struct ifnet *ifp,
255     int flags, void *ruleset __unused, struct inpcb *inp);
256 static pfil_return_t pf_check6_out(struct mbuf **m, struct ifnet *ifp,
257     int flags, void *ruleset __unused, struct inpcb *inp);
258 #endif
259 
260 static void		hook_pf_eth(void);
261 static void		hook_pf(void);
262 static void		dehook_pf_eth(void);
263 static void		dehook_pf(void);
264 static int		shutdown_pf(void);
265 static int		pf_load(void);
266 static void		pf_unload(void);
267 
268 static struct cdevsw pf_cdevsw = {
269 	.d_ioctl =	pfioctl,
270 	.d_name =	PF_NAME,
271 	.d_version =	D_VERSION,
272 };
273 
274 VNET_DEFINE_STATIC(bool, pf_pfil_hooked);
275 #define V_pf_pfil_hooked	VNET(pf_pfil_hooked)
276 VNET_DEFINE_STATIC(bool, pf_pfil_eth_hooked);
277 #define V_pf_pfil_eth_hooked	VNET(pf_pfil_eth_hooked)
278 
279 /*
280  * We need a flag that is neither hooked nor running to know when
281  * the VNET is "valid".  We primarily need this to control (global)
282  * external events, e.g., eventhandlers.
283  */
284 VNET_DEFINE(int, pf_vnet_active);
285 #define V_pf_vnet_active	VNET(pf_vnet_active)
286 
287 int pf_end_threads;
288 struct proc *pf_purge_proc;
289 
290 VNET_DEFINE(struct rmlock, pf_rules_lock);
291 VNET_DEFINE_STATIC(struct sx, pf_ioctl_lock);
292 #define	V_pf_ioctl_lock		VNET(pf_ioctl_lock)
293 struct sx			pf_end_lock;
294 
295 /* pfsync */
296 VNET_DEFINE(pfsync_state_import_t *, pfsync_state_import_ptr);
297 VNET_DEFINE(pfsync_insert_state_t *, pfsync_insert_state_ptr);
298 VNET_DEFINE(pfsync_update_state_t *, pfsync_update_state_ptr);
299 VNET_DEFINE(pfsync_delete_state_t *, pfsync_delete_state_ptr);
300 VNET_DEFINE(pfsync_clear_states_t *, pfsync_clear_states_ptr);
301 VNET_DEFINE(pfsync_defer_t *, pfsync_defer_ptr);
302 VNET_DEFINE(pflow_export_state_t *, pflow_export_state_ptr);
303 pfsync_detach_ifnet_t *pfsync_detach_ifnet_ptr;
304 
305 /* pflog */
306 pflog_packet_t			*pflog_packet_ptr = NULL;
307 
308 /*
309  * Copy a user-provided string, returning an error if truncation would occur.
310  * Avoid scanning past "sz" bytes in the source string since there's no
311  * guarantee that it's nul-terminated.
312  */
313 static int
314 pf_user_strcpy(char *dst, const char *src, size_t sz)
315 {
316 	if (strnlen(src, sz) == sz)
317 		return (EINVAL);
318 	(void)strlcpy(dst, src, sz);
319 	return (0);
320 }
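/*
 * Typical use (an illustrative sketch, mirroring pf_pooladdr_to_kpooladdr()
 * below): copy an ioctl-supplied name into a fixed-size kernel buffer and
 * fail rather than silently truncate:
 *
 *	if ((error = pf_user_strcpy(kpool->ifname, pool->ifname,
 *	    sizeof(kpool->ifname))) != 0)
 *		return (error);
 */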
321 
322 static void
323 pfattach_vnet(void)
324 {
325 	u_int32_t *my_timeout = V_pf_default_rule.timeout;
326 
327 	bzero(&V_pf_status, sizeof(V_pf_status));
328 
329 	pf_initialize();
330 	pfr_initialize();
331 	pfi_initialize_vnet();
332 	pf_normalize_init();
333 	pf_syncookies_init();
334 
335 	V_pf_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
336 	V_pf_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT;
337 
338 	RB_INIT(&V_pf_anchors);
339 	pf_init_kruleset(&pf_main_ruleset);
340 
341 	pf_init_keth(V_pf_keth);
342 
343 	/* default rule should never be garbage collected */
344 	V_pf_default_rule.entries.tqe_prev = &V_pf_default_rule.entries.tqe_next;
345 	V_pf_default_rule.action = V_default_to_drop ? PF_DROP : PF_PASS;
346 	V_pf_default_rule.nr = -1;
347 	V_pf_default_rule.rtableid = -1;
348 
349 	pf_counter_u64_init(&V_pf_default_rule.evaluations, M_WAITOK);
350 	for (int i = 0; i < 2; i++) {
351 		pf_counter_u64_init(&V_pf_default_rule.packets[i], M_WAITOK);
352 		pf_counter_u64_init(&V_pf_default_rule.bytes[i], M_WAITOK);
353 	}
354 	V_pf_default_rule.states_cur = counter_u64_alloc(M_WAITOK);
355 	V_pf_default_rule.states_tot = counter_u64_alloc(M_WAITOK);
356 	V_pf_default_rule.src_nodes = counter_u64_alloc(M_WAITOK);
357 
358 	V_pf_default_rule.timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
359 	    M_WAITOK | M_ZERO);
360 
361 #ifdef PF_WANT_32_TO_64_COUNTER
362 	V_pf_kifmarker = malloc(sizeof(*V_pf_kifmarker), PFI_MTYPE, M_WAITOK | M_ZERO);
363 	V_pf_rulemarker = malloc(sizeof(*V_pf_rulemarker), M_PFRULE, M_WAITOK | M_ZERO);
364 	PF_RULES_WLOCK();
365 	LIST_INSERT_HEAD(&V_pf_allkiflist, V_pf_kifmarker, pfik_allkiflist);
366 	LIST_INSERT_HEAD(&V_pf_allrulelist, &V_pf_default_rule, allrulelist);
367 	V_pf_allrulecount++;
368 	LIST_INSERT_HEAD(&V_pf_allrulelist, V_pf_rulemarker, allrulelist);
369 	PF_RULES_WUNLOCK();
370 #endif
371 
372 	/* initialize default timeouts */
373 	my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
374 	my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
375 	my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
376 	my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
377 	my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
378 	my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
379 	my_timeout[PFTM_SCTP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
380 	my_timeout[PFTM_SCTP_OPENING] = PFTM_TCP_OPENING_VAL;
381 	my_timeout[PFTM_SCTP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
382 	my_timeout[PFTM_SCTP_CLOSING] = PFTM_TCP_CLOSING_VAL;
383 	my_timeout[PFTM_SCTP_CLOSED] = PFTM_TCP_CLOSED_VAL;
384 	my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
385 	my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
386 	my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
387 	my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
388 	my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
389 	my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
390 	my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
391 	my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
392 	my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
393 	my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
394 	my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
395 	my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
396 	my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
397 	my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;
398 
399 	V_pf_status.debug = PF_DEBUG_URGENT;
400 	/*
401 	 * XXX This differs from OpenBSD, where reassembly is enabled by
402 	 * default. In FreeBSD we expect people to still use scrub rules and
403 	 * switch to the new syntax later. Only when they switch must they
404 	 * explicitly enable reassembly. We could change the default once the
405 	 * scrub rule functionality is hopefully removed some day in the future.
406 	 */
407 	V_pf_status.reass = 0;
408 
409 	V_pf_pfil_hooked = false;
410 	V_pf_pfil_eth_hooked = false;
411 
412 	/* XXX do our best to avoid a conflict */
413 	V_pf_status.hostid = arc4random();
414 
415 	for (int i = 0; i < PFRES_MAX; i++)
416 		V_pf_status.counters[i] = counter_u64_alloc(M_WAITOK);
417 	for (int i = 0; i < KLCNT_MAX; i++)
418 		V_pf_status.lcounters[i] = counter_u64_alloc(M_WAITOK);
419 	for (int i = 0; i < FCNT_MAX; i++)
420 		pf_counter_u64_init(&V_pf_status.fcounters[i], M_WAITOK);
421 	for (int i = 0; i < SCNT_MAX; i++)
422 		V_pf_status.scounters[i] = counter_u64_alloc(M_WAITOK);
423 
424 	if (swi_add(&V_pf_swi_ie, "pf send", pf_intr, curvnet, SWI_NET,
425 	    INTR_MPSAFE, &V_pf_swi_cookie) != 0)
426 		/* XXXGL: leaked all above. */
427 		return;
428 }
429 
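/*
 * Resolve an (anchor, ticket, rule action, rule number) tuple to the NAT
 * or RDR address pool of the matching rule, searching either the active
 * or the inactive ruleset.  Returns NULL if the anchor, ticket or rule
 * cannot be found.
 */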
430 static struct pf_kpool *
431 pf_get_kpool(const char *anchor, u_int32_t ticket, u_int8_t rule_action,
432     u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
433     u_int8_t check_ticket, int which)
434 {
435 	struct pf_kruleset	*ruleset;
436 	struct pf_krule		*rule;
437 	int			 rs_num;
438 
439 	MPASS(which == PF_RDR || which == PF_NAT);
440 
441 	ruleset = pf_find_kruleset(anchor);
442 	if (ruleset == NULL)
443 		return (NULL);
444 	rs_num = pf_get_ruleset_number(rule_action);
445 	if (rs_num >= PF_RULESET_MAX)
446 		return (NULL);
447 	if (active) {
448 		if (check_ticket && ticket !=
449 		    ruleset->rules[rs_num].active.ticket)
450 			return (NULL);
451 		if (r_last)
452 			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
453 			    pf_krulequeue);
454 		else
455 			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
456 	} else {
457 		if (check_ticket && ticket !=
458 		    ruleset->rules[rs_num].inactive.ticket)
459 			return (NULL);
460 		if (r_last)
461 			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
462 			    pf_krulequeue);
463 		else
464 			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
465 	}
466 	if (!r_last) {
467 		while ((rule != NULL) && (rule->nr != rule_number))
468 			rule = TAILQ_NEXT(rule, entries);
469 	}
470 	if (rule == NULL)
471 		return (NULL);
472 
473 	if (which == PF_NAT)
474 		return (&rule->nat);
475 	else
476 		return (&rule->rdr);
477 }
478 
479 static void
480 pf_mv_kpool(struct pf_kpalist *poola, struct pf_kpalist *poolb)
481 {
482 	struct pf_kpooladdr	*mv_pool_pa;
483 
484 	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
485 		TAILQ_REMOVE(poola, mv_pool_pa, entries);
486 		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
487 	}
488 }
489 
490 static void
491 pf_empty_kpool(struct pf_kpalist *poola)
492 {
493 	struct pf_kpooladdr *pa;
494 
495 	while ((pa = TAILQ_FIRST(poola)) != NULL) {
496 		switch (pa->addr.type) {
497 		case PF_ADDR_DYNIFTL:
498 			pfi_dynaddr_remove(pa->addr.p.dyn);
499 			break;
500 		case PF_ADDR_TABLE:
501 			/* XXX: this could be unfinished pooladdr on pabuf */
502 			if (pa->addr.p.tbl != NULL)
503 				pfr_detach_table(pa->addr.p.tbl);
504 			break;
505 		}
506 		if (pa->kif)
507 			pfi_kkif_unref(pa->kif);
508 		TAILQ_REMOVE(poola, pa, entries);
509 		free(pa, M_PFRULE);
510 	}
511 }
512 
513 static void
514 pf_unlink_rule_locked(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
515 {
516 
517 	PF_RULES_WASSERT();
518 	PF_UNLNKDRULES_ASSERT();
519 
520 	TAILQ_REMOVE(rulequeue, rule, entries);
521 
522 	rule->rule_ref |= PFRULE_REFS;
523 	TAILQ_INSERT_TAIL(&V_pf_unlinked_rules, rule, entries);
524 }
525 
526 static void
527 pf_unlink_rule(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
528 {
529 
530 	PF_RULES_WASSERT();
531 
532 	PF_UNLNKDRULES_LOCK();
533 	pf_unlink_rule_locked(rulequeue, rule);
534 	PF_UNLNKDRULES_UNLOCK();
535 }
536 
537 static void
538 pf_free_eth_rule(struct pf_keth_rule *rule)
539 {
540 	PF_RULES_WASSERT();
541 
542 	if (rule == NULL)
543 		return;
544 
545 	if (rule->tag)
546 		tag_unref(&V_pf_tags, rule->tag);
547 	if (rule->match_tag)
548 		tag_unref(&V_pf_tags, rule->match_tag);
549 #ifdef ALTQ
550 	pf_qid_unref(rule->qid);
551 #endif
552 
553 	if (rule->bridge_to)
554 		pfi_kkif_unref(rule->bridge_to);
555 	if (rule->kif)
556 		pfi_kkif_unref(rule->kif);
557 
558 	if (rule->ipsrc.addr.type == PF_ADDR_TABLE)
559 		pfr_detach_table(rule->ipsrc.addr.p.tbl);
560 	if (rule->ipdst.addr.type == PF_ADDR_TABLE)
561 		pfr_detach_table(rule->ipdst.addr.p.tbl);
562 
563 	counter_u64_free(rule->evaluations);
564 	for (int i = 0; i < 2; i++) {
565 		counter_u64_free(rule->packets[i]);
566 		counter_u64_free(rule->bytes[i]);
567 	}
568 	uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp);
569 	pf_keth_anchor_remove(rule);
570 
571 	free(rule, M_PFRULE);
572 }
573 
574 void
575 pf_free_rule(struct pf_krule *rule)
576 {
577 
578 	PF_RULES_WASSERT();
579 	PF_CONFIG_ASSERT();
580 
581 	if (rule->tag)
582 		tag_unref(&V_pf_tags, rule->tag);
583 	if (rule->match_tag)
584 		tag_unref(&V_pf_tags, rule->match_tag);
585 #ifdef ALTQ
586 	if (rule->pqid != rule->qid)
587 		pf_qid_unref(rule->pqid);
588 	pf_qid_unref(rule->qid);
589 #endif
590 	switch (rule->src.addr.type) {
591 	case PF_ADDR_DYNIFTL:
592 		pfi_dynaddr_remove(rule->src.addr.p.dyn);
593 		break;
594 	case PF_ADDR_TABLE:
595 		pfr_detach_table(rule->src.addr.p.tbl);
596 		break;
597 	}
598 	switch (rule->dst.addr.type) {
599 	case PF_ADDR_DYNIFTL:
600 		pfi_dynaddr_remove(rule->dst.addr.p.dyn);
601 		break;
602 	case PF_ADDR_TABLE:
603 		pfr_detach_table(rule->dst.addr.p.tbl);
604 		break;
605 	}
606 	if (rule->overload_tbl)
607 		pfr_detach_table(rule->overload_tbl);
608 	if (rule->kif)
609 		pfi_kkif_unref(rule->kif);
610 	if (rule->rcv_kif)
611 		pfi_kkif_unref(rule->rcv_kif);
612 	pf_kanchor_remove(rule);
613 	pf_empty_kpool(&rule->rdr.list);
614 	pf_empty_kpool(&rule->nat.list);
615 
616 	pf_krule_free(rule);
617 }
618 
619 static void
620 pf_init_tagset(struct pf_tagset *ts, unsigned int *tunable_size,
621     unsigned int default_size)
622 {
623 	unsigned int i;
624 	unsigned int hashsize;
625 
626 	if (*tunable_size == 0 || !powerof2(*tunable_size))
627 		*tunable_size = default_size;
628 
629 	hashsize = *tunable_size;
630 	ts->namehash = mallocarray(hashsize, sizeof(*ts->namehash), M_PFHASH,
631 	    M_WAITOK);
632 	ts->taghash = mallocarray(hashsize, sizeof(*ts->taghash), M_PFHASH,
633 	    M_WAITOK);
634 	ts->mask = hashsize - 1;
635 	ts->seed = arc4random();
636 	for (i = 0; i < hashsize; i++) {
637 		TAILQ_INIT(&ts->namehash[i]);
638 		TAILQ_INIT(&ts->taghash[i]);
639 	}
640 	BIT_FILL(TAGID_MAX, &ts->avail);
641 }
642 
643 static void
644 pf_cleanup_tagset(struct pf_tagset *ts)
645 {
646 	unsigned int i;
647 	unsigned int hashsize;
648 	struct pf_tagname *t, *tmp;
649 
650 	/*
651 	 * Only need to clean up one of the hashes as each tag is hashed
652 	 * into each table.
653 	 */
654 	hashsize = ts->mask + 1;
655 	for (i = 0; i < hashsize; i++)
656 		TAILQ_FOREACH_SAFE(t, &ts->namehash[i], namehash_entries, tmp)
657 			uma_zfree(V_pf_tag_z, t);
658 
659 	free(ts->namehash, M_PFHASH);
660 	free(ts->taghash, M_PFHASH);
661 }
662 
663 static uint16_t
664 tagname2hashindex(const struct pf_tagset *ts, const char *tagname)
665 {
666 	size_t len;
667 
668 	len = strnlen(tagname, PF_TAG_NAME_SIZE - 1);
669 	return (murmur3_32_hash(tagname, len, ts->seed) & ts->mask);
670 }
671 
672 static uint16_t
673 tag2hashindex(const struct pf_tagset *ts, uint16_t tag)
674 {
675 
676 	return (tag & ts->mask);
677 }
678 
679 static u_int16_t
680 tagname2tag(struct pf_tagset *ts, const char *tagname)
681 {
682 	struct pf_tagname	*tag;
683 	u_int32_t		 index;
684 	u_int16_t		 new_tagid;
685 
686 	PF_RULES_WASSERT();
687 
688 	index = tagname2hashindex(ts, tagname);
689 	TAILQ_FOREACH(tag, &ts->namehash[index], namehash_entries)
690 		if (strcmp(tagname, tag->name) == 0) {
691 			tag->ref++;
692 			return (tag->tag);
693 		}
694 
695 	/*
696 	 * new entry
697 	 *
698 	 * to avoid fragmentation, we do a linear search from the beginning
699 	 * and take the first free slot we find.
700 	 */
701 	new_tagid = BIT_FFS(TAGID_MAX, &ts->avail);
702 	/*
703 	 * Tags are 1-based, with valid tags in the range [1..TAGID_MAX].
704 	 * BIT_FFS() returns a 1-based bit number, with 0 indicating no bits
705 	 * set.  It may also return a bit number greater than TAGID_MAX due
706 	 * to rounding of the number of bits in the vector up to a multiple
707 	 * of the vector word size at declaration/allocation time.
708 	 */
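	/*
	 * For example, with TAGID_MAX of 50000 and 64-bit bitset words the
	 * vector is rounded up to 50048 bits, so after BIT_FILL() the
	 * padding bits 50001..50048 also read as "free" even though they
	 * are not valid tags; the range check below rejects them.
	 */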
709 	if ((new_tagid == 0) || (new_tagid > TAGID_MAX))
710 		return (0);
711 
712 	/* Mark the tag as in use.  Bits are 0-based for BIT_CLR() */
713 	BIT_CLR(TAGID_MAX, new_tagid - 1, &ts->avail);
714 
715 	/* allocate and fill new struct pf_tagname */
716 	tag = uma_zalloc(V_pf_tag_z, M_NOWAIT);
717 	if (tag == NULL)
718 		return (0);
719 	strlcpy(tag->name, tagname, sizeof(tag->name));
720 	tag->tag = new_tagid;
721 	tag->ref = 1;
722 
723 	/* Insert into namehash */
724 	TAILQ_INSERT_TAIL(&ts->namehash[index], tag, namehash_entries);
725 
726 	/* Insert into taghash */
727 	index = tag2hashindex(ts, new_tagid);
728 	TAILQ_INSERT_TAIL(&ts->taghash[index], tag, taghash_entries);
729 
730 	return (tag->tag);
731 }
732 
733 static void
734 tag_unref(struct pf_tagset *ts, u_int16_t tag)
735 {
736 	struct pf_tagname	*t;
737 	uint16_t		 index;
738 
739 	PF_RULES_WASSERT();
740 
741 	index = tag2hashindex(ts, tag);
742 	TAILQ_FOREACH(t, &ts->taghash[index], taghash_entries)
743 		if (tag == t->tag) {
744 			if (--t->ref == 0) {
745 				TAILQ_REMOVE(&ts->taghash[index], t,
746 				    taghash_entries);
747 				index = tagname2hashindex(ts, t->name);
748 				TAILQ_REMOVE(&ts->namehash[index], t,
749 				    namehash_entries);
750 				/* Bits are 0-based for BIT_SET() */
751 				BIT_SET(TAGID_MAX, tag - 1, &ts->avail);
752 				uma_zfree(V_pf_tag_z, t);
753 			}
754 			break;
755 		}
756 }
757 
758 static uint16_t
759 pf_tagname2tag(const char *tagname)
760 {
761 	return (tagname2tag(&V_pf_tags, tagname));
762 }
763 
764 static int
765 pf_begin_eth(uint32_t *ticket, const char *anchor)
766 {
767 	struct pf_keth_rule *rule, *tmp;
768 	struct pf_keth_ruleset *rs;
769 
770 	PF_RULES_WASSERT();
771 
772 	rs = pf_find_or_create_keth_ruleset(anchor);
773 	if (rs == NULL)
774 		return (EINVAL);
775 
776 	/* Purge old inactive rules. */
777 	TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries,
778 	    tmp) {
779 		TAILQ_REMOVE(rs->inactive.rules, rule,
780 		    entries);
781 		pf_free_eth_rule(rule);
782 	}
783 
784 	*ticket = ++rs->inactive.ticket;
785 	rs->inactive.open = 1;
786 
787 	return (0);
788 }
789 
790 static void
791 pf_rollback_eth_cb(struct epoch_context *ctx)
792 {
793 	struct pf_keth_ruleset *rs;
794 
795 	rs = __containerof(ctx, struct pf_keth_ruleset, epoch_ctx);
796 
797 	CURVNET_SET(rs->vnet);
798 
799 	PF_RULES_WLOCK();
800 	pf_rollback_eth(rs->inactive.ticket,
801 	    rs->anchor ? rs->anchor->path : "");
802 	PF_RULES_WUNLOCK();
803 
804 	CURVNET_RESTORE();
805 }
806 
807 static int
808 pf_rollback_eth(uint32_t ticket, const char *anchor)
809 {
810 	struct pf_keth_rule *rule, *tmp;
811 	struct pf_keth_ruleset *rs;
812 
813 	PF_RULES_WASSERT();
814 
815 	rs = pf_find_keth_ruleset(anchor);
816 	if (rs == NULL)
817 		return (EINVAL);
818 
819 	if (!rs->inactive.open ||
820 	    ticket != rs->inactive.ticket)
821 		return (0);
822 
823 	/* Purge old inactive rules. */
824 	TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries,
825 	    tmp) {
826 		TAILQ_REMOVE(rs->inactive.rules, rule, entries);
827 		pf_free_eth_rule(rule);
828 	}
829 
830 	rs->inactive.open = 0;
831 
832 	pf_remove_if_empty_keth_ruleset(rs);
833 
834 	return (0);
835 }
836 
837 #define	PF_SET_SKIP_STEPS(i)					\
838 	do {							\
839 		while (head[i] != cur) {			\
840 			head[i]->skip[i].ptr = cur;		\
841 			head[i] = TAILQ_NEXT(head[i], entries);	\
842 		}						\
843 	} while (0)
844 
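/*
 * Recompute the skip steps for an Ethernet ruleset: for every rule, each
 * skip[i] is pointed at the next rule whose value for that field differs,
 * so evaluation can jump over runs of rules that are known not to match.
 * This mirrors what pf_calc_skip_steps() does for the layer 3 rulesets.
 */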
845 static void
846 pf_eth_calc_skip_steps(struct pf_keth_ruleq *rules)
847 {
848 	struct pf_keth_rule *cur, *prev, *head[PFE_SKIP_COUNT];
849 	int i;
850 
851 	cur = TAILQ_FIRST(rules);
852 	prev = cur;
853 	for (i = 0; i < PFE_SKIP_COUNT; ++i)
854 		head[i] = cur;
855 	while (cur != NULL) {
856 		if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
857 			PF_SET_SKIP_STEPS(PFE_SKIP_IFP);
858 		if (cur->direction != prev->direction)
859 			PF_SET_SKIP_STEPS(PFE_SKIP_DIR);
860 		if (cur->proto != prev->proto)
861 			PF_SET_SKIP_STEPS(PFE_SKIP_PROTO);
862 		if (memcmp(&cur->src, &prev->src, sizeof(cur->src)) != 0)
863 			PF_SET_SKIP_STEPS(PFE_SKIP_SRC_ADDR);
864 		if (memcmp(&cur->dst, &prev->dst, sizeof(cur->dst)) != 0)
865 			PF_SET_SKIP_STEPS(PFE_SKIP_DST_ADDR);
866 		if (cur->ipsrc.neg != prev->ipsrc.neg ||
867 		    pf_addr_wrap_neq(&cur->ipsrc.addr, &prev->ipsrc.addr))
868 			PF_SET_SKIP_STEPS(PFE_SKIP_SRC_IP_ADDR);
869 		if (cur->ipdst.neg != prev->ipdst.neg ||
870 		    pf_addr_wrap_neq(&cur->ipdst.addr, &prev->ipdst.addr))
871 			PF_SET_SKIP_STEPS(PFE_SKIP_DST_IP_ADDR);
872 
873 		prev = cur;
874 		cur = TAILQ_NEXT(cur, entries);
875 	}
876 	for (i = 0; i < PFE_SKIP_COUNT; ++i)
877 		PF_SET_SKIP_STEPS(i);
878 }
879 
880 static int
881 pf_commit_eth(uint32_t ticket, const char *anchor)
882 {
883 	struct pf_keth_ruleq *rules;
884 	struct pf_keth_ruleset *rs;
885 
886 	rs = pf_find_keth_ruleset(anchor);
887 	if (rs == NULL) {
888 		return (EINVAL);
889 	}
890 
891 	if (!rs->inactive.open ||
892 	    ticket != rs->inactive.ticket)
893 		return (EBUSY);
894 
895 	PF_RULES_WASSERT();
896 
897 	pf_eth_calc_skip_steps(rs->inactive.rules);
898 
899 	rules = rs->active.rules;
900 	ck_pr_store_ptr(&rs->active.rules, rs->inactive.rules);
901 	rs->inactive.rules = rules;
902 	rs->inactive.ticket = rs->active.ticket;
903 
904 	/* Clean up inactive rules (i.e. previously active rules), only when
905 	 * we're sure they're no longer used. */
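	/*
	 * Readers traverse the active rule list from within a network epoch
	 * section, so the swapped-out rules are only freed from the epoch
	 * callback (pf_rollback_eth_cb()) after those readers have drained.
	 */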
906 	NET_EPOCH_CALL(pf_rollback_eth_cb, &rs->epoch_ctx);
907 
908 	return (0);
909 }
910 
911 #ifdef ALTQ
912 static uint16_t
913 pf_qname2qid(const char *qname)
914 {
915 	return (tagname2tag(&V_pf_qids, qname));
916 }
917 
918 static void
919 pf_qid_unref(uint16_t qid)
920 {
921 	tag_unref(&V_pf_qids, qid);
922 }
923 
924 static int
925 pf_begin_altq(u_int32_t *ticket)
926 {
927 	struct pf_altq	*altq, *tmp;
928 	int		 error = 0;
929 
930 	PF_RULES_WASSERT();
931 
932 	/* Purge the old altq lists */
933 	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
934 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
935 			/* detach and destroy the discipline */
936 			error = altq_remove(altq);
937 		}
938 		free(altq, M_PFALTQ);
939 	}
940 	TAILQ_INIT(V_pf_altq_ifs_inactive);
941 	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
942 		pf_qid_unref(altq->qid);
943 		free(altq, M_PFALTQ);
944 	}
945 	TAILQ_INIT(V_pf_altqs_inactive);
946 	if (error)
947 		return (error);
948 	*ticket = ++V_ticket_altqs_inactive;
949 	V_altqs_inactive_open = 1;
950 	return (0);
951 }
952 
953 static int
954 pf_rollback_altq(u_int32_t ticket)
955 {
956 	struct pf_altq	*altq, *tmp;
957 	int		 error = 0;
958 
959 	PF_RULES_WASSERT();
960 
961 	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
962 		return (0);
963 	/* Purge the old altq lists */
964 	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
965 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
966 			/* detach and destroy the discipline */
967 			error = altq_remove(altq);
968 		}
969 		free(altq, M_PFALTQ);
970 	}
971 	TAILQ_INIT(V_pf_altq_ifs_inactive);
972 	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
973 		pf_qid_unref(altq->qid);
974 		free(altq, M_PFALTQ);
975 	}
976 	TAILQ_INIT(V_pf_altqs_inactive);
977 	V_altqs_inactive_open = 0;
978 	return (error);
979 }
980 
981 static int
982 pf_commit_altq(u_int32_t ticket)
983 {
984 	struct pf_altqqueue	*old_altqs, *old_altq_ifs;
985 	struct pf_altq		*altq, *tmp;
986 	int			 err, error = 0;
987 
988 	PF_RULES_WASSERT();
989 
990 	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
991 		return (EBUSY);
992 
993 	/* swap altqs, keep the old. */
994 	old_altqs = V_pf_altqs_active;
995 	old_altq_ifs = V_pf_altq_ifs_active;
996 	V_pf_altqs_active = V_pf_altqs_inactive;
997 	V_pf_altq_ifs_active = V_pf_altq_ifs_inactive;
998 	V_pf_altqs_inactive = old_altqs;
999 	V_pf_altq_ifs_inactive = old_altq_ifs;
1000 	V_ticket_altqs_active = V_ticket_altqs_inactive;
1001 
1002 	/* Attach new disciplines */
1003 	TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
1004 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
1005 			/* attach the discipline */
1006 			error = altq_pfattach(altq);
1007 			if (error == 0 && V_pf_altq_running)
1008 				error = pf_enable_altq(altq);
1009 			if (error != 0)
1010 				return (error);
1011 		}
1012 	}
1013 
1014 	/* Purge the old altq lists */
1015 	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
1016 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
1017 			/* detach and destroy the discipline */
1018 			if (V_pf_altq_running)
1019 				error = pf_disable_altq(altq);
1020 			err = altq_pfdetach(altq);
1021 			if (err != 0 && error == 0)
1022 				error = err;
1023 			err = altq_remove(altq);
1024 			if (err != 0 && error == 0)
1025 				error = err;
1026 		}
1027 		free(altq, M_PFALTQ);
1028 	}
1029 	TAILQ_INIT(V_pf_altq_ifs_inactive);
1030 	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
1031 		pf_qid_unref(altq->qid);
1032 		free(altq, M_PFALTQ);
1033 	}
1034 	TAILQ_INIT(V_pf_altqs_inactive);
1035 
1036 	V_altqs_inactive_open = 0;
1037 	return (error);
1038 }
1039 
1040 static int
1041 pf_enable_altq(struct pf_altq *altq)
1042 {
1043 	struct ifnet		*ifp;
1044 	struct tb_profile	 tb;
1045 	int			 error = 0;
1046 
1047 	if ((ifp = ifunit(altq->ifname)) == NULL)
1048 		return (EINVAL);
1049 
1050 	if (ifp->if_snd.altq_type != ALTQT_NONE)
1051 		error = altq_enable(&ifp->if_snd);
1052 
1053 	/* set tokenbucket regulator */
1054 	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
1055 		tb.rate = altq->ifbandwidth;
1056 		tb.depth = altq->tbrsize;
1057 		error = tbr_set(&ifp->if_snd, &tb);
1058 	}
1059 
1060 	return (error);
1061 }
1062 
1063 static int
1064 pf_disable_altq(struct pf_altq *altq)
1065 {
1066 	struct ifnet		*ifp;
1067 	struct tb_profile	 tb;
1068 	int			 error;
1069 
1070 	if ((ifp = ifunit(altq->ifname)) == NULL)
1071 		return (EINVAL);
1072 
1073 	/*
1074 	 * If the discipline is no longer referenced, it has been overridden
1075 	 * by a new one; in that case just return.
1076 	 */
1077 	if (altq->altq_disc != ifp->if_snd.altq_disc)
1078 		return (0);
1079 
1080 	error = altq_disable(&ifp->if_snd);
1081 
1082 	if (error == 0) {
1083 		/* clear tokenbucket regulator */
1084 		tb.rate = 0;
1085 		error = tbr_set(&ifp->if_snd, &tb);
1086 	}
1087 
1088 	return (error);
1089 }
1090 
1091 static int
1092 pf_altq_ifnet_event_add(struct ifnet *ifp, int remove, u_int32_t ticket,
1093     struct pf_altq *altq)
1094 {
1095 	struct ifnet	*ifp1;
1096 	int		 error = 0;
1097 
1098 	/* Deactivate the interface in question */
1099 	altq->local_flags &= ~PFALTQ_FLAG_IF_REMOVED;
1100 	if ((ifp1 = ifunit(altq->ifname)) == NULL ||
1101 	    (remove && ifp1 == ifp)) {
1102 		altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
1103 	} else {
1104 		error = altq_add(ifp1, altq);
1105 
1106 		if (ticket != V_ticket_altqs_inactive)
1107 			error = EBUSY;
1108 
1109 		if (error)
1110 			free(altq, M_PFALTQ);
1111 	}
1112 
1113 	return (error);
1114 }
1115 
1116 void
1117 pf_altq_ifnet_event(struct ifnet *ifp, int remove)
1118 {
1119 	struct pf_altq	*a1, *a2, *a3;
1120 	u_int32_t	 ticket;
1121 	int		 error = 0;
1122 
1123 	/*
1124 	 * No need to re-evaluate the configuration for events on interfaces
1125 	 * that do not support ALTQ, as it's not possible for such
1126 	 * interfaces to be part of the configuration.
1127 	 */
1128 	if (!ALTQ_IS_READY(&ifp->if_snd))
1129 		return;
1130 
1131 	/* Interrupt userland queue modifications */
1132 	if (V_altqs_inactive_open)
1133 		pf_rollback_altq(V_ticket_altqs_inactive);
1134 
1135 	/* Start new altq ruleset */
1136 	if (pf_begin_altq(&ticket))
1137 		return;
1138 
1139 	/* Copy the current active set */
1140 	TAILQ_FOREACH(a1, V_pf_altq_ifs_active, entries) {
1141 		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
1142 		if (a2 == NULL) {
1143 			error = ENOMEM;
1144 			break;
1145 		}
1146 		bcopy(a1, a2, sizeof(struct pf_altq));
1147 
1148 		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
1149 		if (error)
1150 			break;
1151 
1152 		TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, a2, entries);
1153 	}
1154 	if (error)
1155 		goto out;
1156 	TAILQ_FOREACH(a1, V_pf_altqs_active, entries) {
1157 		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
1158 		if (a2 == NULL) {
1159 			error = ENOMEM;
1160 			break;
1161 		}
1162 		bcopy(a1, a2, sizeof(struct pf_altq));
1163 
1164 		if ((a2->qid = pf_qname2qid(a2->qname)) == 0) {
1165 			error = EBUSY;
1166 			free(a2, M_PFALTQ);
1167 			break;
1168 		}
1169 		a2->altq_disc = NULL;
1170 		TAILQ_FOREACH(a3, V_pf_altq_ifs_inactive, entries) {
1171 			if (strncmp(a3->ifname, a2->ifname,
1172 				IFNAMSIZ) == 0) {
1173 				a2->altq_disc = a3->altq_disc;
1174 				break;
1175 			}
1176 		}
1177 		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
1178 		if (error)
1179 			break;
1180 
1181 		TAILQ_INSERT_TAIL(V_pf_altqs_inactive, a2, entries);
1182 	}
1183 
1184 out:
1185 	if (error != 0)
1186 		pf_rollback_altq(ticket);
1187 	else
1188 		pf_commit_altq(ticket);
1189 }
1190 #endif /* ALTQ */
1191 
1192 static struct pf_krule_global *
1193 pf_rule_tree_alloc(int flags)
1194 {
1195 	struct pf_krule_global *tree;
1196 
1197 	tree = malloc(sizeof(struct pf_krule_global), M_TEMP, flags);
1198 	if (tree == NULL)
1199 		return (NULL);
1200 	RB_INIT(tree);
1201 	return (tree);
1202 }
1203 
1204 static void
1205 pf_rule_tree_free(struct pf_krule_global *tree)
1206 {
1207 
1208 	free(tree, M_TEMP);
1209 }
1210 
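/*
 * Ruleset loads are transactional: pf_begin_rules() empties the inactive
 * ruleset and hands the caller a ticket, rules are staged against that
 * ticket, and pf_commit_rules() swaps the inactive and active rulesets
 * while pf_rollback_rules() discards the staged rules.  A stale ticket
 * invalidates the whole transaction.
 */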
1211 static int
1212 pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
1213 {
1214 	struct pf_krule_global *tree;
1215 	struct pf_kruleset	*rs;
1216 	struct pf_krule		*rule;
1217 
1218 	PF_RULES_WASSERT();
1219 
1220 	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1221 		return (EINVAL);
1222 	tree = pf_rule_tree_alloc(M_NOWAIT);
1223 	if (tree == NULL)
1224 		return (ENOMEM);
1225 	rs = pf_find_or_create_kruleset(anchor);
1226 	if (rs == NULL) {
1227 		free(tree, M_TEMP);
1228 		return (EINVAL);
1229 	}
1230 	pf_rule_tree_free(rs->rules[rs_num].inactive.tree);
1231 	rs->rules[rs_num].inactive.tree = tree;
1232 
1233 	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
1234 		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
1235 		rs->rules[rs_num].inactive.rcount--;
1236 	}
1237 	*ticket = ++rs->rules[rs_num].inactive.ticket;
1238 	rs->rules[rs_num].inactive.open = 1;
1239 	return (0);
1240 }
1241 
1242 static int
1243 pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
1244 {
1245 	struct pf_kruleset	*rs;
1246 	struct pf_krule		*rule;
1247 
1248 	PF_RULES_WASSERT();
1249 
1250 	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1251 		return (EINVAL);
1252 	rs = pf_find_kruleset(anchor);
1253 	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
1254 	    rs->rules[rs_num].inactive.ticket != ticket)
1255 		return (0);
1256 	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
1257 		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
1258 		rs->rules[rs_num].inactive.rcount--;
1259 	}
1260 	rs->rules[rs_num].inactive.open = 0;
1261 	return (0);
1262 }
1263 
1264 #define PF_MD5_UPD(st, elm)						\
1265 		MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))
1266 
1267 #define PF_MD5_UPD_STR(st, elm)						\
1268 		MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))
1269 
1270 #define PF_MD5_UPD_HTONL(st, elm, stor) do {				\
1271 		(stor) = htonl((st)->elm);				\
1272 		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
1273 } while (0)
1274 
1275 #define PF_MD5_UPD_HTONS(st, elm, stor) do {				\
1276 		(stor) = htons((st)->elm);				\
1277 		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
1278 } while (0)
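/*
 * The PF_MD5_UPD* macros feed selected rule fields into an MD5 context in
 * a byte-order independent way.  The digests serve both as a per-rule
 * identity (pf_hash_rule(), compared by pf_krule_compare()) and, rolled
 * over the whole ruleset, as the checksum pfsync peers use to verify that
 * their configurations match (pf_setup_pfsync_matching()).
 */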
1279 
1280 static void
1281 pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
1282 {
1283 	PF_MD5_UPD(pfr, addr.type);
1284 	switch (pfr->addr.type) {
1285 		case PF_ADDR_DYNIFTL:
1286 			PF_MD5_UPD(pfr, addr.v.ifname);
1287 			PF_MD5_UPD(pfr, addr.iflags);
1288 			break;
1289 		case PF_ADDR_TABLE:
1290 			PF_MD5_UPD(pfr, addr.v.tblname);
1291 			break;
1292 		case PF_ADDR_ADDRMASK:
1293 			/* XXX ignore af? */
1294 			PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
1295 			PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
1296 			break;
1297 	}
1298 
1299 	PF_MD5_UPD(pfr, port[0]);
1300 	PF_MD5_UPD(pfr, port[1]);
1301 	PF_MD5_UPD(pfr, neg);
1302 	PF_MD5_UPD(pfr, port_op);
1303 }
1304 
1305 static void
1306 pf_hash_rule_rolling(MD5_CTX *ctx, struct pf_krule *rule)
1307 {
1308 	u_int16_t x;
1309 	u_int32_t y;
1310 
1311 	pf_hash_rule_addr(ctx, &rule->src);
1312 	pf_hash_rule_addr(ctx, &rule->dst);
1313 	for (int i = 0; i < PF_RULE_MAX_LABEL_COUNT; i++)
1314 		PF_MD5_UPD_STR(rule, label[i]);
1315 	PF_MD5_UPD_STR(rule, ifname);
1316 	PF_MD5_UPD_STR(rule, rcv_ifname);
1317 	PF_MD5_UPD_STR(rule, match_tagname);
1318 	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
1319 	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
1320 	PF_MD5_UPD_HTONL(rule, prob, y);
1321 	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
1322 	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
1323 	PF_MD5_UPD(rule, uid.op);
1324 	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
1325 	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
1326 	PF_MD5_UPD(rule, gid.op);
1327 	PF_MD5_UPD_HTONL(rule, rule_flag, y);
1328 	PF_MD5_UPD(rule, action);
1329 	PF_MD5_UPD(rule, direction);
1330 	PF_MD5_UPD(rule, af);
1331 	PF_MD5_UPD(rule, quick);
1332 	PF_MD5_UPD(rule, ifnot);
1333 	PF_MD5_UPD(rule, match_tag_not);
1334 	PF_MD5_UPD(rule, natpass);
1335 	PF_MD5_UPD(rule, keep_state);
1336 	PF_MD5_UPD(rule, proto);
1337 	PF_MD5_UPD(rule, type);
1338 	PF_MD5_UPD(rule, code);
1339 	PF_MD5_UPD(rule, flags);
1340 	PF_MD5_UPD(rule, flagset);
1341 	PF_MD5_UPD(rule, allow_opts);
1342 	PF_MD5_UPD(rule, rt);
1343 	PF_MD5_UPD(rule, tos);
1344 	PF_MD5_UPD(rule, scrub_flags);
1345 	PF_MD5_UPD(rule, min_ttl);
1346 	PF_MD5_UPD(rule, set_tos);
1347 	if (rule->anchor != NULL)
1348 		PF_MD5_UPD_STR(rule, anchor->path);
1349 }
1350 
1351 static void
1352 pf_hash_rule(struct pf_krule *rule)
1353 {
1354 	MD5_CTX		ctx;
1355 
1356 	MD5Init(&ctx);
1357 	pf_hash_rule_rolling(&ctx, rule);
1358 	MD5Final(rule->md5sum, &ctx);
1359 }
1360 
1361 static int
1362 pf_krule_compare(struct pf_krule *a, struct pf_krule *b)
1363 {
1364 
1365 	return (memcmp(a->md5sum, b->md5sum, PF_MD5_DIGEST_LENGTH));
1366 }
1367 
1368 static int
1369 pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
1370 {
1371 	struct pf_kruleset	*rs;
1372 	struct pf_krule		*rule, **old_array, *old_rule;
1373 	struct pf_krulequeue	*old_rules;
1374 	struct pf_krule_global  *old_tree;
1375 	int			 error;
1376 	u_int32_t		 old_rcount;
1377 
1378 	PF_RULES_WASSERT();
1379 
1380 	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1381 		return (EINVAL);
1382 	rs = pf_find_kruleset(anchor);
1383 	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
1384 	    ticket != rs->rules[rs_num].inactive.ticket)
1385 		return (EBUSY);
1386 
1387 	/* Calculate checksum for the main ruleset */
1388 	if (rs == &pf_main_ruleset) {
1389 		error = pf_setup_pfsync_matching(rs);
1390 		if (error != 0)
1391 			return (error);
1392 	}
1393 
1394 	/* Swap rules, keep the old. */
1395 	old_rules = rs->rules[rs_num].active.ptr;
1396 	old_rcount = rs->rules[rs_num].active.rcount;
1397 	old_array = rs->rules[rs_num].active.ptr_array;
1398 	old_tree = rs->rules[rs_num].active.tree;
1399 
1400 	rs->rules[rs_num].active.ptr =
1401 	    rs->rules[rs_num].inactive.ptr;
1402 	rs->rules[rs_num].active.ptr_array =
1403 	    rs->rules[rs_num].inactive.ptr_array;
1404 	rs->rules[rs_num].active.tree =
1405 	    rs->rules[rs_num].inactive.tree;
1406 	rs->rules[rs_num].active.rcount =
1407 	    rs->rules[rs_num].inactive.rcount;
1408 
1409 	/* Attempt to preserve counter information. */
1410 	if (V_pf_status.keep_counters && old_tree != NULL) {
1411 		TAILQ_FOREACH(rule, rs->rules[rs_num].active.ptr,
1412 		    entries) {
1413 			old_rule = RB_FIND(pf_krule_global, old_tree, rule);
1414 			if (old_rule == NULL) {
1415 				continue;
1416 			}
1417 			pf_counter_u64_critical_enter();
1418 			pf_counter_u64_rollup_protected(&rule->evaluations,
1419 			    pf_counter_u64_fetch(&old_rule->evaluations));
1420 			pf_counter_u64_rollup_protected(&rule->packets[0],
1421 			    pf_counter_u64_fetch(&old_rule->packets[0]));
1422 			pf_counter_u64_rollup_protected(&rule->packets[1],
1423 			    pf_counter_u64_fetch(&old_rule->packets[1]));
1424 			pf_counter_u64_rollup_protected(&rule->bytes[0],
1425 			    pf_counter_u64_fetch(&old_rule->bytes[0]));
1426 			pf_counter_u64_rollup_protected(&rule->bytes[1],
1427 			    pf_counter_u64_fetch(&old_rule->bytes[1]));
1428 			pf_counter_u64_critical_exit();
1429 		}
1430 	}
1431 
1432 	rs->rules[rs_num].inactive.ptr = old_rules;
1433 	rs->rules[rs_num].inactive.ptr_array = old_array;
1434 	rs->rules[rs_num].inactive.tree = NULL; /* important for pf_ioctl_addrule */
1435 	rs->rules[rs_num].inactive.rcount = old_rcount;
1436 
1437 	rs->rules[rs_num].active.ticket =
1438 	    rs->rules[rs_num].inactive.ticket;
1439 	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);
1440 
1441 	/* Purge the old rule list. */
1442 	PF_UNLNKDRULES_LOCK();
1443 	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
1444 		pf_unlink_rule_locked(old_rules, rule);
1445 	PF_UNLNKDRULES_UNLOCK();
1446 	if (rs->rules[rs_num].inactive.ptr_array)
1447 		free(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
1448 	rs->rules[rs_num].inactive.ptr_array = NULL;
1449 	rs->rules[rs_num].inactive.rcount = 0;
1450 	rs->rules[rs_num].inactive.open = 0;
1451 	pf_remove_if_empty_kruleset(rs);
1452 	free(old_tree, M_TEMP);
1453 
1454 	return (0);
1455 }
1456 
1457 static int
1458 pf_setup_pfsync_matching(struct pf_kruleset *rs)
1459 {
1460 	MD5_CTX			 ctx;
1461 	struct pf_krule		*rule;
1462 	int			 rs_cnt;
1463 	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];
1464 
1465 	MD5Init(&ctx);
1466 	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
1467 		/* XXX PF_RULESET_SCRUB as well? */
1468 		if (rs_cnt == PF_RULESET_SCRUB)
1469 			continue;
1470 
1471 		if (rs->rules[rs_cnt].inactive.ptr_array)
1472 			free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
1473 		rs->rules[rs_cnt].inactive.ptr_array = NULL;
1474 
1475 		if (rs->rules[rs_cnt].inactive.rcount) {
1476 			rs->rules[rs_cnt].inactive.ptr_array =
1477 			    mallocarray(rs->rules[rs_cnt].inactive.rcount,
1478 			    sizeof(struct pf_rule **),
1479 			    M_TEMP, M_NOWAIT);
1480 
1481 			if (!rs->rules[rs_cnt].inactive.ptr_array)
1482 				return (ENOMEM);
1483 		}
1484 
1485 		TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
1486 		    entries) {
1487 			pf_hash_rule_rolling(&ctx, rule);
1488 			(rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
1489 		}
1490 	}
1491 
1492 	MD5Final(digest, &ctx);
1493 	memcpy(V_pf_status.pf_chksum, digest, sizeof(V_pf_status.pf_chksum));
1494 	return (0);
1495 }
1496 
1497 static int
1498 pf_eth_addr_setup(struct pf_keth_ruleset *ruleset, struct pf_addr_wrap *addr)
1499 {
1500 	int error = 0;
1501 
1502 	switch (addr->type) {
1503 	case PF_ADDR_TABLE:
1504 		addr->p.tbl = pfr_eth_attach_table(ruleset, addr->v.tblname);
1505 		if (addr->p.tbl == NULL)
1506 			error = ENOMEM;
1507 		break;
1508 	default:
1509 		error = EINVAL;
1510 	}
1511 
1512 	return (error);
1513 }
1514 
1515 static int
1516 pf_addr_setup(struct pf_kruleset *ruleset, struct pf_addr_wrap *addr,
1517     sa_family_t af)
1518 {
1519 	int error = 0;
1520 
1521 	switch (addr->type) {
1522 	case PF_ADDR_TABLE:
1523 		addr->p.tbl = pfr_attach_table(ruleset, addr->v.tblname);
1524 		if (addr->p.tbl == NULL)
1525 			error = ENOMEM;
1526 		break;
1527 	case PF_ADDR_DYNIFTL:
1528 		error = pfi_dynaddr_setup(addr, af);
1529 		break;
1530 	}
1531 
1532 	return (error);
1533 }
1534 
1535 void
1536 pf_addr_copyout(struct pf_addr_wrap *addr)
1537 {
1538 
1539 	switch (addr->type) {
1540 	case PF_ADDR_DYNIFTL:
1541 		pfi_dynaddr_copyout(addr);
1542 		break;
1543 	case PF_ADDR_TABLE:
1544 		pf_tbladdr_copyout(addr);
1545 		break;
1546 	}
1547 }
1548 
1549 static void
1550 pf_src_node_copy(const struct pf_ksrc_node *in, struct pf_src_node *out)
1551 {
1552 	int	secs = time_uptime, diff;
1553 
1554 	bzero(out, sizeof(struct pf_src_node));
1555 
1556 	bcopy(&in->addr, &out->addr, sizeof(struct pf_addr));
1557 	bcopy(&in->raddr, &out->raddr, sizeof(struct pf_addr));
1558 
1559 	if (in->rule != NULL)
1560 		out->rule.nr = in->rule->nr;
1561 
1562 	for (int i = 0; i < 2; i++) {
1563 		out->bytes[i] = counter_u64_fetch(in->bytes[i]);
1564 		out->packets[i] = counter_u64_fetch(in->packets[i]);
1565 	}
1566 
1567 	out->states = in->states;
1568 	out->conn = in->conn;
1569 	out->af = in->af;
1570 	out->ruletype = in->ruletype;
1571 
1572 	out->creation = secs - in->creation;
1573 	if (out->expire > secs)
1574 		out->expire -= secs;
1575 	else
1576 		out->expire = 0;
1577 
1578 	/* Adjust the connection rate estimate. */
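	/*
	 * The count decays linearly over the tracking window; for example,
	 * with count 100, seconds 10 and 4 seconds elapsed since the last
	 * update, the exported count becomes 100 - 100 * 4 / 10 = 60.
	 */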
1579 	out->conn_rate = in->conn_rate;
1580 	diff = secs - in->conn_rate.last;
1581 	if (diff >= in->conn_rate.seconds)
1582 		out->conn_rate.count = 0;
1583 	else
1584 		out->conn_rate.count -=
1585 		    in->conn_rate.count * diff /
1586 		    in->conn_rate.seconds;
1587 }
1588 
1589 #ifdef ALTQ
1590 /*
1591  * Handle export of struct pf_kaltq to user binaries that may be using any
1592  * version of struct pf_altq.
1593  */
1594 static int
1595 pf_export_kaltq(struct pf_altq *q, struct pfioc_altq_v1 *pa, size_t ioc_size)
1596 {
1597 	u_int32_t version;
1598 
1599 	if (ioc_size == sizeof(struct pfioc_altq_v0))
1600 		version = 0;
1601 	else
1602 		version = pa->version;
1603 
1604 	if (version > PFIOC_ALTQ_VERSION)
1605 		return (EINVAL);
1606 
1607 #define ASSIGN(x) exported_q->x = q->x
1608 #define COPY(x) \
1609 	bcopy(&q->x, &exported_q->x, min(sizeof(q->x), sizeof(exported_q->x)))
1610 #define SATU16(x) (u_int32_t)uqmin((x), USHRT_MAX)
1611 #define SATU32(x) (u_int32_t)uqmin((x), UINT_MAX)
1612 
1613 	switch (version) {
1614 	case 0: {
1615 		struct pf_altq_v0 *exported_q =
1616 		    &((struct pfioc_altq_v0 *)pa)->altq;
1617 
1618 		COPY(ifname);
1619 
1620 		ASSIGN(scheduler);
1621 		ASSIGN(tbrsize);
1622 		exported_q->tbrsize = SATU16(q->tbrsize);
1623 		exported_q->ifbandwidth = SATU32(q->ifbandwidth);
1624 
1625 		COPY(qname);
1626 		COPY(parent);
1627 		ASSIGN(parent_qid);
1628 		exported_q->bandwidth = SATU32(q->bandwidth);
1629 		ASSIGN(priority);
1630 		ASSIGN(local_flags);
1631 
1632 		ASSIGN(qlimit);
1633 		ASSIGN(flags);
1634 
1635 		if (q->scheduler == ALTQT_HFSC) {
1636 #define ASSIGN_OPT(x) exported_q->pq_u.hfsc_opts.x = q->pq_u.hfsc_opts.x
1637 #define ASSIGN_OPT_SATU32(x) exported_q->pq_u.hfsc_opts.x = \
1638 			    SATU32(q->pq_u.hfsc_opts.x)
1639 
1640 			ASSIGN_OPT_SATU32(rtsc_m1);
1641 			ASSIGN_OPT(rtsc_d);
1642 			ASSIGN_OPT_SATU32(rtsc_m2);
1643 
1644 			ASSIGN_OPT_SATU32(lssc_m1);
1645 			ASSIGN_OPT(lssc_d);
1646 			ASSIGN_OPT_SATU32(lssc_m2);
1647 
1648 			ASSIGN_OPT_SATU32(ulsc_m1);
1649 			ASSIGN_OPT(ulsc_d);
1650 			ASSIGN_OPT_SATU32(ulsc_m2);
1651 
1652 			ASSIGN_OPT(flags);
1653 
1654 #undef ASSIGN_OPT
1655 #undef ASSIGN_OPT_SATU32
1656 		} else
1657 			COPY(pq_u);
1658 
1659 		ASSIGN(qid);
1660 		break;
1661 	}
1662 	case 1:	{
1663 		struct pf_altq_v1 *exported_q =
1664 		    &((struct pfioc_altq_v1 *)pa)->altq;
1665 
1666 		COPY(ifname);
1667 
1668 		ASSIGN(scheduler);
1669 		ASSIGN(tbrsize);
1670 		ASSIGN(ifbandwidth);
1671 
1672 		COPY(qname);
1673 		COPY(parent);
1674 		ASSIGN(parent_qid);
1675 		ASSIGN(bandwidth);
1676 		ASSIGN(priority);
1677 		ASSIGN(local_flags);
1678 
1679 		ASSIGN(qlimit);
1680 		ASSIGN(flags);
1681 		COPY(pq_u);
1682 
1683 		ASSIGN(qid);
1684 		break;
1685 	}
1686 	default:
1687 		panic("%s: unhandled struct pfioc_altq version", __func__);
1688 		break;
1689 	}
1690 
1691 #undef ASSIGN
1692 #undef COPY
1693 #undef SATU16
1694 #undef SATU32
1695 
1696 	return (0);
1697 }
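/*
 * Version selection sketch: a consumer built against the v0 ioctl ABI
 * passes a struct pfioc_altq_v0, recognized purely by its size; anything
 * larger is expected to carry an explicit version field no newer than
 * PFIOC_ALTQ_VERSION.  The v0 export above saturates the 64-bit bandwidth
 * and 32-bit tbrsize values into the narrower legacy fields.
 */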
1698 
1699 /*
1700  * Handle import to struct pf_kaltq of struct pf_altq from user binaries
1701  * that may be using any version of it.
1702  */
1703 static int
1704 pf_import_kaltq(struct pfioc_altq_v1 *pa, struct pf_altq *q, size_t ioc_size)
1705 {
1706 	u_int32_t version;
1707 
1708 	if (ioc_size == sizeof(struct pfioc_altq_v0))
1709 		version = 0;
1710 	else
1711 		version = pa->version;
1712 
1713 	if (version > PFIOC_ALTQ_VERSION)
1714 		return (EINVAL);
1715 
1716 #define ASSIGN(x) q->x = imported_q->x
1717 #define COPY(x) \
1718 	bcopy(&imported_q->x, &q->x, min(sizeof(imported_q->x), sizeof(q->x)))
1719 
1720 	switch (version) {
1721 	case 0: {
1722 		struct pf_altq_v0 *imported_q =
1723 		    &((struct pfioc_altq_v0 *)pa)->altq;
1724 
1725 		COPY(ifname);
1726 
1727 		ASSIGN(scheduler);
1728 		ASSIGN(tbrsize); /* 16-bit -> 32-bit */
1729 		ASSIGN(ifbandwidth); /* 32-bit -> 64-bit */
1730 
1731 		COPY(qname);
1732 		COPY(parent);
1733 		ASSIGN(parent_qid);
1734 		ASSIGN(bandwidth); /* 32-bit -> 64-bit */
1735 		ASSIGN(priority);
1736 		ASSIGN(local_flags);
1737 
1738 		ASSIGN(qlimit);
1739 		ASSIGN(flags);
1740 
1741 		if (imported_q->scheduler == ALTQT_HFSC) {
1742 #define ASSIGN_OPT(x) q->pq_u.hfsc_opts.x = imported_q->pq_u.hfsc_opts.x
1743 
1744 			/*
1745 			 * The m1 and m2 parameters are being copied from
1746 			 * 32-bit to 64-bit.
1747 			 */
1748 			ASSIGN_OPT(rtsc_m1);
1749 			ASSIGN_OPT(rtsc_d);
1750 			ASSIGN_OPT(rtsc_m2);
1751 
1752 			ASSIGN_OPT(lssc_m1);
1753 			ASSIGN_OPT(lssc_d);
1754 			ASSIGN_OPT(lssc_m2);
1755 
1756 			ASSIGN_OPT(ulsc_m1);
1757 			ASSIGN_OPT(ulsc_d);
1758 			ASSIGN_OPT(ulsc_m2);
1759 
1760 			ASSIGN_OPT(flags);
1761 
1762 #undef ASSIGN_OPT
1763 		} else
1764 			COPY(pq_u);
1765 
1766 		ASSIGN(qid);
1767 		break;
1768 	}
1769 	case 1: {
1770 		struct pf_altq_v1 *imported_q =
1771 		    &((struct pfioc_altq_v1 *)pa)->altq;
1772 
1773 		COPY(ifname);
1774 
1775 		ASSIGN(scheduler);
1776 		ASSIGN(tbrsize);
1777 		ASSIGN(ifbandwidth);
1778 
1779 		COPY(qname);
1780 		COPY(parent);
1781 		ASSIGN(parent_qid);
1782 		ASSIGN(bandwidth);
1783 		ASSIGN(priority);
1784 		ASSIGN(local_flags);
1785 
1786 		ASSIGN(qlimit);
1787 		ASSIGN(flags);
1788 		COPY(pq_u);
1789 
1790 		ASSIGN(qid);
1791 		break;
1792 	}
1793 	default:
1794 		panic("%s: unhandled struct pfioc_altq version", __func__);
1795 		break;
1796 	}
1797 
1798 #undef ASSIGN
1799 #undef COPY
1800 
1801 	return (0);
1802 }
1803 
1804 static struct pf_altq *
1805 pf_altq_get_nth_active(u_int32_t n)
1806 {
1807 	struct pf_altq		*altq;
1808 	u_int32_t		 nr;
1809 
1810 	nr = 0;
1811 	TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
1812 		if (nr == n)
1813 			return (altq);
1814 		nr++;
1815 	}
1816 
1817 	TAILQ_FOREACH(altq, V_pf_altqs_active, entries) {
1818 		if (nr == n)
1819 			return (altq);
1820 		nr++;
1821 	}
1822 
1823 	return (NULL);
1824 }
1825 #endif /* ALTQ */
1826 
1827 struct pf_krule *
1828 pf_krule_alloc(void)
1829 {
1830 	struct pf_krule *rule;
1831 
1832 	rule = malloc(sizeof(struct pf_krule), M_PFRULE, M_WAITOK | M_ZERO);
1833 	mtx_init(&rule->nat.mtx, "pf_krule_nat_pool", NULL, MTX_DEF);
1834 	mtx_init(&rule->rdr.mtx, "pf_krule_rdr_pool", NULL, MTX_DEF);
1835 	rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
1836 	    M_WAITOK | M_ZERO);
1837 	return (rule);
1838 }
1839 
1840 void
1841 pf_krule_free(struct pf_krule *rule)
1842 {
1843 #ifdef PF_WANT_32_TO_64_COUNTER
1844 	bool wowned;
1845 #endif
1846 
1847 	if (rule == NULL)
1848 		return;
1849 
1850 #ifdef PF_WANT_32_TO_64_COUNTER
1851 	if (rule->allrulelinked) {
1852 		wowned = PF_RULES_WOWNED();
1853 		if (!wowned)
1854 			PF_RULES_WLOCK();
1855 		LIST_REMOVE(rule, allrulelist);
1856 		V_pf_allrulecount--;
1857 		if (!wowned)
1858 			PF_RULES_WUNLOCK();
1859 	}
1860 #endif
1861 
1862 	pf_counter_u64_deinit(&rule->evaluations);
1863 	for (int i = 0; i < 2; i++) {
1864 		pf_counter_u64_deinit(&rule->packets[i]);
1865 		pf_counter_u64_deinit(&rule->bytes[i]);
1866 	}
1867 	counter_u64_free(rule->states_cur);
1868 	counter_u64_free(rule->states_tot);
1869 	counter_u64_free(rule->src_nodes);
1870 	uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp);
1871 
1872 	mtx_destroy(&rule->nat.mtx);
1873 	mtx_destroy(&rule->rdr.mtx);
1874 	free(rule, M_PFRULE);
1875 }
1876 
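/*
 * Reset the per-rule statistics: evaluations, packets, bytes and the total
 * state counter.  The current state counter (states_cur) is left untouched.
 */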
1877 void
1878 pf_krule_clear_counters(struct pf_krule *rule)
1879 {
1880 	pf_counter_u64_zero(&rule->evaluations);
1881 	for (int i = 0; i < 2; i++) {
1882 		pf_counter_u64_zero(&rule->packets[i]);
1883 		pf_counter_u64_zero(&rule->bytes[i]);
1884 	}
1885 	counter_u64_zero(rule->states_tot);
1886 }
1887 
1888 static void
1889 pf_kpooladdr_to_pooladdr(const struct pf_kpooladdr *kpool,
1890     struct pf_pooladdr *pool)
1891 {
1892 
1893 	bzero(pool, sizeof(*pool));
1894 	bcopy(&kpool->addr, &pool->addr, sizeof(pool->addr));
1895 	strlcpy(pool->ifname, kpool->ifname, sizeof(pool->ifname));
1896 }
1897 
1898 static int
1899 pf_pooladdr_to_kpooladdr(const struct pf_pooladdr *pool,
1900     struct pf_kpooladdr *kpool)
1901 {
1902 	int ret;
1903 
1904 	bzero(kpool, sizeof(*kpool));
1905 	bcopy(&pool->addr, &kpool->addr, sizeof(kpool->addr));
1906 	ret = pf_user_strcpy(kpool->ifname, pool->ifname,
1907 	    sizeof(kpool->ifname));
1908 	return (ret);
1909 }
1910 
1911 static void
1912 pf_pool_to_kpool(const struct pf_pool *pool, struct pf_kpool *kpool)
1913 {
1914 	_Static_assert(sizeof(pool->key) == sizeof(kpool->key), "");
1915 	_Static_assert(sizeof(pool->counter) == sizeof(kpool->counter), "");
1916 
1917 	bcopy(&pool->key, &kpool->key, sizeof(kpool->key));
1918 	bcopy(&pool->counter, &kpool->counter, sizeof(kpool->counter));
1919 
1920 	kpool->tblidx = pool->tblidx;
1921 	kpool->proxy_port[0] = pool->proxy_port[0];
1922 	kpool->proxy_port[1] = pool->proxy_port[1];
1923 	kpool->opts = pool->opts;
1924 }
1925 
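/*
 * Convert a userspace struct pf_rule into the kernel's struct pf_krule.
 * Address families compiled out of the kernel are rejected with
 * EAFNOSUPPORT, the src/dst address specifications are validated, and every
 * user-supplied string is copied with pf_user_strcpy() with its return
 * value checked.  Runtime-only members (kif, anchor, overload_tbl and the
 * counters) are deliberately not filled in here.
 */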
1926 static int
1927 pf_rule_to_krule(const struct pf_rule *rule, struct pf_krule *krule)
1928 {
1929 	int ret;
1930 
1931 #ifndef INET
1932 	if (rule->af == AF_INET) {
1933 		return (EAFNOSUPPORT);
1934 	}
1935 #endif /* INET */
1936 #ifndef INET6
1937 	if (rule->af == AF_INET6) {
1938 		return (EAFNOSUPPORT);
1939 	}
1940 #endif /* INET6 */
1941 
1942 	ret = pf_check_rule_addr(&rule->src);
1943 	if (ret != 0)
1944 		return (ret);
1945 	ret = pf_check_rule_addr(&rule->dst);
1946 	if (ret != 0)
1947 		return (ret);
1948 
1949 	bcopy(&rule->src, &krule->src, sizeof(rule->src));
1950 	bcopy(&rule->dst, &krule->dst, sizeof(rule->dst));
1951 
1952 	ret = pf_user_strcpy(krule->label[0], rule->label, sizeof(rule->label));
1953 	if (ret != 0)
1954 		return (ret);
1955 	ret = pf_user_strcpy(krule->ifname, rule->ifname, sizeof(rule->ifname));
1956 	if (ret != 0)
1957 		return (ret);
1958 	ret = pf_user_strcpy(krule->qname, rule->qname, sizeof(rule->qname));
1959 	if (ret != 0)
1960 		return (ret);
1961 	ret = pf_user_strcpy(krule->pqname, rule->pqname, sizeof(rule->pqname));
1962 	if (ret != 0)
1963 		return (ret);
1964 	ret = pf_user_strcpy(krule->tagname, rule->tagname,
1965 	    sizeof(rule->tagname));
1966 	if (ret != 0)
1967 		return (ret);
1968 	ret = pf_user_strcpy(krule->match_tagname, rule->match_tagname,
1969 	    sizeof(rule->match_tagname));
1970 	if (ret != 0)
1971 		return (ret);
1972 	ret = pf_user_strcpy(krule->overload_tblname, rule->overload_tblname,
1973 	    sizeof(rule->overload_tblname));
1974 	if (ret != 0)
1975 		return (ret);
1976 
1977 	pf_pool_to_kpool(&rule->rpool, &krule->rdr);
1978 
1979 	/* Don't allow userspace to set evaluations, packets or bytes. */
1980 	/* kif, anchor, overload_tbl are not copied over. */
1981 
1982 	krule->os_fingerprint = rule->os_fingerprint;
1983 
1984 	krule->rtableid = rule->rtableid;
1985 	/* pf_rule->timeout is smaller than pf_krule->timeout */
1986 	bcopy(rule->timeout, krule->timeout, sizeof(rule->timeout));
1987 	krule->max_states = rule->max_states;
1988 	krule->max_src_nodes = rule->max_src_nodes;
1989 	krule->max_src_states = rule->max_src_states;
1990 	krule->max_src_conn = rule->max_src_conn;
1991 	krule->max_src_conn_rate.limit = rule->max_src_conn_rate.limit;
1992 	krule->max_src_conn_rate.seconds = rule->max_src_conn_rate.seconds;
1993 	krule->qid = rule->qid;
1994 	krule->pqid = rule->pqid;
1995 	krule->nr = rule->nr;
1996 	krule->prob = rule->prob;
1997 	krule->cuid = rule->cuid;
1998 	krule->cpid = rule->cpid;
1999 
2000 	krule->return_icmp = rule->return_icmp;
2001 	krule->return_icmp6 = rule->return_icmp6;
2002 	krule->max_mss = rule->max_mss;
2003 	krule->tag = rule->tag;
2004 	krule->match_tag = rule->match_tag;
2005 	krule->scrub_flags = rule->scrub_flags;
2006 
2007 	bcopy(&rule->uid, &krule->uid, sizeof(krule->uid));
2008 	bcopy(&rule->gid, &krule->gid, sizeof(krule->gid));
2009 
2010 	krule->rule_flag = rule->rule_flag;
2011 	krule->action = rule->action;
2012 	krule->direction = rule->direction;
2013 	krule->log = rule->log;
2014 	krule->logif = rule->logif;
2015 	krule->quick = rule->quick;
2016 	krule->ifnot = rule->ifnot;
2017 	krule->match_tag_not = rule->match_tag_not;
2018 	krule->natpass = rule->natpass;
2019 
2020 	krule->keep_state = rule->keep_state;
2021 	krule->af = rule->af;
2022 	krule->proto = rule->proto;
2023 	krule->type = rule->type;
2024 	krule->code = rule->code;
2025 	krule->flags = rule->flags;
2026 	krule->flagset = rule->flagset;
2027 	krule->min_ttl = rule->min_ttl;
2028 	krule->allow_opts = rule->allow_opts;
2029 	krule->rt = rule->rt;
2030 	krule->return_ttl = rule->return_ttl;
2031 	krule->tos = rule->tos;
2032 	krule->set_tos = rule->set_tos;
2033 
2034 	krule->flush = rule->flush;
2035 	krule->prio = rule->prio;
2036 	krule->set_prio[0] = rule->set_prio[0];
2037 	krule->set_prio[1] = rule->set_prio[1];
2038 
2039 	bcopy(&rule->divert, &krule->divert, sizeof(krule->divert));
2040 
2041 	return (0);
2042 }
2043 
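/*
 * Backing implementation for DIOCGETRULES: report the number of rules in
 * the requested active ruleset via pr->nr, along with the active ticket
 * that subsequent get-rule requests must present.
 */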
2044 int
2045 pf_ioctl_getrules(struct pfioc_rule *pr)
2046 {
2047 	struct pf_kruleset	*ruleset;
2048 	struct pf_krule		*tail;
2049 	int			 rs_num;
2050 
2051 	PF_RULES_WLOCK();
2052 	ruleset = pf_find_kruleset(pr->anchor);
2053 	if (ruleset == NULL) {
2054 		PF_RULES_WUNLOCK();
2055 		return (EINVAL);
2056 	}
2057 	rs_num = pf_get_ruleset_number(pr->rule.action);
2058 	if (rs_num >= PF_RULESET_MAX) {
2059 		PF_RULES_WUNLOCK();
2060 		return (EINVAL);
2061 	}
2062 	tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
2063 	    pf_krulequeue);
2064 	if (tail)
2065 		pr->nr = tail->nr + 1;
2066 	else
2067 		pr->nr = 0;
2068 	pr->ticket = ruleset->rules[rs_num].active.ticket;
2069 	PF_RULES_WUNLOCK();
2070 
2071 	return (0);
2072 }
2073 
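/*
 * Insert a fully constructed rule into the inactive ruleset named by
 * "anchor".  The caller must present the inactive ruleset ticket handed out
 * when the transaction was started as well as the pool address ticket from
 * pf_ioctl_begin_addrs(); both are verified under the rules write lock.
 * The rule is consumed: on failure it is freed before the error is returned.
 *
 * An illustrative sketch (not a literal transcript of pfctl) of how a
 * userspace loader is expected to drive this:
 *
 *	DIOCXBEGIN		obtain the inactive ruleset ticket
 *	DIOCBEGINADDRS		obtain pool_ticket, reset the staging pools
 *	DIOCADDADDR (0..n)	stage pool addresses
 *	DIOCADDRULE		ends up here, once per rule
 *	DIOCXCOMMIT		swap the inactive and active rulesets
 */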
2074 int
2075 pf_ioctl_addrule(struct pf_krule *rule, uint32_t ticket,
2076     uint32_t pool_ticket, const char *anchor, const char *anchor_call,
2077     uid_t uid, pid_t pid)
2078 {
2079 	struct pf_kruleset	*ruleset;
2080 	struct pf_krule		*tail;
2081 	struct pf_kpooladdr	*pa;
2082 	struct pfi_kkif		*kif = NULL, *rcv_kif = NULL;
2083 	int			 rs_num;
2084 	int			 error = 0;
2085 
2086 	if ((rule->return_icmp >> 8) > ICMP_MAXTYPE) {
2087 		error = EINVAL;
2088 		goto errout_unlocked;
2089 	}
2090 
2091 #define	ERROUT(x)	ERROUT_FUNCTION(errout, x)
2092 
2093 	if (rule->ifname[0])
2094 		kif = pf_kkif_create(M_WAITOK);
2095 	if (rule->rcv_ifname[0])
2096 		rcv_kif = pf_kkif_create(M_WAITOK);
2097 	pf_counter_u64_init(&rule->evaluations, M_WAITOK);
2098 	for (int i = 0; i < 2; i++) {
2099 		pf_counter_u64_init(&rule->packets[i], M_WAITOK);
2100 		pf_counter_u64_init(&rule->bytes[i], M_WAITOK);
2101 	}
2102 	rule->states_cur = counter_u64_alloc(M_WAITOK);
2103 	rule->states_tot = counter_u64_alloc(M_WAITOK);
2104 	rule->src_nodes = counter_u64_alloc(M_WAITOK);
2105 	rule->cuid = uid;
2106 	rule->cpid = pid;
2107 	TAILQ_INIT(&rule->rdr.list);
2108 	TAILQ_INIT(&rule->nat.list);
2109 
2110 	PF_CONFIG_LOCK();
2111 	PF_RULES_WLOCK();
2112 #ifdef PF_WANT_32_TO_64_COUNTER
2113 	LIST_INSERT_HEAD(&V_pf_allrulelist, rule, allrulelist);
2114 	MPASS(!rule->allrulelinked);
2115 	rule->allrulelinked = true;
2116 	V_pf_allrulecount++;
2117 #endif
2118 	ruleset = pf_find_kruleset(anchor);
2119 	if (ruleset == NULL)
2120 		ERROUT(EINVAL);
2121 	rs_num = pf_get_ruleset_number(rule->action);
2122 	if (rs_num >= PF_RULESET_MAX)
2123 		ERROUT(EINVAL);
2124 	if (ticket != ruleset->rules[rs_num].inactive.ticket) {
2125 		DPFPRINTF(PF_DEBUG_MISC,
2126 		    ("ticket: %d != [%d]%d\n", ticket, rs_num,
2127 		    ruleset->rules[rs_num].inactive.ticket));
2128 		ERROUT(EBUSY);
2129 	}
2130 	if (pool_ticket != V_ticket_pabuf) {
2131 		DPFPRINTF(PF_DEBUG_MISC,
2132 		    ("pool_ticket: %d != %d\n", pool_ticket,
2133 		    V_ticket_pabuf));
2134 		ERROUT(EBUSY);
2135 	}
2136 	/*
2137 	 * XXXMJG hack: there is no mechanism to ensure the caller actually
2138 	 * started the transaction. The ticket checked above may happen to
2139 	 * match by accident, even if nobody called DIOCXBEGIN, let alone this
2140 	 * process. Partially work around it by checking whether the RB tree
2141 	 * got allocated, see pf_begin_rules().
2142 	 */
2143 	if (ruleset->rules[rs_num].inactive.tree == NULL) {
2144 		ERROUT(EINVAL);
2145 	}
2146 
2147 	tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
2148 	    pf_krulequeue);
2149 	if (tail)
2150 		rule->nr = tail->nr + 1;
2151 	else
2152 		rule->nr = 0;
2153 	if (rule->ifname[0]) {
2154 		rule->kif = pfi_kkif_attach(kif, rule->ifname);
2155 		kif = NULL;
2156 		pfi_kkif_ref(rule->kif);
2157 	} else
2158 		rule->kif = NULL;
2159 
2160 	if (rule->rcv_ifname[0]) {
2161 		rule->rcv_kif = pfi_kkif_attach(rcv_kif, rule->rcv_ifname);
2162 		rcv_kif = NULL;
2163 		pfi_kkif_ref(rule->rcv_kif);
2164 	} else
2165 		rule->rcv_kif = NULL;
2166 
2167 	if (rule->rtableid > 0 && rule->rtableid >= rt_numfibs)
2168 		error = EBUSY;
2169 
2170 #ifdef ALTQ
2171 	/* set queue IDs */
2172 	if (rule->qname[0] != 0) {
2173 		if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
2174 			error = EBUSY;
2175 		else if (rule->pqname[0] != 0) {
2176 			if ((rule->pqid =
2177 			    pf_qname2qid(rule->pqname)) == 0)
2178 				error = EBUSY;
2179 		} else
2180 			rule->pqid = rule->qid;
2181 	}
2182 #endif
2183 	if (rule->tagname[0])
2184 		if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
2185 			error = EBUSY;
2186 	if (rule->match_tagname[0])
2187 		if ((rule->match_tag =
2188 		    pf_tagname2tag(rule->match_tagname)) == 0)
2189 			error = EBUSY;
2190 	if (rule->rt && !rule->direction)
2191 		error = EINVAL;
2192 	if (!rule->log)
2193 		rule->logif = 0;
2194 	if (rule->logif >= PFLOGIFS_MAX)
2195 		error = EINVAL;
2196 	if (pf_addr_setup(ruleset, &rule->src.addr, rule->af))
2197 		error = ENOMEM;
2198 	if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af))
2199 		error = ENOMEM;
2200 	if (pf_kanchor_setup(rule, ruleset, anchor_call))
2201 		error = EINVAL;
2202 	if (rule->scrub_flags & PFSTATE_SETPRIO &&
2203 	    (rule->set_prio[0] > PF_PRIO_MAX ||
2204 	    rule->set_prio[1] > PF_PRIO_MAX))
2205 		error = EINVAL;
2206 	for (int i = 0; i < 2; i++) {
2207 		TAILQ_FOREACH(pa, &V_pf_pabuf[i], entries)
2208 			if (pa->addr.type == PF_ADDR_TABLE) {
2209 				pa->addr.p.tbl = pfr_attach_table(ruleset,
2210 				    pa->addr.v.tblname);
2211 				if (pa->addr.p.tbl == NULL)
2212 					error = ENOMEM;
2213 			}
2214 	}
2215 
2216 	rule->overload_tbl = NULL;
2217 	if (rule->overload_tblname[0]) {
2218 		if ((rule->overload_tbl = pfr_attach_table(ruleset,
2219 		    rule->overload_tblname)) == NULL)
2220 			error = EINVAL;
2221 		else
2222 			rule->overload_tbl->pfrkt_flags |=
2223 			    PFR_TFLAG_ACTIVE;
2224 	}
2225 
2226 	pf_mv_kpool(&V_pf_pabuf[0], &rule->nat.list);
2227 	pf_mv_kpool(&V_pf_pabuf[1], &rule->rdr.list);
2228 	if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
2229 	    (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
2230 	    (rule->rt > PF_NOPFROUTE)) &&
2231 	    (TAILQ_FIRST(&rule->rdr.list) == NULL))
2232 		error = EINVAL;
2233 
2234 	if (rule->action == PF_PASS && rule->rdr.opts & PF_POOL_STICKYADDR &&
2235 	    !rule->keep_state) {
2236 		error = EINVAL;
2237 	}
2238 
2239 	if (error) {
2240 		pf_free_rule(rule);
2241 		rule = NULL;
2242 		ERROUT(error);
2243 	}
2244 
2245 	rule->nat.cur = TAILQ_FIRST(&rule->nat.list);
2246 	rule->rdr.cur = TAILQ_FIRST(&rule->rdr.list);
2247 	TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
2248 	    rule, entries);
2249 	ruleset->rules[rs_num].inactive.rcount++;
2250 
2251 	PF_RULES_WUNLOCK();
2252 	pf_hash_rule(rule);
2253 	if (RB_INSERT(pf_krule_global, ruleset->rules[rs_num].inactive.tree, rule) != NULL) {
2254 		PF_RULES_WLOCK();
2255 		TAILQ_REMOVE(ruleset->rules[rs_num].inactive.ptr, rule, entries);
2256 		ruleset->rules[rs_num].inactive.rcount--;
2257 		pf_free_rule(rule);
2258 		rule = NULL;
2259 		ERROUT(EEXIST);
2260 	}
2261 	PF_CONFIG_UNLOCK();
2262 
2263 	return (0);
2264 
2265 #undef ERROUT
2266 errout:
2267 	PF_RULES_WUNLOCK();
2268 	PF_CONFIG_UNLOCK();
2269 errout_unlocked:
2270 	pf_kkif_free(rcv_kif);
2271 	pf_kkif_free(kif);
2272 	pf_krule_free(rule);
2273 	return (error);
2274 }
2275 
2276 static bool
2277 pf_label_match(const struct pf_krule *rule, const char *label)
2278 {
2279 	int i = 0;
2280 
2281 	while (*rule->label[i]) {
2282 		if (strcmp(rule->label[i], label) == 0)
2283 			return (true);
2284 		i++;
2285 	}
2286 
2287 	return (false);
2288 }
2289 
2290 static unsigned int
2291 pf_kill_matching_state(struct pf_state_key_cmp *key, int dir)
2292 {
2293 	struct pf_kstate *s;
2294 	int more = 0;
2295 
2296 	s = pf_find_state_all(key, dir, &more);
2297 	if (s == NULL)
2298 		return (0);
2299 
2300 	if (more) {
2301 		PF_STATE_UNLOCK(s);
2302 		return (0);
2303 	}
2304 
2305 	pf_unlink_state(s);
2306 	return (1);
2307 }
2308 
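/*
 * Walk one state hash row and unlink every state matching the filter in
 * *psk: address family, protocol, source/destination address and port,
 * route-to address, label and interface.  The row lock is not held across
 * pf_unlink_state(), so the scan restarts from the top of the row after
 * every kill.  With psk_kill_match set, the state matching the reversed
 * key is removed as well.  Returns the number of states killed.
 */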
2309 static int
2310 pf_killstates_row(struct pf_kstate_kill *psk, struct pf_idhash *ih)
2311 {
2312 	struct pf_kstate	*s;
2313 	struct pf_state_key	*sk;
2314 	struct pf_addr		*srcaddr, *dstaddr;
2315 	struct pf_state_key_cmp	 match_key;
2316 	int			 idx, killed = 0;
2317 	unsigned int		 dir;
2318 	u_int16_t		 srcport, dstport;
2319 	struct pfi_kkif		*kif;
2320 
2321 relock_DIOCKILLSTATES:
2322 	PF_HASHROW_LOCK(ih);
2323 	LIST_FOREACH(s, &ih->states, entry) {
2324 		/* For floating states look at the original kif. */
2325 		kif = s->kif == V_pfi_all ? s->orig_kif : s->kif;
2326 
2327 		sk = s->key[psk->psk_nat ? PF_SK_STACK : PF_SK_WIRE];
2328 		if (s->direction == PF_OUT) {
2329 			srcaddr = &sk->addr[1];
2330 			dstaddr = &sk->addr[0];
2331 			srcport = sk->port[1];
2332 			dstport = sk->port[0];
2333 		} else {
2334 			srcaddr = &sk->addr[0];
2335 			dstaddr = &sk->addr[1];
2336 			srcport = sk->port[0];
2337 			dstport = sk->port[1];
2338 		}
2339 
2340 		if (psk->psk_af && sk->af != psk->psk_af)
2341 			continue;
2342 
2343 		if (psk->psk_proto && psk->psk_proto != sk->proto)
2344 			continue;
2345 
2346 		if (! PF_MATCHA(psk->psk_src.neg, &psk->psk_src.addr.v.a.addr,
2347 		    &psk->psk_src.addr.v.a.mask, srcaddr, sk->af))
2348 			continue;
2349 
2350 		if (! PF_MATCHA(psk->psk_dst.neg, &psk->psk_dst.addr.v.a.addr,
2351 		    &psk->psk_dst.addr.v.a.mask, dstaddr, sk->af))
2352 			continue;
2353 
2354 		if (! PF_MATCHA(psk->psk_rt_addr.neg,
2355 		    &psk->psk_rt_addr.addr.v.a.addr,
2356 		    &psk->psk_rt_addr.addr.v.a.mask,
2357 		    &s->act.rt_addr, sk->af))
2358 			continue;
2359 
2360 		if (psk->psk_src.port_op != 0 &&
2361 		    ! pf_match_port(psk->psk_src.port_op,
2362 		    psk->psk_src.port[0], psk->psk_src.port[1], srcport))
2363 			continue;
2364 
2365 		if (psk->psk_dst.port_op != 0 &&
2366 		    ! pf_match_port(psk->psk_dst.port_op,
2367 		    psk->psk_dst.port[0], psk->psk_dst.port[1], dstport))
2368 			continue;
2369 
2370 		if (psk->psk_label[0] &&
2371 		    ! pf_label_match(s->rule, psk->psk_label))
2372 			continue;
2373 
2374 		if (psk->psk_ifname[0] && strcmp(psk->psk_ifname,
2375 		    kif->pfik_name))
2376 			continue;
2377 
2378 		if (psk->psk_kill_match) {
2379 			/* Create the key to find matching states, with lock
2380 			 * held. */
2381 
2382 			bzero(&match_key, sizeof(match_key));
2383 
2384 			if (s->direction == PF_OUT) {
2385 				dir = PF_IN;
2386 				idx = psk->psk_nat ? PF_SK_WIRE : PF_SK_STACK;
2387 			} else {
2388 				dir = PF_OUT;
2389 				idx = psk->psk_nat ? PF_SK_STACK : PF_SK_WIRE;
2390 			}
2391 
2392 			match_key.af = s->key[idx]->af;
2393 			match_key.proto = s->key[idx]->proto;
2394 			PF_ACPY(&match_key.addr[0],
2395 			    &s->key[idx]->addr[1], match_key.af);
2396 			match_key.port[0] = s->key[idx]->port[1];
2397 			PF_ACPY(&match_key.addr[1],
2398 			    &s->key[idx]->addr[0], match_key.af);
2399 			match_key.port[1] = s->key[idx]->port[0];
2400 		}
2401 
2402 		pf_unlink_state(s);
2403 		killed++;
2404 
2405 		if (psk->psk_kill_match)
2406 			killed += pf_kill_matching_state(&match_key, dir);
2407 
2408 		goto relock_DIOCKILLSTATES;
2409 	}
2410 	PF_HASHROW_UNLOCK(ih);
2411 
2412 	return (killed);
2413 }
2414 
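/*
 * Start and stop the packet filter.  Under the ioctl lock the pfil hooks
 * are attached or detached, the running flag and the "since" timestamp are
 * updated, and on start the state ID generator is initialized.  The
 * ethernet hooks are only attached when ethernet rules are loaded.
 */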
2415 int
2416 pf_start(void)
2417 {
2418 	int error = 0;
2419 
2420 	sx_xlock(&V_pf_ioctl_lock);
2421 	if (V_pf_status.running)
2422 		error = EEXIST;
2423 	else {
2424 		hook_pf();
2425 		if (! TAILQ_EMPTY(V_pf_keth->active.rules))
2426 			hook_pf_eth();
2427 		V_pf_status.running = 1;
2428 		V_pf_status.since = time_second;
2429 		new_unrhdr64(&V_pf_stateid, time_second);
2430 
2431 		DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
2432 	}
2433 	sx_xunlock(&V_pf_ioctl_lock);
2434 
2435 	return (error);
2436 }
2437 
2438 int
2439 pf_stop(void)
2440 {
2441 	int error = 0;
2442 
2443 	sx_xlock(&V_pf_ioctl_lock);
2444 	if (!V_pf_status.running)
2445 		error = ENOENT;
2446 	else {
2447 		V_pf_status.running = 0;
2448 		dehook_pf();
2449 		dehook_pf_eth();
2450 		V_pf_status.since = time_second;
2451 		DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
2452 	}
2453 	sx_xunlock(&V_pf_ioctl_lock);
2454 
2455 	return (error);
2456 }
2457 
2458 void
2459 pf_ioctl_clear_status(void)
2460 {
2461 	PF_RULES_WLOCK();
2462 	for (int i = 0; i < PFRES_MAX; i++)
2463 		counter_u64_zero(V_pf_status.counters[i]);
2464 	for (int i = 0; i < FCNT_MAX; i++)
2465 		pf_counter_u64_zero(&V_pf_status.fcounters[i]);
2466 	for (int i = 0; i < SCNT_MAX; i++)
2467 		counter_u64_zero(V_pf_status.scounters[i]);
2468 	for (int i = 0; i < KLCNT_MAX; i++)
2469 		counter_u64_zero(V_pf_status.lcounters[i]);
2470 	V_pf_status.since = time_second;
2471 	if (*V_pf_status.ifname)
2472 		pfi_update_status(V_pf_status.ifname, NULL);
2473 	PF_RULES_WUNLOCK();
2474 }
2475 
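/*
 * Set one of the global state timeouts on the default rule.  A zero
 * PFTM_INTERVAL is silently bumped to one second, and lowering the purge
 * interval wakes the purge thread so the new value takes effect promptly.
 * The previous value is optionally returned via *prev_seconds.
 */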
2476 int
2477 pf_ioctl_set_timeout(int timeout, int seconds, int *prev_seconds)
2478 {
2479 	uint32_t old;
2480 
2481 	if (timeout < 0 || timeout >= PFTM_MAX ||
2482 	    seconds < 0)
2483 		return (EINVAL);
2484 
2485 	PF_RULES_WLOCK();
2486 	old = V_pf_default_rule.timeout[timeout];
2487 	if (timeout == PFTM_INTERVAL && seconds == 0)
2488 		seconds = 1;
2489 	V_pf_default_rule.timeout[timeout] = seconds;
2490 	if (timeout == PFTM_INTERVAL && seconds < old)
2491 		wakeup(pf_purge_thread);
2492 
2493 	if (prev_seconds != NULL)
2494 		*prev_seconds = old;
2495 
2496 	PF_RULES_WUNLOCK();
2497 
2498 	return (0);
2499 }
2500 
2501 int
2502 pf_ioctl_get_timeout(int timeout, int *seconds)
2503 {
2504 	PF_RULES_RLOCK_TRACKER;
2505 
2506 	if (timeout < 0 || timeout >= PFTM_MAX)
2507 		return (EINVAL);
2508 
2509 	PF_RULES_RLOCK();
2510 	*seconds = V_pf_default_rule.timeout[timeout];
2511 	PF_RULES_RUNLOCK();
2512 
2513 	return (0);
2514 }
2515 
2516 int
2517 pf_ioctl_set_limit(int index, unsigned int limit, unsigned int *old_limit)
2518 {
2519 
2520 	PF_RULES_WLOCK();
2521 	if (index < 0 || index >= PF_LIMIT_MAX ||
2522 	    V_pf_limits[index].zone == NULL) {
2523 		PF_RULES_WUNLOCK();
2524 		return (EINVAL);
2525 	}
2526 	uma_zone_set_max(V_pf_limits[index].zone, limit);
2527 	if (old_limit != NULL)
2528 		*old_limit = V_pf_limits[index].limit;
2529 	V_pf_limits[index].limit = limit;
2530 	PF_RULES_WUNLOCK();
2531 
2532 	return (0);
2533 }
2534 
2535 int
2536 pf_ioctl_get_limit(int index, unsigned int *limit)
2537 {
2538 	PF_RULES_RLOCK_TRACKER;
2539 
2540 	if (index < 0 || index >= PF_LIMIT_MAX)
2541 		return (EINVAL);
2542 
2543 	PF_RULES_RLOCK();
2544 	*limit = V_pf_limits[index].limit;
2545 	PF_RULES_RUNLOCK();
2546 
2547 	return (0);
2548 }
2549 
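/*
 * Pool address staging.  pf_ioctl_begin_addrs() empties both staging pools
 * (V_pf_pabuf[0] for NAT, [1] for redirect) and hands out a fresh ticket;
 * pf_ioctl_add_addr() appends one validated address to the selected pool,
 * provided the caller still holds the current ticket.  The staged addresses
 * are later moved onto a rule by pf_ioctl_addrule() or DIOCCHANGERULE.
 */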
2550 int
2551 pf_ioctl_begin_addrs(uint32_t *ticket)
2552 {
2553 	PF_RULES_WLOCK();
2554 	pf_empty_kpool(&V_pf_pabuf[0]);
2555 	pf_empty_kpool(&V_pf_pabuf[1]);
2556 	*ticket = ++V_ticket_pabuf;
2557 	PF_RULES_WUNLOCK();
2558 
2559 	return (0);
2560 }
2561 
2562 int
2563 pf_ioctl_add_addr(struct pf_nl_pooladdr *pp)
2564 {
2565 	struct pf_kpooladdr	*pa = NULL;
2566 	struct pfi_kkif		*kif = NULL;
2567 	int error;
2568 
2569 	MPASS(pp->which == PF_RDR || pp->which == PF_NAT);
2570 
2571 #ifndef INET
2572 	if (pp->af == AF_INET)
2573 		return (EAFNOSUPPORT);
2574 #endif /* INET */
2575 #ifndef INET6
2576 	if (pp->af == AF_INET6)
2577 		return (EAFNOSUPPORT);
2578 #endif /* INET6 */
2579 
2580 	if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
2581 	    pp->addr.addr.type != PF_ADDR_DYNIFTL &&
2582 	    pp->addr.addr.type != PF_ADDR_TABLE)
2583 		return (EINVAL);
2584 
2585 	if (pp->addr.addr.p.dyn != NULL)
2586 		return (EINVAL);
2587 
2588 	pa = malloc(sizeof(*pa), M_PFRULE, M_WAITOK);
2589 	error = pf_pooladdr_to_kpooladdr(&pp->addr, pa);
2590 	if (error != 0)
2591 		goto out;
2592 	if (pa->ifname[0])
2593 		kif = pf_kkif_create(M_WAITOK);
2594 	PF_RULES_WLOCK();
2595 	if (pp->ticket != V_ticket_pabuf) {
2596 		PF_RULES_WUNLOCK();
2597 		if (pa->ifname[0])
2598 			pf_kkif_free(kif);
2599 		error = EBUSY;
2600 		goto out;
2601 	}
2602 	if (pa->ifname[0]) {
2603 		pa->kif = pfi_kkif_attach(kif, pa->ifname);
2604 		kif = NULL;
2605 		pfi_kkif_ref(pa->kif);
2606 	} else
2607 		pa->kif = NULL;
2608 	if (pa->addr.type == PF_ADDR_DYNIFTL && ((error =
2609 	    pfi_dynaddr_setup(&pa->addr, pp->af)) != 0)) {
2610 		if (pa->ifname[0])
2611 			pfi_kkif_unref(pa->kif);
2612 		PF_RULES_WUNLOCK();
2613 		goto out;
2614 	}
2615 	TAILQ_INSERT_TAIL(&V_pf_pabuf[pp->which == PF_RDR ? 1 : 0],
2616 	    pa, entries);
2617 	PF_RULES_WUNLOCK();
2618 
2619 	return (0);
2620 
2621 out:
2622 	free(pa, M_PFRULE);
2623 	return (error);
2624 }
2625 
2626 int
2627 pf_ioctl_get_addrs(struct pf_nl_pooladdr *pp)
2628 {
2629 	struct pf_kpool		*pool;
2630 	struct pf_kpooladdr	*pa;
2631 
2632 	PF_RULES_RLOCK_TRACKER;
2633 
2634 	MPASS(pp->which == PF_RDR || pp->which == PF_NAT);
2635 
2636 	pp->anchor[sizeof(pp->anchor) - 1] = 0;
2637 	pp->nr = 0;
2638 
2639 	PF_RULES_RLOCK();
2640 	pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
2641 	    pp->r_num, 0, 1, 0, pp->which);
2642 	if (pool == NULL) {
2643 		PF_RULES_RUNLOCK();
2644 		return (EBUSY);
2645 	}
2646 	TAILQ_FOREACH(pa, &pool->list, entries)
2647 		pp->nr++;
2648 	PF_RULES_RUNLOCK();
2649 
2650 	return (0);
2651 }
2652 
2653 int
2654 pf_ioctl_get_addr(struct pf_nl_pooladdr *pp)
2655 {
2656 	struct pf_kpool		*pool;
2657 	struct pf_kpooladdr	*pa;
2658 	u_int32_t		 nr = 0;
2659 
2660 	MPASS(pp->which == PF_RDR || pp->which == PF_NAT);
2661 
2662 	PF_RULES_RLOCK_TRACKER;
2663 
2664 	pp->anchor[sizeof(pp->anchor) - 1] = 0;
2665 
2666 	PF_RULES_RLOCK();
2667 	pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
2668 	    pp->r_num, 0, 1, 1, pp->which);
2669 	if (pool == NULL) {
2670 		PF_RULES_RUNLOCK();
2671 		return (EBUSY);
2672 	}
2673 	pa = TAILQ_FIRST(&pool->list);
2674 	while ((pa != NULL) && (nr < pp->nr)) {
2675 		pa = TAILQ_NEXT(pa, entries);
2676 		nr++;
2677 	}
2678 	if (pa == NULL) {
2679 		PF_RULES_RUNLOCK();
2680 		return (EBUSY);
2681 	}
2682 	pf_kpooladdr_to_pooladdr(pa, &pp->addr);
2683 	pf_addr_copyout(&pp->addr.addr);
2684 	PF_RULES_RUNLOCK();
2685 
2686 	return (0);
2687 }
2688 
2689 int
2690 pf_ioctl_get_rulesets(struct pfioc_ruleset *pr)
2691 {
2692 	struct pf_kruleset	*ruleset;
2693 	struct pf_kanchor	*anchor;
2694 
2695 	PF_RULES_RLOCK_TRACKER;
2696 
2697 	pr->path[sizeof(pr->path) - 1] = 0;
2698 
2699 	PF_RULES_RLOCK();
2700 	if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
2701 		PF_RULES_RUNLOCK();
2702 		return (ENOENT);
2703 	}
2704 	pr->nr = 0;
2705 	if (ruleset->anchor == NULL) {
2706 		/* XXX kludge for pf_main_ruleset */
2707 		RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
2708 			if (anchor->parent == NULL)
2709 				pr->nr++;
2710 	} else {
2711 		RB_FOREACH(anchor, pf_kanchor_node,
2712 		    &ruleset->anchor->children)
2713 			pr->nr++;
2714 	}
2715 	PF_RULES_RUNLOCK();
2716 
2717 	return (0);
2718 }
2719 
2720 int
2721 pf_ioctl_get_ruleset(struct pfioc_ruleset *pr)
2722 {
2723 	struct pf_kruleset	*ruleset;
2724 	struct pf_kanchor	*anchor;
2725 	u_int32_t		 nr = 0;
2726 	int			 error = 0;
2727 
2728 	PF_RULES_RLOCK_TRACKER;
2729 
2730 	PF_RULES_RLOCK();
2731 	if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
2732 		PF_RULES_RUNLOCK();
2733 		return (ENOENT);
2734 	}
2735 
2736 	pr->name[0] = 0;
2737 	if (ruleset->anchor == NULL) {
2738 		/* XXX kludge for pf_main_ruleset */
2739 		RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
2740 			if (anchor->parent == NULL && nr++ == pr->nr) {
2741 				strlcpy(pr->name, anchor->name,
2742 				    sizeof(pr->name));
2743 				break;
2744 			}
2745 	} else {
2746 		RB_FOREACH(anchor, pf_kanchor_node,
2747 		    &ruleset->anchor->children)
2748 			if (nr++ == pr->nr) {
2749 				strlcpy(pr->name, anchor->name,
2750 				    sizeof(pr->name));
2751 				break;
2752 			}
2753 	}
2754 	if (!pr->name[0])
2755 		error = EBUSY;
2756 	PF_RULES_RUNLOCK();
2757 
2758 	return (error);
2759 }
2760 
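/*
 * Main /dev/pf ioctl entry point.  Before dispatching, commands are
 * filtered against the securelevel and the open flags of the descriptor:
 * at securelevel greater than 2 only the listed query-style commands (and
 * dummy table operations) are permitted, and a descriptor opened without
 * FWRITE is restricted similarly.  The real work happens in the switch
 * below, with the caller's vnet set for the duration.
 */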
2761 static int
2762 pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
2763 {
2764 	int			 error = 0;
2765 	PF_RULES_RLOCK_TRACKER;
2766 
2767 #define	ERROUT_IOCTL(target, x)					\
2768     do {								\
2769 	    error = (x);						\
2770 	    SDT_PROBE3(pf, ioctl, ioctl, error, cmd, error, __LINE__);	\
2771 	    goto target;						\
2772     } while (0)
2773 
2774 
2775 	/* XXX keep in sync with switch() below */
2776 	if (securelevel_gt(td->td_ucred, 2))
2777 		switch (cmd) {
2778 		case DIOCGETRULES:
2779 		case DIOCGETRULENV:
2780 		case DIOCGETADDRS:
2781 		case DIOCGETADDR:
2782 		case DIOCGETSTATE:
2783 		case DIOCGETSTATENV:
2784 		case DIOCSETSTATUSIF:
2785 		case DIOCGETSTATUSNV:
2786 		case DIOCCLRSTATUS:
2787 		case DIOCNATLOOK:
2788 		case DIOCSETDEBUG:
2789 #ifdef COMPAT_FREEBSD14
2790 		case DIOCGETSTATES:
2791 		case DIOCGETSTATESV2:
2792 #endif
2793 		case DIOCGETTIMEOUT:
2794 		case DIOCCLRRULECTRS:
2795 		case DIOCGETLIMIT:
2796 		case DIOCGETALTQSV0:
2797 		case DIOCGETALTQSV1:
2798 		case DIOCGETALTQV0:
2799 		case DIOCGETALTQV1:
2800 		case DIOCGETQSTATSV0:
2801 		case DIOCGETQSTATSV1:
2802 		case DIOCGETRULESETS:
2803 		case DIOCGETRULESET:
2804 		case DIOCRGETTABLES:
2805 		case DIOCRGETTSTATS:
2806 		case DIOCRCLRTSTATS:
2807 		case DIOCRCLRADDRS:
2808 		case DIOCRADDADDRS:
2809 		case DIOCRDELADDRS:
2810 		case DIOCRSETADDRS:
2811 		case DIOCRGETADDRS:
2812 		case DIOCRGETASTATS:
2813 		case DIOCRCLRASTATS:
2814 		case DIOCRTSTADDRS:
2815 		case DIOCOSFPGET:
2816 		case DIOCGETSRCNODES:
2817 		case DIOCCLRSRCNODES:
2818 		case DIOCGETSYNCOOKIES:
2819 		case DIOCIGETIFACES:
2820 		case DIOCGIFSPEEDV0:
2821 		case DIOCGIFSPEEDV1:
2822 		case DIOCSETIFFLAG:
2823 		case DIOCCLRIFFLAG:
2824 		case DIOCGETETHRULES:
2825 		case DIOCGETETHRULE:
2826 		case DIOCGETETHRULESETS:
2827 		case DIOCGETETHRULESET:
2828 			break;
2829 		case DIOCRCLRTABLES:
2830 		case DIOCRADDTABLES:
2831 		case DIOCRDELTABLES:
2832 		case DIOCRSETTFLAGS:
2833 			if (((struct pfioc_table *)addr)->pfrio_flags &
2834 			    PFR_FLAG_DUMMY)
2835 				break; /* dummy operation ok */
2836 			return (EPERM);
2837 		default:
2838 			return (EPERM);
2839 		}
2840 
2841 	if (!(flags & FWRITE))
2842 		switch (cmd) {
2843 		case DIOCGETRULES:
2844 		case DIOCGETADDRS:
2845 		case DIOCGETADDR:
2846 		case DIOCGETSTATE:
2847 		case DIOCGETSTATENV:
2848 		case DIOCGETSTATUSNV:
2849 #ifdef COMPAT_FREEBSD14
2850 		case DIOCGETSTATES:
2851 		case DIOCGETSTATESV2:
2852 #endif
2853 		case DIOCGETTIMEOUT:
2854 		case DIOCGETLIMIT:
2855 		case DIOCGETALTQSV0:
2856 		case DIOCGETALTQSV1:
2857 		case DIOCGETALTQV0:
2858 		case DIOCGETALTQV1:
2859 		case DIOCGETQSTATSV0:
2860 		case DIOCGETQSTATSV1:
2861 		case DIOCGETRULESETS:
2862 		case DIOCGETRULESET:
2863 		case DIOCNATLOOK:
2864 		case DIOCRGETTABLES:
2865 		case DIOCRGETTSTATS:
2866 		case DIOCRGETADDRS:
2867 		case DIOCRGETASTATS:
2868 		case DIOCRTSTADDRS:
2869 		case DIOCOSFPGET:
2870 		case DIOCGETSRCNODES:
2871 		case DIOCGETSYNCOOKIES:
2872 		case DIOCIGETIFACES:
2873 		case DIOCGIFSPEEDV1:
2874 		case DIOCGIFSPEEDV0:
2875 		case DIOCGETRULENV:
2876 		case DIOCGETETHRULES:
2877 		case DIOCGETETHRULE:
2878 		case DIOCGETETHRULESETS:
2879 		case DIOCGETETHRULESET:
2880 			break;
2881 		case DIOCRCLRTABLES:
2882 		case DIOCRADDTABLES:
2883 		case DIOCRDELTABLES:
2884 		case DIOCRCLRTSTATS:
2885 		case DIOCRCLRADDRS:
2886 		case DIOCRADDADDRS:
2887 		case DIOCRDELADDRS:
2888 		case DIOCRSETADDRS:
2889 		case DIOCRSETTFLAGS:
2890 			if (((struct pfioc_table *)addr)->pfrio_flags &
2891 			    PFR_FLAG_DUMMY) {
2892 				flags |= FWRITE; /* need write lock for dummy */
2893 				break; /* dummy operation ok */
2894 			}
2895 			return (EACCES);
2896 		default:
2897 			return (EACCES);
2898 		}
2899 
2900 	CURVNET_SET(TD_TO_VNET(td));
2901 
2902 	switch (cmd) {
2903 #ifdef COMPAT_FREEBSD14
2904 	case DIOCSTART:
2905 		error = pf_start();
2906 		break;
2907 
2908 	case DIOCSTOP:
2909 		error = pf_stop();
2910 		break;
2911 #endif
2912 
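	/*
	 * The ethernet rule ioctls below share a common nvlist pattern: copy
	 * in the packed request (bounded by pf_ioctl_maxcount), unpack and
	 * validate the expected fields, build a reply nvlist, pack it and
	 * copy it back out.  A zero nv->size means the caller only wants the
	 * required buffer length, which is returned in nv->len.
	 */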
2913 	case DIOCGETETHRULES: {
2914 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
2915 		nvlist_t		*nvl;
2916 		void			*packed;
2917 		struct pf_keth_rule	*tail;
2918 		struct pf_keth_ruleset	*rs;
2919 		u_int32_t		 ticket, nr;
2920 		const char		*anchor = "";
2921 
2922 		nvl = NULL;
2923 		packed = NULL;
2924 
2925 #define	ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULES_error, x)
2926 
2927 		if (nv->len > pf_ioctl_maxcount)
2928 			ERROUT(ENOMEM);
2929 
2930 		/* Copy the request in */
2931 		packed = malloc(nv->len, M_NVLIST, M_WAITOK);
2932 		error = copyin(nv->data, packed, nv->len);
2933 		if (error)
2934 			ERROUT(error);
2935 
2936 		nvl = nvlist_unpack(packed, nv->len, 0);
2937 		if (nvl == NULL)
2938 			ERROUT(EBADMSG);
2939 
2940 		if (! nvlist_exists_string(nvl, "anchor"))
2941 			ERROUT(EBADMSG);
2942 
2943 		anchor = nvlist_get_string(nvl, "anchor");
2944 
2945 		rs = pf_find_keth_ruleset(anchor);
2946 
2947 		nvlist_destroy(nvl);
2948 		nvl = NULL;
2949 		free(packed, M_NVLIST);
2950 		packed = NULL;
2951 
2952 		if (rs == NULL)
2953 			ERROUT(ENOENT);
2954 
2955 		/* Reply */
2956 		nvl = nvlist_create(0);
2957 		if (nvl == NULL)
2958 			ERROUT(ENOMEM);
2959 
2960 		PF_RULES_RLOCK();
2961 
2962 		ticket = rs->active.ticket;
2963 		tail = TAILQ_LAST(rs->active.rules, pf_keth_ruleq);
2964 		if (tail)
2965 			nr = tail->nr + 1;
2966 		else
2967 			nr = 0;
2968 
2969 		PF_RULES_RUNLOCK();
2970 
2971 		nvlist_add_number(nvl, "ticket", ticket);
2972 		nvlist_add_number(nvl, "nr", nr);
2973 
2974 		packed = nvlist_pack(nvl, &nv->len);
2975 		if (packed == NULL)
2976 			ERROUT(ENOMEM);
2977 
2978 		if (nv->size == 0)
2979 			ERROUT(0);
2980 		else if (nv->size < nv->len)
2981 			ERROUT(ENOSPC);
2982 
2983 		error = copyout(packed, nv->data, nv->len);
2984 
2985 #undef ERROUT
2986 DIOCGETETHRULES_error:
2987 		free(packed, M_NVLIST);
2988 		nvlist_destroy(nvl);
2989 		break;
2990 	}
2991 
2992 	case DIOCGETETHRULE: {
2993 		struct epoch_tracker	 et;
2994 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
2995 		nvlist_t		*nvl = NULL;
2996 		void			*nvlpacked = NULL;
2997 		struct pf_keth_rule	*rule = NULL;
2998 		struct pf_keth_ruleset	*rs;
2999 		u_int32_t		 ticket, nr;
3000 		bool			 clear = false;
3001 		const char		*anchor;
3002 
3003 #define ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULE_error, x)
3004 
3005 		if (nv->len > pf_ioctl_maxcount)
3006 			ERROUT(ENOMEM);
3007 
3008 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3009 		error = copyin(nv->data, nvlpacked, nv->len);
3010 		if (error)
3011 			ERROUT(error);
3012 
3013 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3014 		if (nvl == NULL)
3015 			ERROUT(EBADMSG);
3016 		if (! nvlist_exists_number(nvl, "ticket"))
3017 			ERROUT(EBADMSG);
3018 		ticket = nvlist_get_number(nvl, "ticket");
3019 		if (! nvlist_exists_string(nvl, "anchor"))
3020 			ERROUT(EBADMSG);
3021 		anchor = nvlist_get_string(nvl, "anchor");
3022 
3023 		if (nvlist_exists_bool(nvl, "clear"))
3024 			clear = nvlist_get_bool(nvl, "clear");
3025 
3026 		if (clear && !(flags & FWRITE))
3027 			ERROUT(EACCES);
3028 
3029 		if (! nvlist_exists_number(nvl, "nr"))
3030 			ERROUT(EBADMSG);
3031 		nr = nvlist_get_number(nvl, "nr");
3032 
3033 		PF_RULES_RLOCK();
3034 		rs = pf_find_keth_ruleset(anchor);
3035 		if (rs == NULL) {
3036 			PF_RULES_RUNLOCK();
3037 			ERROUT(ENOENT);
3038 		}
3039 		if (ticket != rs->active.ticket) {
3040 			PF_RULES_RUNLOCK();
3041 			ERROUT(EBUSY);
3042 		}
3043 
3044 		nvlist_destroy(nvl);
3045 		nvl = NULL;
3046 		free(nvlpacked, M_NVLIST);
3047 		nvlpacked = NULL;
3048 
3049 		rule = TAILQ_FIRST(rs->active.rules);
3050 		while ((rule != NULL) && (rule->nr != nr))
3051 			rule = TAILQ_NEXT(rule, entries);
3052 		if (rule == NULL) {
3053 			PF_RULES_RUNLOCK();
3054 			ERROUT(ENOENT);
3055 		}
3056 		/* Make sure rule can't go away. */
3057 		NET_EPOCH_ENTER(et);
3058 		PF_RULES_RUNLOCK();
3059 		nvl = pf_keth_rule_to_nveth_rule(rule);
3060 		if (pf_keth_anchor_nvcopyout(rs, rule, nvl)) {
3061 			NET_EPOCH_EXIT(et);
3062 			ERROUT(EBUSY);
3063 		}
3064 		NET_EPOCH_EXIT(et);
3065 		if (nvl == NULL)
3066 			ERROUT(ENOMEM);
3067 
3068 		nvlpacked = nvlist_pack(nvl, &nv->len);
3069 		if (nvlpacked == NULL)
3070 			ERROUT(ENOMEM);
3071 
3072 		if (nv->size == 0)
3073 			ERROUT(0);
3074 		else if (nv->size < nv->len)
3075 			ERROUT(ENOSPC);
3076 
3077 		error = copyout(nvlpacked, nv->data, nv->len);
3078 		if (error == 0 && clear) {
3079 			counter_u64_zero(rule->evaluations);
3080 			for (int i = 0; i < 2; i++) {
3081 				counter_u64_zero(rule->packets[i]);
3082 				counter_u64_zero(rule->bytes[i]);
3083 			}
3084 		}
3085 
3086 #undef ERROUT
3087 DIOCGETETHRULE_error:
3088 		free(nvlpacked, M_NVLIST);
3089 		nvlist_destroy(nvl);
3090 		break;
3091 	}
3092 
3093 	case DIOCADDETHRULE: {
3094 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3095 		nvlist_t		*nvl = NULL;
3096 		void			*nvlpacked = NULL;
3097 		struct pf_keth_rule	*rule = NULL, *tail = NULL;
3098 		struct pf_keth_ruleset	*ruleset = NULL;
3099 		struct pfi_kkif		*kif = NULL, *bridge_to_kif = NULL;
3100 		const char		*anchor = "", *anchor_call = "";
3101 
3102 #define ERROUT(x)	ERROUT_IOCTL(DIOCADDETHRULE_error, x)
3103 
3104 		if (nv->len > pf_ioctl_maxcount)
3105 			ERROUT(ENOMEM);
3106 
3107 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3108 		error = copyin(nv->data, nvlpacked, nv->len);
3109 		if (error)
3110 			ERROUT(error);
3111 
3112 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3113 		if (nvl == NULL)
3114 			ERROUT(EBADMSG);
3115 
3116 		if (! nvlist_exists_number(nvl, "ticket"))
3117 			ERROUT(EBADMSG);
3118 
3119 		if (nvlist_exists_string(nvl, "anchor"))
3120 			anchor = nvlist_get_string(nvl, "anchor");
3121 		if (nvlist_exists_string(nvl, "anchor_call"))
3122 			anchor_call = nvlist_get_string(nvl, "anchor_call");
3123 
3124 		ruleset = pf_find_keth_ruleset(anchor);
3125 		if (ruleset == NULL)
3126 			ERROUT(EINVAL);
3127 
3128 		if (nvlist_get_number(nvl, "ticket") !=
3129 		    ruleset->inactive.ticket) {
3130 			DPFPRINTF(PF_DEBUG_MISC,
3131 			    ("ticket: %d != %d\n",
3132 			    (u_int32_t)nvlist_get_number(nvl, "ticket"),
3133 			    ruleset->inactive.ticket));
3134 			ERROUT(EBUSY);
3135 		}
3136 
3137 		rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK);
3138 		rule->timestamp = NULL;
3139 
3140 		error = pf_nveth_rule_to_keth_rule(nvl, rule);
3141 		if (error != 0)
3142 			ERROUT(error);
3143 
3144 		if (rule->ifname[0])
3145 			kif = pf_kkif_create(M_WAITOK);
3146 		if (rule->bridge_to_name[0])
3147 			bridge_to_kif = pf_kkif_create(M_WAITOK);
3148 		rule->evaluations = counter_u64_alloc(M_WAITOK);
3149 		for (int i = 0; i < 2; i++) {
3150 			rule->packets[i] = counter_u64_alloc(M_WAITOK);
3151 			rule->bytes[i] = counter_u64_alloc(M_WAITOK);
3152 		}
3153 		rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
3154 		    M_WAITOK | M_ZERO);
3155 
3156 		PF_RULES_WLOCK();
3157 
3158 		if (rule->ifname[0]) {
3159 			rule->kif = pfi_kkif_attach(kif, rule->ifname);
3160 			pfi_kkif_ref(rule->kif);
3161 		} else
3162 			rule->kif = NULL;
3163 		if (rule->bridge_to_name[0]) {
3164 			rule->bridge_to = pfi_kkif_attach(bridge_to_kif,
3165 			    rule->bridge_to_name);
3166 			pfi_kkif_ref(rule->bridge_to);
3167 		} else
3168 			rule->bridge_to = NULL;
3169 
3170 #ifdef ALTQ
3171 		/* set queue IDs */
3172 		if (rule->qname[0] != 0) {
3173 			if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
3174 				error = EBUSY;
3177 		}
3178 #endif
3179 		if (rule->tagname[0])
3180 			if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
3181 				error = EBUSY;
3182 		if (rule->match_tagname[0])
3183 			if ((rule->match_tag = pf_tagname2tag(
3184 			    rule->match_tagname)) == 0)
3185 				error = EBUSY;
3186 
3187 		if (error == 0 && rule->ipdst.addr.type == PF_ADDR_TABLE)
3188 			error = pf_eth_addr_setup(ruleset, &rule->ipdst.addr);
3189 		if (error == 0 && rule->ipsrc.addr.type == PF_ADDR_TABLE)
3190 			error = pf_eth_addr_setup(ruleset, &rule->ipsrc.addr);
3191 
3192 		if (error) {
3193 			pf_free_eth_rule(rule);
3194 			PF_RULES_WUNLOCK();
3195 			ERROUT(error);
3196 		}
3197 
3198 		if (pf_keth_anchor_setup(rule, ruleset, anchor_call)) {
3199 			pf_free_eth_rule(rule);
3200 			PF_RULES_WUNLOCK();
3201 			ERROUT(EINVAL);
3202 		}
3203 
3204 		tail = TAILQ_LAST(ruleset->inactive.rules, pf_keth_ruleq);
3205 		if (tail)
3206 			rule->nr = tail->nr + 1;
3207 		else
3208 			rule->nr = 0;
3209 
3210 		TAILQ_INSERT_TAIL(ruleset->inactive.rules, rule, entries);
3211 
3212 		PF_RULES_WUNLOCK();
3213 
3214 #undef ERROUT
3215 DIOCADDETHRULE_error:
3216 		nvlist_destroy(nvl);
3217 		free(nvlpacked, M_NVLIST);
3218 		break;
3219 	}
3220 
3221 	case DIOCGETETHRULESETS: {
3222 		struct epoch_tracker	 et;
3223 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3224 		nvlist_t		*nvl = NULL;
3225 		void			*nvlpacked = NULL;
3226 		struct pf_keth_ruleset	*ruleset;
3227 		struct pf_keth_anchor	*anchor;
3228 		int			 nr = 0;
3229 
3230 #define ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULESETS_error, x)
3231 
3232 		if (nv->len > pf_ioctl_maxcount)
3233 			ERROUT(ENOMEM);
3234 
3235 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3236 		error = copyin(nv->data, nvlpacked, nv->len);
3237 		if (error)
3238 			ERROUT(error);
3239 
3240 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3241 		if (nvl == NULL)
3242 			ERROUT(EBADMSG);
3243 		if (! nvlist_exists_string(nvl, "path"))
3244 			ERROUT(EBADMSG);
3245 
3246 		NET_EPOCH_ENTER(et);
3247 
3248 		if ((ruleset = pf_find_keth_ruleset(
3249 		    nvlist_get_string(nvl, "path"))) == NULL) {
3250 			NET_EPOCH_EXIT(et);
3251 			ERROUT(ENOENT);
3252 		}
3253 
3254 		if (ruleset->anchor == NULL) {
3255 			RB_FOREACH(anchor, pf_keth_anchor_global, &V_pf_keth_anchors)
3256 				if (anchor->parent == NULL)
3257 					nr++;
3258 		} else {
3259 			RB_FOREACH(anchor, pf_keth_anchor_node,
3260 			    &ruleset->anchor->children)
3261 				nr++;
3262 		}
3263 
3264 		NET_EPOCH_EXIT(et);
3265 
3266 		nvlist_destroy(nvl);
3267 		nvl = NULL;
3268 		free(nvlpacked, M_NVLIST);
3269 		nvlpacked = NULL;
3270 
3271 		nvl = nvlist_create(0);
3272 		if (nvl == NULL)
3273 			ERROUT(ENOMEM);
3274 
3275 		nvlist_add_number(nvl, "nr", nr);
3276 
3277 		nvlpacked = nvlist_pack(nvl, &nv->len);
3278 		if (nvlpacked == NULL)
3279 			ERROUT(ENOMEM);
3280 
3281 		if (nv->size == 0)
3282 			ERROUT(0);
3283 		else if (nv->size < nv->len)
3284 			ERROUT(ENOSPC);
3285 
3286 		error = copyout(nvlpacked, nv->data, nv->len);
3287 
3288 #undef ERROUT
3289 DIOCGETETHRULESETS_error:
3290 		free(nvlpacked, M_NVLIST);
3291 		nvlist_destroy(nvl);
3292 		break;
3293 	}
3294 
3295 	case DIOCGETETHRULESET: {
3296 		struct epoch_tracker	 et;
3297 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3298 		nvlist_t		*nvl = NULL;
3299 		void			*nvlpacked = NULL;
3300 		struct pf_keth_ruleset	*ruleset;
3301 		struct pf_keth_anchor	*anchor;
3302 		int			 nr = 0, req_nr = 0;
3303 		bool			 found = false;
3304 
3305 #define ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULESET_error, x)
3306 
3307 		if (nv->len > pf_ioctl_maxcount)
3308 			ERROUT(ENOMEM);
3309 
3310 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3311 		error = copyin(nv->data, nvlpacked, nv->len);
3312 		if (error)
3313 			ERROUT(error);
3314 
3315 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3316 		if (nvl == NULL)
3317 			ERROUT(EBADMSG);
3318 		if (! nvlist_exists_string(nvl, "path"))
3319 			ERROUT(EBADMSG);
3320 		if (! nvlist_exists_number(nvl, "nr"))
3321 			ERROUT(EBADMSG);
3322 
3323 		req_nr = nvlist_get_number(nvl, "nr");
3324 
3325 		NET_EPOCH_ENTER(et);
3326 
3327 		if ((ruleset = pf_find_keth_ruleset(
3328 		    nvlist_get_string(nvl, "path"))) == NULL) {
3329 			NET_EPOCH_EXIT(et);
3330 			ERROUT(ENOENT);
3331 		}
3332 
3333 		nvlist_destroy(nvl);
3334 		nvl = NULL;
3335 		free(nvlpacked, M_NVLIST);
3336 		nvlpacked = NULL;
3337 
3338 		nvl = nvlist_create(0);
3339 		if (nvl == NULL) {
3340 			NET_EPOCH_EXIT(et);
3341 			ERROUT(ENOMEM);
3342 		}
3343 
3344 		if (ruleset->anchor == NULL) {
3345 			RB_FOREACH(anchor, pf_keth_anchor_global,
3346 			    &V_pf_keth_anchors) {
3347 				if (anchor->parent == NULL && nr++ == req_nr) {
3348 					found = true;
3349 					break;
3350 				}
3351 			}
3352 		} else {
3353 			RB_FOREACH(anchor, pf_keth_anchor_node,
3354 			     &ruleset->anchor->children) {
3355 				if (nr++ == req_nr) {
3356 					found = true;
3357 					break;
3358 				}
3359 			}
3360 		}
3361 
3362 		NET_EPOCH_EXIT(et);
3363 		if (found) {
3364 			nvlist_add_number(nvl, "nr", nr);
3365 			nvlist_add_string(nvl, "name", anchor->name);
3366 			if (ruleset->anchor)
3367 				nvlist_add_string(nvl, "path",
3368 				    ruleset->anchor->path);
3369 			else
3370 				nvlist_add_string(nvl, "path", "");
3371 		} else {
3372 			ERROUT(EBUSY);
3373 		}
3374 
3375 		nvlpacked = nvlist_pack(nvl, &nv->len);
3376 		if (nvlpacked == NULL)
3377 			ERROUT(ENOMEM);
3378 
3379 		if (nv->size == 0)
3380 			ERROUT(0);
3381 		else if (nv->size < nv->len)
3382 			ERROUT(ENOSPC);
3383 
3384 		error = copyout(nvlpacked, nv->data, nv->len);
3385 
3386 #undef ERROUT
3387 DIOCGETETHRULESET_error:
3388 		free(nvlpacked, M_NVLIST);
3389 		nvlist_destroy(nvl);
3390 		break;
3391 	}
3392 
3393 	case DIOCADDRULENV: {
3394 		struct pfioc_nv	*nv = (struct pfioc_nv *)addr;
3395 		nvlist_t	*nvl = NULL;
3396 		void		*nvlpacked = NULL;
3397 		struct pf_krule	*rule = NULL;
3398 		const char	*anchor = "", *anchor_call = "";
3399 		uint32_t	 ticket = 0, pool_ticket = 0;
3400 
3401 #define	ERROUT(x)	ERROUT_IOCTL(DIOCADDRULENV_error, x)
3402 
3403 		if (nv->len > pf_ioctl_maxcount)
3404 			ERROUT(ENOMEM);
3405 
3406 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3407 		error = copyin(nv->data, nvlpacked, nv->len);
3408 		if (error)
3409 			ERROUT(error);
3410 
3411 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3412 		if (nvl == NULL)
3413 			ERROUT(EBADMSG);
3414 
3415 		if (! nvlist_exists_number(nvl, "ticket"))
3416 			ERROUT(EINVAL);
3417 		ticket = nvlist_get_number(nvl, "ticket");
3418 
3419 		if (! nvlist_exists_number(nvl, "pool_ticket"))
3420 			ERROUT(EINVAL);
3421 		pool_ticket = nvlist_get_number(nvl, "pool_ticket");
3422 
3423 		if (! nvlist_exists_nvlist(nvl, "rule"))
3424 			ERROUT(EINVAL);
3425 
3426 		rule = pf_krule_alloc();
3427 		error = pf_nvrule_to_krule(nvlist_get_nvlist(nvl, "rule"),
3428 		    rule);
3429 		if (error)
3430 			ERROUT(error);
3431 
3432 		if (nvlist_exists_string(nvl, "anchor"))
3433 			anchor = nvlist_get_string(nvl, "anchor");
3434 		if (nvlist_exists_string(nvl, "anchor_call"))
3435 			anchor_call = nvlist_get_string(nvl, "anchor_call");
3436 
3437 		if ((error = nvlist_error(nvl)))
3438 			ERROUT(error);
3439 
3440 		/* Frees rule on error */
3441 		error = pf_ioctl_addrule(rule, ticket, pool_ticket, anchor,
3442 		    anchor_call, td->td_ucred->cr_ruid,
3443 		    td->td_proc ? td->td_proc->p_pid : 0);
3444 
3445 		nvlist_destroy(nvl);
3446 		free(nvlpacked, M_NVLIST);
3447 		break;
3448 #undef ERROUT
3449 DIOCADDRULENV_error:
3450 		pf_krule_free(rule);
3451 		nvlist_destroy(nvl);
3452 		free(nvlpacked, M_NVLIST);
3453 
3454 		break;
3455 	}
3456 	case DIOCADDRULE: {
3457 		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
3458 		struct pf_krule		*rule;
3459 
3460 		rule = pf_krule_alloc();
3461 		error = pf_rule_to_krule(&pr->rule, rule);
3462 		if (error != 0) {
3463 			pf_krule_free(rule);
3464 			break;
3465 		}
3466 
3467 		pr->anchor[sizeof(pr->anchor) - 1] = 0;
3468 
3469 		/* Frees rule on error */
3470 		error = pf_ioctl_addrule(rule, pr->ticket, pr->pool_ticket,
3471 		    pr->anchor, pr->anchor_call, td->td_ucred->cr_ruid,
3472 		    td->td_proc ? td->td_proc->p_pid : 0);
3473 		break;
3474 	}
3475 
3476 	case DIOCGETRULES: {
3477 		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
3478 
3479 		pr->anchor[sizeof(pr->anchor) - 1] = 0;
3480 
3481 		error = pf_ioctl_getrules(pr);
3482 
3483 		break;
3484 	}
3485 
3486 	case DIOCGETRULENV: {
3487 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3488 		nvlist_t		*nvrule = NULL;
3489 		nvlist_t		*nvl = NULL;
3490 		struct pf_kruleset	*ruleset;
3491 		struct pf_krule		*rule;
3492 		void			*nvlpacked = NULL;
3493 		int			 rs_num, nr;
3494 		bool			 clear_counter = false;
3495 
3496 #define	ERROUT(x)	ERROUT_IOCTL(DIOCGETRULENV_error, x)
3497 
3498 		if (nv->len > pf_ioctl_maxcount)
3499 			ERROUT(ENOMEM);
3500 
3501 		/* Copy the request in */
3502 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3503 		error = copyin(nv->data, nvlpacked, nv->len);
3504 		if (error)
3505 			ERROUT(error);
3506 
3507 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3508 		if (nvl == NULL)
3509 			ERROUT(EBADMSG);
3510 
3511 		if (! nvlist_exists_string(nvl, "anchor"))
3512 			ERROUT(EBADMSG);
3513 		if (! nvlist_exists_number(nvl, "ruleset"))
3514 			ERROUT(EBADMSG);
3515 		if (! nvlist_exists_number(nvl, "ticket"))
3516 			ERROUT(EBADMSG);
3517 		if (! nvlist_exists_number(nvl, "nr"))
3518 			ERROUT(EBADMSG);
3519 
3520 		if (nvlist_exists_bool(nvl, "clear_counter"))
3521 			clear_counter = nvlist_get_bool(nvl, "clear_counter");
3522 
3523 		if (clear_counter && !(flags & FWRITE))
3524 			ERROUT(EACCES);
3525 
3526 		nr = nvlist_get_number(nvl, "nr");
3527 
3528 		PF_RULES_WLOCK();
3529 		ruleset = pf_find_kruleset(nvlist_get_string(nvl, "anchor"));
3530 		if (ruleset == NULL) {
3531 			PF_RULES_WUNLOCK();
3532 			ERROUT(ENOENT);
3533 		}
3534 
3535 		rs_num = pf_get_ruleset_number(nvlist_get_number(nvl, "ruleset"));
3536 		if (rs_num >= PF_RULESET_MAX) {
3537 			PF_RULES_WUNLOCK();
3538 			ERROUT(EINVAL);
3539 		}
3540 
3541 		if (nvlist_get_number(nvl, "ticket") !=
3542 		    ruleset->rules[rs_num].active.ticket) {
3543 			PF_RULES_WUNLOCK();
3544 			ERROUT(EBUSY);
3545 		}
3546 
3547 		if ((error = nvlist_error(nvl))) {
3548 			PF_RULES_WUNLOCK();
3549 			ERROUT(error);
3550 		}
3551 
3552 		rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
3553 		while ((rule != NULL) && (rule->nr != nr))
3554 			rule = TAILQ_NEXT(rule, entries);
3555 		if (rule == NULL) {
3556 			PF_RULES_WUNLOCK();
3557 			ERROUT(EBUSY);
3558 		}
3559 
3560 		nvrule = pf_krule_to_nvrule(rule);
3561 
3562 		nvlist_destroy(nvl);
3563 		nvl = nvlist_create(0);
3564 		if (nvl == NULL) {
3565 			PF_RULES_WUNLOCK();
3566 			ERROUT(ENOMEM);
3567 		}
3568 		nvlist_add_number(nvl, "nr", nr);
3569 		nvlist_add_nvlist(nvl, "rule", nvrule);
3570 		nvlist_destroy(nvrule);
3571 		nvrule = NULL;
3572 		if (pf_kanchor_nvcopyout(ruleset, rule, nvl)) {
3573 			PF_RULES_WUNLOCK();
3574 			ERROUT(EBUSY);
3575 		}
3576 
3577 		free(nvlpacked, M_NVLIST);
3578 		nvlpacked = nvlist_pack(nvl, &nv->len);
3579 		if (nvlpacked == NULL) {
3580 			PF_RULES_WUNLOCK();
3581 			ERROUT(ENOMEM);
3582 		}
3583 
3584 		if (nv->size == 0) {
3585 			PF_RULES_WUNLOCK();
3586 			ERROUT(0);
3587 		} else if (nv->size < nv->len) {
3589 			PF_RULES_WUNLOCK();
3590 			ERROUT(ENOSPC);
3591 		}
3592 
3593 		if (clear_counter)
3594 			pf_krule_clear_counters(rule);
3595 
3596 		PF_RULES_WUNLOCK();
3597 
3598 		error = copyout(nvlpacked, nv->data, nv->len);
3599 
3600 #undef ERROUT
3601 DIOCGETRULENV_error:
3602 		free(nvlpacked, M_NVLIST);
3603 		nvlist_destroy(nvrule);
3604 		nvlist_destroy(nvl);
3605 
3606 		break;
3607 	}
3608 
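	/*
	 * DIOCCHANGERULE edits the active ruleset in place: depending on
	 * pcr->action a new rule is inserted at the head or tail, before or
	 * after an existing rule, or an existing rule is removed.  Unlike
	 * DIOCADDRULE this bypasses the inactive-ruleset transaction, hence
	 * the ticket handling and on-demand rule tree allocation described
	 * in the XXXMJG comment further down.
	 */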
3609 	case DIOCCHANGERULE: {
3610 		struct pfioc_rule	*pcr = (struct pfioc_rule *)addr;
3611 		struct pf_kruleset	*ruleset;
3612 		struct pf_krule		*oldrule = NULL, *newrule = NULL;
3613 		struct pfi_kkif		*kif = NULL;
3614 		struct pf_kpooladdr	*pa;
3615 		u_int32_t		 nr = 0;
3616 		int			 rs_num;
3617 
3618 		pcr->anchor[sizeof(pcr->anchor) - 1] = 0;
3619 
3620 		if (pcr->action < PF_CHANGE_ADD_HEAD ||
3621 		    pcr->action > PF_CHANGE_GET_TICKET) {
3622 			error = EINVAL;
3623 			break;
3624 		}
3625 		if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
3626 			error = EINVAL;
3627 			break;
3628 		}
3629 
3630 		if (pcr->action != PF_CHANGE_REMOVE) {
3631 			newrule = pf_krule_alloc();
3632 			error = pf_rule_to_krule(&pcr->rule, newrule);
3633 			if (error != 0) {
3634 				pf_krule_free(newrule);
3635 				break;
3636 			}
3637 
3638 			if (newrule->ifname[0])
3639 				kif = pf_kkif_create(M_WAITOK);
3640 			pf_counter_u64_init(&newrule->evaluations, M_WAITOK);
3641 			for (int i = 0; i < 2; i++) {
3642 				pf_counter_u64_init(&newrule->packets[i], M_WAITOK);
3643 				pf_counter_u64_init(&newrule->bytes[i], M_WAITOK);
3644 			}
3645 			newrule->states_cur = counter_u64_alloc(M_WAITOK);
3646 			newrule->states_tot = counter_u64_alloc(M_WAITOK);
3647 			newrule->src_nodes = counter_u64_alloc(M_WAITOK);
3648 			newrule->cuid = td->td_ucred->cr_ruid;
3649 			newrule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
3650 			TAILQ_INIT(&newrule->nat.list);
3651 			TAILQ_INIT(&newrule->rdr.list);
3652 		}
3653 #define	ERROUT(x)	ERROUT_IOCTL(DIOCCHANGERULE_error, x)
3654 
3655 		PF_CONFIG_LOCK();
3656 		PF_RULES_WLOCK();
3657 #ifdef PF_WANT_32_TO_64_COUNTER
3658 		if (newrule != NULL) {
3659 			LIST_INSERT_HEAD(&V_pf_allrulelist, newrule, allrulelist);
3660 			newrule->allrulelinked = true;
3661 			V_pf_allrulecount++;
3662 		}
3663 #endif
3664 
3665 		if (!(pcr->action == PF_CHANGE_REMOVE ||
3666 		    pcr->action == PF_CHANGE_GET_TICKET) &&
3667 		    pcr->pool_ticket != V_ticket_pabuf)
3668 			ERROUT(EBUSY);
3669 
3670 		ruleset = pf_find_kruleset(pcr->anchor);
3671 		if (ruleset == NULL)
3672 			ERROUT(EINVAL);
3673 
3674 		rs_num = pf_get_ruleset_number(pcr->rule.action);
3675 		if (rs_num >= PF_RULESET_MAX)
3676 			ERROUT(EINVAL);
3677 
3678 		/*
3679 		 * XXXMJG: there is no guarantee that the ruleset was
3680 		 * created by the usual route of calling DIOCXBEGIN.
3681 		 * As a result it is possible the rule tree will not
3682 		 * be allocated yet. Hack around it by allocating the tree here.
3683 		 * Note it is fine to let the tree persist in case of
3684 		 * error as it will be freed down the road on future
3685 		 * updates (if need be).
3686 		 */
3687 		if (ruleset->rules[rs_num].active.tree == NULL) {
3688 			ruleset->rules[rs_num].active.tree = pf_rule_tree_alloc(M_NOWAIT);
3689 			if (ruleset->rules[rs_num].active.tree == NULL) {
3690 				ERROUT(ENOMEM);
3691 			}
3692 		}
3693 
3694 		if (pcr->action == PF_CHANGE_GET_TICKET) {
3695 			pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
3696 			ERROUT(0);
3697 		} else if (pcr->ticket !=
3698 			    ruleset->rules[rs_num].active.ticket)
3699 				ERROUT(EINVAL);
3700 
3701 		if (pcr->action != PF_CHANGE_REMOVE) {
3702 			if (newrule->ifname[0]) {
3703 				newrule->kif = pfi_kkif_attach(kif,
3704 				    newrule->ifname);
3705 				kif = NULL;
3706 				pfi_kkif_ref(newrule->kif);
3707 			} else
3708 				newrule->kif = NULL;
3709 
3710 			if (newrule->rtableid > 0 &&
3711 			    newrule->rtableid >= rt_numfibs)
3712 				error = EBUSY;
3713 
3714 #ifdef ALTQ
3715 			/* set queue IDs */
3716 			if (newrule->qname[0] != 0) {
3717 				if ((newrule->qid =
3718 				    pf_qname2qid(newrule->qname)) == 0)
3719 					error = EBUSY;
3720 				else if (newrule->pqname[0] != 0) {
3721 					if ((newrule->pqid =
3722 					    pf_qname2qid(newrule->pqname)) == 0)
3723 						error = EBUSY;
3724 				} else
3725 					newrule->pqid = newrule->qid;
3726 			}
3727 #endif /* ALTQ */
3728 			if (newrule->tagname[0])
3729 				if ((newrule->tag =
3730 				    pf_tagname2tag(newrule->tagname)) == 0)
3731 					error = EBUSY;
3732 			if (newrule->match_tagname[0])
3733 				if ((newrule->match_tag = pf_tagname2tag(
3734 				    newrule->match_tagname)) == 0)
3735 					error = EBUSY;
3736 			if (newrule->rt && !newrule->direction)
3737 				error = EINVAL;
3738 			if (!newrule->log)
3739 				newrule->logif = 0;
3740 			if (newrule->logif >= PFLOGIFS_MAX)
3741 				error = EINVAL;
3742 			if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af))
3743 				error = ENOMEM;
3744 			if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af))
3745 				error = ENOMEM;
3746 			if (pf_kanchor_setup(newrule, ruleset, pcr->anchor_call))
3747 				error = EINVAL;
3748 			for (int i = 0; i < 2; i++) {
3749 				TAILQ_FOREACH(pa, &V_pf_pabuf[i], entries)
3750 					if (pa->addr.type == PF_ADDR_TABLE) {
3751 						pa->addr.p.tbl =
3752 						    pfr_attach_table(ruleset,
3753 						    pa->addr.v.tblname);
3754 						if (pa->addr.p.tbl == NULL)
3755 							error = ENOMEM;
3756 					}
3757 			}
3758 
3759 			newrule->overload_tbl = NULL;
3760 			if (newrule->overload_tblname[0]) {
3761 				if ((newrule->overload_tbl = pfr_attach_table(
3762 				    ruleset, newrule->overload_tblname)) ==
3763 				    NULL)
3764 					error = EINVAL;
3765 				else
3766 					newrule->overload_tbl->pfrkt_flags |=
3767 					    PFR_TFLAG_ACTIVE;
3768 			}
3769 
3770 			pf_mv_kpool(&V_pf_pabuf[0], &newrule->nat.list);
3771 			pf_mv_kpool(&V_pf_pabuf[1], &newrule->rdr.list);
3772 			if (((((newrule->action == PF_NAT) ||
3773 			    (newrule->action == PF_RDR) ||
3774 			    (newrule->action == PF_BINAT) ||
3775 			    (newrule->rt > PF_NOPFROUTE)) &&
3776 			    !newrule->anchor)) &&
3777 			    (TAILQ_FIRST(&newrule->rdr.list) == NULL))
3778 				error = EINVAL;
3779 
3780 			if (error) {
3781 				pf_free_rule(newrule);
3782 				PF_RULES_WUNLOCK();
3783 				PF_CONFIG_UNLOCK();
3784 				break;
3785 			}
3786 
3787 			newrule->nat.cur = TAILQ_FIRST(&newrule->nat.list);
3788 			newrule->rdr.cur = TAILQ_FIRST(&newrule->rdr.list);
3789 		}
3790 		pf_empty_kpool(&V_pf_pabuf[0]);
3791 		pf_empty_kpool(&V_pf_pabuf[1]);
3792 
3793 		if (pcr->action == PF_CHANGE_ADD_HEAD)
3794 			oldrule = TAILQ_FIRST(
3795 			    ruleset->rules[rs_num].active.ptr);
3796 		else if (pcr->action == PF_CHANGE_ADD_TAIL)
3797 			oldrule = TAILQ_LAST(
3798 			    ruleset->rules[rs_num].active.ptr, pf_krulequeue);
3799 		else {
3800 			oldrule = TAILQ_FIRST(
3801 			    ruleset->rules[rs_num].active.ptr);
3802 			while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
3803 				oldrule = TAILQ_NEXT(oldrule, entries);
3804 			if (oldrule == NULL) {
3805 				if (newrule != NULL)
3806 					pf_free_rule(newrule);
3807 				PF_RULES_WUNLOCK();
3808 				PF_CONFIG_UNLOCK();
3809 				error = EINVAL;
3810 				break;
3811 			}
3812 		}
3813 
3814 		if (pcr->action == PF_CHANGE_REMOVE) {
3815 			pf_unlink_rule(ruleset->rules[rs_num].active.ptr,
3816 			    oldrule);
3817 			RB_REMOVE(pf_krule_global,
3818 			    ruleset->rules[rs_num].active.tree, oldrule);
3819 			ruleset->rules[rs_num].active.rcount--;
3820 		} else {
3821 			pf_hash_rule(newrule);
3822 			if (RB_INSERT(pf_krule_global,
3823 			    ruleset->rules[rs_num].active.tree, newrule) != NULL) {
3824 				pf_free_rule(newrule);
3825 				PF_RULES_WUNLOCK();
3826 				PF_CONFIG_UNLOCK();
3827 				error = EEXIST;
3828 				break;
3829 			}
3830 
3831 			if (oldrule == NULL)
3832 				TAILQ_INSERT_TAIL(
3833 				    ruleset->rules[rs_num].active.ptr,
3834 				    newrule, entries);
3835 			else if (pcr->action == PF_CHANGE_ADD_HEAD ||
3836 			    pcr->action == PF_CHANGE_ADD_BEFORE)
3837 				TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
3838 			else
3839 				TAILQ_INSERT_AFTER(
3840 				    ruleset->rules[rs_num].active.ptr,
3841 				    oldrule, newrule, entries);
3842 			ruleset->rules[rs_num].active.rcount++;
3843 		}
3844 
3845 		nr = 0;
3846 		TAILQ_FOREACH(oldrule,
3847 		    ruleset->rules[rs_num].active.ptr, entries)
3848 			oldrule->nr = nr++;
3849 
3850 		ruleset->rules[rs_num].active.ticket++;
3851 
3852 		pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
3853 		pf_remove_if_empty_kruleset(ruleset);
3854 
3855 		PF_RULES_WUNLOCK();
3856 		PF_CONFIG_UNLOCK();
3857 		break;
3858 
3859 #undef ERROUT
3860 DIOCCHANGERULE_error:
3861 		PF_RULES_WUNLOCK();
3862 		PF_CONFIG_UNLOCK();
3863 		pf_krule_free(newrule);
3864 		pf_kkif_free(kif);
3865 		break;
3866 	}
3867 
3868 	case DIOCCLRSTATESNV: {
3869 		error = pf_clearstates_nv((struct pfioc_nv *)addr);
3870 		break;
3871 	}
3872 
3873 	case DIOCKILLSTATESNV: {
3874 		error = pf_killstates_nv((struct pfioc_nv *)addr);
3875 		break;
3876 	}
3877 
3878 	case DIOCADDSTATE: {
3879 		struct pfioc_state		*ps = (struct pfioc_state *)addr;
3880 		struct pfsync_state_1301	*sp = &ps->state;
3881 
3882 		if (sp->timeout >= PFTM_MAX) {
3883 			error = EINVAL;
3884 			break;
3885 		}
3886 		if (V_pfsync_state_import_ptr != NULL) {
3887 			PF_RULES_RLOCK();
3888 			error = V_pfsync_state_import_ptr(
3889 			    (union pfsync_state_union *)sp, PFSYNC_SI_IOCTL,
3890 			    PFSYNC_MSG_VERSION_1301);
3891 			PF_RULES_RUNLOCK();
3892 		} else
3893 			error = EOPNOTSUPP;
3894 		break;
3895 	}
3896 
3897 	case DIOCGETSTATE: {
3898 		struct pfioc_state	*ps = (struct pfioc_state *)addr;
3899 		struct pf_kstate	*s;
3900 
3901 		s = pf_find_state_byid(ps->state.id, ps->state.creatorid);
3902 		if (s == NULL) {
3903 			error = ENOENT;
3904 			break;
3905 		}
3906 
3907 		pfsync_state_export((union pfsync_state_union*)&ps->state,
3908 		    s, PFSYNC_MSG_VERSION_1301);
3909 		PF_STATE_UNLOCK(s);
3910 		break;
3911 	}
3912 
3913 	case DIOCGETSTATENV: {
3914 		error = pf_getstate((struct pfioc_nv *)addr);
3915 		break;
3916 	}
3917 
3918 #ifdef COMPAT_FREEBSD14
3919 	case DIOCGETSTATES: {
3920 		struct pfioc_states	*ps = (struct pfioc_states *)addr;
3921 		struct pf_kstate	*s;
3922 		struct pfsync_state_1301	*pstore, *p;
3923 		int			 i, nr;
3924 		size_t			 slice_count = 16, count;
3925 		void			*out;
3926 
3927 		if (ps->ps_len <= 0) {
3928 			nr = uma_zone_get_cur(V_pf_state_z);
3929 			ps->ps_len = sizeof(struct pfsync_state_1301) * nr;
3930 			break;
3931 		}
3932 
3933 		out = ps->ps_states;
3934 		pstore = mallocarray(slice_count,
3935 		    sizeof(struct pfsync_state_1301), M_TEMP, M_WAITOK | M_ZERO);
3936 		nr = 0;
3937 
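		/*
		 * Walk every ID-hash row: count the linked states under the
		 * row lock, restart the row with a doubled slice buffer if it
		 * no longer fits, then export the slice and copy it out to
		 * userspace, stopping early once the caller's buffer is full.
		 */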
3938 		for (i = 0; i <= V_pf_hashmask; i++) {
3939 			struct pf_idhash *ih = &V_pf_idhash[i];
3940 
3941 DIOCGETSTATES_retry:
3942 			p = pstore;
3943 
3944 			if (LIST_EMPTY(&ih->states))
3945 				continue;
3946 
3947 			PF_HASHROW_LOCK(ih);
3948 			count = 0;
3949 			LIST_FOREACH(s, &ih->states, entry) {
3950 				if (s->timeout == PFTM_UNLINKED)
3951 					continue;
3952 				count++;
3953 			}
3954 
3955 			if (count > slice_count) {
3956 				PF_HASHROW_UNLOCK(ih);
3957 				free(pstore, M_TEMP);
3958 				slice_count = count * 2;
3959 				pstore = mallocarray(slice_count,
3960 				    sizeof(struct pfsync_state_1301), M_TEMP,
3961 				    M_WAITOK | M_ZERO);
3962 				goto DIOCGETSTATES_retry;
3963 			}
3964 
3965 			if ((nr+count) * sizeof(*p) > ps->ps_len) {
3966 				PF_HASHROW_UNLOCK(ih);
3967 				goto DIOCGETSTATES_full;
3968 			}
3969 
3970 			LIST_FOREACH(s, &ih->states, entry) {
3971 				if (s->timeout == PFTM_UNLINKED)
3972 					continue;
3973 
3974 				pfsync_state_export((union pfsync_state_union*)p,
3975 				    s, PFSYNC_MSG_VERSION_1301);
3976 				p++;
3977 				nr++;
3978 			}
3979 			PF_HASHROW_UNLOCK(ih);
3980 			error = copyout(pstore, out,
3981 			    sizeof(struct pfsync_state_1301) * count);
3982 			if (error)
3983 				break;
3984 			out = ps->ps_states + nr;
3985 		}
3986 DIOCGETSTATES_full:
3987 		ps->ps_len = sizeof(struct pfsync_state_1301) * nr;
3988 		free(pstore, M_TEMP);
3989 
3990 		break;
3991 	}
3992 
3993 	case DIOCGETSTATESV2: {
3994 		struct pfioc_states_v2	*ps = (struct pfioc_states_v2 *)addr;
3995 		struct pf_kstate	*s;
3996 		struct pf_state_export	*pstore, *p;
3997 		int i, nr;
3998 		size_t slice_count = 16, count;
3999 		void *out;
4000 
4001 		if (ps->ps_req_version > PF_STATE_VERSION) {
4002 			error = ENOTSUP;
4003 			break;
4004 		}
4005 
4006 		if (ps->ps_len <= 0) {
4007 			nr = uma_zone_get_cur(V_pf_state_z);
4008 			ps->ps_len = sizeof(struct pf_state_export) * nr;
4009 			break;
4010 		}
4011 
4012 		out = ps->ps_states;
4013 		pstore = mallocarray(slice_count,
4014 		    sizeof(struct pf_state_export), M_TEMP, M_WAITOK | M_ZERO);
4015 		nr = 0;
4016 
4017 		for (i = 0; i <= V_pf_hashmask; i++) {
4018 			struct pf_idhash *ih = &V_pf_idhash[i];
4019 
4020 DIOCGETSTATESV2_retry:
4021 			p = pstore;
4022 
4023 			if (LIST_EMPTY(&ih->states))
4024 				continue;
4025 
4026 			PF_HASHROW_LOCK(ih);
4027 			count = 0;
4028 			LIST_FOREACH(s, &ih->states, entry) {
4029 				if (s->timeout == PFTM_UNLINKED)
4030 					continue;
4031 				count++;
4032 			}
4033 
4034 			if (count > slice_count) {
4035 				PF_HASHROW_UNLOCK(ih);
4036 				free(pstore, M_TEMP);
4037 				slice_count = count * 2;
4038 				pstore = mallocarray(slice_count,
4039 				    sizeof(struct pf_state_export), M_TEMP,
4040 				    M_WAITOK | M_ZERO);
4041 				goto DIOCGETSTATESV2_retry;
4042 			}
4043 
4044 			if ((nr+count) * sizeof(*p) > ps->ps_len) {
4045 				PF_HASHROW_UNLOCK(ih);
4046 				goto DIOCGETSTATESV2_full;
4047 			}
4048 
4049 			LIST_FOREACH(s, &ih->states, entry) {
4050 				if (s->timeout == PFTM_UNLINKED)
4051 					continue;
4052 
4053 				pf_state_export(p, s);
4054 				p++;
4055 				nr++;
4056 			}
4057 			PF_HASHROW_UNLOCK(ih);
4058 			error = copyout(pstore, out,
4059 			    sizeof(struct pf_state_export) * count);
4060 			if (error)
4061 				break;
4062 			out = ps->ps_states + nr;
4063 		}
4064 DIOCGETSTATESV2_full:
4065 		ps->ps_len = nr * sizeof(struct pf_state_export);
4066 		free(pstore, M_TEMP);
4067 
4068 		break;
4069 	}
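	/*
	 * A sketch of the usual two-call pattern for DIOCGETSTATESV2 from
	 * userland (illustrative only; the open /dev/pf descriptor "dev" and
	 * the omitted error handling are assumptions, not part of this file):
	 *
	 *	struct pfioc_states_v2 ps;
	 *
	 *	memset(&ps, 0, sizeof(ps));
	 *	ps.ps_req_version = PF_STATE_VERSION;
	 *	ioctl(dev, DIOCGETSTATESV2, &ps);	// ps_len == 0: size probe
	 *	ps.ps_states = malloc(ps.ps_len);
	 *	ioctl(dev, DIOCGETSTATESV2, &ps);	// fills the buffer
	 *	// ps.ps_len now holds the number of bytes actually exported
	 *
	 * States created between the two calls can outgrow the probed size,
	 * in which case the reply is truncated to the supplied buffer.
	 */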
4070 #endif
4071 	case DIOCGETSTATUSNV: {
4072 		error = pf_getstatus((struct pfioc_nv *)addr);
4073 		break;
4074 	}
4075 
4076 	case DIOCSETSTATUSIF: {
4077 		struct pfioc_if	*pi = (struct pfioc_if *)addr;
4078 
4079 		if (pi->ifname[0] == 0) {
4080 			bzero(V_pf_status.ifname, IFNAMSIZ);
4081 			break;
4082 		}
4083 		PF_RULES_WLOCK();
4084 		error = pf_user_strcpy(V_pf_status.ifname, pi->ifname, IFNAMSIZ);
4085 		PF_RULES_WUNLOCK();
4086 		break;
4087 	}
4088 
4089 	case DIOCCLRSTATUS: {
4090 		pf_ioctl_clear_status();
4091 		break;
4092 	}
4093 
4094 	case DIOCNATLOOK: {
4095 		struct pfioc_natlook	*pnl = (struct pfioc_natlook *)addr;
4096 		struct pf_state_key	*sk;
4097 		struct pf_kstate	*state;
4098 		struct pf_state_key_cmp	 key;
4099 		int			 m = 0, direction = pnl->direction;
4100 		int			 sidx, didx;
4101 
4102 		/* NATLOOK src and dst are reversed, so reverse sidx/didx */
4103 		sidx = (direction == PF_IN) ? 1 : 0;
4104 		didx = (direction == PF_IN) ? 0 : 1;
4105 
4106 		if (!pnl->proto ||
4107 		    PF_AZERO(&pnl->saddr, pnl->af) ||
4108 		    PF_AZERO(&pnl->daddr, pnl->af) ||
4109 		    ((pnl->proto == IPPROTO_TCP ||
4110 		    pnl->proto == IPPROTO_UDP) &&
4111 		    (!pnl->dport || !pnl->sport)))
4112 			error = EINVAL;
4113 		else {
4114 			bzero(&key, sizeof(key));
4115 			key.af = pnl->af;
4116 			key.proto = pnl->proto;
4117 			PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af);
4118 			key.port[sidx] = pnl->sport;
4119 			PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af);
4120 			key.port[didx] = pnl->dport;
4121 
4122 			state = pf_find_state_all(&key, direction, &m);
4123 			if (state == NULL) {
4124 				error = ENOENT;
4125 			} else {
4126 				if (m > 1) {
4127 					PF_STATE_UNLOCK(state);
4128 					error = E2BIG;	/* more than one state */
4129 				} else {
4130 					sk = state->key[sidx];
4131 					PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af);
4132 					pnl->rsport = sk->port[sidx];
4133 					PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af);
4134 					pnl->rdport = sk->port[didx];
4135 					PF_STATE_UNLOCK(state);
4136 				}
4137 			}
4138 		}
4139 		break;
4140 	}
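	/*
	 * A minimal userland sketch of driving the lookup above (illustrative
	 * only; the open /dev/pf descriptor "dev" and the example endpoints
	 * are assumptions):
	 *
	 *	struct pfioc_natlook pnl;
	 *
	 *	memset(&pnl, 0, sizeof(pnl));
	 *	pnl.af = AF_INET;
	 *	pnl.proto = IPPROTO_TCP;
	 *	pnl.direction = PF_OUT;
	 *	inet_pton(AF_INET, "192.0.2.10", &pnl.saddr.v4);
	 *	inet_pton(AF_INET, "198.51.100.1", &pnl.daddr.v4);
	 *	pnl.sport = htons(54321);
	 *	pnl.dport = htons(80);
	 *	if (ioctl(dev, DIOCNATLOOK, &pnl) == -1)
	 *		err(1, "DIOCNATLOOK");
	 *	// pnl.rsaddr/rsport and pnl.rdaddr/rdport now hold the
	 *	// corresponding endpoints from the matching state key
	 */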
4141 
4142 	case DIOCSETTIMEOUT: {
4143 		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;
4144 
4145 		error = pf_ioctl_set_timeout(pt->timeout, pt->seconds,
4146 		    &pt->seconds);
4147 		break;
4148 	}
4149 
4150 	case DIOCGETTIMEOUT: {
4151 		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;
4152 
4153 		error = pf_ioctl_get_timeout(pt->timeout, &pt->seconds);
4154 		break;
4155 	}
4156 
4157 	case DIOCGETLIMIT: {
4158 		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;
4159 
4160 		error = pf_ioctl_get_limit(pl->index, &pl->limit);
4161 		break;
4162 	}
4163 
4164 	case DIOCSETLIMIT: {
4165 		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;
4166 		unsigned int old_limit;
4167 
4168 		error = pf_ioctl_set_limit(pl->index, pl->limit, &old_limit);
4169 		pl->limit = old_limit;
4170 		break;
4171 	}
4172 
4173 	case DIOCSETDEBUG: {
4174 		u_int32_t	*level = (u_int32_t *)addr;
4175 
4176 		PF_RULES_WLOCK();
4177 		V_pf_status.debug = *level;
4178 		PF_RULES_WUNLOCK();
4179 		break;
4180 	}
4181 
4182 	case DIOCCLRRULECTRS: {
4183 		/* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
4184 		struct pf_kruleset	*ruleset = &pf_main_ruleset;
4185 		struct pf_krule		*rule;
4186 
4187 		PF_RULES_WLOCK();
4188 		TAILQ_FOREACH(rule,
4189 		    ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
4190 			pf_counter_u64_zero(&rule->evaluations);
4191 			for (int i = 0; i < 2; i++) {
4192 				pf_counter_u64_zero(&rule->packets[i]);
4193 				pf_counter_u64_zero(&rule->bytes[i]);
4194 			}
4195 		}
4196 		PF_RULES_WUNLOCK();
4197 		break;
4198 	}
4199 
4200 	case DIOCGIFSPEEDV0:
4201 	case DIOCGIFSPEEDV1: {
4202 		struct pf_ifspeed_v1	*psp = (struct pf_ifspeed_v1 *)addr;
4203 		struct pf_ifspeed_v1	ps;
4204 		struct ifnet		*ifp;
4205 
4206 		if (psp->ifname[0] == '\0') {
4207 			error = EINVAL;
4208 			break;
4209 		}
4210 
4211 		error = pf_user_strcpy(ps.ifname, psp->ifname, IFNAMSIZ);
4212 		if (error != 0)
4213 			break;
4214 		ifp = ifunit(ps.ifname);
4215 		if (ifp != NULL) {
4216 			psp->baudrate32 =
4217 			    (u_int32_t)uqmin(ifp->if_baudrate, UINT_MAX);
4218 			if (cmd == DIOCGIFSPEEDV1)
4219 				psp->baudrate = ifp->if_baudrate;
4220 		} else {
4221 			error = EINVAL;
4222 		}
4223 		break;
4224 	}
4225 
4226 #ifdef ALTQ
4227 	case DIOCSTARTALTQ: {
4228 		struct pf_altq		*altq;
4229 
4230 		PF_RULES_WLOCK();
4231 		/* enable all altq interfaces on active list */
4232 		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
4233 			if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
4234 				error = pf_enable_altq(altq);
4235 				if (error != 0)
4236 					break;
4237 			}
4238 		}
4239 		if (error == 0)
4240 			V_pf_altq_running = 1;
4241 		PF_RULES_WUNLOCK();
4242 		DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
4243 		break;
4244 	}
4245 
4246 	case DIOCSTOPALTQ: {
4247 		struct pf_altq		*altq;
4248 
4249 		PF_RULES_WLOCK();
4250 		/* disable all altq interfaces on active list */
4251 		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
4252 			if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
4253 				error = pf_disable_altq(altq);
4254 				if (error != 0)
4255 					break;
4256 			}
4257 		}
4258 		if (error == 0)
4259 			V_pf_altq_running = 0;
4260 		PF_RULES_WUNLOCK();
4261 		DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
4262 		break;
4263 	}
4264 
4265 	case DIOCADDALTQV0:
4266 	case DIOCADDALTQV1: {
4267 		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
4268 		struct pf_altq		*altq, *a;
4269 		struct ifnet		*ifp;
4270 
4271 		altq = malloc(sizeof(*altq), M_PFALTQ, M_WAITOK | M_ZERO);
4272 		error = pf_import_kaltq(pa, altq, IOCPARM_LEN(cmd));
4273 		if (error)
4274 			break;
4275 		altq->local_flags = 0;
4276 
4277 		PF_RULES_WLOCK();
4278 		if (pa->ticket != V_ticket_altqs_inactive) {
4279 			PF_RULES_WUNLOCK();
4280 			free(altq, M_PFALTQ);
4281 			error = EBUSY;
4282 			break;
4283 		}
4284 
4285 		/*
4286 		 * if this is for a queue, find the discipline and
4287 		 * copy the necessary fields
4288 		 */
4289 		if (altq->qname[0] != 0) {
4290 			if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
4291 				PF_RULES_WUNLOCK();
4292 				error = EBUSY;
4293 				free(altq, M_PFALTQ);
4294 				break;
4295 			}
4296 			altq->altq_disc = NULL;
4297 			TAILQ_FOREACH(a, V_pf_altq_ifs_inactive, entries) {
4298 				if (strncmp(a->ifname, altq->ifname,
4299 				    IFNAMSIZ) == 0) {
4300 					altq->altq_disc = a->altq_disc;
4301 					break;
4302 				}
4303 			}
4304 		}
4305 
4306 		if ((ifp = ifunit(altq->ifname)) == NULL)
4307 			altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
4308 		else
4309 			error = altq_add(ifp, altq);
4310 
4311 		if (error) {
4312 			PF_RULES_WUNLOCK();
4313 			free(altq, M_PFALTQ);
4314 			break;
4315 		}
4316 
4317 		if (altq->qname[0] != 0)
4318 			TAILQ_INSERT_TAIL(V_pf_altqs_inactive, altq, entries);
4319 		else
4320 			TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, altq, entries);
4321 		/* version error check done on import above */
4322 		pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
4323 		PF_RULES_WUNLOCK();
4324 		break;
4325 	}
4326 
4327 	case DIOCGETALTQSV0:
4328 	case DIOCGETALTQSV1: {
4329 		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
4330 		struct pf_altq		*altq;
4331 
4332 		PF_RULES_RLOCK();
4333 		pa->nr = 0;
4334 		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries)
4335 			pa->nr++;
4336 		TAILQ_FOREACH(altq, V_pf_altqs_active, entries)
4337 			pa->nr++;
4338 		pa->ticket = V_ticket_altqs_active;
4339 		PF_RULES_RUNLOCK();
4340 		break;
4341 	}
4342 
4343 	case DIOCGETALTQV0:
4344 	case DIOCGETALTQV1: {
4345 		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
4346 		struct pf_altq		*altq;
4347 
4348 		PF_RULES_RLOCK();
4349 		if (pa->ticket != V_ticket_altqs_active) {
4350 			PF_RULES_RUNLOCK();
4351 			error = EBUSY;
4352 			break;
4353 		}
4354 		altq = pf_altq_get_nth_active(pa->nr);
4355 		if (altq == NULL) {
4356 			PF_RULES_RUNLOCK();
4357 			error = EBUSY;
4358 			break;
4359 		}
4360 		pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
4361 		PF_RULES_RUNLOCK();
4362 		break;
4363 	}
4364 
4365 	case DIOCCHANGEALTQV0:
4366 	case DIOCCHANGEALTQV1:
4367 		/* CHANGEALTQ not supported yet! */
4368 		error = ENODEV;
4369 		break;
4370 
4371 	case DIOCGETQSTATSV0:
4372 	case DIOCGETQSTATSV1: {
4373 		struct pfioc_qstats_v1	*pq = (struct pfioc_qstats_v1 *)addr;
4374 		struct pf_altq		*altq;
4375 		int			 nbytes;
4376 		u_int32_t		 version;
4377 
4378 		PF_RULES_RLOCK();
4379 		if (pq->ticket != V_ticket_altqs_active) {
4380 			PF_RULES_RUNLOCK();
4381 			error = EBUSY;
4382 			break;
4383 		}
4384 		nbytes = pq->nbytes;
4385 		altq = pf_altq_get_nth_active(pq->nr);
4386 		if (altq == NULL) {
4387 			PF_RULES_RUNLOCK();
4388 			error = EBUSY;
4389 			break;
4390 		}
4391 
4392 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) != 0) {
4393 			PF_RULES_RUNLOCK();
4394 			error = ENXIO;
4395 			break;
4396 		}
4397 		PF_RULES_RUNLOCK();
4398 		if (cmd == DIOCGETQSTATSV0)
4399 			version = 0;  /* DIOCGETQSTATSV0 means stats struct v0 */
4400 		else
4401 			version = pq->version;
4402 		error = altq_getqstats(altq, pq->buf, &nbytes, version);
4403 		if (error == 0) {
4404 			pq->scheduler = altq->scheduler;
4405 			pq->nbytes = nbytes;
4406 		}
4407 		break;
4408 	}
4409 #endif /* ALTQ */
4410 
4411 	case DIOCBEGINADDRS: {
4412 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4413 
4414 		error = pf_ioctl_begin_addrs(&pp->ticket);
4415 		break;
4416 	}
4417 
4418 	case DIOCADDADDR: {
4419 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4420 		struct pf_nl_pooladdr npp = {};
4421 
4422 		npp.which = PF_RDR;
4423 		memcpy(&npp, pp, sizeof(*pp));
4424 		error = pf_ioctl_add_addr(&npp);
4425 		break;
4426 	}
4427 
4428 	case DIOCGETADDRS: {
4429 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4430 		struct pf_nl_pooladdr npp = {};
4431 
4432 		npp.which = PF_RDR;
4433 		memcpy(&npp, pp, sizeof(*pp));
4434 		error = pf_ioctl_get_addrs(&npp);
4435 		memcpy(pp, &npp, sizeof(*pp));
4436 
4437 		break;
4438 	}
4439 
4440 	case DIOCGETADDR: {
4441 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4442 		struct pf_nl_pooladdr npp = {};
4443 
4444 		npp.which = PF_RDR;
4445 		memcpy(&npp, pp, sizeof(*pp));
4446 		error = pf_ioctl_get_addr(&npp);
4447 		memcpy(pp, &npp, sizeof(*pp));
4448 
4449 		break;
4450 	}
4451 
4452 	case DIOCCHANGEADDR: {
4453 		struct pfioc_pooladdr	*pca = (struct pfioc_pooladdr *)addr;
4454 		struct pf_kpool		*pool;
4455 		struct pf_kpooladdr	*oldpa = NULL, *newpa = NULL;
4456 		struct pf_kruleset	*ruleset;
4457 		struct pfi_kkif		*kif = NULL;
4458 
4459 		pca->anchor[sizeof(pca->anchor) - 1] = 0;
4460 
4461 		if (pca->action < PF_CHANGE_ADD_HEAD ||
4462 		    pca->action > PF_CHANGE_REMOVE) {
4463 			error = EINVAL;
4464 			break;
4465 		}
4466 		if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
4467 		    pca->addr.addr.type != PF_ADDR_DYNIFTL &&
4468 		    pca->addr.addr.type != PF_ADDR_TABLE) {
4469 			error = EINVAL;
4470 			break;
4471 		}
4472 		if (pca->addr.addr.p.dyn != NULL) {
4473 			error = EINVAL;
4474 			break;
4475 		}
4476 
4477 		if (pca->action != PF_CHANGE_REMOVE) {
4478 #ifndef INET
4479 			if (pca->af == AF_INET) {
4480 				error = EAFNOSUPPORT;
4481 				break;
4482 			}
4483 #endif /* INET */
4484 #ifndef INET6
4485 			if (pca->af == AF_INET6) {
4486 				error = EAFNOSUPPORT;
4487 				break;
4488 			}
4489 #endif /* INET6 */
4490 			newpa = malloc(sizeof(*newpa), M_PFRULE, M_WAITOK);
4491 			bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
4492 			if (newpa->ifname[0])
4493 				kif = pf_kkif_create(M_WAITOK);
4494 			newpa->kif = NULL;
4495 		}
4496 #define	ERROUT(x)	ERROUT_IOCTL(DIOCCHANGEADDR_error, x)
4497 		PF_RULES_WLOCK();
4498 		ruleset = pf_find_kruleset(pca->anchor);
4499 		if (ruleset == NULL)
4500 			ERROUT(EBUSY);
4501 
4502 		pool = pf_get_kpool(pca->anchor, pca->ticket, pca->r_action,
4503 		    pca->r_num, pca->r_last, 1, 1, PF_RDR);
4504 		if (pool == NULL)
4505 			ERROUT(EBUSY);
4506 
4507 		if (pca->action != PF_CHANGE_REMOVE) {
4508 			if (newpa->ifname[0]) {
4509 				newpa->kif = pfi_kkif_attach(kif, newpa->ifname);
4510 				pfi_kkif_ref(newpa->kif);
4511 				kif = NULL;
4512 			}
4513 
4514 			switch (newpa->addr.type) {
4515 			case PF_ADDR_DYNIFTL:
4516 				error = pfi_dynaddr_setup(&newpa->addr,
4517 				    pca->af);
4518 				break;
4519 			case PF_ADDR_TABLE:
4520 				newpa->addr.p.tbl = pfr_attach_table(ruleset,
4521 				    newpa->addr.v.tblname);
4522 				if (newpa->addr.p.tbl == NULL)
4523 					error = ENOMEM;
4524 				break;
4525 			}
4526 			if (error)
4527 				goto DIOCCHANGEADDR_error;
4528 		}
4529 
4530 		switch (pca->action) {
4531 		case PF_CHANGE_ADD_HEAD:
4532 			oldpa = TAILQ_FIRST(&pool->list);
4533 			break;
4534 		case PF_CHANGE_ADD_TAIL:
4535 			oldpa = TAILQ_LAST(&pool->list, pf_kpalist);
4536 			break;
4537 		default:
4538 			oldpa = TAILQ_FIRST(&pool->list);
4539 			for (int i = 0; oldpa && i < pca->nr; i++)
4540 				oldpa = TAILQ_NEXT(oldpa, entries);
4541 
4542 			if (oldpa == NULL)
4543 				ERROUT(EINVAL);
4544 		}
4545 
4546 		if (pca->action == PF_CHANGE_REMOVE) {
4547 			TAILQ_REMOVE(&pool->list, oldpa, entries);
4548 			switch (oldpa->addr.type) {
4549 			case PF_ADDR_DYNIFTL:
4550 				pfi_dynaddr_remove(oldpa->addr.p.dyn);
4551 				break;
4552 			case PF_ADDR_TABLE:
4553 				pfr_detach_table(oldpa->addr.p.tbl);
4554 				break;
4555 			}
4556 			if (oldpa->kif)
4557 				pfi_kkif_unref(oldpa->kif);
4558 			free(oldpa, M_PFRULE);
4559 		} else {
4560 			if (oldpa == NULL)
4561 				TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
4562 			else if (pca->action == PF_CHANGE_ADD_HEAD ||
4563 			    pca->action == PF_CHANGE_ADD_BEFORE)
4564 				TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
4565 			else
4566 				TAILQ_INSERT_AFTER(&pool->list, oldpa,
4567 				    newpa, entries);
4568 		}
4569 
4570 		pool->cur = TAILQ_FIRST(&pool->list);
4571 		PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, pca->af);
4572 		PF_RULES_WUNLOCK();
4573 		break;
4574 
4575 #undef ERROUT
4576 DIOCCHANGEADDR_error:
4577 		if (newpa != NULL) {
4578 			if (newpa->kif)
4579 				pfi_kkif_unref(newpa->kif);
4580 			free(newpa, M_PFRULE);
4581 		}
4582 		PF_RULES_WUNLOCK();
4583 		pf_kkif_free(kif);
4584 		break;
4585 	}
4586 
4587 	case DIOCGETRULESETS: {
4588 		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
4589 
4590 		pr->path[sizeof(pr->path) - 1] = 0;
4591 
4592 		error = pf_ioctl_get_rulesets(pr);
4593 		break;
4594 	}
4595 
4596 	case DIOCGETRULESET: {
4597 		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
4598 
4599 		pr->path[sizeof(pr->path) - 1] = 0;
4600 
4601 		error = pf_ioctl_get_ruleset(pr);
4602 		break;
4603 	}
4604 
4605 	case DIOCRCLRTABLES: {
4606 		struct pfioc_table *io = (struct pfioc_table *)addr;
4607 
4608 		if (io->pfrio_esize != 0) {
4609 			error = ENODEV;
4610 			break;
4611 		}
4612 		PF_RULES_WLOCK();
4613 		error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
4614 		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
4615 		PF_RULES_WUNLOCK();
4616 		break;
4617 	}
4618 
4619 	case DIOCRADDTABLES: {
4620 		struct pfioc_table *io = (struct pfioc_table *)addr;
4621 		struct pfr_table *pfrts;
4622 		size_t totlen;
4623 
4624 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4625 			error = ENODEV;
4626 			break;
4627 		}
4628 
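		/*
		 * Bound the request and reject element counts whose byte
		 * total would overflow the mallocarray() allocation below.
		 */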
4629 		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4630 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4631 			error = ENOMEM;
4632 			break;
4633 		}
4634 
4635 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4636 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4637 		    M_TEMP, M_WAITOK);
4638 		error = copyin(io->pfrio_buffer, pfrts, totlen);
4639 		if (error) {
4640 			free(pfrts, M_TEMP);
4641 			break;
4642 		}
4643 		PF_RULES_WLOCK();
4644 		error = pfr_add_tables(pfrts, io->pfrio_size,
4645 		    &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4646 		PF_RULES_WUNLOCK();
4647 		free(pfrts, M_TEMP);
4648 		break;
4649 	}
4650 
4651 	case DIOCRDELTABLES: {
4652 		struct pfioc_table *io = (struct pfioc_table *)addr;
4653 		struct pfr_table *pfrts;
4654 		size_t totlen;
4655 
4656 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4657 			error = ENODEV;
4658 			break;
4659 		}
4660 
4661 		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4662 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4663 			error = ENOMEM;
4664 			break;
4665 		}
4666 
4667 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4668 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4669 		    M_TEMP, M_WAITOK);
4670 		error = copyin(io->pfrio_buffer, pfrts, totlen);
4671 		if (error) {
4672 			free(pfrts, M_TEMP);
4673 			break;
4674 		}
4675 		PF_RULES_WLOCK();
4676 		error = pfr_del_tables(pfrts, io->pfrio_size,
4677 		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4678 		PF_RULES_WUNLOCK();
4679 		free(pfrts, M_TEMP);
4680 		break;
4681 	}
4682 
4683 	case DIOCRGETTABLES: {
4684 		struct pfioc_table *io = (struct pfioc_table *)addr;
4685 		struct pfr_table *pfrts;
4686 		size_t totlen;
4687 		int n;
4688 
4689 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4690 			error = ENODEV;
4691 			break;
4692 		}
4693 		PF_RULES_RLOCK();
4694 		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4695 		if (n < 0) {
4696 			PF_RULES_RUNLOCK();
4697 			error = EINVAL;
4698 			break;
4699 		}
4700 		io->pfrio_size = min(io->pfrio_size, n);
4701 
4702 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4703 
4704 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4705 		    M_TEMP, M_NOWAIT | M_ZERO);
4706 		if (pfrts == NULL) {
4707 			error = ENOMEM;
4708 			PF_RULES_RUNLOCK();
4709 			break;
4710 		}
4711 		error = pfr_get_tables(&io->pfrio_table, pfrts,
4712 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4713 		PF_RULES_RUNLOCK();
4714 		if (error == 0)
4715 			error = copyout(pfrts, io->pfrio_buffer, totlen);
4716 		free(pfrts, M_TEMP);
4717 		break;
4718 	}
4719 
4720 	case DIOCRGETTSTATS: {
4721 		struct pfioc_table *io = (struct pfioc_table *)addr;
4722 		struct pfr_tstats *pfrtstats;
4723 		size_t totlen;
4724 		int n;
4725 
4726 		if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
4727 			error = ENODEV;
4728 			break;
4729 		}
4730 		PF_TABLE_STATS_LOCK();
4731 		PF_RULES_RLOCK();
4732 		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4733 		if (n < 0) {
4734 			PF_RULES_RUNLOCK();
4735 			PF_TABLE_STATS_UNLOCK();
4736 			error = EINVAL;
4737 			break;
4738 		}
4739 		io->pfrio_size = min(io->pfrio_size, n);
4740 
4741 		totlen = io->pfrio_size * sizeof(struct pfr_tstats);
4742 		pfrtstats = mallocarray(io->pfrio_size,
4743 		    sizeof(struct pfr_tstats), M_TEMP, M_NOWAIT | M_ZERO);
4744 		if (pfrtstats == NULL) {
4745 			error = ENOMEM;
4746 			PF_RULES_RUNLOCK();
4747 			PF_TABLE_STATS_UNLOCK();
4748 			break;
4749 		}
4750 		error = pfr_get_tstats(&io->pfrio_table, pfrtstats,
4751 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4752 		PF_RULES_RUNLOCK();
4753 		PF_TABLE_STATS_UNLOCK();
4754 		if (error == 0)
4755 			error = copyout(pfrtstats, io->pfrio_buffer, totlen);
4756 		free(pfrtstats, M_TEMP);
4757 		break;
4758 	}
4759 
4760 	case DIOCRCLRTSTATS: {
4761 		struct pfioc_table *io = (struct pfioc_table *)addr;
4762 		struct pfr_table *pfrts;
4763 		size_t totlen;
4764 
4765 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4766 			error = ENODEV;
4767 			break;
4768 		}
4769 
4770 		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4771 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4772 			/* We used to count tables and use the minimum required
4773 			 * size, so we didn't fail on overly large requests.
4774 			 * Keep doing so. */
4775 			io->pfrio_size = pf_ioctl_maxcount;
4776 			break;
4777 		}
4778 
4779 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4780 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4781 		    M_TEMP, M_WAITOK);
4782 		error = copyin(io->pfrio_buffer, pfrts, totlen);
4783 		if (error) {
4784 			free(pfrts, M_TEMP);
4785 			break;
4786 		}
4787 
4788 		PF_TABLE_STATS_LOCK();
4789 		PF_RULES_RLOCK();
4790 		error = pfr_clr_tstats(pfrts, io->pfrio_size,
4791 		    &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4792 		PF_RULES_RUNLOCK();
4793 		PF_TABLE_STATS_UNLOCK();
4794 		free(pfrts, M_TEMP);
4795 		break;
4796 	}
4797 
4798 	case DIOCRSETTFLAGS: {
4799 		struct pfioc_table *io = (struct pfioc_table *)addr;
4800 		struct pfr_table *pfrts;
4801 		size_t totlen;
4802 		int n;
4803 
4804 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4805 			error = ENODEV;
4806 			break;
4807 		}
4808 
4809 		PF_RULES_RLOCK();
4810 		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4811 		if (n < 0) {
4812 			PF_RULES_RUNLOCK();
4813 			error = EINVAL;
4814 			break;
4815 		}
4816 
4817 		io->pfrio_size = min(io->pfrio_size, n);
4818 		PF_RULES_RUNLOCK();
4819 
4820 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4821 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4822 		    M_TEMP, M_WAITOK);
4823 		error = copyin(io->pfrio_buffer, pfrts, totlen);
4824 		if (error) {
4825 			free(pfrts, M_TEMP);
4826 			break;
4827 		}
4828 		PF_RULES_WLOCK();
4829 		error = pfr_set_tflags(pfrts, io->pfrio_size,
4830 		    io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
4831 		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4832 		PF_RULES_WUNLOCK();
4833 		free(pfrts, M_TEMP);
4834 		break;
4835 	}
4836 
4837 	case DIOCRCLRADDRS: {
4838 		struct pfioc_table *io = (struct pfioc_table *)addr;
4839 
4840 		if (io->pfrio_esize != 0) {
4841 			error = ENODEV;
4842 			break;
4843 		}
4844 		PF_RULES_WLOCK();
4845 		error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
4846 		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
4847 		PF_RULES_WUNLOCK();
4848 		break;
4849 	}
4850 
4851 	case DIOCRADDADDRS: {
4852 		struct pfioc_table *io = (struct pfioc_table *)addr;
4853 		struct pfr_addr *pfras;
4854 		size_t totlen;
4855 
4856 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4857 			error = ENODEV;
4858 			break;
4859 		}
4860 		if (io->pfrio_size < 0 ||
4861 		    io->pfrio_size > pf_ioctl_maxcount ||
4862 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4863 			error = EINVAL;
4864 			break;
4865 		}
4866 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
4867 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4868 		    M_TEMP, M_WAITOK);
4869 		error = copyin(io->pfrio_buffer, pfras, totlen);
4870 		if (error) {
4871 			free(pfras, M_TEMP);
4872 			break;
4873 		}
4874 		PF_RULES_WLOCK();
4875 		error = pfr_add_addrs(&io->pfrio_table, pfras,
4876 		    io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
4877 		    PFR_FLAG_USERIOCTL);
4878 		PF_RULES_WUNLOCK();
4879 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
4880 			error = copyout(pfras, io->pfrio_buffer, totlen);
4881 		free(pfras, M_TEMP);
4882 		break;
4883 	}
4884 
4885 	case DIOCRDELADDRS: {
4886 		struct pfioc_table *io = (struct pfioc_table *)addr;
4887 		struct pfr_addr *pfras;
4888 		size_t totlen;
4889 
4890 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4891 			error = ENODEV;
4892 			break;
4893 		}
4894 		if (io->pfrio_size < 0 ||
4895 		    io->pfrio_size > pf_ioctl_maxcount ||
4896 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4897 			error = EINVAL;
4898 			break;
4899 		}
4900 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
4901 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4902 		    M_TEMP, M_WAITOK);
4903 		error = copyin(io->pfrio_buffer, pfras, totlen);
4904 		if (error) {
4905 			free(pfras, M_TEMP);
4906 			break;
4907 		}
4908 		PF_RULES_WLOCK();
4909 		error = pfr_del_addrs(&io->pfrio_table, pfras,
4910 		    io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
4911 		    PFR_FLAG_USERIOCTL);
4912 		PF_RULES_WUNLOCK();
4913 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
4914 			error = copyout(pfras, io->pfrio_buffer, totlen);
4915 		free(pfras, M_TEMP);
4916 		break;
4917 	}
4918 
4919 	case DIOCRSETADDRS: {
4920 		struct pfioc_table *io = (struct pfioc_table *)addr;
4921 		struct pfr_addr *pfras;
4922 		size_t totlen, count;
4923 
4924 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4925 			error = ENODEV;
4926 			break;
4927 		}
4928 		if (io->pfrio_size < 0 || io->pfrio_size2 < 0) {
4929 			error = EINVAL;
4930 			break;
4931 		}
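		/*
		 * With PFR_FLAG_FEEDBACK the same buffer carries results back
		 * to the caller, so the temporary buffer is sized for the
		 * larger of the input count (pfrio_size) and the caller's
		 * feedback room (pfrio_size2).
		 */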
4932 		count = max(io->pfrio_size, io->pfrio_size2);
4933 		if (count > pf_ioctl_maxcount ||
4934 		    WOULD_OVERFLOW(count, sizeof(struct pfr_addr))) {
4935 			error = EINVAL;
4936 			break;
4937 		}
4938 		totlen = count * sizeof(struct pfr_addr);
4939 		pfras = mallocarray(count, sizeof(struct pfr_addr), M_TEMP,
4940 		    M_WAITOK);
4941 		error = copyin(io->pfrio_buffer, pfras, totlen);
4942 		if (error) {
4943 			free(pfras, M_TEMP);
4944 			break;
4945 		}
4946 		PF_RULES_WLOCK();
4947 		error = pfr_set_addrs(&io->pfrio_table, pfras,
4948 		    io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
4949 		    &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
4950 		    PFR_FLAG_USERIOCTL, 0);
4951 		PF_RULES_WUNLOCK();
4952 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
4953 			error = copyout(pfras, io->pfrio_buffer, totlen);
4954 		free(pfras, M_TEMP);
4955 		break;
4956 	}
4957 
4958 	case DIOCRGETADDRS: {
4959 		struct pfioc_table *io = (struct pfioc_table *)addr;
4960 		struct pfr_addr *pfras;
4961 		size_t totlen;
4962 
4963 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4964 			error = ENODEV;
4965 			break;
4966 		}
4967 		if (io->pfrio_size < 0 ||
4968 		    io->pfrio_size > pf_ioctl_maxcount ||
4969 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4970 			error = EINVAL;
4971 			break;
4972 		}
4973 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
4974 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4975 		    M_TEMP, M_WAITOK | M_ZERO);
4976 		PF_RULES_RLOCK();
4977 		error = pfr_get_addrs(&io->pfrio_table, pfras,
4978 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4979 		PF_RULES_RUNLOCK();
4980 		if (error == 0)
4981 			error = copyout(pfras, io->pfrio_buffer, totlen);
4982 		free(pfras, M_TEMP);
4983 		break;
4984 	}
4985 
4986 	case DIOCRGETASTATS: {
4987 		struct pfioc_table *io = (struct pfioc_table *)addr;
4988 		struct pfr_astats *pfrastats;
4989 		size_t totlen;
4990 
4991 		if (io->pfrio_esize != sizeof(struct pfr_astats)) {
4992 			error = ENODEV;
4993 			break;
4994 		}
4995 		if (io->pfrio_size < 0 ||
4996 		    io->pfrio_size > pf_ioctl_maxcount ||
4997 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_astats))) {
4998 			error = EINVAL;
4999 			break;
5000 		}
5001 		totlen = io->pfrio_size * sizeof(struct pfr_astats);
5002 		pfrastats = mallocarray(io->pfrio_size,
5003 		    sizeof(struct pfr_astats), M_TEMP, M_WAITOK | M_ZERO);
5004 		PF_RULES_RLOCK();
5005 		error = pfr_get_astats(&io->pfrio_table, pfrastats,
5006 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
5007 		PF_RULES_RUNLOCK();
5008 		if (error == 0)
5009 			error = copyout(pfrastats, io->pfrio_buffer, totlen);
5010 		free(pfrastats, M_TEMP);
5011 		break;
5012 	}
5013 
5014 	case DIOCRCLRASTATS: {
5015 		struct pfioc_table *io = (struct pfioc_table *)addr;
5016 		struct pfr_addr *pfras;
5017 		size_t totlen;
5018 
5019 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
5020 			error = ENODEV;
5021 			break;
5022 		}
5023 		if (io->pfrio_size < 0 ||
5024 		    io->pfrio_size > pf_ioctl_maxcount ||
5025 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
5026 			error = EINVAL;
5027 			break;
5028 		}
5029 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
5030 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
5031 		    M_TEMP, M_WAITOK);
5032 		error = copyin(io->pfrio_buffer, pfras, totlen);
5033 		if (error) {
5034 			free(pfras, M_TEMP);
5035 			break;
5036 		}
5037 		PF_RULES_WLOCK();
5038 		error = pfr_clr_astats(&io->pfrio_table, pfras,
5039 		    io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
5040 		    PFR_FLAG_USERIOCTL);
5041 		PF_RULES_WUNLOCK();
5042 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
5043 			error = copyout(pfras, io->pfrio_buffer, totlen);
5044 		free(pfras, M_TEMP);
5045 		break;
5046 	}
5047 
5048 	case DIOCRTSTADDRS: {
5049 		struct pfioc_table *io = (struct pfioc_table *)addr;
5050 		struct pfr_addr *pfras;
5051 		size_t totlen;
5052 
5053 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
5054 			error = ENODEV;
5055 			break;
5056 		}
5057 		if (io->pfrio_size < 0 ||
5058 		    io->pfrio_size > pf_ioctl_maxcount ||
5059 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
5060 			error = EINVAL;
5061 			break;
5062 		}
5063 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
5064 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
5065 		    M_TEMP, M_WAITOK);
5066 		error = copyin(io->pfrio_buffer, pfras, totlen);
5067 		if (error) {
5068 			free(pfras, M_TEMP);
5069 			break;
5070 		}
5071 		PF_RULES_RLOCK();
5072 		error = pfr_tst_addrs(&io->pfrio_table, pfras,
5073 		    io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
5074 		    PFR_FLAG_USERIOCTL);
5075 		PF_RULES_RUNLOCK();
5076 		if (error == 0)
5077 			error = copyout(pfras, io->pfrio_buffer, totlen);
5078 		free(pfras, M_TEMP);
5079 		break;
5080 	}
5081 
5082 	case DIOCRINADEFINE: {
5083 		struct pfioc_table *io = (struct pfioc_table *)addr;
5084 		struct pfr_addr *pfras;
5085 		size_t totlen;
5086 
5087 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
5088 			error = ENODEV;
5089 			break;
5090 		}
5091 		if (io->pfrio_size < 0 ||
5092 		    io->pfrio_size > pf_ioctl_maxcount ||
5093 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
5094 			error = EINVAL;
5095 			break;
5096 		}
5097 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
5098 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
5099 		    M_TEMP, M_WAITOK);
5100 		error = copyin(io->pfrio_buffer, pfras, totlen);
5101 		if (error) {
5102 			free(pfras, M_TEMP);
5103 			break;
5104 		}
5105 		PF_RULES_WLOCK();
5106 		error = pfr_ina_define(&io->pfrio_table, pfras,
5107 		    io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
5108 		    io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
5109 		PF_RULES_WUNLOCK();
5110 		free(pfras, M_TEMP);
5111 		break;
5112 	}
5113 
5114 	case DIOCOSFPADD: {
5115 		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
5116 		PF_RULES_WLOCK();
5117 		error = pf_osfp_add(io);
5118 		PF_RULES_WUNLOCK();
5119 		break;
5120 	}
5121 
5122 	case DIOCOSFPGET: {
5123 		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
5124 		PF_RULES_RLOCK();
5125 		error = pf_osfp_get(io);
5126 		PF_RULES_RUNLOCK();
5127 		break;
5128 	}
5129 
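	/*
	 * DIOCXBEGIN, DIOCXROLLBACK and DIOCXCOMMIT implement transactional
	 * ruleset loads: BEGIN opens an inactive copy of each listed ruleset
	 * (Ethernet, ALTQ, table or rule) and returns a ticket per element,
	 * ROLLBACK discards those copies, and COMMIT re-validates every
	 * ticket before swapping the inactive rulesets into place.
	 */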
5130 	case DIOCXBEGIN: {
5131 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
5132 		struct pfioc_trans_e	*ioes, *ioe;
5133 		size_t			 totlen;
5134 		int			 i;
5135 
5136 		if (io->esize != sizeof(*ioe)) {
5137 			error = ENODEV;
5138 			break;
5139 		}
5140 		if (io->size < 0 ||
5141 		    io->size > pf_ioctl_maxcount ||
5142 		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
5143 			error = EINVAL;
5144 			break;
5145 		}
5146 		totlen = sizeof(struct pfioc_trans_e) * io->size;
5147 		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
5148 		    M_TEMP, M_WAITOK);
5149 		error = copyin(io->array, ioes, totlen);
5150 		if (error) {
5151 			free(ioes, M_TEMP);
5152 			break;
5153 		}
5154 		/* Ensure there are no more Ethernet rules to clean up. */
5155 		NET_EPOCH_DRAIN_CALLBACKS();
5156 		PF_RULES_WLOCK();
5157 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5158 			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
5159 			switch (ioe->rs_num) {
5160 			case PF_RULESET_ETH:
5161 				if ((error = pf_begin_eth(&ioe->ticket, ioe->anchor))) {
5162 					PF_RULES_WUNLOCK();
5163 					free(ioes, M_TEMP);
5164 					goto fail;
5165 				}
5166 				break;
5167 #ifdef ALTQ
5168 			case PF_RULESET_ALTQ:
5169 				if (ioe->anchor[0]) {
5170 					PF_RULES_WUNLOCK();
5171 					free(ioes, M_TEMP);
5172 					error = EINVAL;
5173 					goto fail;
5174 				}
5175 				if ((error = pf_begin_altq(&ioe->ticket))) {
5176 					PF_RULES_WUNLOCK();
5177 					free(ioes, M_TEMP);
5178 					goto fail;
5179 				}
5180 				break;
5181 #endif /* ALTQ */
5182 			case PF_RULESET_TABLE:
5183 			    {
5184 				struct pfr_table table;
5185 
5186 				bzero(&table, sizeof(table));
5187 				strlcpy(table.pfrt_anchor, ioe->anchor,
5188 				    sizeof(table.pfrt_anchor));
5189 				if ((error = pfr_ina_begin(&table,
5190 				    &ioe->ticket, NULL, 0))) {
5191 					PF_RULES_WUNLOCK();
5192 					free(ioes, M_TEMP);
5193 					goto fail;
5194 				}
5195 				break;
5196 			    }
5197 			default:
5198 				if ((error = pf_begin_rules(&ioe->ticket,
5199 				    ioe->rs_num, ioe->anchor))) {
5200 					PF_RULES_WUNLOCK();
5201 					free(ioes, M_TEMP);
5202 					goto fail;
5203 				}
5204 				break;
5205 			}
5206 		}
5207 		PF_RULES_WUNLOCK();
5208 		error = copyout(ioes, io->array, totlen);
5209 		free(ioes, M_TEMP);
5210 		break;
5211 	}
5212 
5213 	case DIOCXROLLBACK: {
5214 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
5215 		struct pfioc_trans_e	*ioe, *ioes;
5216 		size_t			 totlen;
5217 		int			 i;
5218 
5219 		if (io->esize != sizeof(*ioe)) {
5220 			error = ENODEV;
5221 			break;
5222 		}
5223 		if (io->size < 0 ||
5224 		    io->size > pf_ioctl_maxcount ||
5225 		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
5226 			error = EINVAL;
5227 			break;
5228 		}
5229 		totlen = sizeof(struct pfioc_trans_e) * io->size;
5230 		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
5231 		    M_TEMP, M_WAITOK);
5232 		error = copyin(io->array, ioes, totlen);
5233 		if (error) {
5234 			free(ioes, M_TEMP);
5235 			break;
5236 		}
5237 		PF_RULES_WLOCK();
5238 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5239 			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
5240 			switch (ioe->rs_num) {
5241 			case PF_RULESET_ETH:
5242 				if ((error = pf_rollback_eth(ioe->ticket,
5243 				    ioe->anchor))) {
5244 					PF_RULES_WUNLOCK();
5245 					free(ioes, M_TEMP);
5246 					goto fail; /* really bad */
5247 				}
5248 				break;
5249 #ifdef ALTQ
5250 			case PF_RULESET_ALTQ:
5251 				if (ioe->anchor[0]) {
5252 					PF_RULES_WUNLOCK();
5253 					free(ioes, M_TEMP);
5254 					error = EINVAL;
5255 					goto fail;
5256 				}
5257 				if ((error = pf_rollback_altq(ioe->ticket))) {
5258 					PF_RULES_WUNLOCK();
5259 					free(ioes, M_TEMP);
5260 					goto fail; /* really bad */
5261 				}
5262 				break;
5263 #endif /* ALTQ */
5264 			case PF_RULESET_TABLE:
5265 			    {
5266 				struct pfr_table table;
5267 
5268 				bzero(&table, sizeof(table));
5269 				strlcpy(table.pfrt_anchor, ioe->anchor,
5270 				    sizeof(table.pfrt_anchor));
5271 				if ((error = pfr_ina_rollback(&table,
5272 				    ioe->ticket, NULL, 0))) {
5273 					PF_RULES_WUNLOCK();
5274 					free(ioes, M_TEMP);
5275 					goto fail; /* really bad */
5276 				}
5277 				break;
5278 			    }
5279 			default:
5280 				if ((error = pf_rollback_rules(ioe->ticket,
5281 				    ioe->rs_num, ioe->anchor))) {
5282 					PF_RULES_WUNLOCK();
5283 					free(ioes, M_TEMP);
5284 					goto fail; /* really bad */
5285 				}
5286 				break;
5287 			}
5288 		}
5289 		PF_RULES_WUNLOCK();
5290 		free(ioes, M_TEMP);
5291 		break;
5292 	}
5293 
5294 	case DIOCXCOMMIT: {
5295 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
5296 		struct pfioc_trans_e	*ioe, *ioes;
5297 		struct pf_kruleset	*rs;
5298 		struct pf_keth_ruleset	*ers;
5299 		size_t			 totlen;
5300 		int			 i;
5301 
5302 		if (io->esize != sizeof(*ioe)) {
5303 			error = ENODEV;
5304 			break;
5305 		}
5306 
5307 		if (io->size < 0 ||
5308 		    io->size > pf_ioctl_maxcount ||
5309 		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
5310 			error = EINVAL;
5311 			break;
5312 		}
5313 
5314 		totlen = sizeof(struct pfioc_trans_e) * io->size;
5315 		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
5316 		    M_TEMP, M_WAITOK);
5317 		error = copyin(io->array, ioes, totlen);
5318 		if (error) {
5319 			free(ioes, M_TEMP);
5320 			break;
5321 		}
5322 		PF_RULES_WLOCK();
5323 		/* First make sure everything will succeed. */
5324 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5325 			ioe->anchor[sizeof(ioe->anchor) - 1] = 0;
5326 			switch (ioe->rs_num) {
5327 			case PF_RULESET_ETH:
5328 				ers = pf_find_keth_ruleset(ioe->anchor);
5329 				if (ers == NULL || ioe->ticket == 0 ||
5330 				    ioe->ticket != ers->inactive.ticket) {
5331 					PF_RULES_WUNLOCK();
5332 					free(ioes, M_TEMP);
5333 					error = EINVAL;
5334 					goto fail;
5335 				}
5336 				break;
5337 #ifdef ALTQ
5338 			case PF_RULESET_ALTQ:
5339 				if (ioe->anchor[0]) {
5340 					PF_RULES_WUNLOCK();
5341 					free(ioes, M_TEMP);
5342 					error = EINVAL;
5343 					goto fail;
5344 				}
5345 				if (!V_altqs_inactive_open || ioe->ticket !=
5346 				    V_ticket_altqs_inactive) {
5347 					PF_RULES_WUNLOCK();
5348 					free(ioes, M_TEMP);
5349 					error = EBUSY;
5350 					goto fail;
5351 				}
5352 				break;
5353 #endif /* ALTQ */
5354 			case PF_RULESET_TABLE:
5355 				rs = pf_find_kruleset(ioe->anchor);
5356 				if (rs == NULL || !rs->topen || ioe->ticket !=
5357 				    rs->tticket) {
5358 					PF_RULES_WUNLOCK();
5359 					free(ioes, M_TEMP);
5360 					error = EBUSY;
5361 					goto fail;
5362 				}
5363 				break;
5364 			default:
5365 				if (ioe->rs_num < 0 || ioe->rs_num >=
5366 				    PF_RULESET_MAX) {
5367 					PF_RULES_WUNLOCK();
5368 					free(ioes, M_TEMP);
5369 					error = EINVAL;
5370 					goto fail;
5371 				}
5372 				rs = pf_find_kruleset(ioe->anchor);
5373 				if (rs == NULL ||
5374 				    !rs->rules[ioe->rs_num].inactive.open ||
5375 				    rs->rules[ioe->rs_num].inactive.ticket !=
5376 				    ioe->ticket) {
5377 					PF_RULES_WUNLOCK();
5378 					free(ioes, M_TEMP);
5379 					error = EBUSY;
5380 					goto fail;
5381 				}
5382 				break;
5383 			}
5384 		}
5385 		/* Now do the commit - no errors should happen here. */
5386 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5387 			switch (ioe->rs_num) {
5388 			case PF_RULESET_ETH:
5389 				if ((error = pf_commit_eth(ioe->ticket, ioe->anchor))) {
5390 					PF_RULES_WUNLOCK();
5391 					free(ioes, M_TEMP);
5392 					goto fail; /* really bad */
5393 				}
5394 				break;
5395 #ifdef ALTQ
5396 			case PF_RULESET_ALTQ:
5397 				if ((error = pf_commit_altq(ioe->ticket))) {
5398 					PF_RULES_WUNLOCK();
5399 					free(ioes, M_TEMP);
5400 					goto fail; /* really bad */
5401 				}
5402 				break;
5403 #endif /* ALTQ */
5404 			case PF_RULESET_TABLE:
5405 			    {
5406 				struct pfr_table table;
5407 
5408 				bzero(&table, sizeof(table));
5409 				(void)strlcpy(table.pfrt_anchor, ioe->anchor,
5410 				    sizeof(table.pfrt_anchor));
5411 				if ((error = pfr_ina_commit(&table,
5412 				    ioe->ticket, NULL, NULL, 0))) {
5413 					PF_RULES_WUNLOCK();
5414 					free(ioes, M_TEMP);
5415 					goto fail; /* really bad */
5416 				}
5417 				break;
5418 			    }
5419 			default:
5420 				if ((error = pf_commit_rules(ioe->ticket,
5421 				    ioe->rs_num, ioe->anchor))) {
5422 					PF_RULES_WUNLOCK();
5423 					free(ioes, M_TEMP);
5424 					goto fail; /* really bad */
5425 				}
5426 				break;
5427 			}
5428 		}
5429 		PF_RULES_WUNLOCK();
5430 
5431 		/* Only hook into Ethernet traffic if we've got rules for it. */
5432 		if (! TAILQ_EMPTY(V_pf_keth->active.rules))
5433 			hook_pf_eth();
5434 		else
5435 			dehook_pf_eth();
5436 
5437 		free(ioes, M_TEMP);
5438 		break;
5439 	}
5440 
5441 	case DIOCGETSRCNODES: {
5442 		struct pfioc_src_nodes	*psn = (struct pfioc_src_nodes *)addr;
5443 		struct pf_srchash	*sh;
5444 		struct pf_ksrc_node	*n;
5445 		struct pf_src_node	*p, *pstore;
5446 		uint32_t		 i, nr = 0;
5447 
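		/*
		 * First pass: count the source nodes so the required reply
		 * size is known; the second pass below copies at most
		 * psn_len bytes of exported nodes back to the caller.
		 */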
5448 		for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask;
5449 				i++, sh++) {
5450 			PF_HASHROW_LOCK(sh);
5451 			LIST_FOREACH(n, &sh->nodes, entry)
5452 				nr++;
5453 			PF_HASHROW_UNLOCK(sh);
5454 		}
5455 
5456 		psn->psn_len = min(psn->psn_len,
5457 		    sizeof(struct pf_src_node) * nr);
5458 
5459 		if (psn->psn_len == 0) {
5460 			psn->psn_len = sizeof(struct pf_src_node) * nr;
5461 			break;
5462 		}
5463 
5464 		nr = 0;
5465 
5466 		p = pstore = malloc(psn->psn_len, M_TEMP, M_WAITOK | M_ZERO);
5467 		for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask;
5468 		    i++, sh++) {
5469 		    PF_HASHROW_LOCK(sh);
5470 		    LIST_FOREACH(n, &sh->nodes, entry) {
5471 
5472 			if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
5473 				break;
5474 
5475 			pf_src_node_copy(n, p);
5476 
5477 			p++;
5478 			nr++;
5479 		    }
5480 		    PF_HASHROW_UNLOCK(sh);
5481 		}
5482 		error = copyout(pstore, psn->psn_src_nodes,
5483 		    sizeof(struct pf_src_node) * nr);
5484 		if (error) {
5485 			free(pstore, M_TEMP);
5486 			break;
5487 		}
5488 		psn->psn_len = sizeof(struct pf_src_node) * nr;
5489 		free(pstore, M_TEMP);
5490 		break;
5491 	}
5492 
5493 	case DIOCCLRSRCNODES: {
5494 		pf_kill_srcnodes(NULL);
5495 		break;
5496 	}
5497 
5498 	case DIOCKILLSRCNODES:
5499 		pf_kill_srcnodes((struct pfioc_src_node_kill *)addr);
5500 		break;
5501 
5502 #ifdef COMPAT_FREEBSD13
5503 	case DIOCKEEPCOUNTERS_FREEBSD13:
5504 #endif
5505 	case DIOCKEEPCOUNTERS:
5506 		error = pf_keepcounters((struct pfioc_nv *)addr);
5507 		break;
5508 
5509 	case DIOCGETSYNCOOKIES:
5510 		error = pf_get_syncookies((struct pfioc_nv *)addr);
5511 		break;
5512 
5513 	case DIOCSETSYNCOOKIES:
5514 		error = pf_set_syncookies((struct pfioc_nv *)addr);
5515 		break;
5516 
5517 	case DIOCSETHOSTID: {
5518 		u_int32_t	*hostid = (u_int32_t *)addr;
5519 
5520 		PF_RULES_WLOCK();
5521 		if (*hostid == 0)
5522 			V_pf_status.hostid = arc4random();
5523 		else
5524 			V_pf_status.hostid = *hostid;
5525 		PF_RULES_WUNLOCK();
5526 		break;
5527 	}
5528 
5529 	case DIOCOSFPFLUSH:
5530 		PF_RULES_WLOCK();
5531 		pf_osfp_flush();
5532 		PF_RULES_WUNLOCK();
5533 		break;
5534 
5535 	case DIOCIGETIFACES: {
5536 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
5537 		struct pfi_kif *ifstore;
5538 		size_t bufsiz;
5539 
5540 		if (io->pfiio_esize != sizeof(struct pfi_kif)) {
5541 			error = ENODEV;
5542 			break;
5543 		}
5544 
5545 		if (io->pfiio_size < 0 ||
5546 		    io->pfiio_size > pf_ioctl_maxcount ||
5547 		    WOULD_OVERFLOW(io->pfiio_size, sizeof(struct pfi_kif))) {
5548 			error = EINVAL;
5549 			break;
5550 		}
5551 
5552 		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
5553 
5554 		bufsiz = io->pfiio_size * sizeof(struct pfi_kif);
5555 		ifstore = mallocarray(io->pfiio_size, sizeof(struct pfi_kif),
5556 		    M_TEMP, M_WAITOK | M_ZERO);
5557 
5558 		PF_RULES_RLOCK();
5559 		pfi_get_ifaces(io->pfiio_name, ifstore, &io->pfiio_size);
5560 		PF_RULES_RUNLOCK();
5561 		error = copyout(ifstore, io->pfiio_buffer, bufsiz);
5562 		free(ifstore, M_TEMP);
5563 		break;
5564 	}
5565 
5566 	case DIOCSETIFFLAG: {
5567 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
5568 
5569 		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
5570 
5571 		PF_RULES_WLOCK();
5572 		error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
5573 		PF_RULES_WUNLOCK();
5574 		break;
5575 	}
5576 
5577 	case DIOCCLRIFFLAG: {
5578 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
5579 
5580 		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
5581 
5582 		PF_RULES_WLOCK();
5583 		error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
5584 		PF_RULES_WUNLOCK();
5585 		break;
5586 	}
5587 
5588 	case DIOCSETREASS: {
5589 		u_int32_t	*reass = (u_int32_t *)addr;
5590 
5591 		V_pf_status.reass = *reass & (PF_REASS_ENABLED|PF_REASS_NODF);
5592 		/* Removal of the DF flag without reassembly enabled is not a
5593 		 * valid combination. Disable reassembly in that case. */
5594 		if (!(V_pf_status.reass & PF_REASS_ENABLED))
5595 			V_pf_status.reass = 0;
5596 		break;
5597 	}
5598 
5599 	default:
5600 		error = ENODEV;
5601 		break;
5602 	}
5603 fail:
5604 	CURVNET_RESTORE();
5605 
5606 #undef ERROUT_IOCTL
5607 
5608 	return (error);
5609 }
5610 
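/*
 * Export a kernel state into the pfsync/ioctl wire representation selected
 * by msg_version.  Common fields are written through the pfs_1301 member of
 * the union; version-specific fields (the state flags and, in the 1400
 * layout, the queueing and route-to attributes) go through the matching
 * member, with multi-byte values converted to network byte order.
 */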
5611 void
5612 pfsync_state_export(union pfsync_state_union *sp, struct pf_kstate *st, int msg_version)
5613 {
5614 	bzero(sp, sizeof(union pfsync_state_union));
5615 
5616 	/* copy from state key */
5617 	sp->pfs_1301.key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
5618 	sp->pfs_1301.key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
5619 	sp->pfs_1301.key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
5620 	sp->pfs_1301.key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
5621 	sp->pfs_1301.key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
5622 	sp->pfs_1301.key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
5623 	sp->pfs_1301.key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
5624 	sp->pfs_1301.key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
5625 	sp->pfs_1301.proto = st->key[PF_SK_WIRE]->proto;
5626 	sp->pfs_1301.af = st->key[PF_SK_WIRE]->af;
5627 
5628 	/* copy from state */
5629 	strlcpy(sp->pfs_1301.ifname, st->kif->pfik_name, sizeof(sp->pfs_1301.ifname));
5630 	bcopy(&st->act.rt_addr, &sp->pfs_1301.rt_addr, sizeof(sp->pfs_1301.rt_addr));
5631 	sp->pfs_1301.creation = htonl(time_uptime - (st->creation / 1000));
5632 	sp->pfs_1301.expire = pf_state_expires(st);
5633 	if (sp->pfs_1301.expire <= time_uptime)
5634 		sp->pfs_1301.expire = htonl(0);
5635 	else
5636 		sp->pfs_1301.expire = htonl(sp->pfs_1301.expire - time_uptime);
5637 
5638 	sp->pfs_1301.direction = st->direction;
5639 	sp->pfs_1301.log = st->act.log;
5640 	sp->pfs_1301.timeout = st->timeout;
5641 
5642 	switch (msg_version) {
5643 		case PFSYNC_MSG_VERSION_1301:
5644 			sp->pfs_1301.state_flags = st->state_flags;
5645 			break;
5646 		case PFSYNC_MSG_VERSION_1400:
5647 			sp->pfs_1400.state_flags = htons(st->state_flags);
5648 			sp->pfs_1400.qid = htons(st->act.qid);
5649 			sp->pfs_1400.pqid = htons(st->act.pqid);
5650 			sp->pfs_1400.dnpipe = htons(st->act.dnpipe);
5651 			sp->pfs_1400.dnrpipe = htons(st->act.dnrpipe);
5652 			sp->pfs_1400.rtableid = htonl(st->act.rtableid);
5653 			sp->pfs_1400.min_ttl = st->act.min_ttl;
5654 			sp->pfs_1400.set_tos = st->act.set_tos;
5655 			sp->pfs_1400.max_mss = htons(st->act.max_mss);
5656 			sp->pfs_1400.set_prio[0] = st->act.set_prio[0];
5657 			sp->pfs_1400.set_prio[1] = st->act.set_prio[1];
5658 			sp->pfs_1400.rt = st->act.rt;
5659 			if (st->act.rt_kif)
5660 				strlcpy(sp->pfs_1400.rt_ifname,
5661 				    st->act.rt_kif->pfik_name,
5662 				    sizeof(sp->pfs_1400.rt_ifname));
5663 			break;
5664 		default:
5665 			panic("%s: Unsupported pfsync_msg_version %d",
5666 			    __func__, msg_version);
5667 	}
5668 
5669 	if (st->src_node)
5670 		sp->pfs_1301.sync_flags |= PFSYNC_FLAG_SRCNODE;
5671 	if (st->nat_src_node)
5672 		sp->pfs_1301.sync_flags |= PFSYNC_FLAG_NATSRCNODE;
5673 
5674 	sp->pfs_1301.id = st->id;
5675 	sp->pfs_1301.creatorid = st->creatorid;
5676 	pf_state_peer_hton(&st->src, &sp->pfs_1301.src);
5677 	pf_state_peer_hton(&st->dst, &sp->pfs_1301.dst);
5678 
5679 	if (st->rule == NULL)
5680 		sp->pfs_1301.rule = htonl(-1);
5681 	else
5682 		sp->pfs_1301.rule = htonl(st->rule->nr);
5683 	if (st->anchor == NULL)
5684 		sp->pfs_1301.anchor = htonl(-1);
5685 	else
5686 		sp->pfs_1301.anchor = htonl(st->anchor->nr);
5687 	if (st->nat_rule == NULL)
5688 		sp->pfs_1301.nat_rule = htonl(-1);
5689 	else
5690 		sp->pfs_1301.nat_rule = htonl(st->nat_rule->nr);
5691 
5692 	pf_state_counter_hton(st->packets[0], sp->pfs_1301.packets[0]);
5693 	pf_state_counter_hton(st->packets[1], sp->pfs_1301.packets[1]);
5694 	pf_state_counter_hton(st->bytes[0], sp->pfs_1301.bytes[0]);
5695 	pf_state_counter_hton(st->bytes[1], sp->pfs_1301.bytes[1]);
5696 }
5697 
5698 void
5699 pf_state_export(struct pf_state_export *sp, struct pf_kstate *st)
5700 {
5701 	bzero(sp, sizeof(*sp));
5702 
5703 	sp->version = PF_STATE_VERSION;
5704 
5705 	/* copy from state key */
5706 	sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
5707 	sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
5708 	sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
5709 	sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
5710 	sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
5711 	sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
5712 	sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
5713 	sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
5714 	sp->proto = st->key[PF_SK_WIRE]->proto;
5715 	sp->af = st->key[PF_SK_WIRE]->af;
5716 
5717 	/* copy from state */
5718 	strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
5719 	strlcpy(sp->orig_ifname, st->orig_kif->pfik_name,
5720 	    sizeof(sp->orig_ifname));
5721 	bcopy(&st->act.rt_addr, &sp->rt_addr, sizeof(sp->rt_addr));
5722 	sp->creation = htonl(time_uptime - (st->creation / 1000));
5723 	sp->expire = pf_state_expires(st);
5724 	if (sp->expire <= time_uptime)
5725 		sp->expire = htonl(0);
5726 	else
5727 		sp->expire = htonl(sp->expire - time_uptime);
5728 
5729 	sp->direction = st->direction;
5730 	sp->log = st->act.log;
5731 	sp->timeout = st->timeout;
5732 	/* 8 bits for the old libpfctl, 16 bits for the new libpfctl */
5733 	sp->state_flags_compat = st->state_flags;
5734 	sp->state_flags = htons(st->state_flags);
5735 	if (st->src_node)
5736 		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
5737 	if (st->nat_src_node)
5738 		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;
5739 
5740 	sp->id = st->id;
5741 	sp->creatorid = st->creatorid;
5742 	pf_state_peer_hton(&st->src, &sp->src);
5743 	pf_state_peer_hton(&st->dst, &sp->dst);
5744 
5745 	if (st->rule == NULL)
5746 		sp->rule = htonl(-1);
5747 	else
5748 		sp->rule = htonl(st->rule->nr);
5749 	if (st->anchor == NULL)
5750 		sp->anchor = htonl(-1);
5751 	else
5752 		sp->anchor = htonl(st->anchor->nr);
5753 	if (st->nat_rule == NULL)
5754 		sp->nat_rule = htonl(-1);
5755 	else
5756 		sp->nat_rule = htonl(st->nat_rule->nr);
5757 
5758 	sp->packets[0] = st->packets[0];
5759 	sp->packets[1] = st->packets[1];
5760 	sp->bytes[0] = st->bytes[0];
5761 	sp->bytes[1] = st->bytes[1];
5762 
5763 	sp->qid = htons(st->act.qid);
5764 	sp->pqid = htons(st->act.pqid);
5765 	sp->dnpipe = htons(st->act.dnpipe);
5766 	sp->dnrpipe = htons(st->act.dnrpipe);
5767 	sp->rtableid = htonl(st->act.rtableid);
5768 	sp->min_ttl = st->act.min_ttl;
5769 	sp->set_tos = st->act.set_tos;
5770 	sp->max_mss = htons(st->act.max_mss);
5771 	sp->rt = st->act.rt;
5772 	if (st->act.rt_kif)
5773 		strlcpy(sp->rt_ifname, st->act.rt_kif->pfik_name,
5774 		    sizeof(sp->rt_ifname));
5775 	sp->set_prio[0] = st->act.set_prio[0];
5776 	sp->set_prio[1] = st->act.set_prio[1];
5777 
5778 }
5779 
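/*
 * Prepare a table address wrapper for copyout: drop the kernel table
 * pointer and report the number of addresses in the active table
 * instead, or -1 if the table is not active.
 */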
5780 static void
5781 pf_tbladdr_copyout(struct pf_addr_wrap *aw)
5782 {
5783 	struct pfr_ktable *kt;
5784 
5785 	KASSERT(aw->type == PF_ADDR_TABLE, ("%s: type %u", __func__, aw->type));
5786 
5787 	kt = aw->p.tbl;
5788 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
5789 		kt = kt->pfrkt_root;
5790 	aw->p.tbl = NULL;
5791 	aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
5792 		kt->pfrkt_cnt : -1;
5793 }
5794 
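/*
 * Attach a counter array to the status nvlist as a child nvlist named
 * `name', holding three parallel arrays: "counters" (the fetched
 * values), "names" and "ids" (the array indices).
 */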
5795 static int
5796 pf_add_status_counters(nvlist_t *nvl, const char *name, counter_u64_t *counters,
5797     size_t number, char **names)
5798 {
5799 	nvlist_t        *nvc;
5800 
5801 	nvc = nvlist_create(0);
5802 	if (nvc == NULL)
5803 		return (ENOMEM);
5804 
5805 	for (int i = 0; i < number; i++) {
5806 		nvlist_append_number_array(nvc, "counters",
5807 		    counter_u64_fetch(counters[i]));
5808 		nvlist_append_string_array(nvc, "names",
5809 		    names[i]);
5810 		nvlist_append_number_array(nvc, "ids",
5811 		    i);
5812 	}
5813 	nvlist_add_nvlist(nvl, name, nvc);
5814 	nvlist_destroy(nvc);
5815 
5816 	return (0);
5817 }
5818 
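/*
 * Build the packed status nvlist for userland: global flags and counters,
 * the status interface counters and the ruleset checksum.  The caller
 * negotiates the buffer size in two passes: with nv->size == 0 only the
 * required length is returned in nv->len, and a buffer smaller than
 * nv->len yields ENOSPC.
 *
 * Illustrative userland sequence (a sketch only, not part of this file;
 * it assumes the DIOCGETSTATUSNV ioctl and an open /dev/pf descriptor):
 *
 *	struct pfioc_nv nv = { .data = NULL, .len = 0, .size = 0 };
 *
 *	ioctl(dev, DIOCGETSTATUSNV, &nv);	// only nv.len is filled in
 *	nv.data = malloc(nv.len);
 *	nv.size = nv.len;
 *	ioctl(dev, DIOCGETSTATUSNV, &nv);	// packed nvlist in nv.data
 *	// nvlist_unpack(nv.data, nv.len, 0) then yields the status nvlist.
 */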
5819 static int
5820 pf_getstatus(struct pfioc_nv *nv)
5821 {
5822 	nvlist_t        *nvl = NULL, *nvc = NULL;
5823 	void            *nvlpacked = NULL;
5824 	int              error;
5825 	struct pf_status s;
5826 	char *pf_reasons[PFRES_MAX+1] = PFRES_NAMES;
5827 	char *pf_lcounter[KLCNT_MAX+1] = KLCNT_NAMES;
5828 	char *pf_fcounter[FCNT_MAX+1] = FCNT_NAMES;
5829 	PF_RULES_RLOCK_TRACKER;
5830 
5831 #define ERROUT(x)      ERROUT_FUNCTION(errout, x)
5832 
5833 	PF_RULES_RLOCK();
5834 
5835 	nvl = nvlist_create(0);
5836 	if (nvl == NULL)
5837 		ERROUT(ENOMEM);
5838 
5839 	nvlist_add_bool(nvl, "running", V_pf_status.running);
5840 	nvlist_add_number(nvl, "since", V_pf_status.since);
5841 	nvlist_add_number(nvl, "debug", V_pf_status.debug);
5842 	nvlist_add_number(nvl, "hostid", V_pf_status.hostid);
5843 	nvlist_add_number(nvl, "states", V_pf_status.states);
5844 	nvlist_add_number(nvl, "src_nodes", V_pf_status.src_nodes);
5845 	nvlist_add_number(nvl, "reass", V_pf_status.reass);
5846 	nvlist_add_bool(nvl, "syncookies_active",
5847 	    V_pf_status.syncookies_active);
5848 	nvlist_add_number(nvl, "halfopen_states", V_pf_status.states_halfopen);
5849 
5850 	/* counters */
5851 	error = pf_add_status_counters(nvl, "counters", V_pf_status.counters,
5852 	    PFRES_MAX, pf_reasons);
5853 	if (error != 0)
5854 		ERROUT(error);
5855 
5856 	/* lcounters */
5857 	error = pf_add_status_counters(nvl, "lcounters", V_pf_status.lcounters,
5858 	    KLCNT_MAX, pf_lcounter);
5859 	if (error != 0)
5860 		ERROUT(error);
5861 
5862 	/* fcounters */
5863 	nvc = nvlist_create(0);
5864 	if (nvc == NULL)
5865 		ERROUT(ENOMEM);
5866 
5867 	for (int i = 0; i < FCNT_MAX; i++) {
5868 		nvlist_append_number_array(nvc, "counters",
5869 		    pf_counter_u64_fetch(&V_pf_status.fcounters[i]));
5870 		nvlist_append_string_array(nvc, "names",
5871 		    pf_fcounter[i]);
5872 		nvlist_append_number_array(nvc, "ids",
5873 		    i);
5874 	}
5875 	nvlist_add_nvlist(nvl, "fcounters", nvc);
5876 	nvlist_destroy(nvc);
5877 	nvc = NULL;
5878 
5879 	/* scounters */
5880 	error = pf_add_status_counters(nvl, "scounters", V_pf_status.scounters,
5881 	    SCNT_MAX, pf_fcounter);
5882 	if (error != 0)
5883 		ERROUT(error);
5884 
5885 	nvlist_add_string(nvl, "ifname", V_pf_status.ifname);
5886 	nvlist_add_binary(nvl, "chksum", V_pf_status.pf_chksum,
5887 	    PF_MD5_DIGEST_LENGTH);
5888 
5889 	pfi_update_status(V_pf_status.ifname, &s);
5890 
5891 	/* pcounters / bcounters */
5892 	for (int i = 0; i < 2; i++) {
5893 		for (int j = 0; j < 2; j++) {
5894 			for (int k = 0; k < 2; k++) {
5895 				nvlist_append_number_array(nvl, "pcounters",
5896 				    s.pcounters[i][j][k]);
5897 			}
5898 			nvlist_append_number_array(nvl, "bcounters",
5899 			    s.bcounters[i][j]);
5900 		}
5901 	}
5902 
5903 	nvlpacked = nvlist_pack(nvl, &nv->len);
5904 	if (nvlpacked == NULL)
5905 		ERROUT(ENOMEM);
5906 
5907 	if (nv->size == 0)
5908 		ERROUT(0);
5909 	else if (nv->size < nv->len)
5910 		ERROUT(ENOSPC);
5911 
5912 	PF_RULES_RUNLOCK();
5913 	error = copyout(nvlpacked, nv->data, nv->len);
5914 	goto done;
5915 
5916 #undef ERROUT
5917 errout:
5918 	PF_RULES_RUNLOCK();
5919 done:
5920 	free(nvlpacked, M_NVLIST);
5921 	nvlist_destroy(nvc);
5922 	nvlist_destroy(nvl);
5923 
5924 	return (error);
5925 }
5926 
5927 /*
5928  * XXX - Check for version mismatch!!!
5929  */
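/*
 * Flush every state: mark each one for immediate purge, set
 * PFSTATE_NOSYNC so pfsync does not emit per-state delete messages,
 * and unlink it.  The hash row is re-scanned after every unlink.
 */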
5930 static void
5931 pf_clear_all_states(void)
5932 {
5933 	struct epoch_tracker	 et;
5934 	struct pf_kstate	*s;
5935 	u_int i;
5936 
5937 	NET_EPOCH_ENTER(et);
5938 	for (i = 0; i <= V_pf_hashmask; i++) {
5939 		struct pf_idhash *ih = &V_pf_idhash[i];
5940 relock:
5941 		PF_HASHROW_LOCK(ih);
5942 		LIST_FOREACH(s, &ih->states, entry) {
5943 			s->timeout = PFTM_PURGE;
5944 			/* Don't send out individual delete messages. */
5945 			s->state_flags |= PFSTATE_NOSYNC;
5946 			pf_unlink_state(s);
5947 			goto relock;
5948 		}
5949 		PF_HASHROW_UNLOCK(ih);
5950 	}
5951 	NET_EPOCH_EXIT(et);
5952 }
5953 
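/*
 * Flush the tables of all rulesets (PFR_FLAG_ALLRSETS).
 */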
5954 static int
5955 pf_clear_tables(void)
5956 {
5957 	struct pfioc_table io;
5958 	int error;
5959 
5960 	bzero(&io, sizeof(io));
5961 	io.pfrio_flags |= PFR_FLAG_ALLRSETS;
5962 
5963 	error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel,
5964 	    io.pfrio_flags);
5965 
5966 	return (error);
5967 }
5968 
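/*
 * Unlink and free source nodes.  With a non-NULL request only nodes
 * whose source and redirect addresses match the given filters are
 * killed; a NULL request kills them all.  States still pointing at a
 * killed node are detached from it before the nodes are freed.
 */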
5969 static void
5970 pf_kill_srcnodes(struct pfioc_src_node_kill *psnk)
5971 {
5972 	struct pf_ksrc_node_list	 kill;
5973 	u_int 				 killed;
5974 
5975 	LIST_INIT(&kill);
5976 	for (int i = 0; i <= V_pf_srchashmask; i++) {
5977 		struct pf_srchash *sh = &V_pf_srchash[i];
5978 		struct pf_ksrc_node *sn, *tmp;
5979 
5980 		PF_HASHROW_LOCK(sh);
5981 		LIST_FOREACH_SAFE(sn, &sh->nodes, entry, tmp)
5982 			if (psnk == NULL ||
5983 			    (PF_MATCHA(psnk->psnk_src.neg,
5984 			      &psnk->psnk_src.addr.v.a.addr,
5985 			      &psnk->psnk_src.addr.v.a.mask,
5986 			      &sn->addr, sn->af) &&
5987 			    PF_MATCHA(psnk->psnk_dst.neg,
5988 			      &psnk->psnk_dst.addr.v.a.addr,
5989 			      &psnk->psnk_dst.addr.v.a.mask,
5990 			      &sn->raddr, sn->af))) {
5991 				pf_unlink_src_node(sn);
5992 				LIST_INSERT_HEAD(&kill, sn, entry);
5993 				sn->expire = 1;
5994 			}
5995 		PF_HASHROW_UNLOCK(sh);
5996 	}
5997 
5998 	for (int i = 0; i <= V_pf_hashmask; i++) {
5999 		struct pf_idhash *ih = &V_pf_idhash[i];
6000 		struct pf_kstate *s;
6001 
6002 		PF_HASHROW_LOCK(ih);
6003 		LIST_FOREACH(s, &ih->states, entry) {
6004 			if (s->src_node && s->src_node->expire == 1)
6005 				s->src_node = NULL;
6006 			if (s->nat_src_node && s->nat_src_node->expire == 1)
6007 				s->nat_src_node = NULL;
6008 		}
6009 		PF_HASHROW_UNLOCK(ih);
6010 	}
6011 
6012 	killed = pf_free_src_nodes(&kill);
6013 
6014 	if (psnk != NULL)
6015 		psnk->psnk_killed = killed;
6016 }
6017 
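/*
 * Set V_pf_status.keep_counters from the boolean "keep_counters" in the
 * caller-supplied nvlist.
 */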
6018 static int
6019 pf_keepcounters(struct pfioc_nv *nv)
6020 {
6021 	nvlist_t	*nvl = NULL;
6022 	void		*nvlpacked = NULL;
6023 	int		 error = 0;
6024 
6025 #define	ERROUT(x)	ERROUT_FUNCTION(on_error, x)
6026 
6027 	if (nv->len > pf_ioctl_maxcount)
6028 		ERROUT(ENOMEM);
6029 
6030 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
6031 	error = copyin(nv->data, nvlpacked, nv->len);
6032 	if (error)
6033 		ERROUT(error);
6034 
6035 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
6036 	if (nvl == NULL)
6037 		ERROUT(EBADMSG);
6038 
6039 	if (! nvlist_exists_bool(nvl, "keep_counters"))
6040 		ERROUT(EBADMSG);
6041 
6042 	V_pf_status.keep_counters = nvlist_get_bool(nvl, "keep_counters");
6043 
6044 on_error:
6045 	nvlist_destroy(nvl);
6046 	free(nvlpacked, M_NVLIST);
6047 	return (error);
6048 }
6049 
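/*
 * Flush states, optionally restricted to an interface name.  With
 * psk_kill_match, states matching the reversed state key are killed as
 * well.  pfsync is told once about the bulk clear instead of sending
 * per-state delete messages.  Returns the number of states killed.
 */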
6050 unsigned int
6051 pf_clear_states(const struct pf_kstate_kill *kill)
6052 {
6053 	struct pf_state_key_cmp	 match_key;
6054 	struct pf_kstate	*s;
6055 	struct pfi_kkif	*kif;
6056 	int		 idx;
6057 	unsigned int	 killed = 0, dir;
6058 
6059 	NET_EPOCH_ASSERT();
6060 
6061 	for (unsigned int i = 0; i <= V_pf_hashmask; i++) {
6062 		struct pf_idhash *ih = &V_pf_idhash[i];
6063 
6064 relock_DIOCCLRSTATES:
6065 		PF_HASHROW_LOCK(ih);
6066 		LIST_FOREACH(s, &ih->states, entry) {
6067 			/* For floating states look at the original kif. */
6068 			kif = s->kif == V_pfi_all ? s->orig_kif : s->kif;
6069 
6070 			if (kill->psk_ifname[0] &&
6071 			    strcmp(kill->psk_ifname,
6072 			    kif->pfik_name))
6073 				continue;
6074 
6075 			if (kill->psk_kill_match) {
6076 				bzero(&match_key, sizeof(match_key));
6077 
6078 				if (s->direction == PF_OUT) {
6079 					dir = PF_IN;
6080 					idx = PF_SK_STACK;
6081 				} else {
6082 					dir = PF_OUT;
6083 					idx = PF_SK_WIRE;
6084 				}
6085 
6086 				match_key.af = s->key[idx]->af;
6087 				match_key.proto = s->key[idx]->proto;
6088 				PF_ACPY(&match_key.addr[0],
6089 				    &s->key[idx]->addr[1], match_key.af);
6090 				match_key.port[0] = s->key[idx]->port[1];
6091 				PF_ACPY(&match_key.addr[1],
6092 				    &s->key[idx]->addr[0], match_key.af);
6093 				match_key.port[1] = s->key[idx]->port[0];
6094 			}
6095 
6096 			/*
6097 			 * Don't send out individual
6098 			 * delete messages.
6099 			 */
6100 			s->state_flags |= PFSTATE_NOSYNC;
6101 			pf_unlink_state(s);
6102 			killed++;
6103 
6104 			if (kill->psk_kill_match)
6105 				killed += pf_kill_matching_state(&match_key,
6106 				    dir);
6107 
6108 			goto relock_DIOCCLRSTATES;
6109 		}
6110 		PF_HASHROW_UNLOCK(ih);
6111 	}
6112 
6113 	if (V_pfsync_clear_states_ptr != NULL)
6114 		V_pfsync_clear_states_ptr(V_pf_status.hostid, kill->psk_ifname);
6115 
6116 	return (killed);
6117 }
6118 
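/*
 * Kill states by id/creatorid if one was given (creatorid 0 defaults to
 * our own hostid); otherwise apply the kill criteria to every id hash
 * row via pf_killstates_row().
 */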
6119 void
6120 pf_killstates(struct pf_kstate_kill *kill, unsigned int *killed)
6121 {
6122 	struct pf_kstate	*s;
6123 
6124 	NET_EPOCH_ASSERT();
6125 	if (kill->psk_pfcmp.id) {
6126 		if (kill->psk_pfcmp.creatorid == 0)
6127 			kill->psk_pfcmp.creatorid = V_pf_status.hostid;
6128 		if ((s = pf_find_state_byid(kill->psk_pfcmp.id,
6129 		    kill->psk_pfcmp.creatorid))) {
6130 			pf_unlink_state(s);
6131 			*killed = 1;
6132 		}
6133 		return;
6134 	}
6135 
6136 	for (unsigned int i = 0; i <= V_pf_hashmask; i++)
6137 		*killed += pf_killstates_row(kill, &V_pf_idhash[i]);
6138 }
6139 
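/*
 * nvlist wrapper around pf_killstates(): unpack the kill request, run
 * it under the network epoch and return the "killed" count, using the
 * same two-pass size negotiation as pf_getstatus().
 */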
6140 static int
6141 pf_killstates_nv(struct pfioc_nv *nv)
6142 {
6143 	struct pf_kstate_kill	 kill;
6144 	struct epoch_tracker	 et;
6145 	nvlist_t		*nvl = NULL;
6146 	void			*nvlpacked = NULL;
6147 	int			 error = 0;
6148 	unsigned int		 killed = 0;
6149 
6150 #define ERROUT(x)	ERROUT_FUNCTION(on_error, x)
6151 
6152 	if (nv->len > pf_ioctl_maxcount)
6153 		ERROUT(ENOMEM);
6154 
6155 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
6156 	error = copyin(nv->data, nvlpacked, nv->len);
6157 	if (error)
6158 		ERROUT(error);
6159 
6160 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
6161 	if (nvl == NULL)
6162 		ERROUT(EBADMSG);
6163 
6164 	error = pf_nvstate_kill_to_kstate_kill(nvl, &kill);
6165 	if (error)
6166 		ERROUT(error);
6167 
6168 	NET_EPOCH_ENTER(et);
6169 	pf_killstates(&kill, &killed);
6170 	NET_EPOCH_EXIT(et);
6171 
6172 	free(nvlpacked, M_NVLIST);
6173 	nvlpacked = NULL;
6174 	nvlist_destroy(nvl);
6175 	nvl = nvlist_create(0);
6176 	if (nvl == NULL)
6177 		ERROUT(ENOMEM);
6178 
6179 	nvlist_add_number(nvl, "killed", killed);
6180 
6181 	nvlpacked = nvlist_pack(nvl, &nv->len);
6182 	if (nvlpacked == NULL)
6183 		ERROUT(ENOMEM);
6184 
6185 	if (nv->size == 0)
6186 		ERROUT(0);
6187 	else if (nv->size < nv->len)
6188 		ERROUT(ENOSPC);
6189 
6190 	error = copyout(nvlpacked, nv->data, nv->len);
6191 
6192 on_error:
6193 	nvlist_destroy(nvl);
6194 	free(nvlpacked, M_NVLIST);
6195 	return (error);
6196 }
6197 
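/*
 * nvlist wrapper around pf_clear_states(); same structure as
 * pf_killstates_nv() above.
 */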
6198 static int
6199 pf_clearstates_nv(struct pfioc_nv *nv)
6200 {
6201 	struct pf_kstate_kill	 kill;
6202 	struct epoch_tracker	 et;
6203 	nvlist_t		*nvl = NULL;
6204 	void			*nvlpacked = NULL;
6205 	int			 error = 0;
6206 	unsigned int		 killed;
6207 
6208 #define ERROUT(x)	ERROUT_FUNCTION(on_error, x)
6209 
6210 	if (nv->len > pf_ioctl_maxcount)
6211 		ERROUT(ENOMEM);
6212 
6213 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
6214 	error = copyin(nv->data, nvlpacked, nv->len);
6215 	if (error)
6216 		ERROUT(error);
6217 
6218 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
6219 	if (nvl == NULL)
6220 		ERROUT(EBADMSG);
6221 
6222 	error = pf_nvstate_kill_to_kstate_kill(nvl, &kill);
6223 	if (error)
6224 		ERROUT(error);
6225 
6226 	NET_EPOCH_ENTER(et);
6227 	killed = pf_clear_states(&kill);
6228 	NET_EPOCH_EXIT(et);
6229 
6230 	free(nvlpacked, M_NVLIST);
6231 	nvlpacked = NULL;
6232 	nvlist_destroy(nvl);
6233 	nvl = nvlist_create(0);
6234 	if (nvl == NULL)
6235 		ERROUT(ENOMEM);
6236 
6237 	nvlist_add_number(nvl, "killed", killed);
6238 
6239 	nvlpacked = nvlist_pack(nvl, &nv->len);
6240 	if (nvlpacked == NULL)
6241 		ERROUT(ENOMEM);
6242 
6243 	if (nv->size == 0)
6244 		ERROUT(0);
6245 	else if (nv->size < nv->len)
6246 		ERROUT(ENOSPC);
6247 
6248 	error = copyout(nvlpacked, nv->data, nv->len);
6249 
6250 #undef ERROUT
6251 on_error:
6252 	nvlist_destroy(nvl);
6253 	free(nvlpacked, M_NVLIST);
6254 	return (error);
6255 }
6256 
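/*
 * Look up a single state by "id"/"creatorid" and return it to userland
 * as an nvlist under the key "state".  Returns ENOENT if no such state
 * exists.
 */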
6257 static int
6258 pf_getstate(struct pfioc_nv *nv)
6259 {
6260 	nvlist_t		*nvl = NULL, *nvls;
6261 	void			*nvlpacked = NULL;
6262 	struct pf_kstate	*s = NULL;
6263 	int			 error = 0;
6264 	uint64_t		 id, creatorid;
6265 
6266 #define ERROUT(x)	ERROUT_FUNCTION(errout, x)
6267 
6268 	if (nv->len > pf_ioctl_maxcount)
6269 		ERROUT(ENOMEM);
6270 
6271 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
6272 	error = copyin(nv->data, nvlpacked, nv->len);
6273 	if (error)
6274 		ERROUT(error);
6275 
6276 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
6277 	if (nvl == NULL)
6278 		ERROUT(EBADMSG);
6279 
6280 	PFNV_CHK(pf_nvuint64(nvl, "id", &id));
6281 	PFNV_CHK(pf_nvuint64(nvl, "creatorid", &creatorid));
6282 
6283 	s = pf_find_state_byid(id, creatorid);
6284 	if (s == NULL)
6285 		ERROUT(ENOENT);
6286 
6287 	free(nvlpacked, M_NVLIST);
6288 	nvlpacked = NULL;
6289 	nvlist_destroy(nvl);
6290 	nvl = nvlist_create(0);
6291 	if (nvl == NULL)
6292 		ERROUT(ENOMEM);
6293 
6294 	nvls = pf_state_to_nvstate(s);
6295 	if (nvls == NULL)
6296 		ERROUT(ENOMEM);
6297 
6298 	nvlist_add_nvlist(nvl, "state", nvls);
6299 	nvlist_destroy(nvls);
6300 
6301 	nvlpacked = nvlist_pack(nvl, &nv->len);
6302 	if (nvlpacked == NULL)
6303 		ERROUT(ENOMEM);
6304 
6305 	if (nv->size == 0)
6306 		ERROUT(0);
6307 	else if (nv->size < nv->len)
6308 		ERROUT(ENOSPC);
6309 
6310 	error = copyout(nvlpacked, nv->data, nv->len);
6311 
6312 #undef ERROUT
6313 errout:
6314 	if (s != NULL)
6315 		PF_STATE_UNLOCK(s);
6316 	free(nvlpacked, M_NVLIST);
6317 	nvlist_destroy(nvl);
6318 	return (error);
6319 }
6320 
6321 /*
6322  * XXX - Check for version mismatch!!!
6323  */
6324 
6325 /*
6326  * Duplicate pfctl -Fa operation to get rid of as much as we can.
6327  */
6328 static int
6329 shutdown_pf(void)
6330 {
6331 	int error = 0;
6332 	u_int32_t t[5];
6333 	char nn = '\0';
6334 	struct pf_kanchor *anchor;
6335 	struct pf_keth_anchor *eth_anchor;
6336 	int rs_num;
6337 
6338 	do {
6339 		/* Unlink rules of all user defined anchors */
6340 		RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors) {
6341 			/* Wildcard-based anchors may not have a corresponding
6342 			 * explicit anchor rule, or they may have been left
6343 			 * without any rules.  That leaves anchor.refcnt at 0,
6344 			 * which the rest of the logic does not expect. */
6345 			if (anchor->refcnt == 0)
6346 				anchor->refcnt = 1;
6347 			for (rs_num = 0; rs_num < PF_RULESET_MAX; ++rs_num) {
6348 				if ((error = pf_begin_rules(&t[rs_num], rs_num,
6349 				    anchor->path)) != 0) {
6350 					DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: "
6351 					    "anchor.path=%s rs_num=%d\n",
6352 					    anchor->path, rs_num));
6353 					goto error;	/* XXX: rollback? */
6354 				}
6355 			}
6356 			for (rs_num = 0; rs_num < PF_RULESET_MAX; ++rs_num) {
6357 				error = pf_commit_rules(t[rs_num], rs_num,
6358 				    anchor->path);
6359 				MPASS(error == 0);
6360 			}
6361 		}
6362 
6363 		/* Unlink rules of all user defined ether anchors */
6364 		RB_FOREACH(eth_anchor, pf_keth_anchor_global,
6365 		    &V_pf_keth_anchors) {
6366 			/* Wildcard-based anchors may not have a corresponding
6367 			 * explicit anchor rule, or they may have been left
6368 			 * without any rules.  That leaves anchor.refcnt at 0,
6369 			 * which the rest of the logic does not expect. */
6370 			if (eth_anchor->refcnt == 0)
6371 				eth_anchor->refcnt = 1;
6372 			if ((error = pf_begin_eth(&t[0], eth_anchor->path))
6373 			    != 0) {
6374 				DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: eth "
6375 				    "anchor.path=%s\n", eth_anchor->path));
6376 				goto error;
6377 			}
6378 			error = pf_commit_eth(t[0], eth_anchor->path);
6379 			MPASS(error == 0);
6380 		}
6381 
6382 		if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn))
6383 		    != 0) {
6384 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n"));
6385 			break;
6386 		}
6387 		if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn))
6388 		    != 0) {
6389 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n"));
6390 			break;		/* XXX: rollback? */
6391 		}
6392 		if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn))
6393 		    != 0) {
6394 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n"));
6395 			break;		/* XXX: rollback? */
6396 		}
6397 		if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn))
6398 		    != 0) {
6399 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n"));
6400 			break;		/* XXX: rollback? */
6401 		}
6402 		if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn))
6403 		    != 0) {
6404 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n"));
6405 			break;		/* XXX: rollback? */
6406 		}
6407 
6408 		error = pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn);
6409 		MPASS(error == 0);
6410 		error = pf_commit_rules(t[1], PF_RULESET_FILTER, &nn);
6411 		MPASS(error == 0);
6412 		error = pf_commit_rules(t[2], PF_RULESET_NAT, &nn);
6413 		MPASS(error == 0);
6414 		error = pf_commit_rules(t[3], PF_RULESET_BINAT, &nn);
6415 		MPASS(error == 0);
6416 		error = pf_commit_rules(t[4], PF_RULESET_RDR, &nn);
6417 		MPASS(error == 0);
6418 
6419 		if ((error = pf_clear_tables()) != 0)
6420 			break;
6421 
6422 		if ((error = pf_begin_eth(&t[0], &nn)) != 0) {
6423 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: eth\n"));
6424 			break;
6425 		}
6426 		error = pf_commit_eth(t[0], &nn);
6427 		MPASS(error == 0);
6428 
6429 #ifdef ALTQ
6430 		if ((error = pf_begin_altq(&t[0])) != 0) {
6431 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n"));
6432 			break;
6433 		}
6434 		pf_commit_altq(t[0]);
6435 #endif
6436 
6437 		pf_clear_all_states();
6438 
6439 		pf_kill_srcnodes(NULL);
6440 
6441 		/* status does not use malloc'ed memory, so no cleanup is needed */
6442 		/* fingerprints and interfaces have their own cleanup code */
6443 	} while(0);
6444 
6445 error:
6446 	return (error);
6447 }
6448 
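/*
 * Map a pf_test*() verdict onto a pfil return value.  PF_PASS with the
 * mbuf consumed (set to NULL) becomes PFIL_CONSUMED; any other verdict
 * frees the mbuf, if still present, and reports PFIL_DROPPED.  The
 * pf_eth_check_*() and pf_check*_*() functions below are the pfil mbuf
 * hooks that feed packets into pf_test_eth() and pf_test().
 */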
6449 static pfil_return_t
6450 pf_check_return(int chk, struct mbuf **m)
6451 {
6452 
6453 	switch (chk) {
6454 	case PF_PASS:
6455 		if (*m == NULL)
6456 			return (PFIL_CONSUMED);
6457 		else
6458 			return (PFIL_PASS);
6459 		break;
6460 	default:
6461 		if (*m != NULL) {
6462 			m_freem(*m);
6463 			*m = NULL;
6464 		}
6465 		return (PFIL_DROPPED);
6466 	}
6467 }
6468 
6469 static pfil_return_t
6470 pf_eth_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
6471     void *ruleset __unused, struct inpcb *inp)
6472 {
6473 	int chk;
6474 
6475 	CURVNET_ASSERT_SET();
6476 
6477 	chk = pf_test_eth(PF_IN, flags, ifp, m, inp);
6478 
6479 	return (pf_check_return(chk, m));
6480 }
6481 
6482 static pfil_return_t
6483 pf_eth_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
6484     void *ruleset __unused, struct inpcb *inp)
6485 {
6486 	int chk;
6487 
6488 	CURVNET_ASSERT_SET();
6489 
6490 	chk = pf_test_eth(PF_OUT, flags, ifp, m, inp);
6491 
6492 	return (pf_check_return(chk, m));
6493 }
6494 
6495 #ifdef INET
6496 static pfil_return_t
6497 pf_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
6498     void *ruleset __unused, struct inpcb *inp)
6499 {
6500 	int chk;
6501 
6502 	CURVNET_ASSERT_SET();
6503 
6504 	chk = pf_test(AF_INET, PF_IN, flags, ifp, m, inp, NULL);
6505 
6506 	return (pf_check_return(chk, m));
6507 }
6508 
6509 static pfil_return_t
6510 pf_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
6511     void *ruleset __unused,  struct inpcb *inp)
6512 {
6513 	int chk;
6514 
6515 	CURVNET_ASSERT_SET();
6516 
6517 	chk = pf_test(AF_INET, PF_OUT, flags, ifp, m, inp, NULL);
6518 
6519 	return (pf_check_return(chk, m));
6520 }
6521 #endif
6522 
6523 #ifdef INET6
6524 static pfil_return_t
6525 pf_check6_in(struct mbuf **m, struct ifnet *ifp, int flags,
6526     void *ruleset __unused,  struct inpcb *inp)
6527 {
6528 	int chk;
6529 
6530 	CURVNET_ASSERT_SET();
6531 
6532 	/*
6533 	 * In the case of loopback traffic, IPv6 uses the real interface in
6534 	 * order to support scoped addresses.  To support stateful filtering
6535 	 * we have to change this to lo0, as is the case for IPv4.
6536 	 */
6537 	chk = pf_test(AF_INET6, PF_IN, flags, (*m)->m_flags & M_LOOP ? V_loif : ifp,
6538 	    m, inp, NULL);
6539 
6540 	return (pf_check_return(chk, m));
6541 }
6542 
6543 static pfil_return_t
6544 pf_check6_out(struct mbuf **m, struct ifnet *ifp, int flags,
6545     void *ruleset __unused,  struct inpcb *inp)
6546 {
6547 	int chk;
6548 
6549 	CURVNET_ASSERT_SET();
6550 
6551 	chk = pf_test(AF_INET6, PF_OUT, flags, ifp, m, inp, NULL);
6552 
6553 	return (pf_check_return(chk, m));
6554 }
6555 #endif /* INET6 */
6556 
6557 VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_in_hook);
6558 VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_out_hook);
6559 #define	V_pf_eth_in_hook	VNET(pf_eth_in_hook)
6560 #define	V_pf_eth_out_hook	VNET(pf_eth_out_hook)
6561 
6562 #ifdef INET
6563 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_in_hook);
6564 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_out_hook);
6565 #define	V_pf_ip4_in_hook	VNET(pf_ip4_in_hook)
6566 #define	V_pf_ip4_out_hook	VNET(pf_ip4_out_hook)
6567 #endif
6568 #ifdef INET6
6569 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_in_hook);
6570 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_out_hook);
6571 #define	V_pf_ip6_in_hook	VNET(pf_ip6_in_hook)
6572 #define	V_pf_ip6_out_hook	VNET(pf_ip6_out_hook)
6573 #endif
6574 
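/*
 * Register the Ethernet-layer pfil hooks and link them onto the
 * link-layer pfil head, unless that has already been done.
 */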
6575 static void
6576 hook_pf_eth(void)
6577 {
6578 	struct pfil_hook_args pha = {
6579 		.pa_version = PFIL_VERSION,
6580 		.pa_modname = "pf",
6581 		.pa_type = PFIL_TYPE_ETHERNET,
6582 	};
6583 	struct pfil_link_args pla = {
6584 		.pa_version = PFIL_VERSION,
6585 	};
6586 	int ret __diagused;
6587 
6588 	if (atomic_load_bool(&V_pf_pfil_eth_hooked))
6589 		return;
6590 
6591 	pha.pa_mbuf_chk = pf_eth_check_in;
6592 	pha.pa_flags = PFIL_IN;
6593 	pha.pa_rulname = "eth-in";
6594 	V_pf_eth_in_hook = pfil_add_hook(&pha);
6595 	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
6596 	pla.pa_head = V_link_pfil_head;
6597 	pla.pa_hook = V_pf_eth_in_hook;
6598 	ret = pfil_link(&pla);
6599 	MPASS(ret == 0);
6600 	pha.pa_mbuf_chk = pf_eth_check_out;
6601 	pha.pa_flags = PFIL_OUT;
6602 	pha.pa_rulname = "eth-out";
6603 	V_pf_eth_out_hook = pfil_add_hook(&pha);
6604 	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6605 	pla.pa_head = V_link_pfil_head;
6606 	pla.pa_hook = V_pf_eth_out_hook;
6607 	ret = pfil_link(&pla);
6608 	MPASS(ret == 0);
6609 
6610 	atomic_store_bool(&V_pf_pfil_eth_hooked, true);
6611 }
6612 
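/*
 * Register the IPv4/IPv6 pfil hooks and link them onto the inet and
 * inet6 pfil heads; with V_pf_filter_local set, the output hooks are
 * also linked onto the local output heads.
 */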
6613 static void
6614 hook_pf(void)
6615 {
6616 	struct pfil_hook_args pha = {
6617 		.pa_version = PFIL_VERSION,
6618 		.pa_modname = "pf",
6619 	};
6620 	struct pfil_link_args pla = {
6621 		.pa_version = PFIL_VERSION,
6622 	};
6623 	int ret __diagused;
6624 
6625 	if (atomic_load_bool(&V_pf_pfil_hooked))
6626 		return;
6627 
6628 #ifdef INET
6629 	pha.pa_type = PFIL_TYPE_IP4;
6630 	pha.pa_mbuf_chk = pf_check_in;
6631 	pha.pa_flags = PFIL_IN;
6632 	pha.pa_rulname = "default-in";
6633 	V_pf_ip4_in_hook = pfil_add_hook(&pha);
6634 	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
6635 	pla.pa_head = V_inet_pfil_head;
6636 	pla.pa_hook = V_pf_ip4_in_hook;
6637 	ret = pfil_link(&pla);
6638 	MPASS(ret == 0);
6639 	pha.pa_mbuf_chk = pf_check_out;
6640 	pha.pa_flags = PFIL_OUT;
6641 	pha.pa_rulname = "default-out";
6642 	V_pf_ip4_out_hook = pfil_add_hook(&pha);
6643 	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6644 	pla.pa_head = V_inet_pfil_head;
6645 	pla.pa_hook = V_pf_ip4_out_hook;
6646 	ret = pfil_link(&pla);
6647 	MPASS(ret == 0);
6648 	if (V_pf_filter_local) {
6649 		pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6650 		pla.pa_head = V_inet_local_pfil_head;
6651 		pla.pa_hook = V_pf_ip4_out_hook;
6652 		ret = pfil_link(&pla);
6653 		MPASS(ret == 0);
6654 	}
6655 #endif
6656 #ifdef INET6
6657 	pha.pa_type = PFIL_TYPE_IP6;
6658 	pha.pa_mbuf_chk = pf_check6_in;
6659 	pha.pa_flags = PFIL_IN;
6660 	pha.pa_rulname = "default-in6";
6661 	V_pf_ip6_in_hook = pfil_add_hook(&pha);
6662 	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
6663 	pla.pa_head = V_inet6_pfil_head;
6664 	pla.pa_hook = V_pf_ip6_in_hook;
6665 	ret = pfil_link(&pla);
6666 	MPASS(ret == 0);
6667 	pha.pa_mbuf_chk = pf_check6_out;
6668 	pha.pa_rulname = "default-out6";
6669 	pha.pa_flags = PFIL_OUT;
6670 	V_pf_ip6_out_hook = pfil_add_hook(&pha);
6671 	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6672 	pla.pa_head = V_inet6_pfil_head;
6673 	pla.pa_hook = V_pf_ip6_out_hook;
6674 	ret = pfil_link(&pla);
6675 	MPASS(ret == 0);
6676 	if (V_pf_filter_local) {
6677 		pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6678 		pla.pa_head = V_inet6_local_pfil_head;
6679 		pla.pa_hook = V_pf_ip6_out_hook;
6680 		ret = pfil_link(&pla);
6681 		MPASS(ret == 0);
6682 	}
6683 #endif
6684 
6685 	atomic_store_bool(&V_pf_pfil_hooked, true);
6686 }
6687 
6688 static void
6689 dehook_pf_eth(void)
6690 {
6691 
6692 	if (!atomic_load_bool(&V_pf_pfil_eth_hooked))
6693 		return;
6694 
6695 	pfil_remove_hook(V_pf_eth_in_hook);
6696 	pfil_remove_hook(V_pf_eth_out_hook);
6697 
6698 	atomic_store_bool(&V_pf_pfil_eth_hooked, false);
6699 }
6700 
6701 static void
6702 dehook_pf(void)
6703 {
6704 
6705 	if (!atomic_load_bool(&V_pf_pfil_hooked))
6706 		return;
6707 
6708 #ifdef INET
6709 	pfil_remove_hook(V_pf_ip4_in_hook);
6710 	pfil_remove_hook(V_pf_ip4_out_hook);
6711 #endif
6712 #ifdef INET6
6713 	pfil_remove_hook(V_pf_ip6_in_hook);
6714 	pfil_remove_hook(V_pf_ip6_out_hook);
6715 #endif
6716 
6717 	atomic_store_bool(&V_pf_pfil_hooked, false);
6718 }
6719 
6720 static void
6721 pf_load_vnet(void)
6722 {
6723 	V_pf_tag_z = uma_zcreate("pf tags", sizeof(struct pf_tagname),
6724 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
6725 
6726 	rm_init_flags(&V_pf_rules_lock, "pf rulesets", RM_RECURSE);
6727 	sx_init(&V_pf_ioctl_lock, "pf ioctl");
6728 
6729 	pf_init_tagset(&V_pf_tags, &pf_rule_tag_hashsize,
6730 	    PF_RULE_TAG_HASH_SIZE_DEFAULT);
6731 #ifdef ALTQ
6732 	pf_init_tagset(&V_pf_qids, &pf_queue_tag_hashsize,
6733 	    PF_QUEUE_TAG_HASH_SIZE_DEFAULT);
6734 #endif
6735 
6736 	V_pf_keth = &V_pf_main_keth_anchor.ruleset;
6737 
6738 	pfattach_vnet();
6739 	V_pf_vnet_active = 1;
6740 }
6741 
6742 static int
6743 pf_load(void)
6744 {
6745 	int error;
6746 
6747 	sx_init(&pf_end_lock, "pf end thread");
6748 
6749 	pf_mtag_initialize();
6750 
6751 	pf_dev = make_dev(&pf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, PF_NAME);
6752 	if (pf_dev == NULL)
6753 		return (ENOMEM);
6754 
6755 	pf_end_threads = 0;
6756 	error = kproc_create(pf_purge_thread, NULL, &pf_purge_proc, 0, 0, "pf purge");
6757 	if (error != 0)
6758 		return (error);
6759 
6760 	pfi_initialize();
6761 
6762 	return (0);
6763 }
6764 
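/*
 * Per-vnet teardown: stop pf, unhook it from pfil, flush the
 * configuration via shutdown_pf() and release all per-vnet resources.
 */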
6765 static void
6766 pf_unload_vnet(void)
6767 {
6768 	int ret __diagused;
6769 
6770 	V_pf_vnet_active = 0;
6771 	V_pf_status.running = 0;
6772 	dehook_pf();
6773 	dehook_pf_eth();
6774 
6775 	PF_RULES_WLOCK();
6776 	pf_syncookies_cleanup();
6777 	shutdown_pf();
6778 	PF_RULES_WUNLOCK();
6779 
6780 	/* Make sure we've cleaned up ethernet rules before we continue. */
6781 	NET_EPOCH_DRAIN_CALLBACKS();
6782 
6783 	ret = swi_remove(V_pf_swi_cookie);
6784 	MPASS(ret == 0);
6785 	ret = intr_event_destroy(V_pf_swi_ie);
6786 	MPASS(ret == 0);
6787 
6788 	pf_unload_vnet_purge();
6789 
6790 	pf_normalize_cleanup();
6791 	PF_RULES_WLOCK();
6792 	pfi_cleanup_vnet();
6793 	PF_RULES_WUNLOCK();
6794 	pfr_cleanup();
6795 	pf_osfp_flush();
6796 	pf_cleanup();
6797 	if (IS_DEFAULT_VNET(curvnet))
6798 		pf_mtag_cleanup();
6799 
6800 	pf_cleanup_tagset(&V_pf_tags);
6801 #ifdef ALTQ
6802 	pf_cleanup_tagset(&V_pf_qids);
6803 #endif
6804 	uma_zdestroy(V_pf_tag_z);
6805 
6806 #ifdef PF_WANT_32_TO_64_COUNTER
6807 	PF_RULES_WLOCK();
6808 	LIST_REMOVE(V_pf_kifmarker, pfik_allkiflist);
6809 
6810 	MPASS(LIST_EMPTY(&V_pf_allkiflist));
6811 	MPASS(V_pf_allkifcount == 0);
6812 
6813 	LIST_REMOVE(&V_pf_default_rule, allrulelist);
6814 	V_pf_allrulecount--;
6815 	LIST_REMOVE(V_pf_rulemarker, allrulelist);
6816 
6817 	MPASS(LIST_EMPTY(&V_pf_allrulelist));
6818 	MPASS(V_pf_allrulecount == 0);
6819 
6820 	PF_RULES_WUNLOCK();
6821 
6822 	free(V_pf_kifmarker, PFI_MTYPE);
6823 	free(V_pf_rulemarker, M_PFRULE);
6824 #endif
6825 
6826 	/* Free counters last as we updated them during shutdown. */
6827 	pf_counter_u64_deinit(&V_pf_default_rule.evaluations);
6828 	for (int i = 0; i < 2; i++) {
6829 		pf_counter_u64_deinit(&V_pf_default_rule.packets[i]);
6830 		pf_counter_u64_deinit(&V_pf_default_rule.bytes[i]);
6831 	}
6832 	counter_u64_free(V_pf_default_rule.states_cur);
6833 	counter_u64_free(V_pf_default_rule.states_tot);
6834 	counter_u64_free(V_pf_default_rule.src_nodes);
6835 	uma_zfree_pcpu(pf_timestamp_pcpu_zone, V_pf_default_rule.timestamp);
6836 
6837 	for (int i = 0; i < PFRES_MAX; i++)
6838 		counter_u64_free(V_pf_status.counters[i]);
6839 	for (int i = 0; i < KLCNT_MAX; i++)
6840 		counter_u64_free(V_pf_status.lcounters[i]);
6841 	for (int i = 0; i < FCNT_MAX; i++)
6842 		pf_counter_u64_deinit(&V_pf_status.fcounters[i]);
6843 	for (int i = 0; i < SCNT_MAX; i++)
6844 		counter_u64_free(V_pf_status.scounters[i]);
6845 
6846 	rm_destroy(&V_pf_rules_lock);
6847 	sx_destroy(&V_pf_ioctl_lock);
6848 }
6849 
6850 static void
6851 pf_unload(void)
6852 {
6853 
6854 	sx_xlock(&pf_end_lock);
6855 	pf_end_threads = 1;
6856 	while (pf_end_threads < 2) {
6857 		wakeup_one(pf_purge_thread);
6858 		sx_sleep(pf_purge_proc, &pf_end_lock, 0, "pftmo", 0);
6859 	}
6860 	sx_xunlock(&pf_end_lock);
6861 
6862 	pf_nl_unregister();
6863 
6864 	if (pf_dev != NULL)
6865 		destroy_dev(pf_dev);
6866 
6867 	pfi_cleanup();
6868 
6869 	sx_destroy(&pf_end_lock);
6870 }
6871 
6872 static void
6873 vnet_pf_init(void *unused __unused)
6874 {
6875 
6876 	pf_load_vnet();
6877 }
6878 VNET_SYSINIT(vnet_pf_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
6879     vnet_pf_init, NULL);
6880 
6881 static void
6882 vnet_pf_uninit(const void *unused __unused)
6883 {
6884 
6885 	pf_unload_vnet();
6886 }
6887 SYSUNINIT(pf_unload, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND, pf_unload, NULL);
6888 VNET_SYSUNINIT(vnet_pf_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
6889     vnet_pf_uninit, NULL);
6890 
6891 static int
6892 pf_modevent(module_t mod, int type, void *data)
6893 {
6894 	int error = 0;
6895 
6896 	switch(type) {
6897 	case MOD_LOAD:
6898 		error = pf_load();
6899 		pf_nl_register();
6900 		break;
6901 	case MOD_UNLOAD:
6902 		/* Handled in SYSUNINIT(pf_unload) to ensure it's done after
6903 		 * the vnet_pf_uninit()s */
6904 		break;
6905 	default:
6906 		error = EINVAL;
6907 		break;
6908 	}
6909 
6910 	return (error);
6911 }
6912 
6913 static moduledata_t pf_mod = {
6914 	"pf",
6915 	pf_modevent,
6916 	0
6917 };
6918 
6919 DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND);
6920 MODULE_DEPEND(pf, netlink, 1, 1, 1);
6921 MODULE_VERSION(pf, PF_MODVER);
6922