/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002,2003 Henning Brauer
 * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 *	$OpenBSD: pf_ioctl.c,v 1.213 2009/02/15 21:46:12 mbalmer Exp $
 */

#include <sys/cdefs.h>
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_bpf.h"
#include "opt_pf.h"

#include <sys/param.h>
#include <sys/_bitset.h>
#include <sys/bitset.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/endian.h>
#include <sys/fcntl.h>
#include <sys/filio.h>
#include <sys/hash.h>
#include <sys/interrupt.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/nv.h>
#include <sys/proc.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/md5.h>
#include <sys/ucred.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_private.h>
#include <net/vnet.h>
#include <net/route.h>
#include <net/pfil.h>
#include <net/pfvar.h>
#include <net/if_pfsync.h>
#include <net/if_pflog.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>
#include <netinet/ip_icmp.h>
#include <netpfil/pf/pf_nl.h>
#include <netpfil/pf/pf_nv.h>

#ifdef INET6
#include <netinet/ip6.h>
#endif /* INET6 */

#ifdef ALTQ
#include <net/altq/altq.h>
#endif
SDT_PROBE_DEFINE3(pf, ioctl, ioctl, error, "int", "int", "int");
SDT_PROBE_DEFINE3(pf, ioctl, function, error, "char *", "int", "int");
SDT_PROBE_DEFINE2(pf, ioctl, addrule, error, "int", "int");
SDT_PROBE_DEFINE2(pf, ioctl, nvchk, error, "int", "int");

static struct pf_kpool	*pf_get_kpool(const char *, u_int32_t, u_int8_t,
			    u_int32_t, u_int8_t, u_int8_t, u_int8_t, int);

static void		 pf_mv_kpool(struct pf_kpalist *, struct pf_kpalist *);
static void		 pf_empty_kpool(struct pf_kpalist *);
static int		 pfioctl(struct cdev *, u_long, caddr_t, int,
			    struct thread *);
static int		 pf_begin_eth(uint32_t *, const char *);
static int		 pf_rollback_eth(uint32_t, const char *);
static int		 pf_commit_eth(uint32_t, const char *);
static void		 pf_free_eth_rule(struct pf_keth_rule *);
#ifdef ALTQ
static int		 pf_begin_altq(u_int32_t *);
static int		 pf_rollback_altq(u_int32_t);
static int		 pf_commit_altq(u_int32_t);
static int		 pf_enable_altq(struct pf_altq *);
static int		 pf_disable_altq(struct pf_altq *);
static uint16_t		 pf_qname2qid(const char *);
static void		 pf_qid_unref(uint16_t);
#endif /* ALTQ */
static int		 pf_begin_rules(u_int32_t *, int, const char *);
static int		 pf_rollback_rules(u_int32_t, int, char *);
static int		 pf_setup_pfsync_matching(struct pf_kruleset *);
static void		 pf_hash_rule_rolling(MD5_CTX *, struct pf_krule *);
static void		 pf_hash_rule(struct pf_krule *);
static void		 pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
static int		 pf_commit_rules(u_int32_t, int, char *);
static int		 pf_addr_setup(struct pf_kruleset *,
			    struct pf_addr_wrap *, sa_family_t);
static void		 pf_src_node_copy(const struct pf_ksrc_node *,
			    struct pf_src_node *);
#ifdef ALTQ
static int		 pf_export_kaltq(struct pf_altq *,
			    struct pfioc_altq_v1 *, size_t);
static int		 pf_import_kaltq(struct pfioc_altq_v1 *,
			    struct pf_altq *, size_t);
#endif /* ALTQ */

VNET_DEFINE(struct pf_krule,	pf_default_rule);

static __inline int             pf_krule_compare(struct pf_krule *,
				    struct pf_krule *);

RB_GENERATE(pf_krule_global, pf_krule, entry_global, pf_krule_compare);

#ifdef ALTQ
VNET_DEFINE_STATIC(int,		pf_altq_running);
#define	V_pf_altq_running	VNET(pf_altq_running)
#endif

#define	TAGID_MAX	 50000
struct pf_tagname {
	TAILQ_ENTRY(pf_tagname)	namehash_entries;
	TAILQ_ENTRY(pf_tagname)	taghash_entries;
	char			name[PF_TAG_NAME_SIZE];
	uint16_t		tag;
	int			ref;
};

struct pf_tagset {
	TAILQ_HEAD(, pf_tagname)	*namehash;
	TAILQ_HEAD(, pf_tagname)	*taghash;
	unsigned int			 mask;
	uint32_t			 seed;
	BITSET_DEFINE(, TAGID_MAX)	 avail;
};

VNET_DEFINE(struct pf_tagset, pf_tags);
#define	V_pf_tags	VNET(pf_tags)
static unsigned int	pf_rule_tag_hashsize;
#define	PF_RULE_TAG_HASH_SIZE_DEFAULT	128
SYSCTL_UINT(_net_pf, OID_AUTO, rule_tag_hashsize, CTLFLAG_RDTUN,
    &pf_rule_tag_hashsize, PF_RULE_TAG_HASH_SIZE_DEFAULT,
    "Size of pf(4) rule tag hashtable");

#ifdef ALTQ
VNET_DEFINE(struct pf_tagset, pf_qids);
#define	V_pf_qids	VNET(pf_qids)
static unsigned int	pf_queue_tag_hashsize;
#define	PF_QUEUE_TAG_HASH_SIZE_DEFAULT	128
SYSCTL_UINT(_net_pf, OID_AUTO, queue_tag_hashsize, CTLFLAG_RDTUN,
    &pf_queue_tag_hashsize, PF_QUEUE_TAG_HASH_SIZE_DEFAULT,
    "Size of pf(4) queue tag hashtable");
#endif
VNET_DEFINE(uma_zone_t,	 pf_tag_z);
#define	V_pf_tag_z		 VNET(pf_tag_z)
static MALLOC_DEFINE(M_PFALTQ, "pf_altq", "pf(4) altq configuration db");
static MALLOC_DEFINE(M_PFRULE, "pf_rule", "pf(4) rules");

#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
#endif

VNET_DEFINE_STATIC(bool, pf_filter_local) = false;
#define V_pf_filter_local	VNET(pf_filter_local)
SYSCTL_BOOL(_net_pf, OID_AUTO, filter_local, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(pf_filter_local), false,
    "Enable filtering for packets delivered to local network stack");

#ifdef PF_DEFAULT_TO_DROP
VNET_DEFINE_STATIC(bool, default_to_drop) = true;
#else
VNET_DEFINE_STATIC(bool, default_to_drop);
#endif
#define	V_default_to_drop VNET(default_to_drop)
SYSCTL_BOOL(_net_pf, OID_AUTO, default_to_drop, CTLFLAG_RDTUN | CTLFLAG_VNET,
    &VNET_NAME(default_to_drop), false,
    "Make the default rule drop all packets.");

static void		 pf_init_tagset(struct pf_tagset *, unsigned int *,
			    unsigned int);
static void		 pf_cleanup_tagset(struct pf_tagset *);
static uint16_t		 tagname2hashindex(const struct pf_tagset *, const char *);
static uint16_t		 tag2hashindex(const struct pf_tagset *, uint16_t);
static u_int16_t	 tagname2tag(struct pf_tagset *, const char *);
static u_int16_t	 pf_tagname2tag(const char *);
static void		 tag_unref(struct pf_tagset *, u_int16_t);

#define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x
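
/*
 * Illustrative usage (not part of this file's control flow): DPFPRINTF
 * takes the printf() arguments as one parenthesized group, so a call
 * looks like
 *
 *	DPFPRINTF(PF_DEBUG_MISC,
 *	    ("pf: something happened, value %d\n", value));
 *
 * The extra parentheses are what let a variable argument list pass
 * through the single macro parameter "x".
 */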

struct cdev *pf_dev;

/*
 * XXX - These are new and need to be checked when moving to a new version
 */
static void		 pf_clear_all_states(void);
static int		 pf_killstates_row(struct pf_kstate_kill *,
			    struct pf_idhash *);
static int		 pf_killstates_nv(struct pfioc_nv *);
static int		 pf_clearstates_nv(struct pfioc_nv *);
static int		 pf_getstate(struct pfioc_nv *);
static int		 pf_getstatus(struct pfioc_nv *);
static int		 pf_clear_tables(void);
static void		 pf_kill_srcnodes(struct pfioc_src_node_kill *);
static int		 pf_keepcounters(struct pfioc_nv *);
static void		 pf_tbladdr_copyout(struct pf_addr_wrap *);

/*
 * Wrapper functions for pfil(9) hooks
 */
static pfil_return_t pf_eth_check_in(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
static pfil_return_t pf_eth_check_out(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
#ifdef INET
static pfil_return_t pf_check_in(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
static pfil_return_t pf_check_out(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
#endif
#ifdef INET6
static pfil_return_t pf_check6_in(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
static pfil_return_t pf_check6_out(struct mbuf **m, struct ifnet *ifp,
    int flags, void *ruleset __unused, struct inpcb *inp);
#endif

static void		hook_pf_eth(void);
static void		hook_pf(void);
static void		dehook_pf_eth(void);
static void		dehook_pf(void);
static int		shutdown_pf(void);
static int		pf_load(void);
static void		pf_unload(void);

static struct cdevsw pf_cdevsw = {
	.d_ioctl =	pfioctl,
	.d_name =	PF_NAME,
	.d_version =	D_VERSION,
};

VNET_DEFINE_STATIC(bool, pf_pfil_hooked);
#define V_pf_pfil_hooked	VNET(pf_pfil_hooked)
VNET_DEFINE_STATIC(bool, pf_pfil_eth_hooked);
#define V_pf_pfil_eth_hooked	VNET(pf_pfil_eth_hooked)

/*
 * We need a flag that is neither hooked nor running to know when
 * the VNET is "valid".  We primarily need this to control (global)
 * external events, e.g., eventhandlers.
 */
VNET_DEFINE(int, pf_vnet_active);
#define V_pf_vnet_active	VNET(pf_vnet_active)

int pf_end_threads;
struct proc *pf_purge_proc;

VNET_DEFINE(struct rmlock, pf_rules_lock);
VNET_DEFINE_STATIC(struct sx, pf_ioctl_lock);
#define	V_pf_ioctl_lock		VNET(pf_ioctl_lock)
struct sx			pf_end_lock;

/* pfsync */
VNET_DEFINE(pfsync_state_import_t *, pfsync_state_import_ptr);
VNET_DEFINE(pfsync_insert_state_t *, pfsync_insert_state_ptr);
VNET_DEFINE(pfsync_update_state_t *, pfsync_update_state_ptr);
VNET_DEFINE(pfsync_delete_state_t *, pfsync_delete_state_ptr);
VNET_DEFINE(pfsync_clear_states_t *, pfsync_clear_states_ptr);
VNET_DEFINE(pfsync_defer_t *, pfsync_defer_ptr);
VNET_DEFINE(pflow_export_state_t *, pflow_export_state_ptr);
pfsync_detach_ifnet_t *pfsync_detach_ifnet_ptr;

/* pflog */
pflog_packet_t			*pflog_packet_ptr = NULL;

/*
 * Copy a user-provided string, returning an error if truncation would occur.
 * Avoid scanning past "sz" bytes in the source string since there's no
 * guarantee that it's nul-terminated.
 */
static int
pf_user_strcpy(char *dst, const char *src, size_t sz)
{
	if (strnlen(src, sz) == sz)
		return (EINVAL);
	(void)strlcpy(dst, src, sz);
	return (0);
}
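
/*
 * Worked example of the contract above, with a 4-byte destination:
 *
 *	char buf[4];
 *
 *	pf_user_strcpy(buf, "abc", sizeof(buf));	returns 0, buf = "abc"
 *	pf_user_strcpy(buf, "abcd", sizeof(buf));	returns EINVAL, no copy
 *
 * "abcd" is rejected because strnlen() finds no NUL within the first
 * four bytes, i.e. the copy would have truncated.
 */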

static void
pfattach_vnet(void)
{
	u_int32_t *my_timeout = V_pf_default_rule.timeout;

	bzero(&V_pf_status, sizeof(V_pf_status));

	pf_initialize();
	pfr_initialize();
	pfi_initialize_vnet();
	pf_normalize_init();
	pf_syncookies_init();

	V_pf_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
	V_pf_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT;

	RB_INIT(&V_pf_anchors);
	pf_init_kruleset(&pf_main_ruleset);

	pf_init_keth(V_pf_keth);

	/* default rule should never be garbage collected */
	V_pf_default_rule.entries.tqe_prev = &V_pf_default_rule.entries.tqe_next;
	V_pf_default_rule.action = V_default_to_drop ? PF_DROP : PF_PASS;
	V_pf_default_rule.nr = -1;
	V_pf_default_rule.rtableid = -1;

	pf_counter_u64_init(&V_pf_default_rule.evaluations, M_WAITOK);
	for (int i = 0; i < 2; i++) {
		pf_counter_u64_init(&V_pf_default_rule.packets[i], M_WAITOK);
		pf_counter_u64_init(&V_pf_default_rule.bytes[i], M_WAITOK);
	}
	V_pf_default_rule.states_cur = counter_u64_alloc(M_WAITOK);
	V_pf_default_rule.states_tot = counter_u64_alloc(M_WAITOK);
	V_pf_default_rule.src_nodes = counter_u64_alloc(M_WAITOK);

	V_pf_default_rule.timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
	    M_WAITOK | M_ZERO);

#ifdef PF_WANT_32_TO_64_COUNTER
	V_pf_kifmarker = malloc(sizeof(*V_pf_kifmarker), PFI_MTYPE, M_WAITOK | M_ZERO);
	V_pf_rulemarker = malloc(sizeof(*V_pf_rulemarker), M_PFRULE, M_WAITOK | M_ZERO);
	PF_RULES_WLOCK();
	LIST_INSERT_HEAD(&V_pf_allkiflist, V_pf_kifmarker, pfik_allkiflist);
	LIST_INSERT_HEAD(&V_pf_allrulelist, &V_pf_default_rule, allrulelist);
	V_pf_allrulecount++;
	LIST_INSERT_HEAD(&V_pf_allrulelist, V_pf_rulemarker, allrulelist);
	PF_RULES_WUNLOCK();
#endif

	/* initialize default timeouts */
	my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
	my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
	my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	my_timeout[PFTM_SCTP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
	my_timeout[PFTM_SCTP_OPENING] = PFTM_TCP_OPENING_VAL;
	my_timeout[PFTM_SCTP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
	my_timeout[PFTM_SCTP_CLOSING] = PFTM_TCP_CLOSING_VAL;
	my_timeout[PFTM_SCTP_CLOSED] = PFTM_TCP_CLOSED_VAL;
	my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
	my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
	my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
	my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
	my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
	my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
	my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
	my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
	my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
	my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
	my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
	my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
	my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
	my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;

	V_pf_status.debug = PF_DEBUG_URGENT;
	/*
	 * XXX This is different from OpenBSD, where reassembly is enabled by
	 * default.  In FreeBSD we expect people to still use scrub rules and
	 * switch to the new syntax later.  Only when they switch must they
	 * explicitly enable reassembly.  We could change the default once the
	 * scrub rule functionality is hopefully removed some day in the
	 * future.
	 */
	V_pf_status.reass = 0;

	V_pf_pfil_hooked = false;
	V_pf_pfil_eth_hooked = false;

	/* XXX do our best to avoid a conflict */
	V_pf_status.hostid = arc4random();

	for (int i = 0; i < PFRES_MAX; i++)
		V_pf_status.counters[i] = counter_u64_alloc(M_WAITOK);
	for (int i = 0; i < KLCNT_MAX; i++)
		V_pf_status.lcounters[i] = counter_u64_alloc(M_WAITOK);
	for (int i = 0; i < FCNT_MAX; i++)
		pf_counter_u64_init(&V_pf_status.fcounters[i], M_WAITOK);
	for (int i = 0; i < SCNT_MAX; i++)
		V_pf_status.scounters[i] = counter_u64_alloc(M_WAITOK);

	if (swi_add(&V_pf_swi_ie, "pf send", pf_intr, curvnet, SWI_NET,
	    INTR_MPSAFE, &V_pf_swi_cookie) != 0)
		/* XXXGL: leaked all above. */
		return;
}

static struct pf_kpool *
pf_get_kpool(const char *anchor, u_int32_t ticket, u_int8_t rule_action,
    u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
    u_int8_t check_ticket, int which)
{
	struct pf_kruleset	*ruleset;
	struct pf_krule		*rule;
	int			 rs_num;

	MPASS(which == PF_RDR || which == PF_NAT || which == PF_RT);

	ruleset = pf_find_kruleset(anchor);
	if (ruleset == NULL)
		return (NULL);
	rs_num = pf_get_ruleset_number(rule_action);
	if (rs_num >= PF_RULESET_MAX)
		return (NULL);
	if (active) {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].active.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
			    pf_krulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
	} else {
		if (check_ticket && ticket !=
		    ruleset->rules[rs_num].inactive.ticket)
			return (NULL);
		if (r_last)
			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
			    pf_krulequeue);
		else
			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
	}
	if (!r_last) {
		while ((rule != NULL) && (rule->nr != rule_number))
			rule = TAILQ_NEXT(rule, entries);
	}
	if (rule == NULL)
		return (NULL);

	switch (which) {
	case PF_RDR:
		return (&rule->rdr);
	case PF_NAT:
		return (&rule->nat);
	case PF_RT:
		return (&rule->route);
	default:
		panic("Unknown pool type %d", which);
	}
}
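
/*
 * Usage sketch (hypothetical caller, for illustration only): look up the
 * NAT pool of active NAT rule number 3 in the main anchor, skipping the
 * ticket check:
 *
 *	struct pf_kpool *pool;
 *
 *	pool = pf_get_kpool("", 0, PF_NAT, 3, 0, 1, 0, PF_NAT);
 *	if (pool == NULL)
 *		... no such ruleset or rule ...
 */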

static void
pf_mv_kpool(struct pf_kpalist *poola, struct pf_kpalist *poolb)
{
	struct pf_kpooladdr	*mv_pool_pa;

	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
		TAILQ_REMOVE(poola, mv_pool_pa, entries);
		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
	}
}

static void
pf_empty_kpool(struct pf_kpalist *poola)
{
	struct pf_kpooladdr *pa;

	while ((pa = TAILQ_FIRST(poola)) != NULL) {
		switch (pa->addr.type) {
		case PF_ADDR_DYNIFTL:
			pfi_dynaddr_remove(pa->addr.p.dyn);
			break;
		case PF_ADDR_TABLE:
			/* XXX: this could be unfinished pooladdr on pabuf */
			if (pa->addr.p.tbl != NULL)
				pfr_detach_table(pa->addr.p.tbl);
			break;
		}
		if (pa->kif)
			pfi_kkif_unref(pa->kif);
		TAILQ_REMOVE(poola, pa, entries);
		free(pa, M_PFRULE);
	}
}

static void
pf_unlink_rule_locked(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
{

	PF_RULES_WASSERT();
	PF_UNLNKDRULES_ASSERT();

	TAILQ_REMOVE(rulequeue, rule, entries);

	rule->rule_ref |= PFRULE_REFS;
	TAILQ_INSERT_TAIL(&V_pf_unlinked_rules, rule, entries);
}

static void
pf_unlink_rule(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
{

	PF_RULES_WASSERT();

	PF_UNLNKDRULES_LOCK();
	pf_unlink_rule_locked(rulequeue, rule);
	PF_UNLNKDRULES_UNLOCK();
}

static void
pf_free_eth_rule(struct pf_keth_rule *rule)
{
	PF_RULES_WASSERT();

	if (rule == NULL)
		return;

	if (rule->tag)
		tag_unref(&V_pf_tags, rule->tag);
	if (rule->match_tag)
		tag_unref(&V_pf_tags, rule->match_tag);
#ifdef ALTQ
	pf_qid_unref(rule->qid);
#endif

	if (rule->bridge_to)
		pfi_kkif_unref(rule->bridge_to);
	if (rule->kif)
		pfi_kkif_unref(rule->kif);

	if (rule->ipsrc.addr.type == PF_ADDR_TABLE)
		pfr_detach_table(rule->ipsrc.addr.p.tbl);
	if (rule->ipdst.addr.type == PF_ADDR_TABLE)
		pfr_detach_table(rule->ipdst.addr.p.tbl);

	counter_u64_free(rule->evaluations);
	for (int i = 0; i < 2; i++) {
		counter_u64_free(rule->packets[i]);
		counter_u64_free(rule->bytes[i]);
	}
	uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp);
	pf_keth_anchor_remove(rule);

	free(rule, M_PFRULE);
}

void
pf_free_rule(struct pf_krule *rule)
{

	PF_RULES_WASSERT();
	PF_CONFIG_ASSERT();

	if (rule->tag)
		tag_unref(&V_pf_tags, rule->tag);
	if (rule->match_tag)
		tag_unref(&V_pf_tags, rule->match_tag);
#ifdef ALTQ
	if (rule->pqid != rule->qid)
		pf_qid_unref(rule->pqid);
	pf_qid_unref(rule->qid);
#endif
	switch (rule->src.addr.type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_remove(rule->src.addr.p.dyn);
		break;
	case PF_ADDR_TABLE:
		pfr_detach_table(rule->src.addr.p.tbl);
		break;
	}
	switch (rule->dst.addr.type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_remove(rule->dst.addr.p.dyn);
		break;
	case PF_ADDR_TABLE:
		pfr_detach_table(rule->dst.addr.p.tbl);
		break;
	}
	if (rule->overload_tbl)
		pfr_detach_table(rule->overload_tbl);
	if (rule->kif)
		pfi_kkif_unref(rule->kif);
	if (rule->rcv_kif)
		pfi_kkif_unref(rule->rcv_kif);
	pf_kanchor_remove(rule);
	pf_empty_kpool(&rule->rdr.list);
	pf_empty_kpool(&rule->nat.list);
	pf_empty_kpool(&rule->route.list);

	pf_krule_free(rule);
}

static void
pf_init_tagset(struct pf_tagset *ts, unsigned int *tunable_size,
    unsigned int default_size)
{
	unsigned int i;
	unsigned int hashsize;

	if (*tunable_size == 0 || !powerof2(*tunable_size))
		*tunable_size = default_size;

	hashsize = *tunable_size;
	ts->namehash = mallocarray(hashsize, sizeof(*ts->namehash), M_PFHASH,
	    M_WAITOK);
	ts->taghash = mallocarray(hashsize, sizeof(*ts->taghash), M_PFHASH,
	    M_WAITOK);
	ts->mask = hashsize - 1;
	ts->seed = arc4random();
	for (i = 0; i < hashsize; i++) {
		TAILQ_INIT(&ts->namehash[i]);
		TAILQ_INIT(&ts->taghash[i]);
	}
	BIT_FILL(TAGID_MAX, &ts->avail);
}
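
/*
 * Worked example of the sizing logic above: a tunable of 128 is a power
 * of two and is kept, giving hashsize = 128 and mask = 0x7f, so a hash
 * value AND-ed with the mask picks one of the 128 buckets.  A tunable of
 * 0, or of a non-power-of-two such as 100, silently falls back to
 * default_size.
 */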

static void
pf_cleanup_tagset(struct pf_tagset *ts)
{
	unsigned int i;
	unsigned int hashsize;
	struct pf_tagname *t, *tmp;

	/*
	 * Only need to clean up one of the hashes as each tag is hashed
	 * into each table.
	 */
	hashsize = ts->mask + 1;
	for (i = 0; i < hashsize; i++)
		TAILQ_FOREACH_SAFE(t, &ts->namehash[i], namehash_entries, tmp)
			uma_zfree(V_pf_tag_z, t);

	free(ts->namehash, M_PFHASH);
	free(ts->taghash, M_PFHASH);
}

static uint16_t
tagname2hashindex(const struct pf_tagset *ts, const char *tagname)
{
	size_t len;

	len = strnlen(tagname, PF_TAG_NAME_SIZE - 1);
	return (murmur3_32_hash(tagname, len, ts->seed) & ts->mask);
}

static uint16_t
tag2hashindex(const struct pf_tagset *ts, uint16_t tag)
{

	return (tag & ts->mask);
}

static u_int16_t
tagname2tag(struct pf_tagset *ts, const char *tagname)
{
	struct pf_tagname	*tag;
	u_int32_t		 index;
	u_int16_t		 new_tagid;

	PF_RULES_WASSERT();

	index = tagname2hashindex(ts, tagname);
	TAILQ_FOREACH(tag, &ts->namehash[index], namehash_entries)
		if (strcmp(tagname, tag->name) == 0) {
			tag->ref++;
			return (tag->tag);
		}

	/*
	 * new entry
	 *
	 * to avoid fragmentation, we do a linear search from the beginning
	 * and take the first free slot we find.
	 */
	new_tagid = BIT_FFS(TAGID_MAX, &ts->avail);
	/*
	 * Tags are 1-based, with valid tags in the range [1..TAGID_MAX].
	 * BIT_FFS() returns a 1-based bit number, with 0 indicating no bits
	 * set.  It may also return a bit number greater than TAGID_MAX due
	 * to rounding of the number of bits in the vector up to a multiple
	 * of the vector word size at declaration/allocation time.
	 */
	if ((new_tagid == 0) || (new_tagid > TAGID_MAX))
		return (0);

	/* Mark the tag as in use.  Bits are 0-based for BIT_CLR() */
	BIT_CLR(TAGID_MAX, new_tagid - 1, &ts->avail);

	/* allocate and fill new struct pf_tagname */
	tag = uma_zalloc(V_pf_tag_z, M_NOWAIT);
	if (tag == NULL)
		return (0);
	strlcpy(tag->name, tagname, sizeof(tag->name));
	tag->tag = new_tagid;
	tag->ref = 1;

	/* Insert into namehash */
	TAILQ_INSERT_TAIL(&ts->namehash[index], tag, namehash_entries);

	/* Insert into taghash */
	index = tag2hashindex(ts, new_tagid);
	TAILQ_INSERT_TAIL(&ts->taghash[index], tag, taghash_entries);

	return (tag->tag);
}
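
/*
 * Illustrative flow, assuming a freshly initialized tag set: the first
 * call for a new name misses in the name hash, BIT_FFS() reports bit 1
 * (the lowest free ID, since BIT_FILL() set them all), and the name is
 * bound to tag 1 with ref = 1.  A second call with the same name takes
 * the early return and merely bumps ref to 2.
 */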

static void
tag_unref(struct pf_tagset *ts, u_int16_t tag)
{
	struct pf_tagname	*t;
	uint16_t		 index;

	PF_RULES_WASSERT();

	index = tag2hashindex(ts, tag);
	TAILQ_FOREACH(t, &ts->taghash[index], taghash_entries)
		if (tag == t->tag) {
			if (--t->ref == 0) {
				TAILQ_REMOVE(&ts->taghash[index], t,
				    taghash_entries);
				index = tagname2hashindex(ts, t->name);
				TAILQ_REMOVE(&ts->namehash[index], t,
				    namehash_entries);
				/* Bits are 0-based for BIT_SET() */
				BIT_SET(TAGID_MAX, tag - 1, &ts->avail);
				uma_zfree(V_pf_tag_z, t);
			}
			break;
		}
}

static uint16_t
pf_tagname2tag(const char *tagname)
{
	return (tagname2tag(&V_pf_tags, tagname));
}

static int
pf_begin_eth(uint32_t *ticket, const char *anchor)
{
	struct pf_keth_rule *rule, *tmp;
	struct pf_keth_ruleset *rs;

	PF_RULES_WASSERT();

	rs = pf_find_or_create_keth_ruleset(anchor);
	if (rs == NULL)
		return (EINVAL);

	/* Purge old inactive rules. */
	TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries,
	    tmp) {
		TAILQ_REMOVE(rs->inactive.rules, rule,
		    entries);
		pf_free_eth_rule(rule);
	}

	*ticket = ++rs->inactive.ticket;
	rs->inactive.open = 1;

	return (0);
}

static int
pf_rollback_eth(uint32_t ticket, const char *anchor)
{
	struct pf_keth_rule *rule, *tmp;
	struct pf_keth_ruleset *rs;

	PF_RULES_WASSERT();

	rs = pf_find_keth_ruleset(anchor);
	if (rs == NULL)
		return (EINVAL);

	if (!rs->inactive.open ||
	    ticket != rs->inactive.ticket)
		return (0);

	/* Purge old inactive rules. */
	TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries,
	    tmp) {
		TAILQ_REMOVE(rs->inactive.rules, rule, entries);
		pf_free_eth_rule(rule);
	}

	rs->inactive.open = 0;

	pf_remove_if_empty_keth_ruleset(rs);

	return (0);
}

#define	PF_SET_SKIP_STEPS(i)					\
	do {							\
		while (head[i] != cur) {			\
			head[i]->skip[i].ptr = cur;		\
			head[i] = TAILQ_NEXT(head[i], entries);	\
		}						\
	} while (0)

static void
pf_eth_calc_skip_steps(struct pf_keth_ruleq *rules)
{
	struct pf_keth_rule *cur, *prev, *head[PFE_SKIP_COUNT];
	int i;

	cur = TAILQ_FIRST(rules);
	prev = cur;
	for (i = 0; i < PFE_SKIP_COUNT; ++i)
		head[i] = cur;
	while (cur != NULL) {
		if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
			PF_SET_SKIP_STEPS(PFE_SKIP_IFP);
		if (cur->direction != prev->direction)
			PF_SET_SKIP_STEPS(PFE_SKIP_DIR);
		if (cur->proto != prev->proto)
			PF_SET_SKIP_STEPS(PFE_SKIP_PROTO);
		if (memcmp(&cur->src, &prev->src, sizeof(cur->src)) != 0)
			PF_SET_SKIP_STEPS(PFE_SKIP_SRC_ADDR);
		if (memcmp(&cur->dst, &prev->dst, sizeof(cur->dst)) != 0)
			PF_SET_SKIP_STEPS(PFE_SKIP_DST_ADDR);
		if (cur->ipsrc.neg != prev->ipsrc.neg ||
		    pf_addr_wrap_neq(&cur->ipsrc.addr, &prev->ipsrc.addr))
			PF_SET_SKIP_STEPS(PFE_SKIP_SRC_IP_ADDR);
		if (cur->ipdst.neg != prev->ipdst.neg ||
		    pf_addr_wrap_neq(&cur->ipdst.addr, &prev->ipdst.addr))
			PF_SET_SKIP_STEPS(PFE_SKIP_DST_IP_ADDR);

		prev = cur;
		cur = TAILQ_NEXT(cur, entries);
	}
	for (i = 0; i < PFE_SKIP_COUNT; ++i)
		PF_SET_SKIP_STEPS(i);
}
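
/*
 * Example of the effect (illustrative): if three consecutive rules all
 * match on the same interface, head[PFE_SKIP_IFP] trails behind cur until
 * a rule with a different interface appears; at that point all three get
 * skip[PFE_SKIP_IFP].ptr aimed at that rule, so evaluation can leap over
 * the whole run after a single failed interface comparison.
 */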

static int
pf_commit_eth(uint32_t ticket, const char *anchor)
{
	struct pf_keth_ruleq *rules;
	struct pf_keth_ruleset *rs;

	rs = pf_find_keth_ruleset(anchor);
	if (rs == NULL) {
		return (EINVAL);
	}

	if (!rs->inactive.open ||
	    ticket != rs->inactive.ticket)
		return (EBUSY);

	PF_RULES_WASSERT();

	pf_eth_calc_skip_steps(rs->inactive.rules);

	rules = rs->active.rules;
	atomic_store_ptr(&rs->active.rules, rs->inactive.rules);
	rs->inactive.rules = rules;
	rs->inactive.ticket = rs->active.ticket;

	return (pf_rollback_eth(rs->inactive.ticket,
	    rs->anchor ? rs->anchor->path : ""));
}
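
/*
 * Transaction sketch (hypothetical caller, error handling omitted): the
 * begin/commit pair brackets an ethernet ruleset replacement, with the
 * ticket guarding against concurrent loaders:
 *
 *	uint32_t ticket;
 *
 *	pf_begin_eth(&ticket, "");
 *	... append new rules to the inactive list under this ticket ...
 *	pf_commit_eth(ticket, "");	(or pf_rollback_eth() on failure)
 */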

#ifdef ALTQ
static uint16_t
pf_qname2qid(const char *qname)
{
	return (tagname2tag(&V_pf_qids, qname));
}

static void
pf_qid_unref(uint16_t qid)
{
	tag_unref(&V_pf_qids, qid);
}

static int
pf_begin_altq(u_int32_t *ticket)
{
	struct pf_altq	*altq, *tmp;
	int		 error = 0;

	PF_RULES_WASSERT();

	/* Purge the old altq lists */
	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		}
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altq_ifs_inactive);
	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
		pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altqs_inactive);
	if (error)
		return (error);
	*ticket = ++V_ticket_altqs_inactive;
	V_altqs_inactive_open = 1;
	return (0);
}

static int
pf_rollback_altq(u_int32_t ticket)
{
	struct pf_altq	*altq, *tmp;
	int		 error = 0;

	PF_RULES_WASSERT();

	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
		return (0);
	/* Purge the old altq lists */
	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			error = altq_remove(altq);
		}
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altq_ifs_inactive);
	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
		pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altqs_inactive);
	V_altqs_inactive_open = 0;
	return (error);
}

static int
pf_commit_altq(u_int32_t ticket)
{
	struct pf_altqqueue	*old_altqs, *old_altq_ifs;
	struct pf_altq		*altq, *tmp;
	int			 err, error = 0;

	PF_RULES_WASSERT();

	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
		return (EBUSY);

	/* swap altqs, keep the old. */
	old_altqs = V_pf_altqs_active;
	old_altq_ifs = V_pf_altq_ifs_active;
	V_pf_altqs_active = V_pf_altqs_inactive;
	V_pf_altq_ifs_active = V_pf_altq_ifs_inactive;
	V_pf_altqs_inactive = old_altqs;
	V_pf_altq_ifs_inactive = old_altq_ifs;
	V_ticket_altqs_active = V_ticket_altqs_inactive;

	/* Attach new disciplines */
	TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* attach the discipline */
			error = altq_pfattach(altq);
			if (error == 0 && V_pf_altq_running)
				error = pf_enable_altq(altq);
			if (error != 0)
				return (error);
		}
	}

	/* Purge the old altq lists */
	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
			/* detach and destroy the discipline */
			if (V_pf_altq_running)
				error = pf_disable_altq(altq);
			err = altq_pfdetach(altq);
			if (err != 0 && error == 0)
				error = err;
			err = altq_remove(altq);
			if (err != 0 && error == 0)
				error = err;
		}
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altq_ifs_inactive);
	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
		pf_qid_unref(altq->qid);
		free(altq, M_PFALTQ);
	}
	TAILQ_INIT(V_pf_altqs_inactive);

	V_altqs_inactive_open = 0;
	return (error);
}

static int
pf_enable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 error = 0;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	if (ifp->if_snd.altq_type != ALTQT_NONE)
		error = altq_enable(&ifp->if_snd);

	/* set tokenbucket regulator */
	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
		tb.rate = altq->ifbandwidth;
		tb.depth = altq->tbrsize;
		error = tbr_set(&ifp->if_snd, &tb);
	}

	return (error);
}
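
/*
 * Note on the regulator set up above (illustrative numbers, not a
 * recommendation): tb.rate comes from the configured interface bandwidth
 * and tb.depth from tbrsize, so e.g. ifbandwidth = 100 Mbit/s with
 * tbrsize = 1500 bytes yields a token bucket that refills at line rate
 * but never accumulates more than one full-sized frame of credit.
 */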

static int
pf_disable_altq(struct pf_altq *altq)
{
	struct ifnet		*ifp;
	struct tb_profile	 tb;
	int			 error;

	if ((ifp = ifunit(altq->ifname)) == NULL)
		return (EINVAL);

	/*
	 * If the discipline is no longer referenced, it has been overridden
	 * by a new one.  In that case, just return.
	 */
	if (altq->altq_disc != ifp->if_snd.altq_disc)
		return (0);

	error = altq_disable(&ifp->if_snd);

	if (error == 0) {
		/* clear tokenbucket regulator */
		tb.rate = 0;
		error = tbr_set(&ifp->if_snd, &tb);
	}

	return (error);
}

static int
pf_altq_ifnet_event_add(struct ifnet *ifp, int remove, u_int32_t ticket,
    struct pf_altq *altq)
{
	struct ifnet	*ifp1;
	int		 error = 0;

	/* Deactivate the interface in question */
	altq->local_flags &= ~PFALTQ_FLAG_IF_REMOVED;
	if ((ifp1 = ifunit(altq->ifname)) == NULL ||
	    (remove && ifp1 == ifp)) {
		altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
	} else {
		error = altq_add(ifp1, altq);

		if (ticket != V_ticket_altqs_inactive)
			error = EBUSY;

		if (error)
			free(altq, M_PFALTQ);
	}

	return (error);
}

void
pf_altq_ifnet_event(struct ifnet *ifp, int remove)
{
	struct pf_altq	*a1, *a2, *a3;
	u_int32_t	 ticket;
	int		 error = 0;

	/*
	 * No need to re-evaluate the configuration for events on interfaces
	 * that do not support ALTQ, as it's not possible for such
	 * interfaces to be part of the configuration.
	 */
	if (!ALTQ_IS_READY(&ifp->if_snd))
		return;

	/* Interrupt userland queue modifications */
	if (V_altqs_inactive_open)
		pf_rollback_altq(V_ticket_altqs_inactive);

	/* Start new altq ruleset */
	if (pf_begin_altq(&ticket))
		return;

	/* Copy the current active set */
	TAILQ_FOREACH(a1, V_pf_altq_ifs_active, entries) {
		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
		if (a2 == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(a1, a2, sizeof(struct pf_altq));

		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
		if (error)
			break;

		TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, a2, entries);
	}
	if (error)
		goto out;
	TAILQ_FOREACH(a1, V_pf_altqs_active, entries) {
		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
		if (a2 == NULL) {
			error = ENOMEM;
			break;
		}
		bcopy(a1, a2, sizeof(struct pf_altq));

		if ((a2->qid = pf_qname2qid(a2->qname)) == 0) {
			error = EBUSY;
			free(a2, M_PFALTQ);
			break;
		}
		a2->altq_disc = NULL;
		TAILQ_FOREACH(a3, V_pf_altq_ifs_inactive, entries) {
			if (strncmp(a3->ifname, a2->ifname,
				IFNAMSIZ) == 0) {
				a2->altq_disc = a3->altq_disc;
				break;
			}
		}
		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
		if (error)
			break;

		TAILQ_INSERT_TAIL(V_pf_altqs_inactive, a2, entries);
	}

out:
	if (error != 0)
		pf_rollback_altq(ticket);
	else
		pf_commit_altq(ticket);
}
#endif /* ALTQ */

static struct pf_krule_global *
pf_rule_tree_alloc(int flags)
{
	struct pf_krule_global *tree;

	tree = malloc(sizeof(struct pf_krule_global), M_TEMP, flags);
	if (tree == NULL)
		return (NULL);
	RB_INIT(tree);
	return (tree);
}

static void
pf_rule_tree_free(struct pf_krule_global *tree)
{

	free(tree, M_TEMP);
}

static int
pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
{
	struct pf_krule_global *tree;
	struct pf_kruleset	*rs;
	struct pf_krule		*rule;

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	tree = pf_rule_tree_alloc(M_NOWAIT);
	if (tree == NULL)
		return (ENOMEM);
	rs = pf_find_or_create_kruleset(anchor);
	if (rs == NULL) {
		free(tree, M_TEMP);
		return (EINVAL);
	}
	pf_rule_tree_free(rs->rules[rs_num].inactive.tree);
	rs->rules[rs_num].inactive.tree = tree;

	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	*ticket = ++rs->rules[rs_num].inactive.ticket;
	rs->rules[rs_num].inactive.open = 1;
	return (0);
}

static int
pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_kruleset	*rs;
	struct pf_krule		*rule;

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_kruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    rs->rules[rs_num].inactive.ticket != ticket)
		return (0);
	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
		rs->rules[rs_num].inactive.rcount--;
	}
	rs->rules[rs_num].inactive.open = 0;
	return (0);
}

#define PF_MD5_UPD(st, elm)						\
		MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))

#define PF_MD5_UPD_STR(st, elm)						\
		MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))

#define PF_MD5_UPD_HTONL(st, elm, stor) do {				\
		(stor) = htonl((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
} while (0)

#define PF_MD5_UPD_HTONS(st, elm, stor) do {				\
		(stor) = htons((st)->elm);				\
		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
} while (0)
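
/*
 * Expansion example: PF_MD5_UPD(rule, action) becomes
 *
 *	MD5Update(ctx, (u_int8_t *)&rule->action, sizeof(rule->action));
 *
 * The HTONL/HTONS variants first store the field in network byte order in
 * the caller-supplied temporary, so the resulting digest is identical on
 * little- and big-endian hosts.
 */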

static void
pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
{
	PF_MD5_UPD(pfr, addr.type);
	switch (pfr->addr.type) {
		case PF_ADDR_DYNIFTL:
			PF_MD5_UPD(pfr, addr.v.ifname);
			PF_MD5_UPD(pfr, addr.iflags);
			break;
		case PF_ADDR_TABLE:
			PF_MD5_UPD(pfr, addr.v.tblname);
			break;
		case PF_ADDR_ADDRMASK:
			/* XXX ignore af? */
			PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
			PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
			break;
	}

	PF_MD5_UPD(pfr, port[0]);
	PF_MD5_UPD(pfr, port[1]);
	PF_MD5_UPD(pfr, neg);
	PF_MD5_UPD(pfr, port_op);
}

static void
pf_hash_rule_rolling(MD5_CTX *ctx, struct pf_krule *rule)
{
	u_int16_t x;
	u_int32_t y;

	pf_hash_rule_addr(ctx, &rule->src);
	pf_hash_rule_addr(ctx, &rule->dst);
	for (int i = 0; i < PF_RULE_MAX_LABEL_COUNT; i++)
		PF_MD5_UPD_STR(rule, label[i]);
	PF_MD5_UPD_STR(rule, ifname);
	PF_MD5_UPD_STR(rule, rcv_ifname);
	PF_MD5_UPD_STR(rule, match_tagname);
	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
	PF_MD5_UPD_HTONL(rule, prob, y);
	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
	PF_MD5_UPD(rule, uid.op);
	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
	PF_MD5_UPD(rule, gid.op);
	PF_MD5_UPD_HTONL(rule, rule_flag, y);
	PF_MD5_UPD(rule, action);
	PF_MD5_UPD(rule, direction);
	PF_MD5_UPD(rule, af);
	PF_MD5_UPD(rule, quick);
	PF_MD5_UPD(rule, ifnot);
	PF_MD5_UPD(rule, match_tag_not);
	PF_MD5_UPD(rule, natpass);
	PF_MD5_UPD(rule, keep_state);
	PF_MD5_UPD(rule, proto);
	PF_MD5_UPD(rule, type);
	PF_MD5_UPD(rule, code);
	PF_MD5_UPD(rule, flags);
	PF_MD5_UPD(rule, flagset);
	PF_MD5_UPD(rule, allow_opts);
	PF_MD5_UPD(rule, rt);
	PF_MD5_UPD(rule, tos);
	PF_MD5_UPD(rule, scrub_flags);
	PF_MD5_UPD(rule, min_ttl);
	PF_MD5_UPD(rule, set_tos);
	if (rule->anchor != NULL)
		PF_MD5_UPD_STR(rule, anchor->path);
}

static void
pf_hash_rule(struct pf_krule *rule)
{
	MD5_CTX		ctx;

	MD5Init(&ctx);
	pf_hash_rule_rolling(&ctx, rule);
	MD5Final(rule->md5sum, &ctx);
}

static int
pf_krule_compare(struct pf_krule *a, struct pf_krule *b)
{

	return (memcmp(a->md5sum, b->md5sum, PF_MD5_DIGEST_LENGTH));
}
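
/*
 * The comparator above orders rules purely by their MD5 digests, so rules
 * hashing equal under pf_hash_rule() compare as the same node in the
 * pf_krule_global tree.  This is what allows pf_commit_rules() below to
 * match a reloaded rule with its predecessor and carry counters across.
 */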

static int
pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
{
	struct pf_kruleset	*rs;
	struct pf_krule		*rule, **old_array, *old_rule;
	struct pf_krulequeue	*old_rules;
	struct pf_krule_global  *old_tree;
	int			 error;
	u_int32_t		 old_rcount;

	PF_RULES_WASSERT();

	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
		return (EINVAL);
	rs = pf_find_kruleset(anchor);
	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
	    ticket != rs->rules[rs_num].inactive.ticket)
		return (EBUSY);

	/* Calculate checksum for the main ruleset */
	if (rs == &pf_main_ruleset) {
		error = pf_setup_pfsync_matching(rs);
		if (error != 0)
			return (error);
	}

	/* Swap rules, keep the old. */
	old_rules = rs->rules[rs_num].active.ptr;
	old_rcount = rs->rules[rs_num].active.rcount;
	old_array = rs->rules[rs_num].active.ptr_array;
	old_tree = rs->rules[rs_num].active.tree;

	rs->rules[rs_num].active.ptr =
	    rs->rules[rs_num].inactive.ptr;
	rs->rules[rs_num].active.ptr_array =
	    rs->rules[rs_num].inactive.ptr_array;
	rs->rules[rs_num].active.tree =
	    rs->rules[rs_num].inactive.tree;
	rs->rules[rs_num].active.rcount =
	    rs->rules[rs_num].inactive.rcount;

	/* Attempt to preserve counter information. */
	if (V_pf_status.keep_counters && old_tree != NULL) {
		TAILQ_FOREACH(rule, rs->rules[rs_num].active.ptr,
		    entries) {
			old_rule = RB_FIND(pf_krule_global, old_tree, rule);
			if (old_rule == NULL) {
				continue;
			}
			pf_counter_u64_critical_enter();
			pf_counter_u64_rollup_protected(&rule->evaluations,
			    pf_counter_u64_fetch(&old_rule->evaluations));
			pf_counter_u64_rollup_protected(&rule->packets[0],
			    pf_counter_u64_fetch(&old_rule->packets[0]));
			pf_counter_u64_rollup_protected(&rule->packets[1],
			    pf_counter_u64_fetch(&old_rule->packets[1]));
			pf_counter_u64_rollup_protected(&rule->bytes[0],
			    pf_counter_u64_fetch(&old_rule->bytes[0]));
			pf_counter_u64_rollup_protected(&rule->bytes[1],
			    pf_counter_u64_fetch(&old_rule->bytes[1]));
			pf_counter_u64_critical_exit();
		}
	}

	rs->rules[rs_num].inactive.ptr = old_rules;
	rs->rules[rs_num].inactive.ptr_array = old_array;
	rs->rules[rs_num].inactive.tree = NULL; /* important for pf_ioctl_addrule */
	rs->rules[rs_num].inactive.rcount = old_rcount;

	rs->rules[rs_num].active.ticket =
	    rs->rules[rs_num].inactive.ticket;
	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);

	/* Purge the old rule list. */
	PF_UNLNKDRULES_LOCK();
	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
		pf_unlink_rule_locked(old_rules, rule);
	PF_UNLNKDRULES_UNLOCK();
	if (rs->rules[rs_num].inactive.ptr_array)
		free(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
	rs->rules[rs_num].inactive.ptr_array = NULL;
	rs->rules[rs_num].inactive.rcount = 0;
	rs->rules[rs_num].inactive.open = 0;
	pf_remove_if_empty_kruleset(rs);
	free(old_tree, M_TEMP);

	return (0);
}
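
/*
 * Transaction sketch (hypothetical caller, error handling omitted),
 * mirroring the ethernet flow earlier in this file:
 *
 *	u_int32_t ticket;
 *
 *	pf_begin_rules(&ticket, PF_RULESET_FILTER, "");
 *	... install rules into the inactive queue under this ticket ...
 *	pf_commit_rules(ticket, PF_RULESET_FILTER, "");
 */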

static int
pf_setup_pfsync_matching(struct pf_kruleset *rs)
{
	MD5_CTX			 ctx;
	struct pf_krule		*rule;
	int			 rs_cnt;
	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];

	MD5Init(&ctx);
	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
		/* XXX PF_RULESET_SCRUB as well? */
		if (rs_cnt == PF_RULESET_SCRUB)
			continue;

		if (rs->rules[rs_cnt].inactive.ptr_array)
			free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
		rs->rules[rs_cnt].inactive.ptr_array = NULL;

		if (rs->rules[rs_cnt].inactive.rcount) {
			rs->rules[rs_cnt].inactive.ptr_array =
			    mallocarray(rs->rules[rs_cnt].inactive.rcount,
			    sizeof(struct pf_rule **),
			    M_TEMP, M_NOWAIT);

			if (!rs->rules[rs_cnt].inactive.ptr_array)
				return (ENOMEM);
		}

		TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
		    entries) {
			pf_hash_rule_rolling(&ctx, rule);
			(rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
		}
	}

	MD5Final(digest, &ctx);
	memcpy(V_pf_status.pf_chksum, digest, sizeof(V_pf_status.pf_chksum));
	return (0);
}

static int
pf_eth_addr_setup(struct pf_keth_ruleset *ruleset, struct pf_addr_wrap *addr)
{
	int error = 0;

	switch (addr->type) {
	case PF_ADDR_TABLE:
		addr->p.tbl = pfr_eth_attach_table(ruleset, addr->v.tblname);
		if (addr->p.tbl == NULL)
			error = ENOMEM;
		break;
	default:
		error = EINVAL;
	}

	return (error);
}

static int
pf_addr_setup(struct pf_kruleset *ruleset, struct pf_addr_wrap *addr,
    sa_family_t af)
{
	int error = 0;

	switch (addr->type) {
	case PF_ADDR_TABLE:
		addr->p.tbl = pfr_attach_table(ruleset, addr->v.tblname);
		if (addr->p.tbl == NULL)
			error = ENOMEM;
		break;
	case PF_ADDR_DYNIFTL:
		error = pfi_dynaddr_setup(addr, af);
		break;
	}

	return (error);
}

void
pf_addr_copyout(struct pf_addr_wrap *addr)
{

	switch (addr->type) {
	case PF_ADDR_DYNIFTL:
		pfi_dynaddr_copyout(addr);
		break;
	case PF_ADDR_TABLE:
		pf_tbladdr_copyout(addr);
		break;
	}
}

static void
pf_src_node_copy(const struct pf_ksrc_node *in, struct pf_src_node *out)
{
	int	secs = time_uptime, diff;

	bzero(out, sizeof(struct pf_src_node));

	bcopy(&in->addr, &out->addr, sizeof(struct pf_addr));
	bcopy(&in->raddr, &out->raddr, sizeof(struct pf_addr));

	if (in->rule != NULL)
		out->rule.nr = in->rule->nr;

	for (int i = 0; i < 2; i++) {
		out->bytes[i] = counter_u64_fetch(in->bytes[i]);
		out->packets[i] = counter_u64_fetch(in->packets[i]);
	}

	out->states = in->states;
	out->conn = in->conn;
	out->af = in->af;
	out->ruletype = in->ruletype;

	out->creation = secs - in->creation;
	if (out->expire > secs)
		out->expire -= secs;
	else
		out->expire = 0;

	/* Adjust the connection rate estimate. */
	out->conn_rate = in->conn_rate;
	diff = secs - in->conn_rate.last;
	if (diff >= in->conn_rate.seconds)
		out->conn_rate.count = 0;
	else
		out->conn_rate.count -=
		    in->conn_rate.count * diff /
		    in->conn_rate.seconds;
}
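
/*
 * Worked example of the rate adjustment above: with count = 10,
 * seconds = 60 and the last update 40 seconds ago, the count is aged by
 * 10 * 40 / 60 = 6, so userland sees 4.  Once the whole interval has
 * passed (diff >= seconds) the exported count drops to 0.
 */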

#ifdef ALTQ
/*
 * Handle export of struct pf_kaltq to user binaries that may be using any
 * version of struct pf_altq.
 */
static int
pf_export_kaltq(struct pf_altq *q, struct pfioc_altq_v1 *pa, size_t ioc_size)
{
	u_int32_t version;

	if (ioc_size == sizeof(struct pfioc_altq_v0))
		version = 0;
	else
		version = pa->version;

	if (version > PFIOC_ALTQ_VERSION)
		return (EINVAL);

#define ASSIGN(x) exported_q->x = q->x
#define COPY(x) \
	bcopy(&q->x, &exported_q->x, min(sizeof(q->x), sizeof(exported_q->x)))
#define SATU16(x) (u_int32_t)uqmin((x), USHRT_MAX)
#define SATU32(x) (u_int32_t)uqmin((x), UINT_MAX)

	switch (version) {
	case 0: {
		struct pf_altq_v0 *exported_q =
		    &((struct pfioc_altq_v0 *)pa)->altq;

		COPY(ifname);

		ASSIGN(scheduler);
		ASSIGN(tbrsize);
		exported_q->tbrsize = SATU16(q->tbrsize);
		exported_q->ifbandwidth = SATU32(q->ifbandwidth);

		COPY(qname);
		COPY(parent);
		ASSIGN(parent_qid);
		exported_q->bandwidth = SATU32(q->bandwidth);
		ASSIGN(priority);
		ASSIGN(local_flags);

		ASSIGN(qlimit);
		ASSIGN(flags);

		if (q->scheduler == ALTQT_HFSC) {
#define ASSIGN_OPT(x) exported_q->pq_u.hfsc_opts.x = q->pq_u.hfsc_opts.x
#define ASSIGN_OPT_SATU32(x) exported_q->pq_u.hfsc_opts.x = \
			    SATU32(q->pq_u.hfsc_opts.x)

			ASSIGN_OPT_SATU32(rtsc_m1);
			ASSIGN_OPT(rtsc_d);
			ASSIGN_OPT_SATU32(rtsc_m2);

			ASSIGN_OPT_SATU32(lssc_m1);
			ASSIGN_OPT(lssc_d);
			ASSIGN_OPT_SATU32(lssc_m2);

			ASSIGN_OPT_SATU32(ulsc_m1);
			ASSIGN_OPT(ulsc_d);
			ASSIGN_OPT_SATU32(ulsc_m2);

			ASSIGN_OPT(flags);

#undef ASSIGN_OPT
#undef ASSIGN_OPT_SATU32
		} else
			COPY(pq_u);

		ASSIGN(qid);
		break;
	}
	case 1:	{
		struct pf_altq_v1 *exported_q =
		    &((struct pfioc_altq_v1 *)pa)->altq;

		COPY(ifname);

		ASSIGN(scheduler);
		ASSIGN(tbrsize);
		ASSIGN(ifbandwidth);

		COPY(qname);
		COPY(parent);
		ASSIGN(parent_qid);
		ASSIGN(bandwidth);
		ASSIGN(priority);
		ASSIGN(local_flags);

		ASSIGN(qlimit);
		ASSIGN(flags);
		COPY(pq_u);

		ASSIGN(qid);
		break;
	}
	default:
		panic("%s: unhandled struct pfioc_altq version", __func__);
		break;
	}

#undef ASSIGN
#undef COPY
#undef SATU16
#undef SATU32

	return (0);
}
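
/*
 * Saturation example for the v0 export above: a 64-bit ifbandwidth of
 * 10 Gbit/s (10000000000) does not fit the 32-bit v0 field, so SATU32()
 * clamps it to UINT_MAX (4294967295) instead of silently truncating to a
 * misleading low value; SATU16() does the same for tbrsize against
 * USHRT_MAX.
 */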
1684 
1685 /*
1686  * Handle import to struct pf_kaltq of struct pf_altq from user binaries
1687  * that may be using any version of it.
1688  */
1689 static int
pf_import_kaltq(struct pfioc_altq_v1 * pa,struct pf_altq * q,size_t ioc_size)1690 pf_import_kaltq(struct pfioc_altq_v1 *pa, struct pf_altq *q, size_t ioc_size)
1691 {
1692 	u_int32_t version;
1693 
1694 	if (ioc_size == sizeof(struct pfioc_altq_v0))
1695 		version = 0;
1696 	else
1697 		version = pa->version;
1698 
1699 	if (version > PFIOC_ALTQ_VERSION)
1700 		return (EINVAL);
1701 
1702 #define ASSIGN(x) q->x = imported_q->x
1703 #define COPY(x) \
1704 	bcopy(&imported_q->x, &q->x, min(sizeof(imported_q->x), sizeof(q->x)))
1705 
1706 	switch (version) {
1707 	case 0: {
1708 		struct pf_altq_v0 *imported_q =
1709 		    &((struct pfioc_altq_v0 *)pa)->altq;
1710 
1711 		COPY(ifname);
1712 
1713 		ASSIGN(scheduler);
1714 		ASSIGN(tbrsize); /* 16-bit -> 32-bit */
1715 		ASSIGN(ifbandwidth); /* 32-bit -> 64-bit */
1716 
1717 		COPY(qname);
1718 		COPY(parent);
1719 		ASSIGN(parent_qid);
1720 		ASSIGN(bandwidth); /* 32-bit -> 64-bit */
1721 		ASSIGN(priority);
1722 		ASSIGN(local_flags);
1723 
1724 		ASSIGN(qlimit);
1725 		ASSIGN(flags);
1726 
1727 		if (imported_q->scheduler == ALTQT_HFSC) {
1728 #define ASSIGN_OPT(x) q->pq_u.hfsc_opts.x = imported_q->pq_u.hfsc_opts.x
1729 
1730 			/*
1731 			 * The m1 and m2 parameters are being copied from
1732 			 * 32-bit to 64-bit.
1733 			 */
1734 			ASSIGN_OPT(rtsc_m1);
1735 			ASSIGN_OPT(rtsc_d);
1736 			ASSIGN_OPT(rtsc_m2);
1737 
1738 			ASSIGN_OPT(lssc_m1);
1739 			ASSIGN_OPT(lssc_d);
1740 			ASSIGN_OPT(lssc_m2);
1741 
1742 			ASSIGN_OPT(ulsc_m1);
1743 			ASSIGN_OPT(ulsc_d);
1744 			ASSIGN_OPT(ulsc_m2);
1745 
1746 			ASSIGN_OPT(flags);
1747 
1748 #undef ASSIGN_OPT
1749 		} else
1750 			COPY(pq_u);
1751 
1752 		ASSIGN(qid);
1753 		break;
1754 	}
1755 	case 1: {
1756 		struct pf_altq_v1 *imported_q =
1757 		    &((struct pfioc_altq_v1 *)pa)->altq;
1758 
1759 		COPY(ifname);
1760 
1761 		ASSIGN(scheduler);
1762 		ASSIGN(tbrsize);
1763 		ASSIGN(ifbandwidth);
1764 
1765 		COPY(qname);
1766 		COPY(parent);
1767 		ASSIGN(parent_qid);
1768 		ASSIGN(bandwidth);
1769 		ASSIGN(priority);
1770 		ASSIGN(local_flags);
1771 
1772 		ASSIGN(qlimit);
1773 		ASSIGN(flags);
1774 		COPY(pq_u);
1775 
1776 		ASSIGN(qid);
1777 		break;
1778 	}
1779 	default:
1780 		panic("%s: unhandled struct pfioc_altq version", __func__);
1781 		break;
1782 	}
1783 
1784 #undef ASSIGN
1785 #undef COPY
1786 
1787 	return (0);
1788 }
1789 
1790 static struct pf_altq *
1791 pf_altq_get_nth_active(u_int32_t n)
1792 {
1793 	struct pf_altq		*altq;
1794 	u_int32_t		 nr;
1795 
1796 	nr = 0;
1797 	TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
1798 		if (nr == n)
1799 			return (altq);
1800 		nr++;
1801 	}
1802 
1803 	TAILQ_FOREACH(altq, V_pf_altqs_active, entries) {
1804 		if (nr == n)
1805 			return (altq);
1806 		nr++;
1807 	}
1808 
1809 	return (NULL);
1810 }
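
/*
 * pf_altq_get_nth_active() resolves a single flat index into the
 * concatenation of the two active ALTQ lists: the interface queues
 * on V_pf_altq_ifs_active are counted first, followed by the class
 * queues on V_pf_altqs_active.
 */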
1811 #endif /* ALTQ */
1812 
1813 struct pf_krule *
1814 pf_krule_alloc(void)
1815 {
1816 	struct pf_krule *rule;
1817 
1818 	rule = malloc(sizeof(struct pf_krule), M_PFRULE, M_WAITOK | M_ZERO);
1819 	mtx_init(&rule->nat.mtx, "pf_krule_nat_pool", NULL, MTX_DEF);
1820 	mtx_init(&rule->rdr.mtx, "pf_krule_rdr_pool", NULL, MTX_DEF);
1821 	mtx_init(&rule->route.mtx, "pf_krule_route_pool", NULL, MTX_DEF);
1822 	rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
1823 	    M_WAITOK | M_ZERO);
1824 	return (rule);
1825 }
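
/*
 * Everything set up here (the three pool mutexes and the per-CPU
 * timestamp storage) is torn down again by pf_krule_free().  The
 * per-rule counters are allocated later, by the paths that actually
 * insert the rule, such as pf_ioctl_addrule() below.
 */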
1826 
1827 void
1828 pf_krule_free(struct pf_krule *rule)
1829 {
1830 #ifdef PF_WANT_32_TO_64_COUNTER
1831 	bool wowned;
1832 #endif
1833 
1834 	if (rule == NULL)
1835 		return;
1836 
1837 #ifdef PF_WANT_32_TO_64_COUNTER
1838 	if (rule->allrulelinked) {
1839 		wowned = PF_RULES_WOWNED();
1840 		if (!wowned)
1841 			PF_RULES_WLOCK();
1842 		LIST_REMOVE(rule, allrulelist);
1843 		V_pf_allrulecount--;
1844 		if (!wowned)
1845 			PF_RULES_WUNLOCK();
1846 	}
1847 #endif
1848 
1849 	pf_counter_u64_deinit(&rule->evaluations);
1850 	for (int i = 0; i < 2; i++) {
1851 		pf_counter_u64_deinit(&rule->packets[i]);
1852 		pf_counter_u64_deinit(&rule->bytes[i]);
1853 	}
1854 	counter_u64_free(rule->states_cur);
1855 	counter_u64_free(rule->states_tot);
1856 	counter_u64_free(rule->src_nodes);
1857 	uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp);
1858 
1859 	mtx_destroy(&rule->nat.mtx);
1860 	mtx_destroy(&rule->rdr.mtx);
1861 	mtx_destroy(&rule->route.mtx);
1862 	free(rule, M_PFRULE);
1863 }
1864 
1865 void
1866 pf_krule_clear_counters(struct pf_krule *rule)
1867 {
1868 	pf_counter_u64_zero(&rule->evaluations);
1869 	for (int i = 0; i < 2; i++) {
1870 		pf_counter_u64_zero(&rule->packets[i]);
1871 		pf_counter_u64_zero(&rule->bytes[i]);
1872 	}
1873 	counter_u64_zero(rule->states_tot);
1874 }
1875 
1876 static void
1877 pf_kpooladdr_to_pooladdr(const struct pf_kpooladdr *kpool,
1878     struct pf_pooladdr *pool)
1879 {
1880 
1881 	bzero(pool, sizeof(*pool));
1882 	bcopy(&kpool->addr, &pool->addr, sizeof(pool->addr));
1883 	strlcpy(pool->ifname, kpool->ifname, sizeof(pool->ifname));
1884 }
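
/*
 * The destination is zeroed first so that structure padding and any
 * members not explicitly copied cannot leak kernel memory when the
 * pooladdr is later copied out to userland.
 */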
1885 
1886 static int
1887 pf_pooladdr_to_kpooladdr(const struct pf_pooladdr *pool,
1888     struct pf_kpooladdr *kpool)
1889 {
1890 	int ret;
1891 
1892 	bzero(kpool, sizeof(*kpool));
1893 	bcopy(&pool->addr, &kpool->addr, sizeof(kpool->addr));
1894 	ret = pf_user_strcpy(kpool->ifname, pool->ifname,
1895 	    sizeof(kpool->ifname));
1896 	return (ret);
1897 }
1898 
1899 static void
1900 pf_pool_to_kpool(const struct pf_pool *pool, struct pf_kpool *kpool)
1901 {
1902 	_Static_assert(sizeof(pool->key) == sizeof(kpool->key), "");
1903 	_Static_assert(sizeof(pool->counter) == sizeof(kpool->counter), "");
1904 
1905 	bcopy(&pool->key, &kpool->key, sizeof(kpool->key));
1906 	bcopy(&pool->counter, &kpool->counter, sizeof(kpool->counter));
1907 
1908 	kpool->tblidx = pool->tblidx;
1909 	kpool->proxy_port[0] = pool->proxy_port[0];
1910 	kpool->proxy_port[1] = pool->proxy_port[1];
1911 	kpool->opts = pool->opts;
1912 }
1913 
1914 static int
1915 pf_rule_to_krule(const struct pf_rule *rule, struct pf_krule *krule)
1916 {
1917 	int ret;
1918 
1919 #ifndef INET
1920 	if (rule->af == AF_INET) {
1921 		return (EAFNOSUPPORT);
1922 	}
1923 #endif /* INET */
1924 #ifndef INET6
1925 	if (rule->af == AF_INET6) {
1926 		return (EAFNOSUPPORT);
1927 	}
1928 #endif /* INET6 */
1929 
1930 	ret = pf_check_rule_addr(&rule->src);
1931 	if (ret != 0)
1932 		return (ret);
1933 	ret = pf_check_rule_addr(&rule->dst);
1934 	if (ret != 0)
1935 		return (ret);
1936 
1937 	bcopy(&rule->src, &krule->src, sizeof(rule->src));
1938 	bcopy(&rule->dst, &krule->dst, sizeof(rule->dst));
1939 
1940 	ret = pf_user_strcpy(krule->label[0], rule->label, sizeof(rule->label));
1941 	if (ret != 0)
1942 		return (ret);
1943 	ret = pf_user_strcpy(krule->ifname, rule->ifname, sizeof(rule->ifname));
1944 	if (ret != 0)
1945 		return (ret);
1946 	ret = pf_user_strcpy(krule->qname, rule->qname, sizeof(rule->qname));
1947 	if (ret != 0)
1948 		return (ret);
1949 	ret = pf_user_strcpy(krule->pqname, rule->pqname, sizeof(rule->pqname));
1950 	if (ret != 0)
1951 		return (ret);
1952 	ret = pf_user_strcpy(krule->tagname, rule->tagname,
1953 	    sizeof(rule->tagname));
1954 	if (ret != 0)
1955 		return (ret);
1956 	ret = pf_user_strcpy(krule->match_tagname, rule->match_tagname,
1957 	    sizeof(rule->match_tagname));
1958 	if (ret != 0)
1959 		return (ret);
1960 	ret = pf_user_strcpy(krule->overload_tblname, rule->overload_tblname,
1961 	    sizeof(rule->overload_tblname));
1962 	if (ret != 0)
1963 		return (ret);
1964 
1965 	pf_pool_to_kpool(&rule->rpool, &krule->rdr);
1966 
1967 	/* Don't allow userspace to set evaluations, packets or bytes. */
1968 	/* kif, anchor, overload_tbl are not copied over. */
1969 
1970 	krule->os_fingerprint = rule->os_fingerprint;
1971 
1972 	krule->rtableid = rule->rtableid;
1973 	/* pf_rule->timeout is smaller than pf_krule->timeout */
1974 	bcopy(rule->timeout, krule->timeout, sizeof(rule->timeout));
1975 	krule->max_states = rule->max_states;
1976 	krule->max_src_nodes = rule->max_src_nodes;
1977 	krule->max_src_states = rule->max_src_states;
1978 	krule->max_src_conn = rule->max_src_conn;
1979 	krule->max_src_conn_rate.limit = rule->max_src_conn_rate.limit;
1980 	krule->max_src_conn_rate.seconds = rule->max_src_conn_rate.seconds;
1981 	krule->qid = rule->qid;
1982 	krule->pqid = rule->pqid;
1983 	krule->nr = rule->nr;
1984 	krule->prob = rule->prob;
1985 	krule->cuid = rule->cuid;
1986 	krule->cpid = rule->cpid;
1987 
1988 	krule->return_icmp = rule->return_icmp;
1989 	krule->return_icmp6 = rule->return_icmp6;
1990 	krule->max_mss = rule->max_mss;
1991 	krule->tag = rule->tag;
1992 	krule->match_tag = rule->match_tag;
1993 	krule->scrub_flags = rule->scrub_flags;
1994 
1995 	bcopy(&rule->uid, &krule->uid, sizeof(krule->uid));
1996 	bcopy(&rule->gid, &krule->gid, sizeof(krule->gid));
1997 
1998 	krule->rule_flag = rule->rule_flag;
1999 	krule->action = rule->action;
2000 	krule->direction = rule->direction;
2001 	krule->log = rule->log;
2002 	krule->logif = rule->logif;
2003 	krule->quick = rule->quick;
2004 	krule->ifnot = rule->ifnot;
2005 	krule->match_tag_not = rule->match_tag_not;
2006 	krule->natpass = rule->natpass;
2007 
2008 	krule->keep_state = rule->keep_state;
2009 	krule->af = rule->af;
2010 	krule->proto = rule->proto;
2011 	krule->type = rule->type;
2012 	krule->code = rule->code;
2013 	krule->flags = rule->flags;
2014 	krule->flagset = rule->flagset;
2015 	krule->min_ttl = rule->min_ttl;
2016 	krule->allow_opts = rule->allow_opts;
2017 	krule->rt = rule->rt;
2018 	krule->return_ttl = rule->return_ttl;
2019 	krule->tos = rule->tos;
2020 	krule->set_tos = rule->set_tos;
2021 
2022 	krule->flush = rule->flush;
2023 	krule->prio = rule->prio;
2024 	krule->set_prio[0] = rule->set_prio[0];
2025 	krule->set_prio[1] = rule->set_prio[1];
2026 
2027 	bcopy(&rule->divert, &krule->divert, sizeof(krule->divert));
2028 
2029 	return (0);
2030 }
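
/*
 * pf_rule_to_krule() copies member by member on purpose: struct
 * pf_rule and struct pf_krule do not share a layout, the single
 * userland label lands in slot 0 of the kernel's label array, and
 * every string coming from userland goes through pf_user_strcpy()
 * so malformed (unterminated) input is rejected rather than
 * propagated.
 */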
2031 
2032 int
2033 pf_ioctl_getrules(struct pfioc_rule *pr)
2034 {
2035 	struct pf_kruleset	*ruleset;
2036 	struct pf_krule		*tail;
2037 	int			 rs_num;
2038 
2039 	PF_RULES_WLOCK();
2040 	ruleset = pf_find_kruleset(pr->anchor);
2041 	if (ruleset == NULL) {
2042 		PF_RULES_WUNLOCK();
2043 		return (EINVAL);
2044 	}
2045 	rs_num = pf_get_ruleset_number(pr->rule.action);
2046 	if (rs_num >= PF_RULESET_MAX) {
2047 		PF_RULES_WUNLOCK();
2048 		return (EINVAL);
2049 	}
2050 	tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
2051 	    pf_krulequeue);
2052 	if (tail)
2053 		pr->nr = tail->nr + 1;
2054 	else
2055 		pr->nr = 0;
2056 	pr->ticket = ruleset->rules[rs_num].active.ticket;
2057 	PF_RULES_WUNLOCK();
2058 
2059 	return (0);
2060 }
2061 
2062 int
2063 pf_ioctl_addrule(struct pf_krule *rule, uint32_t ticket,
2064     uint32_t pool_ticket, const char *anchor, const char *anchor_call,
2065     uid_t uid, pid_t pid)
2066 {
2067 	struct pf_kruleset	*ruleset;
2068 	struct pf_krule		*tail;
2069 	struct pf_kpooladdr	*pa;
2070 	struct pfi_kkif		*kif = NULL, *rcv_kif = NULL;
2071 	int			 rs_num;
2072 	int			 error = 0;
2073 
2074 	if ((rule->return_icmp >> 8) > ICMP_MAXTYPE) {
2075 		error = EINVAL;
2076 		goto errout_unlocked;
2077 	}
2078 
2079 #define	ERROUT(x)	ERROUT_FUNCTION(errout, x)
2080 
2081 	if (rule->ifname[0])
2082 		kif = pf_kkif_create(M_WAITOK);
2083 	if (rule->rcv_ifname[0])
2084 		rcv_kif = pf_kkif_create(M_WAITOK);
2085 	pf_counter_u64_init(&rule->evaluations, M_WAITOK);
2086 	for (int i = 0; i < 2; i++) {
2087 		pf_counter_u64_init(&rule->packets[i], M_WAITOK);
2088 		pf_counter_u64_init(&rule->bytes[i], M_WAITOK);
2089 	}
2090 	rule->states_cur = counter_u64_alloc(M_WAITOK);
2091 	rule->states_tot = counter_u64_alloc(M_WAITOK);
2092 	rule->src_nodes = counter_u64_alloc(M_WAITOK);
2093 	rule->cuid = uid;
2094 	rule->cpid = pid;
2095 	TAILQ_INIT(&rule->rdr.list);
2096 	TAILQ_INIT(&rule->nat.list);
2097 	TAILQ_INIT(&rule->route.list);
2098 
2099 	PF_CONFIG_LOCK();
2100 	PF_RULES_WLOCK();
2101 #ifdef PF_WANT_32_TO_64_COUNTER
2102 	LIST_INSERT_HEAD(&V_pf_allrulelist, rule, allrulelist);
2103 	MPASS(!rule->allrulelinked);
2104 	rule->allrulelinked = true;
2105 	V_pf_allrulecount++;
2106 #endif
2107 	ruleset = pf_find_kruleset(anchor);
2108 	if (ruleset == NULL)
2109 		ERROUT(EINVAL);
2110 	rs_num = pf_get_ruleset_number(rule->action);
2111 	if (rs_num >= PF_RULESET_MAX)
2112 		ERROUT(EINVAL);
2113 	if (ticket != ruleset->rules[rs_num].inactive.ticket) {
2114 		DPFPRINTF(PF_DEBUG_MISC,
2115 		    ("ticket: %d != [%d]%d\n", ticket, rs_num,
2116 		    ruleset->rules[rs_num].inactive.ticket));
2117 		ERROUT(EBUSY);
2118 	}
2119 	if (pool_ticket != V_ticket_pabuf) {
2120 		DPFPRINTF(PF_DEBUG_MISC,
2121 		    ("pool_ticket: %d != %d\n", pool_ticket,
2122 		    V_ticket_pabuf));
2123 		ERROUT(EBUSY);
2124 	}
2125 	/*
2126 	 * XXXMJG hack: there is no mechanism to ensure they started the
2127 	 * transaction. The ticket checked above may happen to match by accident,
2128 	 * even if nobody called DIOCXBEGIN, let alone this process.
2129 	 * Partially work around it by checking if the RB tree got allocated,
2130 	 * see pf_begin_rules.
2131 	 */
2132 	if (ruleset->rules[rs_num].inactive.tree == NULL) {
2133 		ERROUT(EINVAL);
2134 	}
2135 
2136 	tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
2137 	    pf_krulequeue);
2138 	if (tail)
2139 		rule->nr = tail->nr + 1;
2140 	else
2141 		rule->nr = 0;
2142 	if (rule->ifname[0]) {
2143 		rule->kif = pfi_kkif_attach(kif, rule->ifname);
2144 		kif = NULL;
2145 		pfi_kkif_ref(rule->kif);
2146 	} else
2147 		rule->kif = NULL;
2148 
2149 	if (rule->rcv_ifname[0]) {
2150 		rule->rcv_kif = pfi_kkif_attach(rcv_kif, rule->rcv_ifname);
2151 		rcv_kif = NULL;
2152 		pfi_kkif_ref(rule->rcv_kif);
2153 	} else
2154 		rule->rcv_kif = NULL;
2155 
2156 	if (rule->rtableid > 0 && rule->rtableid >= rt_numfibs)
2157 		error = EBUSY;
2158 
2159 #ifdef ALTQ
2160 	/* set queue IDs */
2161 	if (rule->qname[0] != 0) {
2162 		if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
2163 			error = EBUSY;
2164 		else if (rule->pqname[0] != 0) {
2165 			if ((rule->pqid =
2166 			    pf_qname2qid(rule->pqname)) == 0)
2167 				error = EBUSY;
2168 		} else
2169 			rule->pqid = rule->qid;
2170 	}
2171 #endif
2172 	if (rule->tagname[0])
2173 		if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
2174 			error = EBUSY;
2175 	if (rule->match_tagname[0])
2176 		if ((rule->match_tag =
2177 		    pf_tagname2tag(rule->match_tagname)) == 0)
2178 			error = EBUSY;
2179 	if (rule->rt && !rule->direction)
2180 		error = EINVAL;
2181 	if (!rule->log)
2182 		rule->logif = 0;
2183 	if (pf_addr_setup(ruleset, &rule->src.addr, rule->af))
2184 		error = ENOMEM;
2185 	if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af))
2186 		error = ENOMEM;
2187 	if (pf_kanchor_setup(rule, ruleset, anchor_call))
2188 		error = EINVAL;
2189 	if (rule->scrub_flags & PFSTATE_SETPRIO &&
2190 	    (rule->set_prio[0] > PF_PRIO_MAX ||
2191 	    rule->set_prio[1] > PF_PRIO_MAX))
2192 		error = EINVAL;
2193 	for (int i = 0; i < 3; i++) {
2194 		TAILQ_FOREACH(pa, &V_pf_pabuf[i], entries)
2195 			if (pa->addr.type == PF_ADDR_TABLE) {
2196 				pa->addr.p.tbl = pfr_attach_table(ruleset,
2197 				    pa->addr.v.tblname);
2198 				if (pa->addr.p.tbl == NULL)
2199 					error = ENOMEM;
2200 			}
2201 	}
2202 
2203 	rule->overload_tbl = NULL;
2204 	if (rule->overload_tblname[0]) {
2205 		if ((rule->overload_tbl = pfr_attach_table(ruleset,
2206 		    rule->overload_tblname)) == NULL)
2207 			error = EINVAL;
2208 		else
2209 			rule->overload_tbl->pfrkt_flags |=
2210 			    PFR_TFLAG_ACTIVE;
2211 	}
2212 
2213 	pf_mv_kpool(&V_pf_pabuf[0], &rule->nat.list);
2214 	pf_mv_kpool(&V_pf_pabuf[1], &rule->rdr.list);
2215 	pf_mv_kpool(&V_pf_pabuf[2], &rule->route.list);
2216 	if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
2217 	    (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
2218 	    (rule->rt > PF_NOPFROUTE)) &&
2219 	    (TAILQ_FIRST(&rule->rdr.list) == NULL &&
2220 	     TAILQ_FIRST(&rule->route.list) == NULL))
2221 		error = EINVAL;
2222 
2223 	if (rule->action == PF_PASS && rule->rdr.opts & PF_POOL_STICKYADDR &&
2224 	    !rule->keep_state) {
2225 		error = EINVAL;
2226 	}
2227 
2228 	if (error) {
2229 		pf_free_rule(rule);
2230 		rule = NULL;
2231 		ERROUT(error);
2232 	}
2233 
2234 	rule->nat.cur = TAILQ_FIRST(&rule->nat.list);
2235 	rule->rdr.cur = TAILQ_FIRST(&rule->rdr.list);
2236 	rule->route.cur = TAILQ_FIRST(&rule->route.list);
2237 	TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
2238 	    rule, entries);
2239 	ruleset->rules[rs_num].inactive.rcount++;
2240 
2241 	PF_RULES_WUNLOCK();
2242 	pf_hash_rule(rule);
2243 	if (RB_INSERT(pf_krule_global, ruleset->rules[rs_num].inactive.tree, rule) != NULL) {
2244 		PF_RULES_WLOCK();
2245 		TAILQ_REMOVE(ruleset->rules[rs_num].inactive.ptr, rule, entries);
2246 		ruleset->rules[rs_num].inactive.rcount--;
2247 		pf_free_rule(rule);
2248 		rule = NULL;
2249 		ERROUT(EEXIST);
2250 	}
2251 	PF_CONFIG_UNLOCK();
2252 
2253 	return (0);
2254 
2255 #undef ERROUT
2256 errout:
2257 	PF_RULES_WUNLOCK();
2258 	PF_CONFIG_UNLOCK();
2259 errout_unlocked:
2260 	pf_kkif_free(rcv_kif);
2261 	pf_kkif_free(kif);
2262 	pf_krule_free(rule);
2263 	return (error);
2264 }
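
/*
 * Note that pf_ioctl_addrule() only stages the rule: it is appended
 * to the inactive queue and, after pf_hash_rule() has fingerprinted
 * it, inserted into the inactive RB tree, where a duplicate
 * fingerprint is refused with EEXIST.  The staged set only replaces
 * the active one once the enclosing ticket transaction is committed
 * (see the DIOCXBEGIN note above).
 */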
2265 
2266 static bool
2267 pf_label_match(const struct pf_krule *rule, const char *label)
2268 {
2269 	int i = 0;
2270 
2271 	while (*rule->label[i]) {
2272 		if (strcmp(rule->label[i], label) == 0)
2273 			return (true);
2274 		i++;
2275 	}
2276 
2277 	return (false);
2278 }
2279 
2280 static unsigned int
2281 pf_kill_matching_state(struct pf_state_key_cmp *key, int dir)
2282 {
2283 	struct pf_kstate *s;
2284 	int more = 0;
2285 
2286 	s = pf_find_state_all(key, dir, &more);
2287 	if (s == NULL)
2288 		return (0);
2289 
2290 	if (more) {
2291 		PF_STATE_UNLOCK(s);
2292 		return (0);
2293 	}
2294 
2295 	pf_unlink_state(s);
2296 	return (1);
2297 }
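
/*
 * A state is killed here only if the key identifies it uniquely;
 * when pf_find_state_all() reports further candidates via 'more',
 * the ambiguous match is left alone and nothing is removed.
 */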
2298 
2299 static int
2300 pf_killstates_row(struct pf_kstate_kill *psk, struct pf_idhash *ih)
2301 {
2302 	struct pf_kstate	*s;
2303 	struct pf_state_key	*sk;
2304 	struct pf_addr		*srcaddr, *dstaddr;
2305 	struct pf_state_key_cmp	 match_key;
2306 	int			 idx, killed = 0;
2307 	unsigned int		 dir;
2308 	u_int16_t		 srcport, dstport;
2309 	struct pfi_kkif		*kif;
2310 
2311 relock_DIOCKILLSTATES:
2312 	PF_HASHROW_LOCK(ih);
2313 	LIST_FOREACH(s, &ih->states, entry) {
2314 		/* For floating states look at the original kif. */
2315 		kif = s->kif == V_pfi_all ? s->orig_kif : s->kif;
2316 
2317 		sk = s->key[psk->psk_nat ? PF_SK_STACK : PF_SK_WIRE];
2318 		if (s->direction == PF_OUT) {
2319 			srcaddr = &sk->addr[1];
2320 			dstaddr = &sk->addr[0];
2321 			srcport = sk->port[1];
2322 			dstport = sk->port[0];
2323 		} else {
2324 			srcaddr = &sk->addr[0];
2325 			dstaddr = &sk->addr[1];
2326 			srcport = sk->port[0];
2327 			dstport = sk->port[1];
2328 		}
2329 
2330 		if (psk->psk_af && sk->af != psk->psk_af)
2331 			continue;
2332 
2333 		if (psk->psk_proto && psk->psk_proto != sk->proto)
2334 			continue;
2335 
2336 		if (! PF_MATCHA(psk->psk_src.neg, &psk->psk_src.addr.v.a.addr,
2337 		    &psk->psk_src.addr.v.a.mask, srcaddr, sk->af))
2338 			continue;
2339 
2340 		if (! PF_MATCHA(psk->psk_dst.neg, &psk->psk_dst.addr.v.a.addr,
2341 		    &psk->psk_dst.addr.v.a.mask, dstaddr, sk->af))
2342 			continue;
2343 
2344 		if (! PF_MATCHA(psk->psk_rt_addr.neg,
2345 		    &psk->psk_rt_addr.addr.v.a.addr,
2346 		    &psk->psk_rt_addr.addr.v.a.mask,
2347 		    &s->act.rt_addr, sk->af))
2348 			continue;
2349 
2350 		if (psk->psk_src.port_op != 0 &&
2351 		    ! pf_match_port(psk->psk_src.port_op,
2352 		    psk->psk_src.port[0], psk->psk_src.port[1], srcport))
2353 			continue;
2354 
2355 		if (psk->psk_dst.port_op != 0 &&
2356 		    ! pf_match_port(psk->psk_dst.port_op,
2357 		    psk->psk_dst.port[0], psk->psk_dst.port[1], dstport))
2358 			continue;
2359 
2360 		if (psk->psk_label[0] &&
2361 		    ! pf_label_match(s->rule, psk->psk_label))
2362 			continue;
2363 
2364 		if (psk->psk_ifname[0] && strcmp(psk->psk_ifname,
2365 		    kif->pfik_name))
2366 			continue;
2367 
2368 		if (psk->psk_kill_match) {
2369 			/* Create the key to find matching states, with lock
2370 			 * held. */
2371 
2372 			bzero(&match_key, sizeof(match_key));
2373 
2374 			if (s->direction == PF_OUT) {
2375 				dir = PF_IN;
2376 				idx = psk->psk_nat ? PF_SK_WIRE : PF_SK_STACK;
2377 			} else {
2378 				dir = PF_OUT;
2379 				idx = psk->psk_nat ? PF_SK_STACK : PF_SK_WIRE;
2380 			}
2381 
2382 			match_key.af = s->key[idx]->af;
2383 			match_key.proto = s->key[idx]->proto;
2384 			PF_ACPY(&match_key.addr[0],
2385 			    &s->key[idx]->addr[1], match_key.af);
2386 			match_key.port[0] = s->key[idx]->port[1];
2387 			PF_ACPY(&match_key.addr[1],
2388 			    &s->key[idx]->addr[0], match_key.af);
2389 			match_key.port[1] = s->key[idx]->port[0];
2390 		}
2391 
2392 		pf_unlink_state(s);
2393 		killed++;
2394 
2395 		if (psk->psk_kill_match)
2396 			killed += pf_kill_matching_state(&match_key, dir);
2397 
2398 		goto relock_DIOCKILLSTATES;
2399 	}
2400 	PF_HASHROW_UNLOCK(ih);
2401 
2402 	return (killed);
2403 }
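
/*
 * Unlinking a state releases the hash-row lock, so the LIST_FOREACH()
 * above cannot simply continue after a kill: the row lock is re-taken
 * and the scan restarts from the head of the row via the
 * relock_DIOCKILLSTATES label.  Killed states are no longer on the
 * list, so each restart makes forward progress.
 */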
2404 
2405 int
2406 pf_start(void)
2407 {
2408 	int error = 0;
2409 
2410 	sx_xlock(&V_pf_ioctl_lock);
2411 	if (V_pf_status.running)
2412 		error = EEXIST;
2413 	else {
2414 		hook_pf();
2415 		if (! TAILQ_EMPTY(V_pf_keth->active.rules))
2416 			hook_pf_eth();
2417 		V_pf_status.running = 1;
2418 		V_pf_status.since = time_second;
2419 		new_unrhdr64(&V_pf_stateid, time_second);
2420 
2421 		DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
2422 	}
2423 	sx_xunlock(&V_pf_ioctl_lock);
2424 
2425 	return (error);
2426 }
2427 
2428 int
2429 pf_stop(void)
2430 {
2431 	int error = 0;
2432 
2433 	sx_xlock(&V_pf_ioctl_lock);
2434 	if (!V_pf_status.running)
2435 		error = ENOENT;
2436 	else {
2437 		V_pf_status.running = 0;
2438 		dehook_pf();
2439 		dehook_pf_eth();
2440 		V_pf_status.since = time_second;
2441 		DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
2442 	}
2443 	sx_xunlock(&V_pf_ioctl_lock);
2444 
2445 	return (error);
2446 }
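
/*
 * pf_start() installs the pfil hooks, attaching the Ethernet hooks
 * only when Ethernet rules are actually loaded, and reseeds the
 * 64-bit state-ID generator from the current time; pf_stop() removes
 * both hook sets unconditionally.
 */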
2447 
2448 void
2449 pf_ioctl_clear_status(void)
2450 {
2451 	PF_RULES_WLOCK();
2452 	for (int i = 0; i < PFRES_MAX; i++)
2453 		counter_u64_zero(V_pf_status.counters[i]);
2454 	for (int i = 0; i < FCNT_MAX; i++)
2455 		pf_counter_u64_zero(&V_pf_status.fcounters[i]);
2456 	for (int i = 0; i < SCNT_MAX; i++)
2457 		counter_u64_zero(V_pf_status.scounters[i]);
2458 	for (int i = 0; i < KLCNT_MAX; i++)
2459 		counter_u64_zero(V_pf_status.lcounters[i]);
2460 	V_pf_status.since = time_second;
2461 	if (*V_pf_status.ifname)
2462 		pfi_update_status(V_pf_status.ifname, NULL);
2463 	PF_RULES_WUNLOCK();
2464 }
2465 
2466 int
2467 pf_ioctl_set_timeout(int timeout, int seconds, int *prev_seconds)
2468 {
2469 	uint32_t old;
2470 
2471 	if (timeout < 0 || timeout >= PFTM_MAX ||
2472 	    seconds < 0)
2473 		return (EINVAL);
2474 
2475 	PF_RULES_WLOCK();
2476 	old = V_pf_default_rule.timeout[timeout];
2477 	if (timeout == PFTM_INTERVAL && seconds == 0)
2478 		seconds = 1;
2479 	V_pf_default_rule.timeout[timeout] = seconds;
2480 	if (timeout == PFTM_INTERVAL && seconds < old)
2481 		wakeup(pf_purge_thread);
2482 
2483 	if (prev_seconds != NULL)
2484 		*prev_seconds = old;
2485 
2486 	PF_RULES_WUNLOCK();
2487 
2488 	return (0);
2489 }
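
/*
 * PFTM_INTERVAL is the purge interval: a value of 0 is quietly bumped
 * to 1 so the purge thread keeps running, and lowering the interval
 * wakes the thread immediately so the shorter period takes effect
 * without waiting out the old one.
 */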
2490 
2491 int
2492 pf_ioctl_get_timeout(int timeout, int *seconds)
2493 {
2494 	PF_RULES_RLOCK_TRACKER;
2495 
2496 	if (timeout < 0 || timeout >= PFTM_MAX)
2497 		return (EINVAL);
2498 
2499 	PF_RULES_RLOCK();
2500 	*seconds = V_pf_default_rule.timeout[timeout];
2501 	PF_RULES_RUNLOCK();
2502 
2503 	return (0);
2504 }
2505 
2506 int
2507 pf_ioctl_set_limit(int index, unsigned int limit, unsigned int *old_limit)
2508 {
2509 
2510 	PF_RULES_WLOCK();
2511 	if (index < 0 || index >= PF_LIMIT_MAX ||
2512 	    V_pf_limits[index].zone == NULL) {
2513 		PF_RULES_WUNLOCK();
2514 		return (EINVAL);
2515 	}
2516 	uma_zone_set_max(V_pf_limits[index].zone, limit);
2517 	if (old_limit != NULL)
2518 		*old_limit = V_pf_limits[index].limit;
2519 	V_pf_limits[index].limit = limit;
2520 	PF_RULES_WUNLOCK();
2521 
2522 	return (0);
2523 }
2524 
2525 int
2526 pf_ioctl_get_limit(int index, unsigned int *limit)
2527 {
2528 	PF_RULES_RLOCK_TRACKER;
2529 
2530 	if (index < 0 || index >= PF_LIMIT_MAX)
2531 		return (EINVAL);
2532 
2533 	PF_RULES_RLOCK();
2534 	*limit = V_pf_limits[index].limit;
2535 	PF_RULES_RUNLOCK();
2536 
2537 	return (0);
2538 }
2539 
2540 int
2541 pf_ioctl_begin_addrs(uint32_t *ticket)
2542 {
2543 	PF_RULES_WLOCK();
2544 	pf_empty_kpool(&V_pf_pabuf[0]);
2545 	pf_empty_kpool(&V_pf_pabuf[1]);
2546 	pf_empty_kpool(&V_pf_pabuf[2]);
2547 	*ticket = ++V_ticket_pabuf;
2548 	PF_RULES_WUNLOCK();
2549 
2550 	return (0);
2551 }
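
/*
 * Pool addresses are staged transactionally: pf_ioctl_begin_addrs()
 * flushes the three staging buffers (index 0 = NAT, 1 = RDR,
 * 2 = route) and hands out a fresh ticket, pf_ioctl_add_addr()
 * appends to a buffer only while that ticket is still current, and
 * pf_ioctl_addrule() finally moves the staged pools into the new
 * rule.  A rough caller sketch (illustrative only, not a literal
 * consumer in this file):
 *
 *	uint32_t ticket;
 *	struct pf_nl_pooladdr pp = { .which = PF_RDR };
 *
 *	pf_ioctl_begin_addrs(&ticket);
 *	pp.ticket = ticket;
 *	pf_ioctl_add_addr(&pp);		returns EBUSY if the
 *					ticket went stale
 *	pf_ioctl_addrule(rule, rule_ticket, ticket, ...);
 */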
2552 
2553 int
2554 pf_ioctl_add_addr(struct pf_nl_pooladdr *pp)
2555 {
2556 	struct pf_kpooladdr	*pa = NULL;
2557 	struct pfi_kkif		*kif = NULL;
2558 	int error;
2559 
2560 	if (pp->which != PF_RDR && pp->which != PF_NAT &&
2561 	    pp->which != PF_RT)
2562 		return (EINVAL);
2563 
2564 #ifndef INET
2565 	if (pp->af == AF_INET)
2566 		return (EAFNOSUPPORT);
2567 #endif /* INET */
2568 #ifndef INET6
2569 	if (pp->af == AF_INET6)
2570 		return (EAFNOSUPPORT);
2571 #endif /* INET6 */
2572 
2573 	if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
2574 	    pp->addr.addr.type != PF_ADDR_DYNIFTL &&
2575 	    pp->addr.addr.type != PF_ADDR_TABLE)
2576 		return (EINVAL);
2577 
2578 	if (pp->addr.addr.p.dyn != NULL)
2579 		return (EINVAL);
2580 
2581 	pa = malloc(sizeof(*pa), M_PFRULE, M_WAITOK);
2582 	error = pf_pooladdr_to_kpooladdr(&pp->addr, pa);
2583 	if (error != 0)
2584 		goto out;
2585 	if (pa->ifname[0])
2586 		kif = pf_kkif_create(M_WAITOK);
2587 	PF_RULES_WLOCK();
2588 	if (pp->ticket != V_ticket_pabuf) {
2589 		PF_RULES_WUNLOCK();
2590 		if (pa->ifname[0])
2591 			pf_kkif_free(kif);
2592 		error = EBUSY;
2593 		goto out;
2594 	}
2595 	if (pa->ifname[0]) {
2596 		pa->kif = pfi_kkif_attach(kif, pa->ifname);
2597 		kif = NULL;
2598 		pfi_kkif_ref(pa->kif);
2599 	} else
2600 		pa->kif = NULL;
2601 	if (pa->addr.type == PF_ADDR_DYNIFTL && ((error =
2602 	    pfi_dynaddr_setup(&pa->addr, pp->af)) != 0)) {
2603 		if (pa->ifname[0])
2604 			pfi_kkif_unref(pa->kif);
2605 		PF_RULES_WUNLOCK();
2606 		goto out;
2607 	}
2608 	switch (pp->which) {
2609 	case PF_NAT:
2610 		TAILQ_INSERT_TAIL(&V_pf_pabuf[0], pa, entries);
2611 		break;
2612 	case PF_RDR:
2613 		TAILQ_INSERT_TAIL(&V_pf_pabuf[1], pa, entries);
2614 		break;
2615 	case PF_RT:
2616 		TAILQ_INSERT_TAIL(&V_pf_pabuf[2], pa, entries);
2617 		break;
2618 	}
2619 	PF_RULES_WUNLOCK();
2620 
2621 	return (0);
2622 
2623 out:
2624 	free(pa, M_PFRULE);
2625 	return (error);
2626 }
2627 
2628 int
2629 pf_ioctl_get_addrs(struct pf_nl_pooladdr *pp)
2630 {
2631 	struct pf_kpool		*pool;
2632 	struct pf_kpooladdr	*pa;
2633 
2634 	PF_RULES_RLOCK_TRACKER;
2635 
2636 	if (pp->which != PF_RDR && pp->which != PF_NAT &&
2637 	    pp->which != PF_RT)
2638 		return (EINVAL);
2639 
2640 	pp->anchor[sizeof(pp->anchor) - 1] = 0;
2641 	pp->nr = 0;
2642 
2643 	PF_RULES_RLOCK();
2644 	pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
2645 	    pp->r_num, 0, 1, 0, pp->which);
2646 	if (pool == NULL) {
2647 		PF_RULES_RUNLOCK();
2648 		return (EBUSY);
2649 	}
2650 	TAILQ_FOREACH(pa, &pool->list, entries)
2651 		pp->nr++;
2652 	PF_RULES_RUNLOCK();
2653 
2654 	return (0);
2655 }
2656 
2657 int
2658 pf_ioctl_get_addr(struct pf_nl_pooladdr *pp)
2659 {
2660 	struct pf_kpool		*pool;
2661 	struct pf_kpooladdr	*pa;
2662 	u_int32_t		 nr = 0;
2663 
2664 	if (pp->which != PF_RDR && pp->which != PF_NAT &&
2665 	    pp->which != PF_RT)
2666 		return (EINVAL);
2667 
2668 	PF_RULES_RLOCK_TRACKER;
2669 
2670 	pp->anchor[sizeof(pp->anchor) - 1] = 0;
2671 
2672 	PF_RULES_RLOCK();
2673 	pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
2674 	    pp->r_num, 0, 1, 1, pp->which);
2675 	if (pool == NULL) {
2676 		PF_RULES_RUNLOCK();
2677 		return (EBUSY);
2678 	}
2679 	pa = TAILQ_FIRST(&pool->list);
2680 	while ((pa != NULL) && (nr < pp->nr)) {
2681 		pa = TAILQ_NEXT(pa, entries);
2682 		nr++;
2683 	}
2684 	if (pa == NULL) {
2685 		PF_RULES_RUNLOCK();
2686 		return (EBUSY);
2687 	}
2688 	pf_kpooladdr_to_pooladdr(pa, &pp->addr);
2689 	pf_addr_copyout(&pp->addr.addr);
2690 	PF_RULES_RUNLOCK();
2691 
2692 	return (0);
2693 }
2694 
2695 int
2696 pf_ioctl_get_rulesets(struct pfioc_ruleset *pr)
2697 {
2698 	struct pf_kruleset	*ruleset;
2699 	struct pf_kanchor	*anchor;
2700 
2701 	PF_RULES_RLOCK_TRACKER;
2702 
2703 	pr->path[sizeof(pr->path) - 1] = 0;
2704 
2705 	PF_RULES_RLOCK();
2706 	if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
2707 		PF_RULES_RUNLOCK();
2708 		return (ENOENT);
2709 	}
2710 	pr->nr = 0;
2711 	if (ruleset->anchor == NULL) {
2712 		/* XXX kludge for pf_main_ruleset */
2713 		RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
2714 			if (anchor->parent == NULL)
2715 				pr->nr++;
2716 	} else {
2717 		RB_FOREACH(anchor, pf_kanchor_node,
2718 		    &ruleset->anchor->children)
2719 			pr->nr++;
2720 	}
2721 	PF_RULES_RUNLOCK();
2722 
2723 	return (0);
2724 }
2725 
2726 int
2727 pf_ioctl_get_ruleset(struct pfioc_ruleset *pr)
2728 {
2729 	struct pf_kruleset	*ruleset;
2730 	struct pf_kanchor	*anchor;
2731 	u_int32_t		 nr = 0;
2732 	int			 error = 0;
2733 
2734 	PF_RULES_RLOCK_TRACKER;
2735 
2736 	PF_RULES_RLOCK();
2737 	if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
2738 		PF_RULES_RUNLOCK();
2739 		return (ENOENT);
2740 	}
2741 
2742 	pr->name[0] = 0;
2743 	if (ruleset->anchor == NULL) {
2744 		/* XXX kludge for pf_main_ruleset */
2745 		RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
2746 			if (anchor->parent == NULL && nr++ == pr->nr) {
2747 				strlcpy(pr->name, anchor->name,
2748 				    sizeof(pr->name));
2749 				break;
2750 			}
2751 	} else {
2752 		RB_FOREACH(anchor, pf_kanchor_node,
2753 		    &ruleset->anchor->children)
2754 			if (nr++ == pr->nr) {
2755 				strlcpy(pr->name, anchor->name,
2756 				    sizeof(pr->name));
2757 				break;
2758 			}
2759 	}
2760 	if (!pr->name[0])
2761 		error = EBUSY;
2762 	PF_RULES_RUNLOCK();
2763 
2764 	return (error);
2765 }
2766 
2767 static int
2768 pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
2769 {
2770 	int			 error = 0;
2771 	PF_RULES_RLOCK_TRACKER;
2772 
2773 #define	ERROUT_IOCTL(target, x)					\
2774     do {								\
2775 	    error = (x);						\
2776 	    SDT_PROBE3(pf, ioctl, ioctl, error, cmd, error, __LINE__);	\
2777 	    goto target;						\
2778     } while (0)
2779 
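	/*
	 * ERROUT_IOCTL() records the ioctl command, the errno and the
	 * source line through the pf:ioctl:ioctl:error SDT probe before
	 * jumping to the per-command cleanup label, so failing requests
	 * can be pinpointed from DTrace without extra instrumentation.
	 */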
2780 
2781 	/* XXX keep in sync with switch() below */
2782 	if (securelevel_gt(td->td_ucred, 2))
2783 		switch (cmd) {
2784 		case DIOCGETRULES:
2785 		case DIOCGETRULENV:
2786 		case DIOCGETADDRS:
2787 		case DIOCGETADDR:
2788 		case DIOCGETSTATE:
2789 		case DIOCGETSTATENV:
2790 		case DIOCSETSTATUSIF:
2791 		case DIOCGETSTATUSNV:
2792 		case DIOCCLRSTATUS:
2793 		case DIOCNATLOOK:
2794 		case DIOCSETDEBUG:
2795 #ifdef COMPAT_FREEBSD14
2796 		case DIOCGETSTATES:
2797 		case DIOCGETSTATESV2:
2798 #endif
2799 		case DIOCGETTIMEOUT:
2800 		case DIOCCLRRULECTRS:
2801 		case DIOCGETLIMIT:
2802 		case DIOCGETALTQSV0:
2803 		case DIOCGETALTQSV1:
2804 		case DIOCGETALTQV0:
2805 		case DIOCGETALTQV1:
2806 		case DIOCGETQSTATSV0:
2807 		case DIOCGETQSTATSV1:
2808 		case DIOCGETRULESETS:
2809 		case DIOCGETRULESET:
2810 		case DIOCRGETTABLES:
2811 		case DIOCRGETTSTATS:
2812 		case DIOCRCLRTSTATS:
2813 		case DIOCRCLRADDRS:
2814 		case DIOCRADDADDRS:
2815 		case DIOCRDELADDRS:
2816 		case DIOCRSETADDRS:
2817 		case DIOCRGETADDRS:
2818 		case DIOCRGETASTATS:
2819 		case DIOCRCLRASTATS:
2820 		case DIOCRTSTADDRS:
2821 		case DIOCOSFPGET:
2822 		case DIOCGETSRCNODES:
2823 		case DIOCCLRSRCNODES:
2824 		case DIOCGETSYNCOOKIES:
2825 		case DIOCIGETIFACES:
2826 		case DIOCGIFSPEEDV0:
2827 		case DIOCGIFSPEEDV1:
2828 		case DIOCSETIFFLAG:
2829 		case DIOCCLRIFFLAG:
2830 		case DIOCGETETHRULES:
2831 		case DIOCGETETHRULE:
2832 		case DIOCGETETHRULESETS:
2833 		case DIOCGETETHRULESET:
2834 			break;
2835 		case DIOCRCLRTABLES:
2836 		case DIOCRADDTABLES:
2837 		case DIOCRDELTABLES:
2838 		case DIOCRSETTFLAGS:
2839 			if (((struct pfioc_table *)addr)->pfrio_flags &
2840 			    PFR_FLAG_DUMMY)
2841 				break; /* dummy operation ok */
2842 			return (EPERM);
2843 		default:
2844 			return (EPERM);
2845 		}
2846 
2847 	if (!(flags & FWRITE))
2848 		switch (cmd) {
2849 		case DIOCGETRULES:
2850 		case DIOCGETADDRS:
2851 		case DIOCGETADDR:
2852 		case DIOCGETSTATE:
2853 		case DIOCGETSTATENV:
2854 		case DIOCGETSTATUSNV:
2855 #ifdef COMPAT_FREEBSD14
2856 		case DIOCGETSTATES:
2857 		case DIOCGETSTATESV2:
2858 #endif
2859 		case DIOCGETTIMEOUT:
2860 		case DIOCGETLIMIT:
2861 		case DIOCGETALTQSV0:
2862 		case DIOCGETALTQSV1:
2863 		case DIOCGETALTQV0:
2864 		case DIOCGETALTQV1:
2865 		case DIOCGETQSTATSV0:
2866 		case DIOCGETQSTATSV1:
2867 		case DIOCGETRULESETS:
2868 		case DIOCGETRULESET:
2869 		case DIOCNATLOOK:
2870 		case DIOCRGETTABLES:
2871 		case DIOCRGETTSTATS:
2872 		case DIOCRGETADDRS:
2873 		case DIOCRGETASTATS:
2874 		case DIOCRTSTADDRS:
2875 		case DIOCOSFPGET:
2876 		case DIOCGETSRCNODES:
2877 		case DIOCGETSYNCOOKIES:
2878 		case DIOCIGETIFACES:
2879 		case DIOCGIFSPEEDV1:
2880 		case DIOCGIFSPEEDV0:
2881 		case DIOCGETRULENV:
2882 		case DIOCGETETHRULES:
2883 		case DIOCGETETHRULE:
2884 		case DIOCGETETHRULESETS:
2885 		case DIOCGETETHRULESET:
2886 			break;
2887 		case DIOCRCLRTABLES:
2888 		case DIOCRADDTABLES:
2889 		case DIOCRDELTABLES:
2890 		case DIOCRCLRTSTATS:
2891 		case DIOCRCLRADDRS:
2892 		case DIOCRADDADDRS:
2893 		case DIOCRDELADDRS:
2894 		case DIOCRSETADDRS:
2895 		case DIOCRSETTFLAGS:
2896 			if (((struct pfioc_table *)addr)->pfrio_flags &
2897 			    PFR_FLAG_DUMMY) {
2898 				flags |= FWRITE; /* need write lock for dummy */
2899 				break; /* dummy operation ok */
2900 			}
2901 			return (EACCES);
2902 		default:
2903 			return (EACCES);
2904 		}
2905 
2906 	CURVNET_SET(TD_TO_VNET(td));
2907 
2908 	switch (cmd) {
2909 #ifdef COMPAT_FREEBSD14
2910 	case DIOCSTART:
2911 		error = pf_start();
2912 		break;
2913 
2914 	case DIOCSTOP:
2915 		error = pf_stop();
2916 		break;
2917 #endif
2918 
2919 	case DIOCGETETHRULES: {
2920 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
2921 		nvlist_t		*nvl;
2922 		void			*packed;
2923 		struct pf_keth_rule	*tail;
2924 		struct pf_keth_ruleset	*rs;
2925 		u_int32_t		 ticket, nr;
2926 		const char		*anchor = "";
2927 
2928 		nvl = NULL;
2929 		packed = NULL;
2930 
2931 #define	ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULES_error, x)
2932 
2933 		if (nv->len > pf_ioctl_maxcount)
2934 			ERROUT(ENOMEM);
2935 
2936 		/* Copy the request in */
2937 		packed = malloc(nv->len, M_NVLIST, M_WAITOK);
2938 		error = copyin(nv->data, packed, nv->len);
2939 		if (error)
2940 			ERROUT(error);
2941 
2942 		nvl = nvlist_unpack(packed, nv->len, 0);
2943 		if (nvl == NULL)
2944 			ERROUT(EBADMSG);
2945 
2946 		if (! nvlist_exists_string(nvl, "anchor"))
2947 			ERROUT(EBADMSG);
2948 
2949 		anchor = nvlist_get_string(nvl, "anchor");
2950 
2951 		rs = pf_find_keth_ruleset(anchor);
2952 
2953 		nvlist_destroy(nvl);
2954 		nvl = NULL;
2955 		free(packed, M_NVLIST);
2956 		packed = NULL;
2957 
2958 		if (rs == NULL)
2959 			ERROUT(ENOENT);
2960 
2961 		/* Reply */
2962 		nvl = nvlist_create(0);
2963 		if (nvl == NULL)
2964 			ERROUT(ENOMEM);
2965 
2966 		PF_RULES_RLOCK();
2967 
2968 		ticket = rs->active.ticket;
2969 		tail = TAILQ_LAST(rs->active.rules, pf_keth_ruleq);
2970 		if (tail)
2971 			nr = tail->nr + 1;
2972 		else
2973 			nr = 0;
2974 
2975 		PF_RULES_RUNLOCK();
2976 
2977 		nvlist_add_number(nvl, "ticket", ticket);
2978 		nvlist_add_number(nvl, "nr", nr);
2979 
2980 		packed = nvlist_pack(nvl, &nv->len);
2981 		if (packed == NULL)
2982 			ERROUT(ENOMEM);
2983 
2984 		if (nv->size == 0)
2985 			ERROUT(0);
2986 		else if (nv->size < nv->len)
2987 			ERROUT(ENOSPC);
2988 
2989 		error = copyout(packed, nv->data, nv->len);
2990 
2991 #undef ERROUT
2992 DIOCGETETHRULES_error:
2993 		free(packed, M_NVLIST);
2994 		nvlist_destroy(nvl);
2995 		break;
2996 	}
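
	/*
	 * The nvlist-based commands share a calling convention: nv->len
	 * is the packed request/reply length and nv->size is the buffer
	 * space userland provided.  A reply with nv->size == 0 succeeds
	 * without copying anything out, letting userland learn the
	 * required length from the updated nv->len, while
	 * nv->size < nv->len fails with ENOSPC.
	 */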
2997 
2998 	case DIOCGETETHRULE: {
2999 		struct epoch_tracker	 et;
3000 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3001 		nvlist_t		*nvl = NULL;
3002 		void			*nvlpacked = NULL;
3003 		struct pf_keth_rule	*rule = NULL;
3004 		struct pf_keth_ruleset	*rs;
3005 		u_int32_t		 ticket, nr;
3006 		bool			 clear = false;
3007 		const char		*anchor;
3008 
3009 #define ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULE_error, x)
3010 
3011 		if (nv->len > pf_ioctl_maxcount)
3012 			ERROUT(ENOMEM);
3013 
3014 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3015 		error = copyin(nv->data, nvlpacked, nv->len);
3016 		if (error)
3017 			ERROUT(error);
3018 
3019 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3020 		if (nvl == NULL)
3021 			ERROUT(EBADMSG);
3022 		if (! nvlist_exists_number(nvl, "ticket"))
3023 			ERROUT(EBADMSG);
3024 		ticket = nvlist_get_number(nvl, "ticket");
3025 		if (! nvlist_exists_string(nvl, "anchor"))
3026 			ERROUT(EBADMSG);
3027 		anchor = nvlist_get_string(nvl, "anchor");
3028 
3029 		if (nvlist_exists_bool(nvl, "clear"))
3030 			clear = nvlist_get_bool(nvl, "clear");
3031 
3032 		if (clear && !(flags & FWRITE))
3033 			ERROUT(EACCES);
3034 
3035 		if (! nvlist_exists_number(nvl, "nr"))
3036 			ERROUT(EBADMSG);
3037 		nr = nvlist_get_number(nvl, "nr");
3038 
3039 		PF_RULES_RLOCK();
3040 		rs = pf_find_keth_ruleset(anchor);
3041 		if (rs == NULL) {
3042 			PF_RULES_RUNLOCK();
3043 			ERROUT(ENOENT);
3044 		}
3045 		if (ticket != rs->active.ticket) {
3046 			PF_RULES_RUNLOCK();
3047 			ERROUT(EBUSY);
3048 		}
3049 
3050 		nvlist_destroy(nvl);
3051 		nvl = NULL;
3052 		free(nvlpacked, M_NVLIST);
3053 		nvlpacked = NULL;
3054 
3055 		rule = TAILQ_FIRST(rs->active.rules);
3056 		while ((rule != NULL) && (rule->nr != nr))
3057 			rule = TAILQ_NEXT(rule, entries);
3058 		if (rule == NULL) {
3059 			PF_RULES_RUNLOCK();
3060 			ERROUT(ENOENT);
3061 		}
3062 		/* Make sure rule can't go away. */
3063 		NET_EPOCH_ENTER(et);
3064 		PF_RULES_RUNLOCK();
3065 		nvl = pf_keth_rule_to_nveth_rule(rule);
3066 		if (pf_keth_anchor_nvcopyout(rs, rule, nvl)) {
3067 			NET_EPOCH_EXIT(et);
3068 			ERROUT(EBUSY);
3069 		}
3070 		NET_EPOCH_EXIT(et);
3071 		if (nvl == NULL)
3072 			ERROUT(ENOMEM);
3073 
3074 		nvlpacked = nvlist_pack(nvl, &nv->len);
3075 		if (nvlpacked == NULL)
3076 			ERROUT(ENOMEM);
3077 
3078 		if (nv->size == 0)
3079 			ERROUT(0);
3080 		else if (nv->size < nv->len)
3081 			ERROUT(ENOSPC);
3082 
3083 		error = copyout(nvlpacked, nv->data, nv->len);
3084 		if (error == 0 && clear) {
3085 			counter_u64_zero(rule->evaluations);
3086 			for (int i = 0; i < 2; i++) {
3087 				counter_u64_zero(rule->packets[i]);
3088 				counter_u64_zero(rule->bytes[i]);
3089 			}
3090 		}
3091 
3092 #undef ERROUT
3093 DIOCGETETHRULE_error:
3094 		free(nvlpacked, M_NVLIST);
3095 		nvlist_destroy(nvl);
3096 		break;
3097 	}
3098 
3099 	case DIOCADDETHRULE: {
3100 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3101 		nvlist_t		*nvl = NULL;
3102 		void			*nvlpacked = NULL;
3103 		struct pf_keth_rule	*rule = NULL, *tail = NULL;
3104 		struct pf_keth_ruleset	*ruleset = NULL;
3105 		struct pfi_kkif		*kif = NULL, *bridge_to_kif = NULL;
3106 		const char		*anchor = "", *anchor_call = "";
3107 
3108 #define ERROUT(x)	ERROUT_IOCTL(DIOCADDETHRULE_error, x)
3109 
3110 		if (nv->len > pf_ioctl_maxcount)
3111 			ERROUT(ENOMEM);
3112 
3113 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3114 		error = copyin(nv->data, nvlpacked, nv->len);
3115 		if (error)
3116 			ERROUT(error);
3117 
3118 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3119 		if (nvl == NULL)
3120 			ERROUT(EBADMSG);
3121 
3122 		if (! nvlist_exists_number(nvl, "ticket"))
3123 			ERROUT(EBADMSG);
3124 
3125 		if (nvlist_exists_string(nvl, "anchor"))
3126 			anchor = nvlist_get_string(nvl, "anchor");
3127 		if (nvlist_exists_string(nvl, "anchor_call"))
3128 			anchor_call = nvlist_get_string(nvl, "anchor_call");
3129 
3130 		ruleset = pf_find_keth_ruleset(anchor);
3131 		if (ruleset == NULL)
3132 			ERROUT(EINVAL);
3133 
3134 		if (nvlist_get_number(nvl, "ticket") !=
3135 		    ruleset->inactive.ticket) {
3136 			DPFPRINTF(PF_DEBUG_MISC,
3137 			    ("ticket: %d != %d\n",
3138 			    (u_int32_t)nvlist_get_number(nvl, "ticket"),
3139 			    ruleset->inactive.ticket));
3140 			ERROUT(EBUSY);
3141 		}
3142 
3143 		rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK);
3144 		rule->timestamp = NULL;
3145 
3146 		error = pf_nveth_rule_to_keth_rule(nvl, rule);
3147 		if (error != 0)
3148 			ERROUT(error);
3149 
3150 		if (rule->ifname[0])
3151 			kif = pf_kkif_create(M_WAITOK);
3152 		if (rule->bridge_to_name[0])
3153 			bridge_to_kif = pf_kkif_create(M_WAITOK);
3154 		rule->evaluations = counter_u64_alloc(M_WAITOK);
3155 		for (int i = 0; i < 2; i++) {
3156 			rule->packets[i] = counter_u64_alloc(M_WAITOK);
3157 			rule->bytes[i] = counter_u64_alloc(M_WAITOK);
3158 		}
3159 		rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
3160 		    M_WAITOK | M_ZERO);
3161 
3162 		PF_RULES_WLOCK();
3163 
3164 		if (rule->ifname[0]) {
3165 			rule->kif = pfi_kkif_attach(kif, rule->ifname);
3166 			pfi_kkif_ref(rule->kif);
3167 		} else
3168 			rule->kif = NULL;
3169 		if (rule->bridge_to_name[0]) {
3170 			rule->bridge_to = pfi_kkif_attach(bridge_to_kif,
3171 			    rule->bridge_to_name);
3172 			pfi_kkif_ref(rule->bridge_to);
3173 		} else
3174 			rule->bridge_to = NULL;
3175 
3176 #ifdef ALTQ
3177 		/* set queue IDs */
3178 		if (rule->qname[0] != 0) {
3179 			if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
3180 				error = EBUSY;
3183 		}
3184 #endif
3185 		if (rule->tagname[0])
3186 			if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
3187 				error = EBUSY;
3188 		if (rule->match_tagname[0])
3189 			if ((rule->match_tag = pf_tagname2tag(
3190 			    rule->match_tagname)) == 0)
3191 				error = EBUSY;
3192 
3193 		if (error == 0 && rule->ipdst.addr.type == PF_ADDR_TABLE)
3194 			error = pf_eth_addr_setup(ruleset, &rule->ipdst.addr);
3195 		if (error == 0 && rule->ipsrc.addr.type == PF_ADDR_TABLE)
3196 			error = pf_eth_addr_setup(ruleset, &rule->ipsrc.addr);
3197 
3198 		if (error) {
3199 			pf_free_eth_rule(rule);
3200 			PF_RULES_WUNLOCK();
3201 			ERROUT(error);
3202 		}
3203 
3204 		if (pf_keth_anchor_setup(rule, ruleset, anchor_call)) {
3205 			pf_free_eth_rule(rule);
3206 			PF_RULES_WUNLOCK();
3207 			ERROUT(EINVAL);
3208 		}
3209 
3210 		tail = TAILQ_LAST(ruleset->inactive.rules, pf_keth_ruleq);
3211 		if (tail)
3212 			rule->nr = tail->nr + 1;
3213 		else
3214 			rule->nr = 0;
3215 
3216 		TAILQ_INSERT_TAIL(ruleset->inactive.rules, rule, entries);
3217 
3218 		PF_RULES_WUNLOCK();
3219 
3220 #undef ERROUT
3221 DIOCADDETHRULE_error:
3222 		nvlist_destroy(nvl);
3223 		free(nvlpacked, M_NVLIST);
3224 		break;
3225 	}
3226 
3227 	case DIOCGETETHRULESETS: {
3228 		struct epoch_tracker	 et;
3229 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3230 		nvlist_t		*nvl = NULL;
3231 		void			*nvlpacked = NULL;
3232 		struct pf_keth_ruleset	*ruleset;
3233 		struct pf_keth_anchor	*anchor;
3234 		int			 nr = 0;
3235 
3236 #define ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULESETS_error, x)
3237 
3238 		if (nv->len > pf_ioctl_maxcount)
3239 			ERROUT(ENOMEM);
3240 
3241 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3242 		error = copyin(nv->data, nvlpacked, nv->len);
3243 		if (error)
3244 			ERROUT(error);
3245 
3246 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3247 		if (nvl == NULL)
3248 			ERROUT(EBADMSG);
3249 		if (! nvlist_exists_string(nvl, "path"))
3250 			ERROUT(EBADMSG);
3251 
3252 		NET_EPOCH_ENTER(et);
3253 
3254 		if ((ruleset = pf_find_keth_ruleset(
3255 		    nvlist_get_string(nvl, "path"))) == NULL) {
3256 			NET_EPOCH_EXIT(et);
3257 			ERROUT(ENOENT);
3258 		}
3259 
3260 		if (ruleset->anchor == NULL) {
3261 			RB_FOREACH(anchor, pf_keth_anchor_global, &V_pf_keth_anchors)
3262 				if (anchor->parent == NULL)
3263 					nr++;
3264 		} else {
3265 			RB_FOREACH(anchor, pf_keth_anchor_node,
3266 			    &ruleset->anchor->children)
3267 				nr++;
3268 		}
3269 
3270 		NET_EPOCH_EXIT(et);
3271 
3272 		nvlist_destroy(nvl);
3273 		nvl = NULL;
3274 		free(nvlpacked, M_NVLIST);
3275 		nvlpacked = NULL;
3276 
3277 		nvl = nvlist_create(0);
3278 		if (nvl == NULL)
3279 			ERROUT(ENOMEM);
3280 
3281 		nvlist_add_number(nvl, "nr", nr);
3282 
3283 		nvlpacked = nvlist_pack(nvl, &nv->len);
3284 		if (nvlpacked == NULL)
3285 			ERROUT(ENOMEM);
3286 
3287 		if (nv->size == 0)
3288 			ERROUT(0);
3289 		else if (nv->size < nv->len)
3290 			ERROUT(ENOSPC);
3291 
3292 		error = copyout(nvlpacked, nv->data, nv->len);
3293 
3294 #undef ERROUT
3295 DIOCGETETHRULESETS_error:
3296 		free(nvlpacked, M_NVLIST);
3297 		nvlist_destroy(nvl);
3298 		break;
3299 	}
3300 
3301 	case DIOCGETETHRULESET: {
3302 		struct epoch_tracker	 et;
3303 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3304 		nvlist_t		*nvl = NULL;
3305 		void			*nvlpacked = NULL;
3306 		struct pf_keth_ruleset	*ruleset;
3307 		struct pf_keth_anchor	*anchor;
3308 		int			 nr = 0, req_nr = 0;
3309 		bool			 found = false;
3310 
3311 #define ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULESET_error, x)
3312 
3313 		if (nv->len > pf_ioctl_maxcount)
3314 			ERROUT(ENOMEM);
3315 
3316 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3317 		error = copyin(nv->data, nvlpacked, nv->len);
3318 		if (error)
3319 			ERROUT(error);
3320 
3321 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3322 		if (nvl == NULL)
3323 			ERROUT(EBADMSG);
3324 		if (! nvlist_exists_string(nvl, "path"))
3325 			ERROUT(EBADMSG);
3326 		if (! nvlist_exists_number(nvl, "nr"))
3327 			ERROUT(EBADMSG);
3328 
3329 		req_nr = nvlist_get_number(nvl, "nr");
3330 
3331 		NET_EPOCH_ENTER(et);
3332 
3333 		if ((ruleset = pf_find_keth_ruleset(
3334 		    nvlist_get_string(nvl, "path"))) == NULL) {
3335 			NET_EPOCH_EXIT(et);
3336 			ERROUT(ENOENT);
3337 		}
3338 
3339 		nvlist_destroy(nvl);
3340 		nvl = NULL;
3341 		free(nvlpacked, M_NVLIST);
3342 		nvlpacked = NULL;
3343 
3344 		nvl = nvlist_create(0);
3345 		if (nvl == NULL) {
3346 			NET_EPOCH_EXIT(et);
3347 			ERROUT(ENOMEM);
3348 		}
3349 
3350 		if (ruleset->anchor == NULL) {
3351 			RB_FOREACH(anchor, pf_keth_anchor_global,
3352 			    &V_pf_keth_anchors) {
3353 				if (anchor->parent == NULL && nr++ == req_nr) {
3354 					found = true;
3355 					break;
3356 				}
3357 			}
3358 		} else {
3359 			RB_FOREACH(anchor, pf_keth_anchor_node,
3360 			     &ruleset->anchor->children) {
3361 				if (nr++ == req_nr) {
3362 					found = true;
3363 					break;
3364 				}
3365 			}
3366 		}
3367 
3368 		NET_EPOCH_EXIT(et);
3369 		if (found) {
3370 			nvlist_add_number(nvl, "nr", nr);
3371 			nvlist_add_string(nvl, "name", anchor->name);
3372 			if (ruleset->anchor)
3373 				nvlist_add_string(nvl, "path",
3374 				    ruleset->anchor->path);
3375 			else
3376 				nvlist_add_string(nvl, "path", "");
3377 		} else {
3378 			ERROUT(EBUSY);
3379 		}
3380 
3381 		nvlpacked = nvlist_pack(nvl, &nv->len);
3382 		if (nvlpacked == NULL)
3383 			ERROUT(ENOMEM);
3384 
3385 		if (nv->size == 0)
3386 			ERROUT(0);
3387 		else if (nv->size < nv->len)
3388 			ERROUT(ENOSPC);
3389 
3390 		error = copyout(nvlpacked, nv->data, nv->len);
3391 
3392 #undef ERROUT
3393 DIOCGETETHRULESET_error:
3394 		free(nvlpacked, M_NVLIST);
3395 		nvlist_destroy(nvl);
3396 		break;
3397 	}
3398 
3399 	case DIOCADDRULENV: {
3400 		struct pfioc_nv	*nv = (struct pfioc_nv *)addr;
3401 		nvlist_t	*nvl = NULL;
3402 		void		*nvlpacked = NULL;
3403 		struct pf_krule	*rule = NULL;
3404 		const char	*anchor = "", *anchor_call = "";
3405 		uint32_t	 ticket = 0, pool_ticket = 0;
3406 
3407 #define	ERROUT(x)	ERROUT_IOCTL(DIOCADDRULENV_error, x)
3408 
3409 		if (nv->len > pf_ioctl_maxcount)
3410 			ERROUT(ENOMEM);
3411 
3412 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3413 		error = copyin(nv->data, nvlpacked, nv->len);
3414 		if (error)
3415 			ERROUT(error);
3416 
3417 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3418 		if (nvl == NULL)
3419 			ERROUT(EBADMSG);
3420 
3421 		if (! nvlist_exists_number(nvl, "ticket"))
3422 			ERROUT(EINVAL);
3423 		ticket = nvlist_get_number(nvl, "ticket");
3424 
3425 		if (! nvlist_exists_number(nvl, "pool_ticket"))
3426 			ERROUT(EINVAL);
3427 		pool_ticket = nvlist_get_number(nvl, "pool_ticket");
3428 
3429 		if (! nvlist_exists_nvlist(nvl, "rule"))
3430 			ERROUT(EINVAL);
3431 
3432 		rule = pf_krule_alloc();
3433 		error = pf_nvrule_to_krule(nvlist_get_nvlist(nvl, "rule"),
3434 		    rule);
3435 		if (error)
3436 			ERROUT(error);
3437 
3438 		if (nvlist_exists_string(nvl, "anchor"))
3439 			anchor = nvlist_get_string(nvl, "anchor");
3440 		if (nvlist_exists_string(nvl, "anchor_call"))
3441 			anchor_call = nvlist_get_string(nvl, "anchor_call");
3442 
3443 		if ((error = nvlist_error(nvl)))
3444 			ERROUT(error);
3445 
3446 		/* Frees rule on error */
3447 		error = pf_ioctl_addrule(rule, ticket, pool_ticket, anchor,
3448 		    anchor_call, td->td_ucred->cr_ruid,
3449 		    td->td_proc ? td->td_proc->p_pid : 0);
3450 
3451 		nvlist_destroy(nvl);
3452 		free(nvlpacked, M_NVLIST);
3453 		break;
3454 #undef ERROUT
3455 DIOCADDRULENV_error:
3456 		pf_krule_free(rule);
3457 		nvlist_destroy(nvl);
3458 		free(nvlpacked, M_NVLIST);
3459 
3460 		break;
3461 	}
3462 	case DIOCADDRULE: {
3463 		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
3464 		struct pf_krule		*rule;
3465 
3466 		rule = pf_krule_alloc();
3467 		error = pf_rule_to_krule(&pr->rule, rule);
3468 		if (error != 0) {
3469 			pf_krule_free(rule);
3470 			break;
3471 		}
3472 
3473 		pr->anchor[sizeof(pr->anchor) - 1] = 0;
3474 
3475 		/* Frees rule on error */
3476 		error = pf_ioctl_addrule(rule, pr->ticket, pr->pool_ticket,
3477 		    pr->anchor, pr->anchor_call, td->td_ucred->cr_ruid,
3478 		    td->td_proc ? td->td_proc->p_pid : 0);
3479 		break;
3480 	}
3481 
3482 	case DIOCGETRULES: {
3483 		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
3484 
3485 		pr->anchor[sizeof(pr->anchor) - 1] = 0;
3486 
3487 		error = pf_ioctl_getrules(pr);
3488 
3489 		break;
3490 	}
3491 
3492 	case DIOCGETRULENV: {
3493 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3494 		nvlist_t		*nvrule = NULL;
3495 		nvlist_t		*nvl = NULL;
3496 		struct pf_kruleset	*ruleset;
3497 		struct pf_krule		*rule;
3498 		void			*nvlpacked = NULL;
3499 		int			 rs_num, nr;
3500 		bool			 clear_counter = false;
3501 
3502 #define	ERROUT(x)	ERROUT_IOCTL(DIOCGETRULENV_error, x)
3503 
3504 		if (nv->len > pf_ioctl_maxcount)
3505 			ERROUT(ENOMEM);
3506 
3507 		/* Copy the request in */
3508 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3509 		error = copyin(nv->data, nvlpacked, nv->len);
3510 		if (error)
3511 			ERROUT(error);
3512 
3513 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3514 		if (nvl == NULL)
3515 			ERROUT(EBADMSG);
3516 
3517 		if (! nvlist_exists_string(nvl, "anchor"))
3518 			ERROUT(EBADMSG);
3519 		if (! nvlist_exists_number(nvl, "ruleset"))
3520 			ERROUT(EBADMSG);
3521 		if (! nvlist_exists_number(nvl, "ticket"))
3522 			ERROUT(EBADMSG);
3523 		if (! nvlist_exists_number(nvl, "nr"))
3524 			ERROUT(EBADMSG);
3525 
3526 		if (nvlist_exists_bool(nvl, "clear_counter"))
3527 			clear_counter = nvlist_get_bool(nvl, "clear_counter");
3528 
3529 		if (clear_counter && !(flags & FWRITE))
3530 			ERROUT(EACCES);
3531 
3532 		nr = nvlist_get_number(nvl, "nr");
3533 
3534 		PF_RULES_WLOCK();
3535 		ruleset = pf_find_kruleset(nvlist_get_string(nvl, "anchor"));
3536 		if (ruleset == NULL) {
3537 			PF_RULES_WUNLOCK();
3538 			ERROUT(ENOENT);
3539 		}
3540 
3541 		rs_num = pf_get_ruleset_number(nvlist_get_number(nvl, "ruleset"));
3542 		if (rs_num >= PF_RULESET_MAX) {
3543 			PF_RULES_WUNLOCK();
3544 			ERROUT(EINVAL);
3545 		}
3546 
3547 		if (nvlist_get_number(nvl, "ticket") !=
3548 		    ruleset->rules[rs_num].active.ticket) {
3549 			PF_RULES_WUNLOCK();
3550 			ERROUT(EBUSY);
3551 		}
3552 
3553 		if ((error = nvlist_error(nvl))) {
3554 			PF_RULES_WUNLOCK();
3555 			ERROUT(error);
3556 		}
3557 
3558 		rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
3559 		while ((rule != NULL) && (rule->nr != nr))
3560 			rule = TAILQ_NEXT(rule, entries);
3561 		if (rule == NULL) {
3562 			PF_RULES_WUNLOCK();
3563 			ERROUT(EBUSY);
3564 		}
3565 
3566 		nvrule = pf_krule_to_nvrule(rule);
3567 
3568 		nvlist_destroy(nvl);
3569 		nvl = nvlist_create(0);
3570 		if (nvl == NULL) {
3571 			PF_RULES_WUNLOCK();
3572 			ERROUT(ENOMEM);
3573 		}
3574 		nvlist_add_number(nvl, "nr", nr);
3575 		nvlist_add_nvlist(nvl, "rule", nvrule);
3576 		nvlist_destroy(nvrule);
3577 		nvrule = NULL;
3578 		if (pf_kanchor_nvcopyout(ruleset, rule, nvl)) {
3579 			PF_RULES_WUNLOCK();
3580 			ERROUT(EBUSY);
3581 		}
3582 
3583 		free(nvlpacked, M_NVLIST);
3584 		nvlpacked = nvlist_pack(nvl, &nv->len);
3585 		if (nvlpacked == NULL) {
3586 			PF_RULES_WUNLOCK();
3587 			ERROUT(ENOMEM);
3588 		}
3589 
3590 		if (nv->size == 0) {
3591 			PF_RULES_WUNLOCK();
3592 			ERROUT(0);
3593 		}
3594 		else if (nv->size < nv->len) {
3595 			PF_RULES_WUNLOCK();
3596 			ERROUT(ENOSPC);
3597 		}
3598 
3599 		if (clear_counter)
3600 			pf_krule_clear_counters(rule);
3601 
3602 		PF_RULES_WUNLOCK();
3603 
3604 		error = copyout(nvlpacked, nv->data, nv->len);
3605 
3606 #undef ERROUT
3607 DIOCGETRULENV_error:
3608 		free(nvlpacked, M_NVLIST);
3609 		nvlist_destroy(nvrule);
3610 		nvlist_destroy(nvl);
3611 
3612 		break;
3613 	}
3614 
3615 	case DIOCCHANGERULE: {
3616 		struct pfioc_rule	*pcr = (struct pfioc_rule *)addr;
3617 		struct pf_kruleset	*ruleset;
3618 		struct pf_krule		*oldrule = NULL, *newrule = NULL;
3619 		struct pfi_kkif		*kif = NULL;
3620 		struct pf_kpooladdr	*pa;
3621 		u_int32_t		 nr = 0;
3622 		int			 rs_num;
3623 
3624 		pcr->anchor[sizeof(pcr->anchor) - 1] = 0;
3625 
3626 		if (pcr->action < PF_CHANGE_ADD_HEAD ||
3627 		    pcr->action > PF_CHANGE_GET_TICKET) {
3628 			error = EINVAL;
3629 			break;
3630 		}
3631 		if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
3632 			error = EINVAL;
3633 			break;
3634 		}
3635 
3636 		if (pcr->action != PF_CHANGE_REMOVE) {
3637 			newrule = pf_krule_alloc();
3638 			error = pf_rule_to_krule(&pcr->rule, newrule);
3639 			if (error != 0) {
3640 				pf_krule_free(newrule);
3641 				break;
3642 			}
3643 
3644 			if (newrule->ifname[0])
3645 				kif = pf_kkif_create(M_WAITOK);
3646 			pf_counter_u64_init(&newrule->evaluations, M_WAITOK);
3647 			for (int i = 0; i < 2; i++) {
3648 				pf_counter_u64_init(&newrule->packets[i], M_WAITOK);
3649 				pf_counter_u64_init(&newrule->bytes[i], M_WAITOK);
3650 			}
3651 			newrule->states_cur = counter_u64_alloc(M_WAITOK);
3652 			newrule->states_tot = counter_u64_alloc(M_WAITOK);
3653 			newrule->src_nodes = counter_u64_alloc(M_WAITOK);
3654 			newrule->cuid = td->td_ucred->cr_ruid;
3655 			newrule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
3656 			TAILQ_INIT(&newrule->nat.list);
3657 			TAILQ_INIT(&newrule->rdr.list);
3658 			TAILQ_INIT(&newrule->route.list);
3659 		}
3660 #define	ERROUT(x)	ERROUT_IOCTL(DIOCCHANGERULE_error, x)
3661 
3662 		PF_CONFIG_LOCK();
3663 		PF_RULES_WLOCK();
3664 #ifdef PF_WANT_32_TO_64_COUNTER
3665 		if (newrule != NULL) {
3666 			LIST_INSERT_HEAD(&V_pf_allrulelist, newrule, allrulelist);
3667 			newrule->allrulelinked = true;
3668 			V_pf_allrulecount++;
3669 		}
3670 #endif
3671 
3672 		if (!(pcr->action == PF_CHANGE_REMOVE ||
3673 		    pcr->action == PF_CHANGE_GET_TICKET) &&
3674 		    pcr->pool_ticket != V_ticket_pabuf)
3675 			ERROUT(EBUSY);
3676 
3677 		ruleset = pf_find_kruleset(pcr->anchor);
3678 		if (ruleset == NULL)
3679 			ERROUT(EINVAL);
3680 
3681 		rs_num = pf_get_ruleset_number(pcr->rule.action);
3682 		if (rs_num >= PF_RULESET_MAX)
3683 			ERROUT(EINVAL);
3684 
3685 		/*
3686 		 * XXXMJG: there is no guarantee that the ruleset was
3687 		 * created by the usual route of calling DIOCXBEGIN.
3688 		 * As a result it is possible the rule tree will not
3689 		 * be allocated yet. Hack around it by doing it here.
3690 		 * Note it is fine to let the tree persist in case of
3691 		 * error as it will be freed down the road on future
3692 		 * updates (if need be).
3693 		 */
3694 		if (ruleset->rules[rs_num].active.tree == NULL) {
3695 			ruleset->rules[rs_num].active.tree = pf_rule_tree_alloc(M_NOWAIT);
3696 			if (ruleset->rules[rs_num].active.tree == NULL) {
3697 				ERROUT(ENOMEM);
3698 			}
3699 		}
3700 
3701 		if (pcr->action == PF_CHANGE_GET_TICKET) {
3702 			pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
3703 			ERROUT(0);
3704 		} else if (pcr->ticket !=
3705 			    ruleset->rules[rs_num].active.ticket)
3706 				ERROUT(EINVAL);
3707 
3708 		if (pcr->action != PF_CHANGE_REMOVE) {
3709 			if (newrule->ifname[0]) {
3710 				newrule->kif = pfi_kkif_attach(kif,
3711 				    newrule->ifname);
3712 				kif = NULL;
3713 				pfi_kkif_ref(newrule->kif);
3714 			} else
3715 				newrule->kif = NULL;
3716 
3717 			if (newrule->rtableid > 0 &&
3718 			    newrule->rtableid >= rt_numfibs)
3719 				error = EBUSY;
3720 
3721 #ifdef ALTQ
3722 			/* set queue IDs */
3723 			if (newrule->qname[0] != 0) {
3724 				if ((newrule->qid =
3725 				    pf_qname2qid(newrule->qname)) == 0)
3726 					error = EBUSY;
3727 				else if (newrule->pqname[0] != 0) {
3728 					if ((newrule->pqid =
3729 					    pf_qname2qid(newrule->pqname)) == 0)
3730 						error = EBUSY;
3731 				} else
3732 					newrule->pqid = newrule->qid;
3733 			}
3734 #endif /* ALTQ */
3735 			if (newrule->tagname[0])
3736 				if ((newrule->tag =
3737 				    pf_tagname2tag(newrule->tagname)) == 0)
3738 					error = EBUSY;
3739 			if (newrule->match_tagname[0])
3740 				if ((newrule->match_tag = pf_tagname2tag(
3741 				    newrule->match_tagname)) == 0)
3742 					error = EBUSY;
3743 			if (newrule->rt && !newrule->direction)
3744 				error = EINVAL;
3745 			if (!newrule->log)
3746 				newrule->logif = 0;
3747 			if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af))
3748 				error = ENOMEM;
3749 			if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af))
3750 				error = ENOMEM;
3751 			if (pf_kanchor_setup(newrule, ruleset, pcr->anchor_call))
3752 				error = EINVAL;
3753 			for (int i = 0; i < 3; i++) {
3754 				TAILQ_FOREACH(pa, &V_pf_pabuf[i], entries)
3755 					if (pa->addr.type == PF_ADDR_TABLE) {
3756 						pa->addr.p.tbl =
3757 						    pfr_attach_table(ruleset,
3758 						    pa->addr.v.tblname);
3759 						if (pa->addr.p.tbl == NULL)
3760 							error = ENOMEM;
3761 					}
3762 			}
3763 
3764 			newrule->overload_tbl = NULL;
3765 			if (newrule->overload_tblname[0]) {
3766 				if ((newrule->overload_tbl = pfr_attach_table(
3767 				    ruleset, newrule->overload_tblname)) ==
3768 				    NULL)
3769 					error = EINVAL;
3770 				else
3771 					newrule->overload_tbl->pfrkt_flags |=
3772 					    PFR_TFLAG_ACTIVE;
3773 			}
3774 
3775 			pf_mv_kpool(&V_pf_pabuf[0], &newrule->nat.list);
3776 			pf_mv_kpool(&V_pf_pabuf[1], &newrule->rdr.list);
3777 			pf_mv_kpool(&V_pf_pabuf[2], &newrule->route.list);
3778 			if ((newrule->action == PF_NAT ||
3779 			    newrule->action == PF_RDR ||
3780 			    newrule->action == PF_BINAT ||
3781 			    newrule->rt > PF_NOPFROUTE) &&
3782 			    !newrule->anchor &&
3783 			    TAILQ_FIRST(&newrule->rdr.list) == NULL)
3784 				error = EINVAL;
3785 
3786 			if (error) {
3787 				pf_free_rule(newrule);
3788 				PF_RULES_WUNLOCK();
3789 				PF_CONFIG_UNLOCK();
3790 				break;
3791 			}
3792 
3793 			newrule->nat.cur = TAILQ_FIRST(&newrule->nat.list);
3794 			newrule->rdr.cur = TAILQ_FIRST(&newrule->rdr.list);
3795 		}
3796 		pf_empty_kpool(&V_pf_pabuf[0]);
3797 		pf_empty_kpool(&V_pf_pabuf[1]);
3798 		pf_empty_kpool(&V_pf_pabuf[2]);
3799 
3800 		if (pcr->action == PF_CHANGE_ADD_HEAD)
3801 			oldrule = TAILQ_FIRST(
3802 			    ruleset->rules[rs_num].active.ptr);
3803 		else if (pcr->action == PF_CHANGE_ADD_TAIL)
3804 			oldrule = TAILQ_LAST(
3805 			    ruleset->rules[rs_num].active.ptr, pf_krulequeue);
3806 		else {
3807 			oldrule = TAILQ_FIRST(
3808 			    ruleset->rules[rs_num].active.ptr);
3809 			while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
3810 				oldrule = TAILQ_NEXT(oldrule, entries);
3811 			if (oldrule == NULL) {
3812 				if (newrule != NULL)
3813 					pf_free_rule(newrule);
3814 				PF_RULES_WUNLOCK();
3815 				PF_CONFIG_UNLOCK();
3816 				error = EINVAL;
3817 				break;
3818 			}
3819 		}
3820 
3821 		if (pcr->action == PF_CHANGE_REMOVE) {
3822 			pf_unlink_rule(ruleset->rules[rs_num].active.ptr,
3823 			    oldrule);
3824 			RB_REMOVE(pf_krule_global,
3825 			    ruleset->rules[rs_num].active.tree, oldrule);
3826 			ruleset->rules[rs_num].active.rcount--;
3827 		} else {
3828 			pf_hash_rule(newrule);
3829 			if (RB_INSERT(pf_krule_global,
3830 			    ruleset->rules[rs_num].active.tree, newrule) != NULL) {
3831 				pf_free_rule(newrule);
3832 				PF_RULES_WUNLOCK();
3833 				PF_CONFIG_UNLOCK();
3834 				error = EEXIST;
3835 				break;
3836 			}
3837 
3838 			if (oldrule == NULL)
3839 				TAILQ_INSERT_TAIL(
3840 				    ruleset->rules[rs_num].active.ptr,
3841 				    newrule, entries);
3842 			else if (pcr->action == PF_CHANGE_ADD_HEAD ||
3843 			    pcr->action == PF_CHANGE_ADD_BEFORE)
3844 				TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
3845 			else
3846 				TAILQ_INSERT_AFTER(
3847 				    ruleset->rules[rs_num].active.ptr,
3848 				    oldrule, newrule, entries);
3849 			ruleset->rules[rs_num].active.rcount++;
3850 		}
3851 
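		/*
		 * Renumber the ruleset and recompute skip steps; the list
		 * just changed.
		 */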
3852 		nr = 0;
3853 		TAILQ_FOREACH(oldrule,
3854 		    ruleset->rules[rs_num].active.ptr, entries)
3855 			oldrule->nr = nr++;
3856 
3857 		ruleset->rules[rs_num].active.ticket++;
3858 
3859 		pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
3860 		pf_remove_if_empty_kruleset(ruleset);
3861 
3862 		PF_RULES_WUNLOCK();
3863 		PF_CONFIG_UNLOCK();
3864 		break;
3865 
3866 #undef ERROUT
3867 DIOCCHANGERULE_error:
3868 		PF_RULES_WUNLOCK();
3869 		PF_CONFIG_UNLOCK();
3870 		pf_krule_free(newrule);
3871 		pf_kkif_free(kif);
3872 		break;
3873 	}
3874 
3875 	case DIOCCLRSTATESNV: {
3876 		error = pf_clearstates_nv((struct pfioc_nv *)addr);
3877 		break;
3878 	}
3879 
3880 	case DIOCKILLSTATESNV: {
3881 		error = pf_killstates_nv((struct pfioc_nv *)addr);
3882 		break;
3883 	}
3884 
3885 	case DIOCADDSTATE: {
3886 		struct pfioc_state		*ps = (struct pfioc_state *)addr;
3887 		struct pfsync_state_1301	*sp = &ps->state;
3888 
3889 		if (sp->timeout >= PFTM_MAX) {
3890 			error = EINVAL;
3891 			break;
3892 		}
3893 		if (V_pfsync_state_import_ptr != NULL) {
3894 			PF_RULES_RLOCK();
3895 			error = V_pfsync_state_import_ptr(
3896 			    (union pfsync_state_union *)sp, PFSYNC_SI_IOCTL,
3897 			    PFSYNC_MSG_VERSION_1301);
3898 			PF_RULES_RUNLOCK();
3899 		} else
3900 			error = EOPNOTSUPP;
3901 		break;
3902 	}
3903 
3904 	case DIOCGETSTATE: {
3905 		struct pfioc_state	*ps = (struct pfioc_state *)addr;
3906 		struct pf_kstate	*s;
3907 
3908 		s = pf_find_state_byid(ps->state.id, ps->state.creatorid);
3909 		if (s == NULL) {
3910 			error = ENOENT;
3911 			break;
3912 		}
3913 
3914 		pfsync_state_export((union pfsync_state_union*)&ps->state,
3915 		    s, PFSYNC_MSG_VERSION_1301);
3916 		PF_STATE_UNLOCK(s);
3917 		break;
3918 	}
3919 
3920 	case DIOCGETSTATENV: {
3921 		error = pf_getstate((struct pfioc_nv *)addr);
3922 		break;
3923 	}
3924 
3925 #ifdef COMPAT_FREEBSD14
3926 	case DIOCGETSTATES: {
3927 		struct pfioc_states	*ps = (struct pfioc_states *)addr;
3928 		struct pf_kstate	*s;
3929 		struct pfsync_state_1301	*pstore, *p;
3930 		int			 i, nr;
3931 		size_t			 slice_count = 16, count;
3932 		void			*out;
3933 
3934 		if (ps->ps_len <= 0) {
3935 			nr = uma_zone_get_cur(V_pf_state_z);
3936 			ps->ps_len = sizeof(struct pfsync_state_1301) * nr;
3937 			break;
3938 		}
3939 
3940 		out = ps->ps_states;
3941 		pstore = mallocarray(slice_count,
3942 		    sizeof(struct pfsync_state_1301), M_TEMP, M_WAITOK | M_ZERO);
3943 		nr = 0;
3944 
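		/*
		 * Two passes per hash bucket: count the live states under
		 * the row lock, growing the scratch buffer and retrying the
		 * bucket if they don't fit, then export and copy out bucket
		 * by bucket.
		 */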
3945 		for (i = 0; i <= V_pf_hashmask; i++) {
3946 			struct pf_idhash *ih = &V_pf_idhash[i];
3947 
3948 DIOCGETSTATES_retry:
3949 			p = pstore;
3950 
3951 			if (LIST_EMPTY(&ih->states))
3952 				continue;
3953 
3954 			PF_HASHROW_LOCK(ih);
3955 			count = 0;
3956 			LIST_FOREACH(s, &ih->states, entry) {
3957 				if (s->timeout == PFTM_UNLINKED)
3958 					continue;
3959 				count++;
3960 			}
3961 
3962 			if (count > slice_count) {
3963 				PF_HASHROW_UNLOCK(ih);
3964 				free(pstore, M_TEMP);
3965 				slice_count = count * 2;
3966 				pstore = mallocarray(slice_count,
3967 				    sizeof(struct pfsync_state_1301), M_TEMP,
3968 				    M_WAITOK | M_ZERO);
3969 				goto DIOCGETSTATES_retry;
3970 			}
3971 
3972 			if ((nr+count) * sizeof(*p) > ps->ps_len) {
3973 				PF_HASHROW_UNLOCK(ih);
3974 				goto DIOCGETSTATES_full;
3975 			}
3976 
3977 			LIST_FOREACH(s, &ih->states, entry) {
3978 				if (s->timeout == PFTM_UNLINKED)
3979 					continue;
3980 
3981 				pfsync_state_export((union pfsync_state_union*)p,
3982 				    s, PFSYNC_MSG_VERSION_1301);
3983 				p++;
3984 				nr++;
3985 			}
3986 			PF_HASHROW_UNLOCK(ih);
3987 			error = copyout(pstore, out,
3988 			    sizeof(struct pfsync_state_1301) * count);
3989 			if (error)
3990 				break;
3991 			out = ps->ps_states + nr;
3992 		}
3993 DIOCGETSTATES_full:
3994 		ps->ps_len = sizeof(struct pfsync_state_1301) * nr;
3995 		free(pstore, M_TEMP);
3996 
3997 		break;
3998 	}
3999 
4000 	case DIOCGETSTATESV2: {
4001 		struct pfioc_states_v2	*ps = (struct pfioc_states_v2 *)addr;
4002 		struct pf_kstate	*s;
4003 		struct pf_state_export	*pstore, *p;
4004 		int i, nr;
4005 		size_t slice_count = 16, count;
4006 		void *out;
4007 
4008 		if (ps->ps_req_version > PF_STATE_VERSION) {
4009 			error = ENOTSUP;
4010 			break;
4011 		}
4012 
4013 		if (ps->ps_len <= 0) {
4014 			nr = uma_zone_get_cur(V_pf_state_z);
4015 			ps->ps_len = sizeof(struct pf_state_export) * nr;
4016 			break;
4017 		}
4018 
4019 		out = ps->ps_states;
4020 		pstore = mallocarray(slice_count,
4021 		    sizeof(struct pf_state_export), M_TEMP, M_WAITOK | M_ZERO);
4022 		nr = 0;
4023 
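		/*
		 * Same grow-and-retry export as DIOCGETSTATES above, but
		 * emitting the versioned struct pf_state_export format.
		 */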
4024 		for (i = 0; i <= V_pf_hashmask; i++) {
4025 			struct pf_idhash *ih = &V_pf_idhash[i];
4026 
4027 DIOCGETSTATESV2_retry:
4028 			p = pstore;
4029 
4030 			if (LIST_EMPTY(&ih->states))
4031 				continue;
4032 
4033 			PF_HASHROW_LOCK(ih);
4034 			count = 0;
4035 			LIST_FOREACH(s, &ih->states, entry) {
4036 				if (s->timeout == PFTM_UNLINKED)
4037 					continue;
4038 				count++;
4039 			}
4040 
4041 			if (count > slice_count) {
4042 				PF_HASHROW_UNLOCK(ih);
4043 				free(pstore, M_TEMP);
4044 				slice_count = count * 2;
4045 				pstore = mallocarray(slice_count,
4046 				    sizeof(struct pf_state_export), M_TEMP,
4047 				    M_WAITOK | M_ZERO);
4048 				goto DIOCGETSTATESV2_retry;
4049 			}
4050 
4051 			if ((nr+count) * sizeof(*p) > ps->ps_len) {
4052 				PF_HASHROW_UNLOCK(ih);
4053 				goto DIOCGETSTATESV2_full;
4054 			}
4055 
4056 			LIST_FOREACH(s, &ih->states, entry) {
4057 				if (s->timeout == PFTM_UNLINKED)
4058 					continue;
4059 
4060 				pf_state_export(p, s);
4061 				p++;
4062 				nr++;
4063 			}
4064 			PF_HASHROW_UNLOCK(ih);
4065 			error = copyout(pstore, out,
4066 			    sizeof(struct pf_state_export) * count);
4067 			if (error)
4068 				break;
4069 			out = ps->ps_states + nr;
4070 		}
4071 DIOCGETSTATESV2_full:
4072 		ps->ps_len = nr * sizeof(struct pf_state_export);
4073 		free(pstore, M_TEMP);
4074 
4075 		break;
4076 	}
4077 #endif /* COMPAT_FREEBSD14 */
4078 	case DIOCGETSTATUSNV: {
4079 		error = pf_getstatus((struct pfioc_nv *)addr);
4080 		break;
4081 	}
4082 
4083 	case DIOCSETSTATUSIF: {
4084 		struct pfioc_if	*pi = (struct pfioc_if *)addr;
4085 
4086 		if (pi->ifname[0] == 0) {
4087 			bzero(V_pf_status.ifname, IFNAMSIZ);
4088 			break;
4089 		}
4090 		PF_RULES_WLOCK();
4091 		error = pf_user_strcpy(V_pf_status.ifname, pi->ifname, IFNAMSIZ);
4092 		PF_RULES_WUNLOCK();
4093 		break;
4094 	}
4095 
4096 	case DIOCCLRSTATUS: {
4097 		pf_ioctl_clear_status();
4098 		break;
4099 	}
4100 
4101 	case DIOCNATLOOK: {
4102 		struct pfioc_natlook	*pnl = (struct pfioc_natlook *)addr;
4103 		struct pf_state_key	*sk;
4104 		struct pf_kstate	*state;
4105 		struct pf_state_key_cmp	 key;
4106 		int			 m = 0, direction = pnl->direction;
4107 		int			 sidx, didx;
4108 
4109 		/* NATLOOK src and dst are reversed, so reverse sidx/didx */
4110 		sidx = (direction == PF_IN) ? 1 : 0;
4111 		didx = (direction == PF_IN) ? 0 : 1;
4112 
4113 		if (!pnl->proto ||
4114 		    PF_AZERO(&pnl->saddr, pnl->af) ||
4115 		    PF_AZERO(&pnl->daddr, pnl->af) ||
4116 		    ((pnl->proto == IPPROTO_TCP ||
4117 		    pnl->proto == IPPROTO_UDP) &&
4118 		    (!pnl->dport || !pnl->sport)))
4119 			error = EINVAL;
4120 		else {
4121 			bzero(&key, sizeof(key));
4122 			key.af = pnl->af;
4123 			key.proto = pnl->proto;
4124 			PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af);
4125 			key.port[sidx] = pnl->sport;
4126 			PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af);
4127 			key.port[didx] = pnl->dport;
4128 
4129 			state = pf_find_state_all(&key, direction, &m);
4130 			if (state == NULL) {
4131 				error = ENOENT;
4132 			} else {
4133 				if (m > 1) {
4134 					PF_STATE_UNLOCK(state);
4135 					error = E2BIG;	/* more than one state */
4136 				} else {
4137 					sk = state->key[sidx];
4138 					PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af);
4139 					pnl->rsport = sk->port[sidx];
4140 					PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af);
4141 					pnl->rdport = sk->port[didx];
4142 					PF_STATE_UNLOCK(state);
4143 				}
4144 			}
4145 		}
4146 		break;
4147 	}
4148 
4149 	case DIOCSETTIMEOUT: {
4150 		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;
4151 
4152 		error = pf_ioctl_set_timeout(pt->timeout, pt->seconds,
4153 		    &pt->seconds);
4154 		break;
4155 	}
4156 
4157 	case DIOCGETTIMEOUT: {
4158 		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;
4159 
4160 		error = pf_ioctl_get_timeout(pt->timeout, &pt->seconds);
4161 		break;
4162 	}
4163 
4164 	case DIOCGETLIMIT: {
4165 		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;
4166 
4167 		error = pf_ioctl_get_limit(pl->index, &pl->limit);
4168 		break;
4169 	}
4170 
4171 	case DIOCSETLIMIT: {
4172 		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;
4173 		unsigned int old_limit;
4174 
4175 		error = pf_ioctl_set_limit(pl->index, pl->limit, &old_limit);
4176 		pl->limit = old_limit;
4177 		break;
4178 	}
4179 
4180 	case DIOCSETDEBUG: {
4181 		u_int32_t	*level = (u_int32_t *)addr;
4182 
4183 		PF_RULES_WLOCK();
4184 		V_pf_status.debug = *level;
4185 		PF_RULES_WUNLOCK();
4186 		break;
4187 	}
4188 
4189 	case DIOCCLRRULECTRS: {
4190 		/* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
4191 		struct pf_kruleset	*ruleset = &pf_main_ruleset;
4192 		struct pf_krule		*rule;
4193 
4194 		PF_RULES_WLOCK();
4195 		TAILQ_FOREACH(rule,
4196 		    ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
4197 			pf_counter_u64_zero(&rule->evaluations);
4198 			for (int i = 0; i < 2; i++) {
4199 				pf_counter_u64_zero(&rule->packets[i]);
4200 				pf_counter_u64_zero(&rule->bytes[i]);
4201 			}
4202 		}
4203 		PF_RULES_WUNLOCK();
4204 		break;
4205 	}
4206 
4207 	case DIOCGIFSPEEDV0:
4208 	case DIOCGIFSPEEDV1: {
4209 		struct pf_ifspeed_v1	*psp = (struct pf_ifspeed_v1 *)addr;
4210 		struct pf_ifspeed_v1	ps;
4211 		struct ifnet		*ifp;
4212 
4213 		if (psp->ifname[0] == '\0') {
4214 			error = EINVAL;
4215 			break;
4216 		}
4217 
4218 		error = pf_user_strcpy(ps.ifname, psp->ifname, IFNAMSIZ);
4219 		if (error != 0)
4220 			break;
4221 		ifp = ifunit(ps.ifname);
4222 		if (ifp != NULL) {
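			/*
			 * V0 callers only have a 32-bit field, so clamp;
			 * V1 callers also get the full 64-bit rate below.
			 */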
4223 			psp->baudrate32 =
4224 			    (u_int32_t)uqmin(ifp->if_baudrate, UINT_MAX);
4225 			if (cmd == DIOCGIFSPEEDV1)
4226 				psp->baudrate = ifp->if_baudrate;
4227 		} else {
4228 			error = EINVAL;
4229 		}
4230 		break;
4231 	}
4232 
4233 #ifdef ALTQ
4234 	case DIOCSTARTALTQ: {
4235 		struct pf_altq		*altq;
4236 
4237 		PF_RULES_WLOCK();
4238 		/* enable all altq interfaces on active list */
4239 		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
4240 			if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
4241 				error = pf_enable_altq(altq);
4242 				if (error != 0)
4243 					break;
4244 			}
4245 		}
4246 		if (error == 0)
4247 			V_pf_altq_running = 1;
4248 		PF_RULES_WUNLOCK();
4249 		DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
4250 		break;
4251 	}
4252 
4253 	case DIOCSTOPALTQ: {
4254 		struct pf_altq		*altq;
4255 
4256 		PF_RULES_WLOCK();
4257 		/* disable all altq interfaces on active list */
4258 		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
4259 			if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
4260 				error = pf_disable_altq(altq);
4261 				if (error != 0)
4262 					break;
4263 			}
4264 		}
4265 		if (error == 0)
4266 			V_pf_altq_running = 0;
4267 		PF_RULES_WUNLOCK();
4268 		DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
4269 		break;
4270 	}
4271 
4272 	case DIOCADDALTQV0:
4273 	case DIOCADDALTQV1: {
4274 		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
4275 		struct pf_altq		*altq, *a;
4276 		struct ifnet		*ifp;
4277 
4278 		altq = malloc(sizeof(*altq), M_PFALTQ, M_WAITOK | M_ZERO);
4279 		error = pf_import_kaltq(pa, altq, IOCPARM_LEN(cmd));
4280 		if (error)
4281 			break;
4282 		altq->local_flags = 0;
4283 
4284 		PF_RULES_WLOCK();
4285 		if (pa->ticket != V_ticket_altqs_inactive) {
4286 			PF_RULES_WUNLOCK();
4287 			free(altq, M_PFALTQ);
4288 			error = EBUSY;
4289 			break;
4290 		}
4291 
4292 		/*
4293 		 * if this is for a queue, find the discipline and
4294 		 * copy the necessary fields
4295 		 */
4296 		if (altq->qname[0] != 0) {
4297 			if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
4298 				PF_RULES_WUNLOCK();
4299 				error = EBUSY;
4300 				free(altq, M_PFALTQ);
4301 				break;
4302 			}
4303 			altq->altq_disc = NULL;
4304 			TAILQ_FOREACH(a, V_pf_altq_ifs_inactive, entries) {
4305 				if (strncmp(a->ifname, altq->ifname,
4306 				    IFNAMSIZ) == 0) {
4307 					altq->altq_disc = a->altq_disc;
4308 					break;
4309 				}
4310 			}
4311 		}
4312 
4313 		if ((ifp = ifunit(altq->ifname)) == NULL)
4314 			altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
4315 		else
4316 			error = altq_add(ifp, altq);
4317 
4318 		if (error) {
4319 			PF_RULES_WUNLOCK();
4320 			free(altq, M_PFALTQ);
4321 			break;
4322 		}
4323 
4324 		if (altq->qname[0] != 0)
4325 			TAILQ_INSERT_TAIL(V_pf_altqs_inactive, altq, entries);
4326 		else
4327 			TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, altq, entries);
4328 		/* version error check done on import above */
4329 		pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
4330 		PF_RULES_WUNLOCK();
4331 		break;
4332 	}
4333 
4334 	case DIOCGETALTQSV0:
4335 	case DIOCGETALTQSV1: {
4336 		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
4337 		struct pf_altq		*altq;
4338 
4339 		PF_RULES_RLOCK();
4340 		pa->nr = 0;
4341 		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries)
4342 			pa->nr++;
4343 		TAILQ_FOREACH(altq, V_pf_altqs_active, entries)
4344 			pa->nr++;
4345 		pa->ticket = V_ticket_altqs_active;
4346 		PF_RULES_RUNLOCK();
4347 		break;
4348 	}
4349 
4350 	case DIOCGETALTQV0:
4351 	case DIOCGETALTQV1: {
4352 		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
4353 		struct pf_altq		*altq;
4354 
4355 		PF_RULES_RLOCK();
4356 		if (pa->ticket != V_ticket_altqs_active) {
4357 			PF_RULES_RUNLOCK();
4358 			error = EBUSY;
4359 			break;
4360 		}
4361 		altq = pf_altq_get_nth_active(pa->nr);
4362 		if (altq == NULL) {
4363 			PF_RULES_RUNLOCK();
4364 			error = EBUSY;
4365 			break;
4366 		}
4367 		pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
4368 		PF_RULES_RUNLOCK();
4369 		break;
4370 	}
4371 
4372 	case DIOCCHANGEALTQV0:
4373 	case DIOCCHANGEALTQV1:
4374 		/* CHANGEALTQ not supported yet! */
4375 		error = ENODEV;
4376 		break;
4377 
4378 	case DIOCGETQSTATSV0:
4379 	case DIOCGETQSTATSV1: {
4380 		struct pfioc_qstats_v1	*pq = (struct pfioc_qstats_v1 *)addr;
4381 		struct pf_altq		*altq;
4382 		int			 nbytes;
4383 		u_int32_t		 version;
4384 
4385 		PF_RULES_RLOCK();
4386 		if (pq->ticket != V_ticket_altqs_active) {
4387 			PF_RULES_RUNLOCK();
4388 			error = EBUSY;
4389 			break;
4390 		}
4391 		nbytes = pq->nbytes;
4392 		altq = pf_altq_get_nth_active(pq->nr);
4393 		if (altq == NULL) {
4394 			PF_RULES_RUNLOCK();
4395 			error = EBUSY;
4396 			break;
4397 		}
4398 
4399 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) != 0) {
4400 			PF_RULES_RUNLOCK();
4401 			error = ENXIO;
4402 			break;
4403 		}
4404 		PF_RULES_RUNLOCK();
4405 		if (cmd == DIOCGETQSTATSV0)
4406 			version = 0;  /* DIOCGETQSTATSV0 means stats struct v0 */
4407 		else
4408 			version = pq->version;
4409 		error = altq_getqstats(altq, pq->buf, &nbytes, version);
4410 		if (error == 0) {
4411 			pq->scheduler = altq->scheduler;
4412 			pq->nbytes = nbytes;
4413 		}
4414 		break;
4415 	}
4416 #endif /* ALTQ */
4417 
4418 	case DIOCBEGINADDRS: {
4419 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4420 
4421 		error = pf_ioctl_begin_addrs(&pp->ticket);
4422 		break;
4423 	}
4424 
4425 	case DIOCADDADDR: {
4426 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4427 		struct pf_nl_pooladdr npp = {};
4428 
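		/*
		 * The leading fields of pf_nl_pooladdr mirror pfioc_pooladdr
		 * and 'which' must lie past the copied prefix (the memcpy()
		 * would otherwise clobber it), so assigning it first is
		 * deliberate.
		 */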
4429 		npp.which = PF_RDR;
4430 		memcpy(&npp, pp, sizeof(*pp));
4431 		error = pf_ioctl_add_addr(&npp);
4432 		break;
4433 	}
4434 
4435 	case DIOCGETADDRS: {
4436 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4437 		struct pf_nl_pooladdr npp = {};
4438 
4439 		npp.which = PF_RDR;
4440 		memcpy(&npp, pp, sizeof(*pp));
4441 		error = pf_ioctl_get_addrs(&npp);
4442 		memcpy(pp, &npp, sizeof(*pp));
4443 
4444 		break;
4445 	}
4446 
4447 	case DIOCGETADDR: {
4448 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4449 		struct pf_nl_pooladdr npp = {};
4450 
4451 		npp.which = PF_RDR;
4452 		memcpy(&npp, pp, sizeof(*pp));
4453 		error = pf_ioctl_get_addr(&npp);
4454 		memcpy(pp, &npp, sizeof(*pp));
4455 
4456 		break;
4457 	}
4458 
4459 	case DIOCCHANGEADDR: {
4460 		struct pfioc_pooladdr	*pca = (struct pfioc_pooladdr *)addr;
4461 		struct pf_kpool		*pool;
4462 		struct pf_kpooladdr	*oldpa = NULL, *newpa = NULL;
4463 		struct pf_kruleset	*ruleset;
4464 		struct pfi_kkif		*kif = NULL;
4465 
4466 		pca->anchor[sizeof(pca->anchor) - 1] = 0;
4467 
4468 		if (pca->action < PF_CHANGE_ADD_HEAD ||
4469 		    pca->action > PF_CHANGE_REMOVE) {
4470 			error = EINVAL;
4471 			break;
4472 		}
4473 		if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
4474 		    pca->addr.addr.type != PF_ADDR_DYNIFTL &&
4475 		    pca->addr.addr.type != PF_ADDR_TABLE) {
4476 			error = EINVAL;
4477 			break;
4478 		}
4479 		if (pca->addr.addr.p.dyn != NULL) {
4480 			error = EINVAL;
4481 			break;
4482 		}
4483 
4484 		if (pca->action != PF_CHANGE_REMOVE) {
4485 #ifndef INET
4486 			if (pca->af == AF_INET) {
4487 				error = EAFNOSUPPORT;
4488 				break;
4489 			}
4490 #endif /* INET */
4491 #ifndef INET6
4492 			if (pca->af == AF_INET6) {
4493 				error = EAFNOSUPPORT;
4494 				break;
4495 			}
4496 #endif /* INET6 */
4497 			newpa = malloc(sizeof(*newpa), M_PFRULE, M_WAITOK);
4498 			bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
4499 			if (newpa->ifname[0])
4500 				kif = pf_kkif_create(M_WAITOK);
4501 			newpa->kif = NULL;
4502 		}
4503 #define	ERROUT(x)	ERROUT_IOCTL(DIOCCHANGEADDR_error, x)
4504 		PF_RULES_WLOCK();
4505 		ruleset = pf_find_kruleset(pca->anchor);
4506 		if (ruleset == NULL)
4507 			ERROUT(EBUSY);
4508 
4509 		pool = pf_get_kpool(pca->anchor, pca->ticket, pca->r_action,
4510 		    pca->r_num, pca->r_last, 1, 1, PF_RDR);
4511 		if (pool == NULL)
4512 			ERROUT(EBUSY);
4513 
4514 		if (pca->action != PF_CHANGE_REMOVE) {
4515 			if (newpa->ifname[0]) {
4516 				newpa->kif = pfi_kkif_attach(kif, newpa->ifname);
4517 				pfi_kkif_ref(newpa->kif);
4518 				kif = NULL;
4519 			}
4520 
4521 			switch (newpa->addr.type) {
4522 			case PF_ADDR_DYNIFTL:
4523 				error = pfi_dynaddr_setup(&newpa->addr,
4524 				    pca->af);
4525 				break;
4526 			case PF_ADDR_TABLE:
4527 				newpa->addr.p.tbl = pfr_attach_table(ruleset,
4528 				    newpa->addr.v.tblname);
4529 				if (newpa->addr.p.tbl == NULL)
4530 					error = ENOMEM;
4531 				break;
4532 			}
4533 			if (error)
4534 				goto DIOCCHANGEADDR_error;
4535 		}
4536 
4537 		switch (pca->action) {
4538 		case PF_CHANGE_ADD_HEAD:
4539 			oldpa = TAILQ_FIRST(&pool->list);
4540 			break;
4541 		case PF_CHANGE_ADD_TAIL:
4542 			oldpa = TAILQ_LAST(&pool->list, pf_kpalist);
4543 			break;
4544 		default:
4545 			oldpa = TAILQ_FIRST(&pool->list);
4546 			for (int i = 0; oldpa && i < pca->nr; i++)
4547 				oldpa = TAILQ_NEXT(oldpa, entries);
4548 
4549 			if (oldpa == NULL)
4550 				ERROUT(EINVAL);
4551 		}
4552 
4553 		if (pca->action == PF_CHANGE_REMOVE) {
4554 			TAILQ_REMOVE(&pool->list, oldpa, entries);
4555 			switch (oldpa->addr.type) {
4556 			case PF_ADDR_DYNIFTL:
4557 				pfi_dynaddr_remove(oldpa->addr.p.dyn);
4558 				break;
4559 			case PF_ADDR_TABLE:
4560 				pfr_detach_table(oldpa->addr.p.tbl);
4561 				break;
4562 			}
4563 			if (oldpa->kif)
4564 				pfi_kkif_unref(oldpa->kif);
4565 			free(oldpa, M_PFRULE);
4566 		} else {
4567 			if (oldpa == NULL)
4568 				TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
4569 			else if (pca->action == PF_CHANGE_ADD_HEAD ||
4570 			    pca->action == PF_CHANGE_ADD_BEFORE)
4571 				TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
4572 			else
4573 				TAILQ_INSERT_AFTER(&pool->list, oldpa,
4574 				    newpa, entries);
4575 		}
4576 
4577 		pool->cur = TAILQ_FIRST(&pool->list);
4578 		PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, pca->af);
4579 		PF_RULES_WUNLOCK();
4580 		break;
4581 
4582 #undef ERROUT
4583 DIOCCHANGEADDR_error:
4584 		if (newpa != NULL) {
4585 			if (newpa->kif)
4586 				pfi_kkif_unref(newpa->kif);
4587 			free(newpa, M_PFRULE);
4588 		}
4589 		PF_RULES_WUNLOCK();
4590 		pf_kkif_free(kif);
4591 		break;
4592 	}
4593 
4594 	case DIOCGETRULESETS: {
4595 		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
4596 
4597 		pr->path[sizeof(pr->path) - 1] = 0;
4598 
4599 		error = pf_ioctl_get_rulesets(pr);
4600 		break;
4601 	}
4602 
4603 	case DIOCGETRULESET: {
4604 		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
4605 
4606 		pr->path[sizeof(pr->path) - 1] = 0;
4607 
4608 		error = pf_ioctl_get_ruleset(pr);
4609 		break;
4610 	}
4611 
4612 	case DIOCRCLRTABLES: {
4613 		struct pfioc_table *io = (struct pfioc_table *)addr;
4614 
4615 		if (io->pfrio_esize != 0) {
4616 			error = ENODEV;
4617 			break;
4618 		}
4619 		PF_RULES_WLOCK();
4620 		error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
4621 		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
4622 		PF_RULES_WUNLOCK();
4623 		break;
4624 	}
4625 
4626 	case DIOCRADDTABLES: {
4627 		struct pfioc_table *io = (struct pfioc_table *)addr;
4628 		struct pfr_table *pfrts;
4629 		size_t totlen;
4630 
4631 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4632 			error = ENODEV;
4633 			break;
4634 		}
4635 
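		/*
		 * Bound the element count so the byte total computed below
		 * cannot overflow.
		 */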
4636 		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4637 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4638 			error = ENOMEM;
4639 			break;
4640 		}
4641 
4642 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4643 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4644 		    M_TEMP, M_WAITOK);
4645 		error = copyin(io->pfrio_buffer, pfrts, totlen);
4646 		if (error) {
4647 			free(pfrts, M_TEMP);
4648 			break;
4649 		}
4650 		PF_RULES_WLOCK();
4651 		error = pfr_add_tables(pfrts, io->pfrio_size,
4652 		    &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4653 		PF_RULES_WUNLOCK();
4654 		free(pfrts, M_TEMP);
4655 		break;
4656 	}
4657 
4658 	case DIOCRDELTABLES: {
4659 		struct pfioc_table *io = (struct pfioc_table *)addr;
4660 		struct pfr_table *pfrts;
4661 		size_t totlen;
4662 
4663 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4664 			error = ENODEV;
4665 			break;
4666 		}
4667 
4668 		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4669 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4670 			error = ENOMEM;
4671 			break;
4672 		}
4673 
4674 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4675 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4676 		    M_TEMP, M_WAITOK);
4677 		error = copyin(io->pfrio_buffer, pfrts, totlen);
4678 		if (error) {
4679 			free(pfrts, M_TEMP);
4680 			break;
4681 		}
4682 		PF_RULES_WLOCK();
4683 		error = pfr_del_tables(pfrts, io->pfrio_size,
4684 		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4685 		PF_RULES_WUNLOCK();
4686 		free(pfrts, M_TEMP);
4687 		break;
4688 	}
4689 
4690 	case DIOCRGETTABLES: {
4691 		struct pfioc_table *io = (struct pfioc_table *)addr;
4692 		struct pfr_table *pfrts;
4693 		size_t totlen;
4694 		int n;
4695 
4696 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4697 			error = ENODEV;
4698 			break;
4699 		}
4700 		PF_RULES_RLOCK();
4701 		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4702 		if (n < 0) {
4703 			PF_RULES_RUNLOCK();
4704 			error = EINVAL;
4705 			break;
4706 		}
4707 		io->pfrio_size = min(io->pfrio_size, n);
4708 
4709 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4710 
4711 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4712 		    M_TEMP, M_NOWAIT | M_ZERO);
4713 		if (pfrts == NULL) {
4714 			error = ENOMEM;
4715 			PF_RULES_RUNLOCK();
4716 			break;
4717 		}
4718 		error = pfr_get_tables(&io->pfrio_table, pfrts,
4719 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4720 		PF_RULES_RUNLOCK();
4721 		if (error == 0)
4722 			error = copyout(pfrts, io->pfrio_buffer, totlen);
4723 		free(pfrts, M_TEMP);
4724 		break;
4725 	}
4726 
4727 	case DIOCRGETTSTATS: {
4728 		struct pfioc_table *io = (struct pfioc_table *)addr;
4729 		struct pfr_tstats *pfrtstats;
4730 		size_t totlen;
4731 		int n;
4732 
4733 		if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
4734 			error = ENODEV;
4735 			break;
4736 		}
4737 		PF_TABLE_STATS_LOCK();
4738 		PF_RULES_RLOCK();
4739 		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4740 		if (n < 0) {
4741 			PF_RULES_RUNLOCK();
4742 			PF_TABLE_STATS_UNLOCK();
4743 			error = EINVAL;
4744 			break;
4745 		}
4746 		io->pfrio_size = min(io->pfrio_size, n);
4747 
4748 		totlen = io->pfrio_size * sizeof(struct pfr_tstats);
4749 		pfrtstats = mallocarray(io->pfrio_size,
4750 		    sizeof(struct pfr_tstats), M_TEMP, M_NOWAIT | M_ZERO);
4751 		if (pfrtstats == NULL) {
4752 			error = ENOMEM;
4753 			PF_RULES_RUNLOCK();
4754 			PF_TABLE_STATS_UNLOCK();
4755 			break;
4756 		}
4757 		error = pfr_get_tstats(&io->pfrio_table, pfrtstats,
4758 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4759 		PF_RULES_RUNLOCK();
4760 		PF_TABLE_STATS_UNLOCK();
4761 		if (error == 0)
4762 			error = copyout(pfrtstats, io->pfrio_buffer, totlen);
4763 		free(pfrtstats, M_TEMP);
4764 		break;
4765 	}
4766 
4767 	case DIOCRCLRTSTATS: {
4768 		struct pfioc_table *io = (struct pfioc_table *)addr;
4769 		struct pfr_table *pfrts;
4770 		size_t totlen;
4771 
4772 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4773 			error = ENODEV;
4774 			break;
4775 		}
4776 
4777 		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4778 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4779 			/* We used to count tables and use the minimum required
4780 			 * size, so we didn't fail on overly large requests.
4781 			 * Keep doing so. */
4782 			io->pfrio_size = pf_ioctl_maxcount;
4783 			break;
4784 		}
4785 
4786 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4787 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4788 		    M_TEMP, M_WAITOK);
4789 		error = copyin(io->pfrio_buffer, pfrts, totlen);
4790 		if (error) {
4791 			free(pfrts, M_TEMP);
4792 			break;
4793 		}
4794 
4795 		PF_TABLE_STATS_LOCK();
4796 		PF_RULES_RLOCK();
4797 		error = pfr_clr_tstats(pfrts, io->pfrio_size,
4798 		    &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4799 		PF_RULES_RUNLOCK();
4800 		PF_TABLE_STATS_UNLOCK();
4801 		free(pfrts, M_TEMP);
4802 		break;
4803 	}
4804 
4805 	case DIOCRSETTFLAGS: {
4806 		struct pfioc_table *io = (struct pfioc_table *)addr;
4807 		struct pfr_table *pfrts;
4808 		size_t totlen;
4809 		int n;
4810 
4811 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4812 			error = ENODEV;
4813 			break;
4814 		}
4815 
4816 		PF_RULES_RLOCK();
4817 		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4818 		if (n < 0) {
4819 			PF_RULES_RUNLOCK();
4820 			error = EINVAL;
4821 			break;
4822 		}
4823 
4824 		io->pfrio_size = min(io->pfrio_size, n);
4825 		PF_RULES_RUNLOCK();
4826 
4827 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4828 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4829 		    M_TEMP, M_WAITOK);
4830 		error = copyin(io->pfrio_buffer, pfrts, totlen);
4831 		if (error) {
4832 			free(pfrts, M_TEMP);
4833 			break;
4834 		}
4835 		PF_RULES_WLOCK();
4836 		error = pfr_set_tflags(pfrts, io->pfrio_size,
4837 		    io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
4838 		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4839 		PF_RULES_WUNLOCK();
4840 		free(pfrts, M_TEMP);
4841 		break;
4842 	}
4843 
4844 	case DIOCRCLRADDRS: {
4845 		struct pfioc_table *io = (struct pfioc_table *)addr;
4846 
4847 		if (io->pfrio_esize != 0) {
4848 			error = ENODEV;
4849 			break;
4850 		}
4851 		PF_RULES_WLOCK();
4852 		error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
4853 		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
4854 		PF_RULES_WUNLOCK();
4855 		break;
4856 	}
4857 
4858 	case DIOCRADDADDRS: {
4859 		struct pfioc_table *io = (struct pfioc_table *)addr;
4860 		struct pfr_addr *pfras;
4861 		size_t totlen;
4862 
4863 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4864 			error = ENODEV;
4865 			break;
4866 		}
4867 		if (io->pfrio_size < 0 ||
4868 		    io->pfrio_size > pf_ioctl_maxcount ||
4869 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4870 			error = EINVAL;
4871 			break;
4872 		}
4873 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
4874 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4875 		    M_TEMP, M_WAITOK);
4876 		error = copyin(io->pfrio_buffer, pfras, totlen);
4877 		if (error) {
4878 			free(pfras, M_TEMP);
4879 			break;
4880 		}
4881 		PF_RULES_WLOCK();
4882 		error = pfr_add_addrs(&io->pfrio_table, pfras,
4883 		    io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
4884 		    PFR_FLAG_USERIOCTL);
4885 		PF_RULES_WUNLOCK();
4886 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
4887 			error = copyout(pfras, io->pfrio_buffer, totlen);
4888 		free(pfras, M_TEMP);
4889 		break;
4890 	}
4891 
4892 	case DIOCRDELADDRS: {
4893 		struct pfioc_table *io = (struct pfioc_table *)addr;
4894 		struct pfr_addr *pfras;
4895 		size_t totlen;
4896 
4897 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4898 			error = ENODEV;
4899 			break;
4900 		}
4901 		if (io->pfrio_size < 0 ||
4902 		    io->pfrio_size > pf_ioctl_maxcount ||
4903 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4904 			error = EINVAL;
4905 			break;
4906 		}
4907 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
4908 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4909 		    M_TEMP, M_WAITOK);
4910 		error = copyin(io->pfrio_buffer, pfras, totlen);
4911 		if (error) {
4912 			free(pfras, M_TEMP);
4913 			break;
4914 		}
4915 		PF_RULES_WLOCK();
4916 		error = pfr_del_addrs(&io->pfrio_table, pfras,
4917 		    io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
4918 		    PFR_FLAG_USERIOCTL);
4919 		PF_RULES_WUNLOCK();
4920 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
4921 			error = copyout(pfras, io->pfrio_buffer, totlen);
4922 		free(pfras, M_TEMP);
4923 		break;
4924 	}
4925 
4926 	case DIOCRSETADDRS: {
4927 		struct pfioc_table *io = (struct pfioc_table *)addr;
4928 		struct pfr_addr *pfras;
4929 		size_t totlen, count;
4930 
4931 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4932 			error = ENODEV;
4933 			break;
4934 		}
4935 		if (io->pfrio_size < 0 || io->pfrio_size2 < 0) {
4936 			error = EINVAL;
4937 			break;
4938 		}
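		/*
		 * pfrio_size2 is the caller's feedback buffer size; the
		 * feedback path may write back more entries than were passed
		 * in, so allocate for the larger of the two.
		 */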
4939 		count = max(io->pfrio_size, io->pfrio_size2);
4940 		if (count > pf_ioctl_maxcount ||
4941 		    WOULD_OVERFLOW(count, sizeof(struct pfr_addr))) {
4942 			error = EINVAL;
4943 			break;
4944 		}
4945 		totlen = count * sizeof(struct pfr_addr);
4946 		pfras = mallocarray(count, sizeof(struct pfr_addr), M_TEMP,
4947 		    M_WAITOK);
4948 		error = copyin(io->pfrio_buffer, pfras, totlen);
4949 		if (error) {
4950 			free(pfras, M_TEMP);
4951 			break;
4952 		}
4953 		PF_RULES_WLOCK();
4954 		error = pfr_set_addrs(&io->pfrio_table, pfras,
4955 		    io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
4956 		    &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
4957 		    PFR_FLAG_USERIOCTL, 0);
4958 		PF_RULES_WUNLOCK();
4959 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
4960 			error = copyout(pfras, io->pfrio_buffer, totlen);
4961 		free(pfras, M_TEMP);
4962 		break;
4963 	}
4964 
4965 	case DIOCRGETADDRS: {
4966 		struct pfioc_table *io = (struct pfioc_table *)addr;
4967 		struct pfr_addr *pfras;
4968 		size_t totlen;
4969 
4970 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4971 			error = ENODEV;
4972 			break;
4973 		}
4974 		if (io->pfrio_size < 0 ||
4975 		    io->pfrio_size > pf_ioctl_maxcount ||
4976 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4977 			error = EINVAL;
4978 			break;
4979 		}
4980 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
4981 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4982 		    M_TEMP, M_WAITOK | M_ZERO);
4983 		PF_RULES_RLOCK();
4984 		error = pfr_get_addrs(&io->pfrio_table, pfras,
4985 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4986 		PF_RULES_RUNLOCK();
4987 		if (error == 0)
4988 			error = copyout(pfras, io->pfrio_buffer, totlen);
4989 		free(pfras, M_TEMP);
4990 		break;
4991 	}
4992 
4993 	case DIOCRGETASTATS: {
4994 		struct pfioc_table *io = (struct pfioc_table *)addr;
4995 		struct pfr_astats *pfrastats;
4996 		size_t totlen;
4997 
4998 		if (io->pfrio_esize != sizeof(struct pfr_astats)) {
4999 			error = ENODEV;
5000 			break;
5001 		}
5002 		if (io->pfrio_size < 0 ||
5003 		    io->pfrio_size > pf_ioctl_maxcount ||
5004 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_astats))) {
5005 			error = EINVAL;
5006 			break;
5007 		}
5008 		totlen = io->pfrio_size * sizeof(struct pfr_astats);
5009 		pfrastats = mallocarray(io->pfrio_size,
5010 		    sizeof(struct pfr_astats), M_TEMP, M_WAITOK | M_ZERO);
5011 		PF_RULES_RLOCK();
5012 		error = pfr_get_astats(&io->pfrio_table, pfrastats,
5013 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
5014 		PF_RULES_RUNLOCK();
5015 		if (error == 0)
5016 			error = copyout(pfrastats, io->pfrio_buffer, totlen);
5017 		free(pfrastats, M_TEMP);
5018 		break;
5019 	}
5020 
5021 	case DIOCRCLRASTATS: {
5022 		struct pfioc_table *io = (struct pfioc_table *)addr;
5023 		struct pfr_addr *pfras;
5024 		size_t totlen;
5025 
5026 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
5027 			error = ENODEV;
5028 			break;
5029 		}
5030 		if (io->pfrio_size < 0 ||
5031 		    io->pfrio_size > pf_ioctl_maxcount ||
5032 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
5033 			error = EINVAL;
5034 			break;
5035 		}
5036 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
5037 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
5038 		    M_TEMP, M_WAITOK);
5039 		error = copyin(io->pfrio_buffer, pfras, totlen);
5040 		if (error) {
5041 			free(pfras, M_TEMP);
5042 			break;
5043 		}
5044 		PF_RULES_WLOCK();
5045 		error = pfr_clr_astats(&io->pfrio_table, pfras,
5046 		    io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
5047 		    PFR_FLAG_USERIOCTL);
5048 		PF_RULES_WUNLOCK();
5049 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
5050 			error = copyout(pfras, io->pfrio_buffer, totlen);
5051 		free(pfras, M_TEMP);
5052 		break;
5053 	}
5054 
5055 	case DIOCRTSTADDRS: {
5056 		struct pfioc_table *io = (struct pfioc_table *)addr;
5057 		struct pfr_addr *pfras;
5058 		size_t totlen;
5059 
5060 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
5061 			error = ENODEV;
5062 			break;
5063 		}
5064 		if (io->pfrio_size < 0 ||
5065 		    io->pfrio_size > pf_ioctl_maxcount ||
5066 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
5067 			error = EINVAL;
5068 			break;
5069 		}
5070 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
5071 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
5072 		    M_TEMP, M_WAITOK);
5073 		error = copyin(io->pfrio_buffer, pfras, totlen);
5074 		if (error) {
5075 			free(pfras, M_TEMP);
5076 			break;
5077 		}
5078 		PF_RULES_RLOCK();
5079 		error = pfr_tst_addrs(&io->pfrio_table, pfras,
5080 		    io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
5081 		    PFR_FLAG_USERIOCTL);
5082 		PF_RULES_RUNLOCK();
5083 		if (error == 0)
5084 			error = copyout(pfras, io->pfrio_buffer, totlen);
5085 		free(pfras, M_TEMP);
5086 		break;
5087 	}
5088 
5089 	case DIOCRINADEFINE: {
5090 		struct pfioc_table *io = (struct pfioc_table *)addr;
5091 		struct pfr_addr *pfras;
5092 		size_t totlen;
5093 
5094 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
5095 			error = ENODEV;
5096 			break;
5097 		}
5098 		if (io->pfrio_size < 0 ||
5099 		    io->pfrio_size > pf_ioctl_maxcount ||
5100 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
5101 			error = EINVAL;
5102 			break;
5103 		}
5104 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
5105 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
5106 		    M_TEMP, M_WAITOK);
5107 		error = copyin(io->pfrio_buffer, pfras, totlen);
5108 		if (error) {
5109 			free(pfras, M_TEMP);
5110 			break;
5111 		}
5112 		PF_RULES_WLOCK();
5113 		error = pfr_ina_define(&io->pfrio_table, pfras,
5114 		    io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
5115 		    io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
5116 		PF_RULES_WUNLOCK();
5117 		free(pfras, M_TEMP);
5118 		break;
5119 	}
5120 
5121 	case DIOCOSFPADD: {
5122 		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
5123 		PF_RULES_WLOCK();
5124 		error = pf_osfp_add(io);
5125 		PF_RULES_WUNLOCK();
5126 		break;
5127 	}
5128 
5129 	case DIOCOSFPGET: {
5130 		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
5131 		PF_RULES_RLOCK();
5132 		error = pf_osfp_get(io);
5133 		PF_RULES_RUNLOCK();
5134 		break;
5135 	}
5136 
5137 	case DIOCXBEGIN: {
5138 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
5139 		struct pfioc_trans_e	*ioes, *ioe;
5140 		size_t			 totlen;
5141 		int			 i;
5142 
5143 		if (io->esize != sizeof(*ioe)) {
5144 			error = ENODEV;
5145 			break;
5146 		}
5147 		if (io->size < 0 ||
5148 		    io->size > pf_ioctl_maxcount ||
5149 		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
5150 			error = EINVAL;
5151 			break;
5152 		}
5153 		totlen = sizeof(struct pfioc_trans_e) * io->size;
5154 		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
5155 		    M_TEMP, M_WAITOK);
5156 		error = copyin(io->array, ioes, totlen);
5157 		if (error) {
5158 			free(ioes, M_TEMP);
5159 			break;
5160 		}
5161 		PF_RULES_WLOCK();
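		/*
		 * Open an inactive copy of every ruleset named in the
		 * array, returning a ticket for each entry.
		 */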
5162 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5163 			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
5164 			switch (ioe->rs_num) {
5165 			case PF_RULESET_ETH:
5166 				if ((error = pf_begin_eth(&ioe->ticket, ioe->anchor))) {
5167 					PF_RULES_WUNLOCK();
5168 					free(ioes, M_TEMP);
5169 					goto fail;
5170 				}
5171 				break;
5172 #ifdef ALTQ
5173 			case PF_RULESET_ALTQ:
5174 				if (ioe->anchor[0]) {
5175 					PF_RULES_WUNLOCK();
5176 					free(ioes, M_TEMP);
5177 					error = EINVAL;
5178 					goto fail;
5179 				}
5180 				if ((error = pf_begin_altq(&ioe->ticket))) {
5181 					PF_RULES_WUNLOCK();
5182 					free(ioes, M_TEMP);
5183 					goto fail;
5184 				}
5185 				break;
5186 #endif /* ALTQ */
5187 			case PF_RULESET_TABLE:
5188 			    {
5189 				struct pfr_table table;
5190 
5191 				bzero(&table, sizeof(table));
5192 				strlcpy(table.pfrt_anchor, ioe->anchor,
5193 				    sizeof(table.pfrt_anchor));
5194 				if ((error = pfr_ina_begin(&table,
5195 				    &ioe->ticket, NULL, 0))) {
5196 					PF_RULES_WUNLOCK();
5197 					free(ioes, M_TEMP);
5198 					goto fail;
5199 				}
5200 				break;
5201 			    }
5202 			default:
5203 				if ((error = pf_begin_rules(&ioe->ticket,
5204 				    ioe->rs_num, ioe->anchor))) {
5205 					PF_RULES_WUNLOCK();
5206 					free(ioes, M_TEMP);
5207 					goto fail;
5208 				}
5209 				break;
5210 			}
5211 		}
5212 		PF_RULES_WUNLOCK();
5213 		error = copyout(ioes, io->array, totlen);
5214 		free(ioes, M_TEMP);
5215 		break;
5216 	}
5217 
5218 	case DIOCXROLLBACK: {
5219 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
5220 		struct pfioc_trans_e	*ioe, *ioes;
5221 		size_t			 totlen;
5222 		int			 i;
5223 
5224 		if (io->esize != sizeof(*ioe)) {
5225 			error = ENODEV;
5226 			break;
5227 		}
5228 		if (io->size < 0 ||
5229 		    io->size > pf_ioctl_maxcount ||
5230 		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
5231 			error = EINVAL;
5232 			break;
5233 		}
5234 		totlen = sizeof(struct pfioc_trans_e) * io->size;
5235 		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
5236 		    M_TEMP, M_WAITOK);
5237 		error = copyin(io->array, ioes, totlen);
5238 		if (error) {
5239 			free(ioes, M_TEMP);
5240 			break;
5241 		}
5242 		PF_RULES_WLOCK();
5243 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5244 			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
5245 			switch (ioe->rs_num) {
5246 			case PF_RULESET_ETH:
5247 				if ((error = pf_rollback_eth(ioe->ticket,
5248 				    ioe->anchor))) {
5249 					PF_RULES_WUNLOCK();
5250 					free(ioes, M_TEMP);
5251 					goto fail; /* really bad */
5252 				}
5253 				break;
5254 #ifdef ALTQ
5255 			case PF_RULESET_ALTQ:
5256 				if (ioe->anchor[0]) {
5257 					PF_RULES_WUNLOCK();
5258 					free(ioes, M_TEMP);
5259 					error = EINVAL;
5260 					goto fail;
5261 				}
5262 				if ((error = pf_rollback_altq(ioe->ticket))) {
5263 					PF_RULES_WUNLOCK();
5264 					free(ioes, M_TEMP);
5265 					goto fail; /* really bad */
5266 				}
5267 				break;
5268 #endif /* ALTQ */
5269 			case PF_RULESET_TABLE:
5270 			    {
5271 				struct pfr_table table;
5272 
5273 				bzero(&table, sizeof(table));
5274 				strlcpy(table.pfrt_anchor, ioe->anchor,
5275 				    sizeof(table.pfrt_anchor));
5276 				if ((error = pfr_ina_rollback(&table,
5277 				    ioe->ticket, NULL, 0))) {
5278 					PF_RULES_WUNLOCK();
5279 					free(ioes, M_TEMP);
5280 					goto fail; /* really bad */
5281 				}
5282 				break;
5283 			    }
5284 			default:
5285 				if ((error = pf_rollback_rules(ioe->ticket,
5286 				    ioe->rs_num, ioe->anchor))) {
5287 					PF_RULES_WUNLOCK();
5288 					free(ioes, M_TEMP);
5289 					goto fail; /* really bad */
5290 				}
5291 				break;
5292 			}
5293 		}
5294 		PF_RULES_WUNLOCK();
5295 		free(ioes, M_TEMP);
5296 		break;
5297 	}
5298 
5299 	case DIOCXCOMMIT: {
5300 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
5301 		struct pfioc_trans_e	*ioe, *ioes;
5302 		struct pf_kruleset	*rs;
5303 		struct pf_keth_ruleset	*ers;
5304 		size_t			 totlen;
5305 		int			 i;
5306 
5307 		if (io->esize != sizeof(*ioe)) {
5308 			error = ENODEV;
5309 			break;
5310 		}
5311 
5312 		if (io->size < 0 ||
5313 		    io->size > pf_ioctl_maxcount ||
5314 		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
5315 			error = EINVAL;
5316 			break;
5317 		}
5318 
5319 		totlen = sizeof(struct pfioc_trans_e) * io->size;
5320 		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
5321 		    M_TEMP, M_WAITOK);
5322 		error = copyin(io->array, ioes, totlen);
5323 		if (error) {
5324 			free(ioes, M_TEMP);
5325 			break;
5326 		}
5327 		PF_RULES_WLOCK();
5328 		/* First, make sure everything will succeed. */
5329 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5330 			ioe->anchor[sizeof(ioe->anchor) - 1] = 0;
5331 			switch (ioe->rs_num) {
5332 			case PF_RULESET_ETH:
5333 				ers = pf_find_keth_ruleset(ioe->anchor);
5334 				if (ers == NULL || ioe->ticket == 0 ||
5335 				    ioe->ticket != ers->inactive.ticket) {
5336 					PF_RULES_WUNLOCK();
5337 					free(ioes, M_TEMP);
5338 					error = EINVAL;
5339 					goto fail;
5340 				}
5341 				break;
5342 #ifdef ALTQ
5343 			case PF_RULESET_ALTQ:
5344 				if (ioe->anchor[0]) {
5345 					PF_RULES_WUNLOCK();
5346 					free(ioes, M_TEMP);
5347 					error = EINVAL;
5348 					goto fail;
5349 				}
5350 				if (!V_altqs_inactive_open || ioe->ticket !=
5351 				    V_ticket_altqs_inactive) {
5352 					PF_RULES_WUNLOCK();
5353 					free(ioes, M_TEMP);
5354 					error = EBUSY;
5355 					goto fail;
5356 				}
5357 				break;
5358 #endif /* ALTQ */
5359 			case PF_RULESET_TABLE:
5360 				rs = pf_find_kruleset(ioe->anchor);
5361 				if (rs == NULL || !rs->topen || ioe->ticket !=
5362 				    rs->tticket) {
5363 					PF_RULES_WUNLOCK();
5364 					free(ioes, M_TEMP);
5365 					error = EBUSY;
5366 					goto fail;
5367 				}
5368 				break;
5369 			default:
5370 				if (ioe->rs_num < 0 || ioe->rs_num >=
5371 				    PF_RULESET_MAX) {
5372 					PF_RULES_WUNLOCK();
5373 					free(ioes, M_TEMP);
5374 					error = EINVAL;
5375 					goto fail;
5376 				}
5377 				rs = pf_find_kruleset(ioe->anchor);
5378 				if (rs == NULL ||
5379 				    !rs->rules[ioe->rs_num].inactive.open ||
5380 				    rs->rules[ioe->rs_num].inactive.ticket !=
5381 				    ioe->ticket) {
5382 					PF_RULES_WUNLOCK();
5383 					free(ioes, M_TEMP);
5384 					error = EBUSY;
5385 					goto fail;
5386 				}
5387 				break;
5388 			}
5389 		}
5390 		/* Now do the commit - no errors should happen here. */
5391 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5392 			switch (ioe->rs_num) {
5393 			case PF_RULESET_ETH:
5394 				if ((error = pf_commit_eth(ioe->ticket, ioe->anchor))) {
5395 					PF_RULES_WUNLOCK();
5396 					free(ioes, M_TEMP);
5397 					goto fail; /* really bad */
5398 				}
5399 				break;
5400 #ifdef ALTQ
5401 			case PF_RULESET_ALTQ:
5402 				if ((error = pf_commit_altq(ioe->ticket))) {
5403 					PF_RULES_WUNLOCK();
5404 					free(ioes, M_TEMP);
5405 					goto fail; /* really bad */
5406 				}
5407 				break;
5408 #endif /* ALTQ */
5409 			case PF_RULESET_TABLE:
5410 			    {
5411 				struct pfr_table table;
5412 
5413 				bzero(&table, sizeof(table));
5414 				(void)strlcpy(table.pfrt_anchor, ioe->anchor,
5415 				    sizeof(table.pfrt_anchor));
5416 				if ((error = pfr_ina_commit(&table,
5417 				    ioe->ticket, NULL, NULL, 0))) {
5418 					PF_RULES_WUNLOCK();
5419 					free(ioes, M_TEMP);
5420 					goto fail; /* really bad */
5421 				}
5422 				break;
5423 			    }
5424 			default:
5425 				if ((error = pf_commit_rules(ioe->ticket,
5426 				    ioe->rs_num, ioe->anchor))) {
5427 					PF_RULES_WUNLOCK();
5428 					free(ioes, M_TEMP);
5429 					goto fail; /* really bad */
5430 				}
5431 				break;
5432 			}
5433 		}
5434 		PF_RULES_WUNLOCK();
5435 
5436 		/* Only hook into Ethernet traffic if we've got rules for it. */
5437 		if (!TAILQ_EMPTY(V_pf_keth->active.rules))
5438 			hook_pf_eth();
5439 		else
5440 			dehook_pf_eth();
5441 
5442 		free(ioes, M_TEMP);
5443 		break;
5444 	}
5445 
5446 	case DIOCGETSRCNODES: {
5447 		struct pfioc_src_nodes	*psn = (struct pfioc_src_nodes *)addr;
5448 		struct pf_srchash	*sh;
5449 		struct pf_ksrc_node	*n;
5450 		struct pf_src_node	*p, *pstore;
5451 		uint32_t		 i, nr = 0;
5452 
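		/*
		 * First pass: count the source nodes so psn_len can be
		 * clamped, or reported when the caller passed zero.
		 */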
5453 		for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask;
5454 				i++, sh++) {
5455 			PF_HASHROW_LOCK(sh);
5456 			LIST_FOREACH(n, &sh->nodes, entry)
5457 				nr++;
5458 			PF_HASHROW_UNLOCK(sh);
5459 		}
5460 
5461 		psn->psn_len = min(psn->psn_len,
5462 		    sizeof(struct pf_src_node) * nr);
5463 
5464 		if (psn->psn_len == 0) {
5465 			psn->psn_len = sizeof(struct pf_src_node) * nr;
5466 			break;
5467 		}
5468 
5469 		nr = 0;
5470 
5471 		p = pstore = malloc(psn->psn_len, M_TEMP, M_WAITOK | M_ZERO);
5472 		for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask;
5473 		    i++, sh++) {
5474 			PF_HASHROW_LOCK(sh);
5475 			LIST_FOREACH(n, &sh->nodes, entry) {
5476 
5477 				if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
5478 					break;
5479 
5480 				pf_src_node_copy(n, p);
5481 
5482 				p++;
5483 				nr++;
5484 			}
5485 			PF_HASHROW_UNLOCK(sh);
5486 		}
5487 		error = copyout(pstore, psn->psn_src_nodes,
5488 		    sizeof(struct pf_src_node) * nr);
5489 		if (error) {
5490 			free(pstore, M_TEMP);
5491 			break;
5492 		}
5493 		psn->psn_len = sizeof(struct pf_src_node) * nr;
5494 		free(pstore, M_TEMP);
5495 		break;
5496 	}
5497 
5498 	case DIOCCLRSRCNODES: {
5499 		pf_kill_srcnodes(NULL);
5500 		break;
5501 	}
5502 
5503 	case DIOCKILLSRCNODES:
5504 		pf_kill_srcnodes((struct pfioc_src_node_kill *)addr);
5505 		break;
5506 
5507 #ifdef COMPAT_FREEBSD13
5508 	case DIOCKEEPCOUNTERS_FREEBSD13:
5509 #endif
5510 	case DIOCKEEPCOUNTERS:
5511 		error = pf_keepcounters((struct pfioc_nv *)addr);
5512 		break;
5513 
5514 	case DIOCGETSYNCOOKIES:
5515 		error = pf_get_syncookies((struct pfioc_nv *)addr);
5516 		break;
5517 
5518 	case DIOCSETSYNCOOKIES:
5519 		error = pf_set_syncookies((struct pfioc_nv *)addr);
5520 		break;
5521 
5522 	case DIOCSETHOSTID: {
5523 		u_int32_t	*hostid = (u_int32_t *)addr;
5524 
5525 		PF_RULES_WLOCK();
5526 		if (*hostid == 0)
5527 			V_pf_status.hostid = arc4random();
5528 		else
5529 			V_pf_status.hostid = *hostid;
5530 		PF_RULES_WUNLOCK();
5531 		break;
5532 	}
5533 
5534 	case DIOCOSFPFLUSH:
5535 		PF_RULES_WLOCK();
5536 		pf_osfp_flush();
5537 		PF_RULES_WUNLOCK();
5538 		break;
5539 
5540 	case DIOCIGETIFACES: {
5541 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
5542 		struct pfi_kif *ifstore;
5543 		size_t bufsiz;
5544 
5545 		if (io->pfiio_esize != sizeof(struct pfi_kif)) {
5546 			error = ENODEV;
5547 			break;
5548 		}
5549 
5550 		if (io->pfiio_size < 0 ||
5551 		    io->pfiio_size > pf_ioctl_maxcount ||
5552 		    WOULD_OVERFLOW(io->pfiio_size, sizeof(struct pfi_kif))) {
5553 			error = EINVAL;
5554 			break;
5555 		}
5556 
5557 		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
5558 
5559 		bufsiz = io->pfiio_size * sizeof(struct pfi_kif);
5560 		ifstore = mallocarray(io->pfiio_size, sizeof(struct pfi_kif),
5561 		    M_TEMP, M_WAITOK | M_ZERO);
5562 
5563 		PF_RULES_RLOCK();
5564 		pfi_get_ifaces(io->pfiio_name, ifstore, &io->pfiio_size);
5565 		PF_RULES_RUNLOCK();
5566 		error = copyout(ifstore, io->pfiio_buffer, bufsiz);
5567 		free(ifstore, M_TEMP);
5568 		break;
5569 	}
5570 
5571 	case DIOCSETIFFLAG: {
5572 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
5573 
5574 		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
5575 
5576 		PF_RULES_WLOCK();
5577 		error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
5578 		PF_RULES_WUNLOCK();
5579 		break;
5580 	}
5581 
5582 	case DIOCCLRIFFLAG: {
5583 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
5584 
5585 		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
5586 
5587 		PF_RULES_WLOCK();
5588 		error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
5589 		PF_RULES_WUNLOCK();
5590 		break;
5591 	}
5592 
5593 	case DIOCSETREASS: {
5594 		u_int32_t	*reass = (u_int32_t *)addr;
5595 
5596 		V_pf_status.reass = *reass & (PF_REASS_ENABLED|PF_REASS_NODF);
5597 		/* Clearing the DF flag without reassembly enabled is not a
5598 		 * valid combination; disable reassembly in that case. */
5599 		if (!(V_pf_status.reass & PF_REASS_ENABLED))
5600 			V_pf_status.reass = 0;
5601 		break;
5602 	}
5603 
5604 	default:
5605 		error = ENODEV;
5606 		break;
5607 	}
5608 fail:
5609 	CURVNET_RESTORE();
5610 
5611 #undef ERROUT_IOCTL
5612 
5613 	return (error);
5614 }
5615 
5616 void
5617 pfsync_state_export(union pfsync_state_union *sp, struct pf_kstate *st, int msg_version)
5618 {
5619 	bzero(sp, sizeof(union pfsync_state_union));
5620 
5621 	/* copy from state key */
5622 	sp->pfs_1301.key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
5623 	sp->pfs_1301.key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
5624 	sp->pfs_1301.key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
5625 	sp->pfs_1301.key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
5626 	sp->pfs_1301.key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
5627 	sp->pfs_1301.key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
5628 	sp->pfs_1301.key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
5629 	sp->pfs_1301.key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
5630 	sp->pfs_1301.proto = st->key[PF_SK_WIRE]->proto;
5631 	sp->pfs_1301.af = st->key[PF_SK_WIRE]->af;
5632 
5633 	/* copy from state */
5634 	strlcpy(sp->pfs_1301.ifname, st->kif->pfik_name, sizeof(sp->pfs_1301.ifname));
5635 	bcopy(&st->act.rt_addr, &sp->pfs_1301.rt_addr, sizeof(sp->pfs_1301.rt_addr));
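	/*
	 * st->creation is kept in milliseconds; the wire format carries
	 * seconds since creation. expire is sent as seconds remaining,
	 * or zero if the state has already expired.
	 */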
5636 	sp->pfs_1301.creation = htonl(time_uptime - (st->creation / 1000));
5637 	sp->pfs_1301.expire = pf_state_expires(st);
5638 	if (sp->pfs_1301.expire <= time_uptime)
5639 		sp->pfs_1301.expire = htonl(0);
5640 	else
5641 		sp->pfs_1301.expire = htonl(sp->pfs_1301.expire - time_uptime);
5642 
5643 	sp->pfs_1301.direction = st->direction;
5644 	sp->pfs_1301.log = st->act.log;
5645 	sp->pfs_1301.timeout = st->timeout;
5646 
5647 	switch (msg_version) {
5648 		case PFSYNC_MSG_VERSION_1301:
5649 			sp->pfs_1301.state_flags = st->state_flags;
5650 			break;
5651 		case PFSYNC_MSG_VERSION_1400:
5652 			sp->pfs_1400.state_flags = htons(st->state_flags);
5653 			sp->pfs_1400.qid = htons(st->act.qid);
5654 			sp->pfs_1400.pqid = htons(st->act.pqid);
5655 			sp->pfs_1400.dnpipe = htons(st->act.dnpipe);
5656 			sp->pfs_1400.dnrpipe = htons(st->act.dnrpipe);
5657 			sp->pfs_1400.rtableid = htonl(st->act.rtableid);
5658 			sp->pfs_1400.min_ttl = st->act.min_ttl;
5659 			sp->pfs_1400.set_tos = st->act.set_tos;
5660 			sp->pfs_1400.max_mss = htons(st->act.max_mss);
5661 			sp->pfs_1400.set_prio[0] = st->act.set_prio[0];
5662 			sp->pfs_1400.set_prio[1] = st->act.set_prio[1];
5663 			sp->pfs_1400.rt = st->act.rt;
5664 			if (st->act.rt_kif)
5665 				strlcpy(sp->pfs_1400.rt_ifname,
5666 				    st->act.rt_kif->pfik_name,
5667 				    sizeof(sp->pfs_1400.rt_ifname));
5668 			break;
5669 		default:
5670 			panic("%s: Unsupported pfsync_msg_version %d",
5671 			    __func__, msg_version);
5672 	}
5673 
5674 	if (st->src_node)
5675 		sp->pfs_1301.sync_flags |= PFSYNC_FLAG_SRCNODE;
5676 	if (st->nat_src_node)
5677 		sp->pfs_1301.sync_flags |= PFSYNC_FLAG_NATSRCNODE;
5678 
5679 	sp->pfs_1301.id = st->id;
5680 	sp->pfs_1301.creatorid = st->creatorid;
5681 	pf_state_peer_hton(&st->src, &sp->pfs_1301.src);
5682 	pf_state_peer_hton(&st->dst, &sp->pfs_1301.dst);
5683 
5684 	if (st->rule == NULL)
5685 		sp->pfs_1301.rule = htonl(-1);
5686 	else
5687 		sp->pfs_1301.rule = htonl(st->rule->nr);
5688 	if (st->anchor == NULL)
5689 		sp->pfs_1301.anchor = htonl(-1);
5690 	else
5691 		sp->pfs_1301.anchor = htonl(st->anchor->nr);
5692 	if (st->nat_rule == NULL)
5693 		sp->pfs_1301.nat_rule = htonl(-1);
5694 	else
5695 		sp->pfs_1301.nat_rule = htonl(st->nat_rule->nr);
5696 
5697 	pf_state_counter_hton(st->packets[0], sp->pfs_1301.packets[0]);
5698 	pf_state_counter_hton(st->packets[1], sp->pfs_1301.packets[1]);
5699 	pf_state_counter_hton(st->bytes[0], sp->pfs_1301.bytes[0]);
5700 	pf_state_counter_hton(st->bytes[1], sp->pfs_1301.bytes[1]);
5701 }
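/*
 * Editor's note: on the wire, creation and expire are relative times in
 * seconds, network byte order: creation is the age of the state, expire the
 * time remaining before it would be purged (clamped to 0 above). A
 * hypothetical receiver turns them back into absolute uptimes like this:
 */
#if 0
	uint32_t age  = ntohl(sp->pfs_1301.creation);	/* seconds old */
	uint32_t left = ntohl(sp->pfs_1301.expire);	/* seconds remaining */
	time_t created = time_uptime - age;
	time_t expires = time_uptime + left;
#endif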
5702 
5703 void
5704 pf_state_export(struct pf_state_export *sp, struct pf_kstate *st)
5705 {
5706 	bzero(sp, sizeof(*sp));
5707 
5708 	sp->version = PF_STATE_VERSION;
5709 
5710 	/* copy from state key */
5711 	sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
5712 	sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
5713 	sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
5714 	sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
5715 	sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
5716 	sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
5717 	sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
5718 	sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
5719 	sp->proto = st->key[PF_SK_WIRE]->proto;
5720 	sp->af = st->key[PF_SK_WIRE]->af;
5721 
5722 	/* copy from state */
5723 	strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
5724 	strlcpy(sp->orig_ifname, st->orig_kif->pfik_name,
5725 	    sizeof(sp->orig_ifname));
5726 	bcopy(&st->act.rt_addr, &sp->rt_addr, sizeof(sp->rt_addr));
5727 	sp->creation = htonl(time_uptime - (st->creation / 1000));
5728 	sp->expire = pf_state_expires(st);
5729 	if (sp->expire <= time_uptime)
5730 		sp->expire = htonl(0);
5731 	else
5732 		sp->expire = htonl(sp->expire - time_uptime);
5733 
5734 	sp->direction = st->direction;
5735 	sp->log = st->act.log;
5736 	sp->timeout = st->timeout;
5737 	/* 8 bits for the old libpfctl, 16 bits for the new libpfctl */
5738 	sp->state_flags_compat = st->state_flags;
5739 	sp->state_flags = htons(st->state_flags);
5740 	if (st->src_node)
5741 		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
5742 	if (st->nat_src_node)
5743 		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;
5744 
5745 	sp->id = st->id;
5746 	sp->creatorid = st->creatorid;
5747 	pf_state_peer_hton(&st->src, &sp->src);
5748 	pf_state_peer_hton(&st->dst, &sp->dst);
5749 
5750 	if (st->rule == NULL)
5751 		sp->rule = htonl(-1);
5752 	else
5753 		sp->rule = htonl(st->rule->nr);
5754 	if (st->anchor == NULL)
5755 		sp->anchor = htonl(-1);
5756 	else
5757 		sp->anchor = htonl(st->anchor->nr);
5758 	if (st->nat_rule == NULL)
5759 		sp->nat_rule = htonl(-1);
5760 	else
5761 		sp->nat_rule = htonl(st->nat_rule->nr);
5762 
5763 	sp->packets[0] = st->packets[0];
5764 	sp->packets[1] = st->packets[1];
5765 	sp->bytes[0] = st->bytes[0];
5766 	sp->bytes[1] = st->bytes[1];
5767 
5768 	sp->qid = htons(st->act.qid);
5769 	sp->pqid = htons(st->act.pqid);
5770 	sp->dnpipe = htons(st->act.dnpipe);
5771 	sp->dnrpipe = htons(st->act.dnrpipe);
5772 	sp->rtableid = htonl(st->act.rtableid);
5773 	sp->min_ttl = st->act.min_ttl;
5774 	sp->set_tos = st->act.set_tos;
5775 	sp->max_mss = htons(st->act.max_mss);
5776 	sp->rt = st->act.rt;
5777 	if (st->act.rt_kif)
5778 		strlcpy(sp->rt_ifname, st->act.rt_kif->pfik_name,
5779 		    sizeof(sp->rt_ifname));
5780 	sp->set_prio[0] = st->act.set_prio[0];
5781 	sp->set_prio[1] = st->act.set_prio[1];
5782 
5783 }
5784 
5785 static void
5786 pf_tbladdr_copyout(struct pf_addr_wrap *aw)
5787 {
5788 	struct pfr_ktable *kt;
5789 
5790 	KASSERT(aw->type == PF_ADDR_TABLE, ("%s: type %u", __func__, aw->type));
5791 
5792 	kt = aw->p.tbl;
5793 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
5794 		kt = kt->pfrkt_root;
5795 	aw->p.tbl = NULL;
5796 	aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
5797 		kt->pfrkt_cnt : -1;
5798 }
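/*
 * Editor's note: pf_tbladdr_copyout() scrubs the kernel table pointer
 * before the rule reaches userland, leaving only the address count;
 * tblcnt == -1 signals an inactive table.
 */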
5799 
5800 static int
5801 pf_add_status_counters(nvlist_t *nvl, const char *name, counter_u64_t *counters,
5802     size_t number, char **names)
5803 {
5804 	nvlist_t        *nvc;
5805 
5806 	nvc = nvlist_create(0);
5807 	if (nvc == NULL)
5808 		return (ENOMEM);
5809 
5810 	for (int i = 0; i < number; i++) {
5811 		nvlist_append_number_array(nvc, "counters",
5812 		    counter_u64_fetch(counters[i]));
5813 		nvlist_append_string_array(nvc, "names",
5814 		    names[i]);
5815 		nvlist_append_number_array(nvc, "ids",
5816 		    i);
5817 	}
5818 	nvlist_add_nvlist(nvl, name, nvc);
5819 	nvlist_destroy(nvc);
5820 
5821 	return (0);
5822 }
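/*
 * Editor's note: a hedged sketch of how userland could walk one of the
 * counter nvlists built above with libnv. The "counters", "names" and "ids"
 * array keys match pf_add_status_counters(); the unpacked status nvlist
 * "nvl" is assumed. Illustrative only.
 */
#if 0
	const nvlist_t *nvc = nvlist_get_nvlist(nvl, "counters");
	size_t nvals, nnames;
	const uint64_t *vals =
	    nvlist_get_number_array(nvc, "counters", &nvals);
	const char * const *names =
	    nvlist_get_string_array(nvc, "names", &nnames);

	for (size_t i = 0; i < nvals && i < nnames; i++)
		printf("%s: %ju\n", names[i], (uintmax_t)vals[i]);
#endif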
5823 
5824 static int
5825 pf_getstatus(struct pfioc_nv *nv)
5826 {
5827 	nvlist_t        *nvl = NULL, *nvc = NULL;
5828 	void            *nvlpacked = NULL;
5829 	int              error;
5830 	struct pf_status s;
5831 	char *pf_reasons[PFRES_MAX+1] = PFRES_NAMES;
5832 	char *pf_lcounter[KLCNT_MAX+1] = KLCNT_NAMES;
5833 	char *pf_fcounter[FCNT_MAX+1] = FCNT_NAMES;
5834 	PF_RULES_RLOCK_TRACKER;
5835 
5836 #define ERROUT(x)      ERROUT_FUNCTION(errout, x)
5837 
5838 	PF_RULES_RLOCK();
5839 
5840 	nvl = nvlist_create(0);
5841 	if (nvl == NULL)
5842 		ERROUT(ENOMEM);
5843 
5844 	nvlist_add_bool(nvl, "running", V_pf_status.running);
5845 	nvlist_add_number(nvl, "since", V_pf_status.since);
5846 	nvlist_add_number(nvl, "debug", V_pf_status.debug);
5847 	nvlist_add_number(nvl, "hostid", V_pf_status.hostid);
5848 	nvlist_add_number(nvl, "states", V_pf_status.states);
5849 	nvlist_add_number(nvl, "src_nodes", V_pf_status.src_nodes);
5850 	nvlist_add_number(nvl, "reass", V_pf_status.reass);
5851 	nvlist_add_bool(nvl, "syncookies_active",
5852 	    V_pf_status.syncookies_active);
5853 	nvlist_add_number(nvl, "halfopen_states", V_pf_status.states_halfopen);
5854 
5855 	/* counters */
5856 	error = pf_add_status_counters(nvl, "counters", V_pf_status.counters,
5857 	    PFRES_MAX, pf_reasons);
5858 	if (error != 0)
5859 		ERROUT(error);
5860 
5861 	/* lcounters */
5862 	error = pf_add_status_counters(nvl, "lcounters", V_pf_status.lcounters,
5863 	    KLCNT_MAX, pf_lcounter);
5864 	if (error != 0)
5865 		ERROUT(error);
5866 
5867 	/* fcounters */
5868 	nvc = nvlist_create(0);
5869 	if (nvc == NULL)
5870 		ERROUT(ENOMEM);
5871 
5872 	for (int i = 0; i < FCNT_MAX; i++) {
5873 		nvlist_append_number_array(nvc, "counters",
5874 		    pf_counter_u64_fetch(&V_pf_status.fcounters[i]));
5875 		nvlist_append_string_array(nvc, "names",
5876 		    pf_fcounter[i]);
5877 		nvlist_append_number_array(nvc, "ids",
5878 		    i);
5879 	}
5880 	nvlist_add_nvlist(nvl, "fcounters", nvc);
5881 	nvlist_destroy(nvc);
5882 	nvc = NULL;
5883 
5884 	/* scounters */
5885 	error = pf_add_status_counters(nvl, "scounters", V_pf_status.scounters,
5886 	    SCNT_MAX, pf_fcounter);
5887 	if (error != 0)
5888 		ERROUT(error);
5889 
5890 	nvlist_add_string(nvl, "ifname", V_pf_status.ifname);
5891 	nvlist_add_binary(nvl, "chksum", V_pf_status.pf_chksum,
5892 	    PF_MD5_DIGEST_LENGTH);
5893 
5894 	pfi_update_status(V_pf_status.ifname, &s);
5895 
5896 	/* pcounters / bcounters */
5897 	for (int i = 0; i < 2; i++) {
5898 		for (int j = 0; j < 2; j++) {
5899 			for (int k = 0; k < 2; k++) {
5900 				nvlist_append_number_array(nvl, "pcounters",
5901 				    s.pcounters[i][j][k]);
5902 			}
5903 			nvlist_append_number_array(nvl, "bcounters",
5904 			    s.bcounters[i][j]);
5905 		}
5906 	}
5907 
5908 	nvlpacked = nvlist_pack(nvl, &nv->len);
5909 	if (nvlpacked == NULL)
5910 		ERROUT(ENOMEM);
5911 
5912 	if (nv->size == 0)
5913 		ERROUT(0);
5914 	else if (nv->size < nv->len)
5915 		ERROUT(ENOSPC);
5916 
5917 	PF_RULES_RUNLOCK();
5918 	error = copyout(nvlpacked, nv->data, nv->len);
5919 	goto done;
5920 
5921 #undef ERROUT
5922 errout:
5923 	PF_RULES_RUNLOCK();
5924 done:
5925 	free(nvlpacked, M_NVLIST);
5926 	nvlist_destroy(nvc);
5927 	nvlist_destroy(nvl);
5928 
5929 	return (error);
5930 }
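/*
 * Editor's note: pf_getstatus() follows the common pfioc_nv contract: a
 * call with size == 0 succeeds and only reports the required length in len;
 * a buffer smaller than len fails with ENOSPC. A minimal two-pass userland
 * sketch, assuming the DIOCGETSTATUSNV ioctl and an open /dev/pf fd "dev"
 * (the length can grow between calls, so a real caller retries on ENOSPC):
 */
#if 0
	struct pfioc_nv nv = { .data = NULL, .len = 0, .size = 0 };
	nvlist_t *status;

	if (ioctl(dev, DIOCGETSTATUSNV, &nv) != 0)	/* learn length */
		err(1, "DIOCGETSTATUSNV");
	nv.size = nv.len;
	if ((nv.data = malloc(nv.size)) == NULL)
		err(1, "malloc");
	if (ioctl(dev, DIOCGETSTATUSNV, &nv) != 0)	/* fetch payload */
		err(1, "DIOCGETSTATUSNV");
	status = nvlist_unpack(nv.data, nv.len, 0);
#endif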
5931 
5932 /*
5933  * XXX - Check for version mismatch!!!
5934  */
5935 static void
5936 pf_clear_all_states(void)
5937 {
5938 	struct epoch_tracker	 et;
5939 	struct pf_kstate	*s;
5940 	u_int i;
5941 
5942 	NET_EPOCH_ENTER(et);
5943 	for (i = 0; i <= V_pf_hashmask; i++) {
5944 		struct pf_idhash *ih = &V_pf_idhash[i];
5945 relock:
5946 		PF_HASHROW_LOCK(ih);
5947 		LIST_FOREACH(s, &ih->states, entry) {
5948 			s->timeout = PFTM_PURGE;
5949 			/* Don't send out individual delete messages. */
5950 			s->state_flags |= PFSTATE_NOSYNC;
5951 			pf_unlink_state(s);
5952 			goto relock;
5953 		}
5954 		PF_HASHROW_UNLOCK(ih);
5955 	}
5956 	NET_EPOCH_EXIT(et);
5957 }
5958 
5959 static int
5960 pf_clear_tables(void)
5961 {
5962 	struct pfioc_table io;
5963 	int error;
5964 
5965 	bzero(&io, sizeof(io));
5966 	io.pfrio_flags |= PFR_FLAG_ALLRSETS;
5967 
5968 	error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel,
5969 	    io.pfrio_flags);
5970 
5971 	return (error);
5972 }
5973 
5974 static void
5975 pf_kill_srcnodes(struct pfioc_src_node_kill *psnk)
5976 {
5977 	struct pf_ksrc_node_list	 kill;
5978 	u_int 				 killed;
5979 
5980 	LIST_INIT(&kill);
5981 	for (int i = 0; i <= V_pf_srchashmask; i++) {
5982 		struct pf_srchash *sh = &V_pf_srchash[i];
5983 		struct pf_ksrc_node *sn, *tmp;
5984 
5985 		PF_HASHROW_LOCK(sh);
5986 		LIST_FOREACH_SAFE(sn, &sh->nodes, entry, tmp)
5987 			if (psnk == NULL ||
5988 			    (PF_MATCHA(psnk->psnk_src.neg,
5989 			      &psnk->psnk_src.addr.v.a.addr,
5990 			      &psnk->psnk_src.addr.v.a.mask,
5991 			      &sn->addr, sn->af) &&
5992 			    PF_MATCHA(psnk->psnk_dst.neg,
5993 			      &psnk->psnk_dst.addr.v.a.addr,
5994 			      &psnk->psnk_dst.addr.v.a.mask,
5995 			      &sn->raddr, sn->af))) {
5996 				pf_unlink_src_node(sn);
5997 				LIST_INSERT_HEAD(&kill, sn, entry);
5998 				sn->expire = 1;
5999 			}
6000 		PF_HASHROW_UNLOCK(sh);
6001 	}
6002 
6003 	for (int i = 0; i <= V_pf_hashmask; i++) {
6004 		struct pf_idhash *ih = &V_pf_idhash[i];
6005 		struct pf_kstate *s;
6006 
6007 		PF_HASHROW_LOCK(ih);
6008 		LIST_FOREACH(s, &ih->states, entry) {
6009 			if (s->src_node && s->src_node->expire == 1)
6010 				s->src_node = NULL;
6011 			if (s->nat_src_node && s->nat_src_node->expire == 1)
6012 				s->nat_src_node = NULL;
6013 		}
6014 		PF_HASHROW_UNLOCK(ih);
6015 	}
6016 
6017 	killed = pf_free_src_nodes(&kill);
6018 
6019 	if (psnk != NULL)
6020 		psnk->psnk_killed = killed;
6021 }
6022 
6023 static int
6024 pf_keepcounters(struct pfioc_nv *nv)
6025 {
6026 	nvlist_t	*nvl = NULL;
6027 	void		*nvlpacked = NULL;
6028 	int		 error = 0;
6029 
6030 #define	ERROUT(x)	ERROUT_FUNCTION(on_error, x)
6031 
6032 	if (nv->len > pf_ioctl_maxcount)
6033 		ERROUT(ENOMEM);
6034 
6035 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
6036 	error = copyin(nv->data, nvlpacked, nv->len);
6037 	if (error)
6038 		ERROUT(error);
6039 
6040 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
6041 	if (nvl == NULL)
6042 		ERROUT(EBADMSG);
6043 
6044 	if (!nvlist_exists_bool(nvl, "keep_counters"))
6045 		ERROUT(EBADMSG);
6046 
6047 	V_pf_status.keep_counters = nvlist_get_bool(nvl, "keep_counters");
6048 
6049 on_error:
6050 	nvlist_destroy(nvl);
6051 	free(nvlpacked, M_NVLIST);
6052 	return (error);
6053 }
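/*
 * Editor's note: the matching userland request is a one-key nvlist packed
 * into a pfioc_nv; DIOCKEEPCOUNTERS is assumed as the ioctl that lands
 * here, and "dev" as an open /dev/pf fd. A minimal sketch:
 */
#if 0
	struct pfioc_nv nv;
	nvlist_t *nvl = nvlist_create(0);

	nvlist_add_bool(nvl, "keep_counters", true);
	nv.data = nvlist_pack(nvl, &nv.len);
	nv.size = nv.len;
	if (ioctl(dev, DIOCKEEPCOUNTERS, &nv) != 0)
		err(1, "DIOCKEEPCOUNTERS");
	nvlist_destroy(nvl);
	free(nv.data);
#endif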
6054 
6055 unsigned int
6056 pf_clear_states(const struct pf_kstate_kill *kill)
6057 {
6058 	struct pf_state_key_cmp	 match_key;
6059 	struct pf_kstate	*s;
6060 	struct pfi_kkif	*kif;
6061 	int		 idx;
6062 	unsigned int	 killed = 0, dir;
6063 
6064 	NET_EPOCH_ASSERT();
6065 
6066 	for (unsigned int i = 0; i <= V_pf_hashmask; i++) {
6067 		struct pf_idhash *ih = &V_pf_idhash[i];
6068 
6069 relock_DIOCCLRSTATES:
6070 		PF_HASHROW_LOCK(ih);
6071 		LIST_FOREACH(s, &ih->states, entry) {
6072 			/* For floating states look at the original kif. */
6073 			kif = s->kif == V_pfi_all ? s->orig_kif : s->kif;
6074 
6075 			if (kill->psk_ifname[0] &&
6076 			    strcmp(kill->psk_ifname,
6077 			    kif->pfik_name))
6078 				continue;
6079 
6080 			if (kill->psk_kill_match) {
6081 				bzero(&match_key, sizeof(match_key));
6082 
6083 				if (s->direction == PF_OUT) {
6084 					dir = PF_IN;
6085 					idx = PF_SK_STACK;
6086 				} else {
6087 					dir = PF_OUT;
6088 					idx = PF_SK_WIRE;
6089 				}
6090 
6091 				match_key.af = s->key[idx]->af;
6092 				match_key.proto = s->key[idx]->proto;
6093 				PF_ACPY(&match_key.addr[0],
6094 				    &s->key[idx]->addr[1], match_key.af);
6095 				match_key.port[0] = s->key[idx]->port[1];
6096 				PF_ACPY(&match_key.addr[1],
6097 				    &s->key[idx]->addr[0], match_key.af);
6098 				match_key.port[1] = s->key[idx]->port[0];
6099 			}
6100 
6101 			/*
6102 			 * Don't send out individual
6103 			 * delete messages.
6104 			 */
6105 			s->state_flags |= PFSTATE_NOSYNC;
6106 			pf_unlink_state(s);
6107 			killed++;
6108 
6109 			if (kill->psk_kill_match)
6110 				killed += pf_kill_matching_state(&match_key,
6111 				    dir);
6112 
6113 			goto relock_DIOCCLRSTATES;
6114 		}
6115 		PF_HASHROW_UNLOCK(ih);
6116 	}
6117 
6118 	if (V_pfsync_clear_states_ptr != NULL)
6119 		V_pfsync_clear_states_ptr(V_pf_status.hostid, kill->psk_ifname);
6120 
6121 	return (killed);
6122 }
6123 
6124 void
6125 pf_killstates(struct pf_kstate_kill *kill, unsigned int *killed)
6126 {
6127 	struct pf_kstate	*s;
6128 
6129 	NET_EPOCH_ASSERT();
6130 	if (kill->psk_pfcmp.id) {
6131 		if (kill->psk_pfcmp.creatorid == 0)
6132 			kill->psk_pfcmp.creatorid = V_pf_status.hostid;
6133 		if ((s = pf_find_state_byid(kill->psk_pfcmp.id,
6134 		    kill->psk_pfcmp.creatorid))) {
6135 			pf_unlink_state(s);
6136 			*killed = 1;
6137 		}
6138 		return;
6139 	}
6140 
6141 	for (unsigned int i = 0; i <= V_pf_hashmask; i++)
6142 		*killed += pf_killstates_row(kill, &V_pf_idhash[i]);
6143 }
6144 
6145 static int
6146 pf_killstates_nv(struct pfioc_nv *nv)
6147 {
6148 	struct pf_kstate_kill	 kill;
6149 	struct epoch_tracker	 et;
6150 	nvlist_t		*nvl = NULL;
6151 	void			*nvlpacked = NULL;
6152 	int			 error = 0;
6153 	unsigned int		 killed = 0;
6154 
6155 #define ERROUT(x)	ERROUT_FUNCTION(on_error, x)
6156 
6157 	if (nv->len > pf_ioctl_maxcount)
6158 		ERROUT(ENOMEM);
6159 
6160 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
6161 	error = copyin(nv->data, nvlpacked, nv->len);
6162 	if (error)
6163 		ERROUT(error);
6164 
6165 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
6166 	if (nvl == NULL)
6167 		ERROUT(EBADMSG);
6168 
6169 	error = pf_nvstate_kill_to_kstate_kill(nvl, &kill);
6170 	if (error)
6171 		ERROUT(error);
6172 
6173 	NET_EPOCH_ENTER(et);
6174 	pf_killstates(&kill, &killed);
6175 	NET_EPOCH_EXIT(et);
6176 
6177 	free(nvlpacked, M_NVLIST);
6178 	nvlpacked = NULL;
6179 	nvlist_destroy(nvl);
6180 	nvl = nvlist_create(0);
6181 	if (nvl == NULL)
6182 		ERROUT(ENOMEM);
6183 
6184 	nvlist_add_number(nvl, "killed", killed);
6185 
6186 	nvlpacked = nvlist_pack(nvl, &nv->len);
6187 	if (nvlpacked == NULL)
6188 		ERROUT(ENOMEM);
6189 
6190 	if (nv->size == 0)
6191 		ERROUT(0);
6192 	else if (nv->size < nv->len)
6193 		ERROUT(ENOSPC);
6194 
6195 	error = copyout(nvlpacked, nv->data, nv->len);
6196 
6197 on_error:
6198 	nvlist_destroy(nvl);
6199 	free(nvlpacked, M_NVLIST);
6200 	return (error);
6201 }
6202 
6203 static int
6204 pf_clearstates_nv(struct pfioc_nv *nv)
6205 {
6206 	struct pf_kstate_kill	 kill;
6207 	struct epoch_tracker	 et;
6208 	nvlist_t		*nvl = NULL;
6209 	void			*nvlpacked = NULL;
6210 	int			 error = 0;
6211 	unsigned int		 killed;
6212 
6213 #define ERROUT(x)	ERROUT_FUNCTION(on_error, x)
6214 
6215 	if (nv->len > pf_ioctl_maxcount)
6216 		ERROUT(ENOMEM);
6217 
6218 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
6219 	error = copyin(nv->data, nvlpacked, nv->len);
6220 	if (error)
6221 		ERROUT(error);
6222 
6223 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
6224 	if (nvl == NULL)
6225 		ERROUT(EBADMSG);
6226 
6227 	error = pf_nvstate_kill_to_kstate_kill(nvl, &kill);
6228 	if (error)
6229 		ERROUT(error);
6230 
6231 	NET_EPOCH_ENTER(et);
6232 	killed = pf_clear_states(&kill);
6233 	NET_EPOCH_EXIT(et);
6234 
6235 	free(nvlpacked, M_NVLIST);
6236 	nvlpacked = NULL;
6237 	nvlist_destroy(nvl);
6238 	nvl = nvlist_create(0);
6239 	if (nvl == NULL)
6240 		ERROUT(ENOMEM);
6241 
6242 	nvlist_add_number(nvl, "killed", killed);
6243 
6244 	nvlpacked = nvlist_pack(nvl, &nv->len);
6245 	if (nvlpacked == NULL)
6246 		ERROUT(ENOMEM);
6247 
6248 	if (nv->size == 0)
6249 		ERROUT(0);
6250 	else if (nv->size < nv->len)
6251 		ERROUT(ENOSPC);
6252 
6253 	error = copyout(nvlpacked, nv->data, nv->len);
6254 
6255 #undef ERROUT
6256 on_error:
6257 	nvlist_destroy(nvl);
6258 	free(nvlpacked, M_NVLIST);
6259 	return (error);
6260 }
6261 
6262 static int
6263 pf_getstate(struct pfioc_nv *nv)
6264 {
6265 	nvlist_t		*nvl = NULL, *nvls;
6266 	void			*nvlpacked = NULL;
6267 	struct pf_kstate	*s = NULL;
6268 	int			 error = 0;
6269 	uint64_t		 id, creatorid;
6270 
6271 #define ERROUT(x)	ERROUT_FUNCTION(errout, x)
6272 
6273 	if (nv->len > pf_ioctl_maxcount)
6274 		ERROUT(ENOMEM);
6275 
6276 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
6277 	error = copyin(nv->data, nvlpacked, nv->len);
6278 	if (error)
6279 		ERROUT(error);
6280 
6281 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
6282 	if (nvl == NULL)
6283 		ERROUT(EBADMSG);
6284 
6285 	PFNV_CHK(pf_nvuint64(nvl, "id", &id));
6286 	PFNV_CHK(pf_nvuint64(nvl, "creatorid", &creatorid));
6287 
6288 	s = pf_find_state_byid(id, creatorid);
6289 	if (s == NULL)
6290 		ERROUT(ENOENT);
6291 
6292 	free(nvlpacked, M_NVLIST);
6293 	nvlpacked = NULL;
6294 	nvlist_destroy(nvl);
6295 	nvl = nvlist_create(0);
6296 	if (nvl == NULL)
6297 		ERROUT(ENOMEM);
6298 
6299 	nvls = pf_state_to_nvstate(s);
6300 	if (nvls == NULL)
6301 		ERROUT(ENOMEM);
6302 
6303 	nvlist_add_nvlist(nvl, "state", nvls);
6304 	nvlist_destroy(nvls);
6305 
6306 	nvlpacked = nvlist_pack(nvl, &nv->len);
6307 	if (nvlpacked == NULL)
6308 		ERROUT(ENOMEM);
6309 
6310 	if (nv->size == 0)
6311 		ERROUT(0);
6312 	else if (nv->size < nv->len)
6313 		ERROUT(ENOSPC);
6314 
6315 	error = copyout(nvlpacked, nv->data, nv->len);
6316 
6317 #undef ERROUT
6318 errout:
6319 	if (s != NULL)
6320 		PF_STATE_UNLOCK(s);
6321 	free(nvlpacked, M_NVLIST);
6322 	nvlist_destroy(nvl);
6323 	return (error);
6324 }
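/*
 * Editor's note: the request nvlist for pf_getstate() needs exactly the
 * "id" and "creatorid" numbers validated above, and the reply wraps the
 * exported state in a "state" nvlist. A hedged sketch of the request side,
 * assuming the DIOCGETSTATENV ioctl, an open fd "dev", a reply buffer
 * "buf", and caller-supplied state_id/creator_id:
 */
#if 0
	struct pfioc_nv nv;
	nvlist_t *nvl = nvlist_create(0);
	void *packed;

	nvlist_add_number(nvl, "id", state_id);
	nvlist_add_number(nvl, "creatorid", creator_id);
	packed = nvlist_pack(nvl, &nv.len);
	memcpy(buf, packed, nv.len);	/* buf is reused for the reply */
	nv.data = buf;
	nv.size = sizeof(buf);		/* must hold the packed reply */
	if (ioctl(dev, DIOCGETSTATENV, &nv) != 0)
		err(1, "DIOCGETSTATENV");
#endif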
6325 
6326 /*
6327  * XXX - Check for version mismatch!!!
6328  */
6329 
6330 /*
6331  * Duplicate the pfctl -Fa operation to flush as much state as we can.
6332  */
6333 static int
6334 shutdown_pf(void)
6335 {
6336 	int error = 0;
6337 	u_int32_t t[5];
6338 	char nn = '\0';
6339 	struct pf_kanchor *anchor;
6340 	struct pf_keth_anchor *eth_anchor;
6341 	int rs_num;
6342 
6343 	do {
6344 		/* Unlink rules of all user defined anchors */
6345 		RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors) {
6346 			/* Wildcard-based anchors may lack a corresponding
6347 			 * explicit anchor rule, or they may be left empty
6348 			 * without rules. That leaves anchor->refcnt at 0,
6349 			 * which the rest of the logic does not expect. */
6350 			if (anchor->refcnt == 0)
6351 				anchor->refcnt = 1;
6352 			for (rs_num = 0; rs_num < PF_RULESET_MAX; ++rs_num) {
6353 				if ((error = pf_begin_rules(&t[rs_num], rs_num,
6354 				    anchor->path)) != 0) {
6355 					DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: "
6356 					    "anchor.path=%s rs_num=%d\n",
6357 					    anchor->path, rs_num));
6358 					goto error;	/* XXX: rollback? */
6359 				}
6360 			}
6361 			for (rs_num = 0; rs_num < PF_RULESET_MAX; ++rs_num) {
6362 				error = pf_commit_rules(t[rs_num], rs_num,
6363 				    anchor->path);
6364 				MPASS(error == 0);
6365 			}
6366 		}
6367 
6368 		/* Unlink rules of all user defined ether anchors */
6369 		RB_FOREACH(eth_anchor, pf_keth_anchor_global,
6370 		    &V_pf_keth_anchors) {
6371 			/* Wildcard-based anchors may lack a corresponding
6372 			 * explicit anchor rule, or they may be left empty
6373 			 * without rules. That leaves eth_anchor->refcnt at 0,
6374 			 * which the rest of the logic does not expect. */
6375 			if (eth_anchor->refcnt == 0)
6376 				eth_anchor->refcnt = 1;
6377 			if ((error = pf_begin_eth(&t[0], eth_anchor->path))
6378 			    != 0) {
6379 				DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: eth "
6380 				    "anchor.path=%s\n", eth_anchor->path));
6381 				goto error;
6382 			}
6383 			error = pf_commit_eth(t[0], eth_anchor->path);
6384 			MPASS(error == 0);
6385 		}
6386 
6387 		if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn))
6388 		    != 0) {
6389 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n"));
6390 			break;
6391 		}
6392 		if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn))
6393 		    != 0) {
6394 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n"));
6395 			break;		/* XXX: rollback? */
6396 		}
6397 		if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn))
6398 		    != 0) {
6399 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n"));
6400 			break;		/* XXX: rollback? */
6401 		}
6402 		if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn))
6403 		    != 0) {
6404 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n"));
6405 			break;		/* XXX: rollback? */
6406 		}
6407 		if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn))
6408 		    != 0) {
6409 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n"));
6410 			break;		/* XXX: rollback? */
6411 		}
6412 
6413 		error = pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn);
6414 		MPASS(error == 0);
6415 		error = pf_commit_rules(t[1], PF_RULESET_FILTER, &nn);
6416 		MPASS(error == 0);
6417 		error = pf_commit_rules(t[2], PF_RULESET_NAT, &nn);
6418 		MPASS(error == 0);
6419 		error = pf_commit_rules(t[3], PF_RULESET_BINAT, &nn);
6420 		MPASS(error == 0);
6421 		error = pf_commit_rules(t[4], PF_RULESET_RDR, &nn);
6422 		MPASS(error == 0);
6423 
6424 		if ((error = pf_clear_tables()) != 0)
6425 			break;
6426 
6427 		if ((error = pf_begin_eth(&t[0], &nn)) != 0) {
6428 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: eth\n"));
6429 			break;
6430 		}
6431 		error = pf_commit_eth(t[0], &nn);
6432 		MPASS(error == 0);
6433 
6434 #ifdef ALTQ
6435 		if ((error = pf_begin_altq(&t[0])) != 0) {
6436 			DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n"));
6437 			break;
6438 		}
6439 		pf_commit_altq(t[0]);
6440 #endif
6441 
6442 		pf_clear_all_states();
6443 
6444 		pf_kill_srcnodes(NULL);
6445 
6446 		/* Status uses no malloc'd memory, so there is nothing to clean up. */
6447 		/* Fingerprints and interfaces have their own cleanup code. */
6448 	} while (0);
6449 
6450 error:
6451 	return (error);
6452 }
6453 
6454 static pfil_return_t
6455 pf_check_return(int chk, struct mbuf **m)
6456 {
6457 
6458 	switch (chk) {
6459 	case PF_PASS:
6460 		if (*m == NULL)
6461 			return (PFIL_CONSUMED);
6462 		else
6463 			return (PFIL_PASS);
6464 		break;
6465 	default:
6466 		if (*m != NULL) {
6467 			m_freem(*m);
6468 			*m = NULL;
6469 		}
6470 		return (PFIL_DROPPED);
6471 	}
6472 }
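/*
 * Editor's note: this encodes the pfil contract for the hooks below:
 * PF_PASS with the mbuf still present continues processing (PFIL_PASS);
 * PF_PASS with the mbuf taken by pf (e.g. rerouted or deferred) is
 * PFIL_CONSUMED; any other verdict frees the mbuf here and reports
 * PFIL_DROPPED, so the framework must not touch it again.
 */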
6473 
6474 static pfil_return_t
6475 pf_eth_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
6476     void *ruleset __unused, struct inpcb *inp)
6477 {
6478 	int chk;
6479 
6480 	CURVNET_ASSERT_SET();
6481 
6482 	chk = pf_test_eth(PF_IN, flags, ifp, m, inp);
6483 
6484 	return (pf_check_return(chk, m));
6485 }
6486 
6487 static pfil_return_t
6488 pf_eth_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
6489     void *ruleset __unused, struct inpcb *inp)
6490 {
6491 	int chk;
6492 
6493 	CURVNET_ASSERT_SET();
6494 
6495 	chk = pf_test_eth(PF_OUT, flags, ifp, m, inp);
6496 
6497 	return (pf_check_return(chk, m));
6498 }
6499 
6500 #ifdef INET
6501 static pfil_return_t
6502 pf_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
6503     void *ruleset __unused, struct inpcb *inp)
6504 {
6505 	int chk;
6506 
6507 	CURVNET_ASSERT_SET();
6508 
6509 	chk = pf_test(AF_INET, PF_IN, flags, ifp, m, inp, NULL);
6510 
6511 	return (pf_check_return(chk, m));
6512 }
6513 
6514 static pfil_return_t
6515 pf_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
6516     void *ruleset __unused, struct inpcb *inp)
6517 {
6518 	int chk;
6519 
6520 	CURVNET_ASSERT_SET();
6521 
6522 	chk = pf_test(AF_INET, PF_OUT, flags, ifp, m, inp, NULL);
6523 
6524 	return (pf_check_return(chk, m));
6525 }
6526 #endif
6527 
6528 #ifdef INET6
6529 static pfil_return_t
6530 pf_check6_in(struct mbuf **m, struct ifnet *ifp, int flags,
6531     void *ruleset __unused, struct inpcb *inp)
6532 {
6533 	int chk;
6534 
6535 	CURVNET_ASSERT_SET();
6536 
6537 	/*
6538 	 * For loopback traffic IPv6 uses the real interface in order to
6539 	 * support scoped addresses. To support stateful filtering we have
6540 	 * to change this to lo0, as is the case for IPv4.
6541 	 */
6542 	chk = pf_test(AF_INET6, PF_IN, flags, (*m)->m_flags & M_LOOP ? V_loif : ifp,
6543 	    m, inp, NULL);
6544 
6545 	return (pf_check_return(chk, m));
6546 }
6547 
6548 static pfil_return_t
6549 pf_check6_out(struct mbuf **m, struct ifnet *ifp, int flags,
6550     void *ruleset __unused, struct inpcb *inp)
6551 {
6552 	int chk;
6553 
6554 	CURVNET_ASSERT_SET();
6555 
6556 	chk = pf_test(AF_INET6, PF_OUT, flags, ifp, m, inp, NULL);
6557 
6558 	return (pf_check_return(chk, m));
6559 }
6560 #endif /* INET6 */
6561 
6562 VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_in_hook);
6563 VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_out_hook);
6564 #define	V_pf_eth_in_hook	VNET(pf_eth_in_hook)
6565 #define	V_pf_eth_out_hook	VNET(pf_eth_out_hook)
6566 
6567 #ifdef INET
6568 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_in_hook);
6569 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_out_hook);
6570 #define	V_pf_ip4_in_hook	VNET(pf_ip4_in_hook)
6571 #define	V_pf_ip4_out_hook	VNET(pf_ip4_out_hook)
6572 #endif
6573 #ifdef INET6
6574 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_in_hook);
6575 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_out_hook);
6576 #define	V_pf_ip6_in_hook	VNET(pf_ip6_in_hook)
6577 #define	V_pf_ip6_out_hook	VNET(pf_ip6_out_hook)
6578 #endif
6579 
6580 static void
6581 hook_pf_eth(void)
6582 {
6583 	struct pfil_hook_args pha = {
6584 		.pa_version = PFIL_VERSION,
6585 		.pa_modname = "pf",
6586 		.pa_type = PFIL_TYPE_ETHERNET,
6587 	};
6588 	struct pfil_link_args pla = {
6589 		.pa_version = PFIL_VERSION,
6590 	};
6591 	int ret __diagused;
6592 
6593 	if (atomic_load_bool(&V_pf_pfil_eth_hooked))
6594 		return;
6595 
6596 	pha.pa_mbuf_chk = pf_eth_check_in;
6597 	pha.pa_flags = PFIL_IN;
6598 	pha.pa_rulname = "eth-in";
6599 	V_pf_eth_in_hook = pfil_add_hook(&pha);
6600 	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
6601 	pla.pa_head = V_link_pfil_head;
6602 	pla.pa_hook = V_pf_eth_in_hook;
6603 	ret = pfil_link(&pla);
6604 	MPASS(ret == 0);
6605 	pha.pa_mbuf_chk = pf_eth_check_out;
6606 	pha.pa_flags = PFIL_OUT;
6607 	pha.pa_rulname = "eth-out";
6608 	V_pf_eth_out_hook = pfil_add_hook(&pha);
6609 	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6610 	pla.pa_head = V_link_pfil_head;
6611 	pla.pa_hook = V_pf_eth_out_hook;
6612 	ret = pfil_link(&pla);
6613 	MPASS(ret == 0);
6614 
6615 	atomic_store_bool(&V_pf_pfil_eth_hooked, true);
6616 }
6617 
6618 static void
6619 hook_pf(void)
6620 {
6621 	struct pfil_hook_args pha = {
6622 		.pa_version = PFIL_VERSION,
6623 		.pa_modname = "pf",
6624 	};
6625 	struct pfil_link_args pla = {
6626 		.pa_version = PFIL_VERSION,
6627 	};
6628 	int ret __diagused;
6629 
6630 	if (atomic_load_bool(&V_pf_pfil_hooked))
6631 		return;
6632 
6633 #ifdef INET
6634 	pha.pa_type = PFIL_TYPE_IP4;
6635 	pha.pa_mbuf_chk = pf_check_in;
6636 	pha.pa_flags = PFIL_IN;
6637 	pha.pa_rulname = "default-in";
6638 	V_pf_ip4_in_hook = pfil_add_hook(&pha);
6639 	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
6640 	pla.pa_head = V_inet_pfil_head;
6641 	pla.pa_hook = V_pf_ip4_in_hook;
6642 	ret = pfil_link(&pla);
6643 	MPASS(ret == 0);
6644 	pha.pa_mbuf_chk = pf_check_out;
6645 	pha.pa_flags = PFIL_OUT;
6646 	pha.pa_rulname = "default-out";
6647 	V_pf_ip4_out_hook = pfil_add_hook(&pha);
6648 	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6649 	pla.pa_head = V_inet_pfil_head;
6650 	pla.pa_hook = V_pf_ip4_out_hook;
6651 	ret = pfil_link(&pla);
6652 	MPASS(ret == 0);
6653 	if (V_pf_filter_local) {
6654 		pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6655 		pla.pa_head = V_inet_local_pfil_head;
6656 		pla.pa_hook = V_pf_ip4_out_hook;
6657 		ret = pfil_link(&pla);
6658 		MPASS(ret == 0);
6659 	}
6660 #endif
6661 #ifdef INET6
6662 	pha.pa_type = PFIL_TYPE_IP6;
6663 	pha.pa_mbuf_chk = pf_check6_in;
6664 	pha.pa_flags = PFIL_IN;
6665 	pha.pa_rulname = "default-in6";
6666 	V_pf_ip6_in_hook = pfil_add_hook(&pha);
6667 	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
6668 	pla.pa_head = V_inet6_pfil_head;
6669 	pla.pa_hook = V_pf_ip6_in_hook;
6670 	ret = pfil_link(&pla);
6671 	MPASS(ret == 0);
6672 	pha.pa_mbuf_chk = pf_check6_out;
6673 	pha.pa_rulname = "default-out6";
6674 	pha.pa_flags = PFIL_OUT;
6675 	V_pf_ip6_out_hook = pfil_add_hook(&pha);
6676 	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6677 	pla.pa_head = V_inet6_pfil_head;
6678 	pla.pa_hook = V_pf_ip6_out_hook;
6679 	ret = pfil_link(&pla);
6680 	MPASS(ret == 0);
6681 	if (V_pf_filter_local) {
6682 		pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6683 		pla.pa_head = V_inet6_local_pfil_head;
6684 		pla.pa_hook = V_pf_ip6_out_hook;
6685 		ret = pfil_link(&pla);
6686 		MPASS(ret == 0);
6687 	}
6688 #endif
6689 
6690 	atomic_store_bool(&V_pf_pfil_hooked, true);
6691 }
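/*
 * Editor's note: hook_pf()/hook_pf_eth() show the generic pfil recipe:
 * describe a hook with pfil_hook_args, then attach it to a head with
 * pfil_link(). A hedged minimal sketch for a hypothetical "examplefw"
 * filter on the IPv4 head, using the same calls; examplefw_check_in is
 * an assumed callback, not part of this file:
 */
#if 0
	struct pfil_hook_args pha = {
		.pa_version = PFIL_VERSION,
		.pa_modname = "examplefw",		/* hypothetical */
		.pa_type = PFIL_TYPE_IP4,
		.pa_flags = PFIL_IN,
		.pa_rulname = "in",
		.pa_mbuf_chk = examplefw_check_in,	/* hypothetical */
	};
	struct pfil_link_args pla = {
		.pa_version = PFIL_VERSION,
		.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR,
		.pa_head = V_inet_pfil_head,
	};

	pla.pa_hook = pfil_add_hook(&pha);
	(void)pfil_link(&pla);
#endif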
6692 
6693 static void
6694 dehook_pf_eth(void)
6695 {
6696 
6697 	if (!atomic_load_bool(&V_pf_pfil_eth_hooked))
6698 		return;
6699 
6700 	pfil_remove_hook(V_pf_eth_in_hook);
6701 	pfil_remove_hook(V_pf_eth_out_hook);
6702 
6703 	atomic_store_bool(&V_pf_pfil_eth_hooked, false);
6704 }
6705 
6706 static void
6707 dehook_pf(void)
6708 {
6709 
6710 	if (!atomic_load_bool(&V_pf_pfil_hooked))
6711 		return;
6712 
6713 #ifdef INET
6714 	pfil_remove_hook(V_pf_ip4_in_hook);
6715 	pfil_remove_hook(V_pf_ip4_out_hook);
6716 #endif
6717 #ifdef INET6
6718 	pfil_remove_hook(V_pf_ip6_in_hook);
6719 	pfil_remove_hook(V_pf_ip6_out_hook);
6720 #endif
6721 
6722 	atomic_store_bool(&V_pf_pfil_hooked, false);
6723 }
6724 
6725 static void
6726 pf_load_vnet(void)
6727 {
6728 	V_pf_tag_z = uma_zcreate("pf tags", sizeof(struct pf_tagname),
6729 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
6730 
6731 	rm_init_flags(&V_pf_rules_lock, "pf rulesets", RM_RECURSE);
6732 	sx_init(&V_pf_ioctl_lock, "pf ioctl");
6733 
6734 	pf_init_tagset(&V_pf_tags, &pf_rule_tag_hashsize,
6735 	    PF_RULE_TAG_HASH_SIZE_DEFAULT);
6736 #ifdef ALTQ
6737 	pf_init_tagset(&V_pf_qids, &pf_queue_tag_hashsize,
6738 	    PF_QUEUE_TAG_HASH_SIZE_DEFAULT);
6739 #endif
6740 
6741 	V_pf_keth = &V_pf_main_keth_anchor.ruleset;
6742 
6743 	pfattach_vnet();
6744 	V_pf_vnet_active = 1;
6745 }
6746 
6747 static int
6748 pf_load(void)
6749 {
6750 	int error;
6751 
6752 	sx_init(&pf_end_lock, "pf end thread");
6753 
6754 	pf_mtag_initialize();
6755 
6756 	pf_dev = make_dev(&pf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, PF_NAME);
6757 	if (pf_dev == NULL)
6758 		return (ENOMEM);
6759 
6760 	pf_end_threads = 0;
6761 	error = kproc_create(pf_purge_thread, NULL, &pf_purge_proc, 0, 0, "pf purge");
6762 	if (error != 0)
6763 		return (error);
6764 
6765 	pfi_initialize();
6766 
6767 	return (0);
6768 }
6769 
6770 static void
6771 pf_unload_vnet(void)
6772 {
6773 	int ret __diagused;
6774 
6775 	V_pf_vnet_active = 0;
6776 	V_pf_status.running = 0;
6777 	dehook_pf();
6778 	dehook_pf_eth();
6779 
6780 	PF_RULES_WLOCK();
6781 	pf_syncookies_cleanup();
6782 	shutdown_pf();
6783 	PF_RULES_WUNLOCK();
6784 
6785 	ret = swi_remove(V_pf_swi_cookie);
6786 	MPASS(ret == 0);
6787 	ret = intr_event_destroy(V_pf_swi_ie);
6788 	MPASS(ret == 0);
6789 
6790 	pf_unload_vnet_purge();
6791 
6792 	pf_normalize_cleanup();
6793 	PF_RULES_WLOCK();
6794 	pfi_cleanup_vnet();
6795 	PF_RULES_WUNLOCK();
6796 	pfr_cleanup();
6797 	pf_osfp_flush();
6798 	pf_cleanup();
6799 	if (IS_DEFAULT_VNET(curvnet))
6800 		pf_mtag_cleanup();
6801 
6802 	pf_cleanup_tagset(&V_pf_tags);
6803 #ifdef ALTQ
6804 	pf_cleanup_tagset(&V_pf_qids);
6805 #endif
6806 	uma_zdestroy(V_pf_tag_z);
6807 
6808 #ifdef PF_WANT_32_TO_64_COUNTER
6809 	PF_RULES_WLOCK();
6810 	LIST_REMOVE(V_pf_kifmarker, pfik_allkiflist);
6811 
6812 	MPASS(LIST_EMPTY(&V_pf_allkiflist));
6813 	MPASS(V_pf_allkifcount == 0);
6814 
6815 	LIST_REMOVE(&V_pf_default_rule, allrulelist);
6816 	V_pf_allrulecount--;
6817 	LIST_REMOVE(V_pf_rulemarker, allrulelist);
6818 
6819 	MPASS(LIST_EMPTY(&V_pf_allrulelist));
6820 	MPASS(V_pf_allrulecount == 0);
6821 
6822 	PF_RULES_WUNLOCK();
6823 
6824 	free(V_pf_kifmarker, PFI_MTYPE);
6825 	free(V_pf_rulemarker, M_PFRULE);
6826 #endif
6827 
6828 	/* Free counters last as we updated them during shutdown. */
6829 	pf_counter_u64_deinit(&V_pf_default_rule.evaluations);
6830 	for (int i = 0; i < 2; i++) {
6831 		pf_counter_u64_deinit(&V_pf_default_rule.packets[i]);
6832 		pf_counter_u64_deinit(&V_pf_default_rule.bytes[i]);
6833 	}
6834 	counter_u64_free(V_pf_default_rule.states_cur);
6835 	counter_u64_free(V_pf_default_rule.states_tot);
6836 	counter_u64_free(V_pf_default_rule.src_nodes);
6837 	uma_zfree_pcpu(pf_timestamp_pcpu_zone, V_pf_default_rule.timestamp);
6838 
6839 	for (int i = 0; i < PFRES_MAX; i++)
6840 		counter_u64_free(V_pf_status.counters[i]);
6841 	for (int i = 0; i < KLCNT_MAX; i++)
6842 		counter_u64_free(V_pf_status.lcounters[i]);
6843 	for (int i = 0; i < FCNT_MAX; i++)
6844 		pf_counter_u64_deinit(&V_pf_status.fcounters[i]);
6845 	for (int i = 0; i < SCNT_MAX; i++)
6846 		counter_u64_free(V_pf_status.scounters[i]);
6847 
6848 	rm_destroy(&V_pf_rules_lock);
6849 	sx_destroy(&V_pf_ioctl_lock);
6850 }
6851 
6852 static void
6853 pf_unload(void)
6854 {
6855 
6856 	sx_xlock(&pf_end_lock);
6857 	pf_end_threads = 1;
6858 	while (pf_end_threads < 2) {
6859 		wakeup_one(pf_purge_thread);
6860 		sx_sleep(pf_purge_proc, &pf_end_lock, 0, "pftmo", 0);
6861 	}
6862 	sx_xunlock(&pf_end_lock);
6863 
6864 	pf_nl_unregister();
6865 
6866 	if (pf_dev != NULL)
6867 		destroy_dev(pf_dev);
6868 
6869 	pfi_cleanup();
6870 
6871 	sx_destroy(&pf_end_lock);
6872 }
6873 
6874 static void
6875 vnet_pf_init(void *unused __unused)
6876 {
6877 
6878 	pf_load_vnet();
6879 }
6880 VNET_SYSINIT(vnet_pf_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
6881     vnet_pf_init, NULL);
6882 
6883 static void
6884 vnet_pf_uninit(const void *unused __unused)
6885 {
6886 
6887 	pf_unload_vnet();
6888 }
6889 SYSUNINIT(pf_unload, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND, pf_unload, NULL);
6890 VNET_SYSUNINIT(vnet_pf_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
6891     vnet_pf_uninit, NULL);
6892 
6893 static int
6894 pf_modevent(module_t mod, int type, void *data)
6895 {
6896 	int error = 0;
6897 
6898 	switch (type) {
6899 	case MOD_LOAD:
6900 		error = pf_load();
6901 		pf_nl_register();
6902 		break;
6903 	case MOD_UNLOAD:
6904 		/* Handled in SYSUNINIT(pf_unload) to ensure it's done after
6905 		 * the vnet_pf_uninit()s */
6906 		break;
6907 	default:
6908 		error = EINVAL;
6909 		break;
6910 	}
6911 
6912 	return (error);
6913 }
6914 
6915 static moduledata_t pf_mod = {
6916 	"pf",
6917 	pf_modevent,
6918 	0
6919 };
6920 
6921 DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND);
6922 MODULE_DEPEND(pf, netlink, 1, 1, 1);
6923 MODULE_VERSION(pf, PF_MODVER);
6924