xref: /freebsd/sys/netpfil/pf/pf_ioctl.c (revision ff11f1c8c76c053b442f1f1df97272939fbf5afc)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2001 Daniel Hartmeier
5  * Copyright (c) 2002,2003 Henning Brauer
6  * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  *
13  *    - Redistributions of source code must retain the above copyright
14  *      notice, this list of conditions and the following disclaimer.
15  *    - Redistributions in binary form must reproduce the above
16  *      copyright notice, this list of conditions and the following
17  *      disclaimer in the documentation and/or other materials provided
18  *      with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
30  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  *
33  * Effort sponsored in part by the Defense Advanced Research Projects
34  * Agency (DARPA) and Air Force Research Laboratory, Air Force
35  * Materiel Command, USAF, under agreement number F30602-01-2-0537.
36  *
37  *	$OpenBSD: pf_ioctl.c,v 1.213 2009/02/15 21:46:12 mbalmer Exp $
38  */
39 
40 #include <sys/cdefs.h>
41 #include "opt_inet.h"
42 #include "opt_inet6.h"
43 #include "opt_bpf.h"
44 #include "opt_pf.h"
45 
46 #include <sys/param.h>
47 #include <sys/_bitset.h>
48 #include <sys/bitset.h>
49 #include <sys/bus.h>
50 #include <sys/conf.h>
51 #include <sys/endian.h>
52 #include <sys/fcntl.h>
53 #include <sys/filio.h>
54 #include <sys/hash.h>
55 #include <sys/interrupt.h>
56 #include <sys/jail.h>
57 #include <sys/kernel.h>
58 #include <sys/kthread.h>
59 #include <sys/lock.h>
60 #include <sys/mbuf.h>
61 #include <sys/module.h>
62 #include <sys/nv.h>
63 #include <sys/proc.h>
64 #include <sys/sdt.h>
65 #include <sys/smp.h>
66 #include <sys/socket.h>
67 #include <sys/sysctl.h>
68 #include <sys/md5.h>
69 #include <sys/ucred.h>
70 
71 #include <net/if.h>
72 #include <net/if_var.h>
73 #include <net/if_private.h>
74 #include <net/vnet.h>
75 #include <net/route.h>
76 #include <net/pfil.h>
77 #include <net/pfvar.h>
78 #include <net/if_pfsync.h>
79 #include <net/if_pflog.h>
80 
81 #include <netinet/in.h>
82 #include <netinet/ip.h>
83 #include <netinet/ip_var.h>
84 #include <netinet6/ip6_var.h>
85 #include <netinet/ip_icmp.h>
86 #include <netpfil/pf/pf_nl.h>
87 #include <netpfil/pf/pf_nv.h>
88 
89 #ifdef INET6
90 #include <netinet/ip6.h>
91 #endif /* INET6 */
92 
93 #ifdef ALTQ
94 #include <net/altq/altq.h>
95 #endif
96 
97 SDT_PROBE_DEFINE3(pf, ioctl, ioctl, error, "int", "int", "int");
98 SDT_PROBE_DEFINE3(pf, ioctl, function, error, "char *", "int", "int");
99 SDT_PROBE_DEFINE2(pf, ioctl, addrule, error, "int", "int");
100 SDT_PROBE_DEFINE2(pf, ioctl, nvchk, error, "int", "int");
101 
102 static struct pf_kpool	*pf_get_kpool(const char *, u_int32_t, u_int8_t,
103 			    u_int32_t, u_int8_t, u_int8_t, u_int8_t, int);
104 
105 static void		 pf_mv_kpool(struct pf_kpalist *, struct pf_kpalist *);
106 static void		 pf_empty_kpool(struct pf_kpalist *);
107 static int		 pfioctl(struct cdev *, u_long, caddr_t, int,
108 			    struct thread *);
109 static int		 pf_begin_eth(uint32_t *, const char *);
110 static int		 pf_rollback_eth(uint32_t, const char *);
111 static int		 pf_commit_eth(uint32_t, const char *);
112 static void		 pf_free_eth_rule(struct pf_keth_rule *);
113 #ifdef ALTQ
114 static int		 pf_begin_altq(u_int32_t *);
115 static int		 pf_rollback_altq(u_int32_t);
116 static int		 pf_commit_altq(u_int32_t);
117 static int		 pf_enable_altq(struct pf_altq *);
118 static int		 pf_disable_altq(struct pf_altq *);
119 static uint16_t		 pf_qname2qid(const char *);
120 static void		 pf_qid_unref(uint16_t);
121 #endif /* ALTQ */
122 static int		 pf_begin_rules(u_int32_t *, int, const char *);
123 static int		 pf_rollback_rules(u_int32_t, int, char *);
124 static int		 pf_setup_pfsync_matching(struct pf_kruleset *);
125 static void		 pf_hash_rule_rolling(MD5_CTX *, struct pf_krule *);
126 static void		 pf_hash_rule(struct pf_krule *);
127 static void		 pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
128 static int		 pf_commit_rules(u_int32_t, int, char *);
129 static int		 pf_addr_setup(struct pf_kruleset *,
130 			    struct pf_addr_wrap *, sa_family_t);
131 static void		 pf_src_node_copy(const struct pf_ksrc_node *,
132 			    struct pf_src_node *);
133 #ifdef ALTQ
134 static int		 pf_export_kaltq(struct pf_altq *,
135 			    struct pfioc_altq_v1 *, size_t);
136 static int		 pf_import_kaltq(struct pfioc_altq_v1 *,
137 			    struct pf_altq *, size_t);
138 #endif /* ALTQ */
139 
140 VNET_DEFINE(struct pf_krule,	pf_default_rule);
141 
142 static __inline int             pf_krule_compare(struct pf_krule *,
143 				    struct pf_krule *);
144 
145 RB_GENERATE(pf_krule_global, pf_krule, entry_global, pf_krule_compare);
146 
147 #ifdef ALTQ
148 VNET_DEFINE_STATIC(int,		pf_altq_running);
149 #define	V_pf_altq_running	VNET(pf_altq_running)
150 #endif
151 
152 #define	TAGID_MAX	 50000
153 struct pf_tagname {
154 	TAILQ_ENTRY(pf_tagname)	namehash_entries;
155 	TAILQ_ENTRY(pf_tagname)	taghash_entries;
156 	char			name[PF_TAG_NAME_SIZE];
157 	uint16_t		tag;
158 	int			ref;
159 };
160 
161 struct pf_tagset {
162 	TAILQ_HEAD(, pf_tagname)	*namehash;
163 	TAILQ_HEAD(, pf_tagname)	*taghash;
164 	unsigned int			 mask;
165 	uint32_t			 seed;
166 	BITSET_DEFINE(, TAGID_MAX)	 avail;
167 };
168 
169 VNET_DEFINE(struct pf_tagset, pf_tags);
170 #define	V_pf_tags	VNET(pf_tags)
171 static unsigned int	pf_rule_tag_hashsize;
172 #define	PF_RULE_TAG_HASH_SIZE_DEFAULT	128
173 SYSCTL_UINT(_net_pf, OID_AUTO, rule_tag_hashsize, CTLFLAG_RDTUN,
174     &pf_rule_tag_hashsize, PF_RULE_TAG_HASH_SIZE_DEFAULT,
175     "Size of pf(4) rule tag hashtable");
176 
177 #ifdef ALTQ
178 VNET_DEFINE(struct pf_tagset, pf_qids);
179 #define	V_pf_qids	VNET(pf_qids)
180 static unsigned int	pf_queue_tag_hashsize;
181 #define	PF_QUEUE_TAG_HASH_SIZE_DEFAULT	128
182 SYSCTL_UINT(_net_pf, OID_AUTO, queue_tag_hashsize, CTLFLAG_RDTUN,
183     &pf_queue_tag_hashsize, PF_QUEUE_TAG_HASH_SIZE_DEFAULT,
184     "Size of pf(4) queue tag hashtable");
185 #endif
186 VNET_DEFINE(uma_zone_t,	 pf_tag_z);
187 #define	V_pf_tag_z		 VNET(pf_tag_z)
188 static MALLOC_DEFINE(M_PFALTQ, "pf_altq", "pf(4) altq configuration db");
189 static MALLOC_DEFINE(M_PFRULE, "pf_rule", "pf(4) rules");
190 
191 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
192 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
193 #endif
194 
195 VNET_DEFINE_STATIC(bool, pf_filter_local) = false;
196 #define V_pf_filter_local	VNET(pf_filter_local)
197 SYSCTL_BOOL(_net_pf, OID_AUTO, filter_local, CTLFLAG_VNET | CTLFLAG_RW,
198     &VNET_NAME(pf_filter_local), false,
199     "Enable filtering for packets delivered to local network stack");
200 
201 #ifdef PF_DEFAULT_TO_DROP
202 VNET_DEFINE_STATIC(bool, default_to_drop) = true;
203 #else
204 VNET_DEFINE_STATIC(bool, default_to_drop);
205 #endif
206 #define	V_default_to_drop VNET(default_to_drop)
207 SYSCTL_BOOL(_net_pf, OID_AUTO, default_to_drop, CTLFLAG_RDTUN | CTLFLAG_VNET,
208     &VNET_NAME(default_to_drop), false,
209     "Make the default rule drop all packets.");
210 
211 static void		 pf_init_tagset(struct pf_tagset *, unsigned int *,
212 			    unsigned int);
213 static void		 pf_cleanup_tagset(struct pf_tagset *);
214 static uint16_t		 tagname2hashindex(const struct pf_tagset *, const char *);
215 static uint16_t		 tag2hashindex(const struct pf_tagset *, uint16_t);
216 static u_int16_t	 tagname2tag(struct pf_tagset *, const char *);
217 static u_int16_t	 pf_tagname2tag(const char *);
218 static void		 tag_unref(struct pf_tagset *, u_int16_t);
219 
220 #define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x
221 
222 struct cdev *pf_dev;
223 
224 /*
225  * XXX - These are new and need to be checked when moving to a new version
226  */
227 static void		 pf_clear_all_states(void);
228 static int		 pf_killstates_row(struct pf_kstate_kill *,
229 			    struct pf_idhash *);
230 static int		 pf_killstates_nv(struct pfioc_nv *);
231 static int		 pf_clearstates_nv(struct pfioc_nv *);
232 static int		 pf_getstate(struct pfioc_nv *);
233 static int		 pf_getstatus(struct pfioc_nv *);
234 static int		 pf_clear_tables(void);
235 static void		 pf_kill_srcnodes(struct pfioc_src_node_kill *);
236 static int		 pf_keepcounters(struct pfioc_nv *);
237 static void		 pf_tbladdr_copyout(struct pf_addr_wrap *);
238 
239 /*
240  * Wrapper functions for pfil(9) hooks
241  */
242 static pfil_return_t pf_eth_check_in(struct mbuf **m, struct ifnet *ifp,
243     int flags, void *ruleset __unused, struct inpcb *inp);
244 static pfil_return_t pf_eth_check_out(struct mbuf **m, struct ifnet *ifp,
245     int flags, void *ruleset __unused, struct inpcb *inp);
246 #ifdef INET
247 static pfil_return_t pf_check_in(struct mbuf **m, struct ifnet *ifp,
248     int flags, void *ruleset __unused, struct inpcb *inp);
249 static pfil_return_t pf_check_out(struct mbuf **m, struct ifnet *ifp,
250     int flags, void *ruleset __unused, struct inpcb *inp);
251 #endif
252 #ifdef INET6
253 static pfil_return_t pf_check6_in(struct mbuf **m, struct ifnet *ifp,
254     int flags, void *ruleset __unused, struct inpcb *inp);
255 static pfil_return_t pf_check6_out(struct mbuf **m, struct ifnet *ifp,
256     int flags, void *ruleset __unused, struct inpcb *inp);
257 #endif
258 
259 static void		hook_pf_eth(void);
260 static void		hook_pf(void);
261 static void		dehook_pf_eth(void);
262 static void		dehook_pf(void);
263 static int		shutdown_pf(void);
264 static int		pf_load(void);
265 static void		pf_unload(void);
266 
267 static struct cdevsw pf_cdevsw = {
268 	.d_ioctl =	pfioctl,
269 	.d_name =	PF_NAME,
270 	.d_version =	D_VERSION,
271 };
272 
273 VNET_DEFINE_STATIC(bool, pf_pfil_hooked);
274 #define V_pf_pfil_hooked	VNET(pf_pfil_hooked)
275 VNET_DEFINE_STATIC(bool, pf_pfil_eth_hooked);
276 #define V_pf_pfil_eth_hooked	VNET(pf_pfil_eth_hooked)
277 
278 /*
279  * We need a flag that is neither hooked nor running to know when
280  * the VNET is "valid".  We primarily need this to control (global)
281  * external events, e.g., eventhandlers.
282  */
283 VNET_DEFINE(int, pf_vnet_active);
284 #define V_pf_vnet_active	VNET(pf_vnet_active)
285 
286 int pf_end_threads;
287 struct proc *pf_purge_proc;
288 
289 VNET_DEFINE(struct rmlock, pf_rules_lock);
290 VNET_DEFINE_STATIC(struct sx, pf_ioctl_lock);
291 #define	V_pf_ioctl_lock		VNET(pf_ioctl_lock)
292 struct sx			pf_end_lock;
293 
294 /* pfsync */
295 VNET_DEFINE(pfsync_state_import_t *, pfsync_state_import_ptr);
296 VNET_DEFINE(pfsync_insert_state_t *, pfsync_insert_state_ptr);
297 VNET_DEFINE(pfsync_update_state_t *, pfsync_update_state_ptr);
298 VNET_DEFINE(pfsync_delete_state_t *, pfsync_delete_state_ptr);
299 VNET_DEFINE(pfsync_clear_states_t *, pfsync_clear_states_ptr);
300 VNET_DEFINE(pfsync_defer_t *, pfsync_defer_ptr);
301 VNET_DEFINE(pflow_export_state_t *, pflow_export_state_ptr);
302 pfsync_detach_ifnet_t *pfsync_detach_ifnet_ptr;
303 
304 /* pflog */
305 pflog_packet_t			*pflog_packet_ptr = NULL;
306 
307 /*
308  * Copy a user-provided string, returning an error if truncation would occur.
309  * Avoid scanning past "sz" bytes in the source string since there's no
310  * guarantee that it's nul-terminated.
311  */
312 static int
313 pf_user_strcpy(char *dst, const char *src, size_t sz)
314 {
315 	if (strnlen(src, sz) == sz)
316 		return (EINVAL);
317 	(void)strlcpy(dst, src, sz);
318 	return (0);
319 }
320 
321 static void
322 pfattach_vnet(void)
323 {
324 	u_int32_t *my_timeout = V_pf_default_rule.timeout;
325 
326 	bzero(&V_pf_status, sizeof(V_pf_status));
327 
328 	pf_initialize();
329 	pfr_initialize();
330 	pfi_initialize_vnet();
331 	pf_normalize_init();
332 	pf_syncookies_init();
333 
334 	V_pf_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
335 	V_pf_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT;
336 
337 	RB_INIT(&V_pf_anchors);
338 	pf_init_kruleset(&pf_main_ruleset);
339 
340 	pf_init_keth(V_pf_keth);
341 
342 	/* default rule should never be garbage collected */
343 	V_pf_default_rule.entries.tqe_prev = &V_pf_default_rule.entries.tqe_next;
344 	V_pf_default_rule.action = V_default_to_drop ? PF_DROP : PF_PASS;
345 	V_pf_default_rule.nr = (uint32_t)-1;
346 	V_pf_default_rule.rtableid = -1;
347 
348 	pf_counter_u64_init(&V_pf_default_rule.evaluations, M_WAITOK);
349 	for (int i = 0; i < 2; i++) {
350 		pf_counter_u64_init(&V_pf_default_rule.packets[i], M_WAITOK);
351 		pf_counter_u64_init(&V_pf_default_rule.bytes[i], M_WAITOK);
352 	}
353 	V_pf_default_rule.states_cur = counter_u64_alloc(M_WAITOK);
354 	V_pf_default_rule.states_tot = counter_u64_alloc(M_WAITOK);
355 	for (pf_sn_types_t sn_type = 0; sn_type < PF_SN_MAX; sn_type++)
356 		V_pf_default_rule.src_nodes[sn_type] = counter_u64_alloc(M_WAITOK);
357 
358 	V_pf_default_rule.timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
359 	    M_WAITOK | M_ZERO);
360 
361 #ifdef PF_WANT_32_TO_64_COUNTER
362 	V_pf_kifmarker = malloc(sizeof(*V_pf_kifmarker), PFI_MTYPE, M_WAITOK | M_ZERO);
363 	V_pf_rulemarker = malloc(sizeof(*V_pf_rulemarker), M_PFRULE, M_WAITOK | M_ZERO);
364 	PF_RULES_WLOCK();
365 	LIST_INSERT_HEAD(&V_pf_allkiflist, V_pf_kifmarker, pfik_allkiflist);
366 	LIST_INSERT_HEAD(&V_pf_allrulelist, &V_pf_default_rule, allrulelist);
367 	V_pf_allrulecount++;
368 	LIST_INSERT_HEAD(&V_pf_allrulelist, V_pf_rulemarker, allrulelist);
369 	PF_RULES_WUNLOCK();
370 #endif
371 
372 	/* initialize default timeouts */
373 	my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
374 	my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
375 	my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
376 	my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
377 	my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
378 	my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
379 	my_timeout[PFTM_SCTP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
380 	my_timeout[PFTM_SCTP_OPENING] = PFTM_TCP_OPENING_VAL;
381 	my_timeout[PFTM_SCTP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
382 	my_timeout[PFTM_SCTP_CLOSING] = PFTM_TCP_CLOSING_VAL;
383 	my_timeout[PFTM_SCTP_CLOSED] = PFTM_TCP_CLOSED_VAL;
384 	my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
385 	my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
386 	my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
387 	my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
388 	my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
389 	my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
390 	my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
391 	my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
392 	my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
393 	my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
394 	my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
395 	my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
396 	my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
397 	my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;
398 
399 	V_pf_status.debug = PF_DEBUG_URGENT;
400 	/*
401 	 * XXX This is different from OpenBSD, where reassembly is enabled by
402 	 * default. In FreeBSD we expect people to still use scrub rules and
403 	 * switch to the new syntax later. Only when they switch must they
404 	 * explicitly enable reassembly. We could change the default once the
405 	 * scrub rule functionality is hopefully removed some day in the future.
406 	 */
407 	V_pf_status.reass = 0;
408 
409 	V_pf_pfil_hooked = false;
410 	V_pf_pfil_eth_hooked = false;
411 
412 	/* XXX do our best to avoid a conflict */
413 	V_pf_status.hostid = arc4random();
414 
415 	for (int i = 0; i < PFRES_MAX; i++)
416 		V_pf_status.counters[i] = counter_u64_alloc(M_WAITOK);
417 	for (int i = 0; i < KLCNT_MAX; i++)
418 		V_pf_status.lcounters[i] = counter_u64_alloc(M_WAITOK);
419 	for (int i = 0; i < FCNT_MAX; i++)
420 		pf_counter_u64_init(&V_pf_status.fcounters[i], M_WAITOK);
421 	for (int i = 0; i < SCNT_MAX; i++)
422 		V_pf_status.scounters[i] = counter_u64_alloc(M_WAITOK);
423 
424 	if (swi_add(&V_pf_swi_ie, "pf send", pf_intr, curvnet, SWI_NET,
425 	    INTR_MPSAFE, &V_pf_swi_cookie) != 0)
426 		/* XXXGL: leaked all above. */
427 		return;
428 }
429 
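/*
 * Find the NAT, redirect or route address pool of a single rule, identified
 * by anchor, rule action and rule number, in either the active or the
 * inactive ruleset, optionally validating the caller's ticket first.
 */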
430 static struct pf_kpool *
431 pf_get_kpool(const char *anchor, u_int32_t ticket, u_int8_t rule_action,
432     u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
433     u_int8_t check_ticket, int which)
434 {
435 	struct pf_kruleset	*ruleset;
436 	struct pf_krule		*rule;
437 	int			 rs_num;
438 
439 	MPASS(which == PF_RDR || which == PF_NAT || which == PF_RT);
440 
441 	ruleset = pf_find_kruleset(anchor);
442 	if (ruleset == NULL)
443 		return (NULL);
444 	rs_num = pf_get_ruleset_number(rule_action);
445 	if (rs_num >= PF_RULESET_MAX)
446 		return (NULL);
447 	if (active) {
448 		if (check_ticket && ticket !=
449 		    ruleset->rules[rs_num].active.ticket)
450 			return (NULL);
451 		if (r_last)
452 			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
453 			    pf_krulequeue);
454 		else
455 			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
456 	} else {
457 		if (check_ticket && ticket !=
458 		    ruleset->rules[rs_num].inactive.ticket)
459 			return (NULL);
460 		if (r_last)
461 			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
462 			    pf_krulequeue);
463 		else
464 			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
465 	}
466 	if (!r_last) {
467 		while ((rule != NULL) && (rule->nr != rule_number))
468 			rule = TAILQ_NEXT(rule, entries);
469 	}
470 	if (rule == NULL)
471 		return (NULL);
472 
473 	switch (which) {
474 	case PF_RDR:
475 		return (&rule->rdr);
476 	case PF_NAT:
477 		return (&rule->nat);
478 	case PF_RT:
479 		return (&rule->route);
480 	default:
481 		panic("Unknown pool type %d", which);
482 	}
483 }
484 
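/* Move all pool addresses from poola to the tail of poolb. */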
485 static void
486 pf_mv_kpool(struct pf_kpalist *poola, struct pf_kpalist *poolb)
487 {
488 	struct pf_kpooladdr	*mv_pool_pa;
489 
490 	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
491 		TAILQ_REMOVE(poola, mv_pool_pa, entries);
492 		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
493 	}
494 }
495 
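/*
 * Release every pool address in the given list: detach dynamic interface
 * addresses and tables, drop interface references and free the entries.
 */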
496 static void
497 pf_empty_kpool(struct pf_kpalist *poola)
498 {
499 	struct pf_kpooladdr *pa;
500 
501 	while ((pa = TAILQ_FIRST(poola)) != NULL) {
502 		switch (pa->addr.type) {
503 		case PF_ADDR_DYNIFTL:
504 			pfi_dynaddr_remove(pa->addr.p.dyn);
505 			break;
506 		case PF_ADDR_TABLE:
507 			/* XXX: this could be unfinished pooladdr on pabuf */
508 			if (pa->addr.p.tbl != NULL)
509 				pfr_detach_table(pa->addr.p.tbl);
510 			break;
511 		}
512 		if (pa->kif)
513 			pfi_kkif_unref(pa->kif);
514 		TAILQ_REMOVE(poola, pa, entries);
515 		free(pa, M_PFRULE);
516 	}
517 }
518 
519 static void
520 pf_unlink_rule_locked(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
521 {
522 
523 	PF_RULES_WASSERT();
524 	PF_UNLNKDRULES_ASSERT();
525 
526 	TAILQ_REMOVE(rulequeue, rule, entries);
527 
528 	rule->rule_ref |= PFRULE_REFS;
529 	TAILQ_INSERT_TAIL(&V_pf_unlinked_rules, rule, entries);
530 }
531 
532 static void
533 pf_unlink_rule(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
534 {
535 
536 	PF_RULES_WASSERT();
537 
538 	PF_UNLNKDRULES_LOCK();
539 	pf_unlink_rule_locked(rulequeue, rule);
540 	PF_UNLNKDRULES_UNLOCK();
541 }
542 
543 static void
544 pf_free_eth_rule(struct pf_keth_rule *rule)
545 {
546 	PF_RULES_WASSERT();
547 
548 	if (rule == NULL)
549 		return;
550 
551 	if (rule->tag)
552 		tag_unref(&V_pf_tags, rule->tag);
553 	if (rule->match_tag)
554 		tag_unref(&V_pf_tags, rule->match_tag);
555 #ifdef ALTQ
556 	pf_qid_unref(rule->qid);
557 #endif
558 
559 	if (rule->bridge_to)
560 		pfi_kkif_unref(rule->bridge_to);
561 	if (rule->kif)
562 		pfi_kkif_unref(rule->kif);
563 
564 	if (rule->ipsrc.addr.type == PF_ADDR_TABLE)
565 		pfr_detach_table(rule->ipsrc.addr.p.tbl);
566 	if (rule->ipdst.addr.type == PF_ADDR_TABLE)
567 		pfr_detach_table(rule->ipdst.addr.p.tbl);
568 
569 	counter_u64_free(rule->evaluations);
570 	for (int i = 0; i < 2; i++) {
571 		counter_u64_free(rule->packets[i]);
572 		counter_u64_free(rule->bytes[i]);
573 	}
574 	uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp);
575 	pf_keth_anchor_remove(rule);
576 
577 	free(rule, M_PFRULE);
578 }
579 
580 void
581 pf_free_rule(struct pf_krule *rule)
582 {
583 
584 	PF_RULES_WASSERT();
585 	PF_CONFIG_ASSERT();
586 
587 	if (rule->tag)
588 		tag_unref(&V_pf_tags, rule->tag);
589 	if (rule->match_tag)
590 		tag_unref(&V_pf_tags, rule->match_tag);
591 #ifdef ALTQ
592 	if (rule->pqid != rule->qid)
593 		pf_qid_unref(rule->pqid);
594 	pf_qid_unref(rule->qid);
595 #endif
596 	switch (rule->src.addr.type) {
597 	case PF_ADDR_DYNIFTL:
598 		pfi_dynaddr_remove(rule->src.addr.p.dyn);
599 		break;
600 	case PF_ADDR_TABLE:
601 		pfr_detach_table(rule->src.addr.p.tbl);
602 		break;
603 	}
604 	switch (rule->dst.addr.type) {
605 	case PF_ADDR_DYNIFTL:
606 		pfi_dynaddr_remove(rule->dst.addr.p.dyn);
607 		break;
608 	case PF_ADDR_TABLE:
609 		pfr_detach_table(rule->dst.addr.p.tbl);
610 		break;
611 	}
612 	if (rule->overload_tbl)
613 		pfr_detach_table(rule->overload_tbl);
614 	if (rule->kif)
615 		pfi_kkif_unref(rule->kif);
616 	if (rule->rcv_kif)
617 		pfi_kkif_unref(rule->rcv_kif);
618 	pf_kanchor_remove(rule);
619 	pf_empty_kpool(&rule->rdr.list);
620 	pf_empty_kpool(&rule->nat.list);
621 	pf_empty_kpool(&rule->route.list);
622 
623 	pf_krule_free(rule);
624 }
625 
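/*
 * Initialize a tag set: allocate the name and tag hash tables (the tunable
 * size must be a power of two, otherwise the default is used), seed the
 * hash and mark all tag ids as available.
 */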
626 static void
627 pf_init_tagset(struct pf_tagset *ts, unsigned int *tunable_size,
628     unsigned int default_size)
629 {
630 	unsigned int i;
631 	unsigned int hashsize;
632 
633 	if (*tunable_size == 0 || !powerof2(*tunable_size))
634 		*tunable_size = default_size;
635 
636 	hashsize = *tunable_size;
637 	ts->namehash = mallocarray(hashsize, sizeof(*ts->namehash), M_PFHASH,
638 	    M_WAITOK);
639 	ts->taghash = mallocarray(hashsize, sizeof(*ts->taghash), M_PFHASH,
640 	    M_WAITOK);
641 	ts->mask = hashsize - 1;
642 	ts->seed = arc4random();
643 	for (i = 0; i < hashsize; i++) {
644 		TAILQ_INIT(&ts->namehash[i]);
645 		TAILQ_INIT(&ts->taghash[i]);
646 	}
647 	BIT_FILL(TAGID_MAX, &ts->avail);
648 }
649 
650 static void
651 pf_cleanup_tagset(struct pf_tagset *ts)
652 {
653 	unsigned int i;
654 	unsigned int hashsize;
655 	struct pf_tagname *t, *tmp;
656 
657 	/*
658 	 * Only need to clean up one of the hashes as each tag is hashed
659 	 * into each table.
660 	 */
661 	hashsize = ts->mask + 1;
662 	for (i = 0; i < hashsize; i++)
663 		TAILQ_FOREACH_SAFE(t, &ts->namehash[i], namehash_entries, tmp)
664 			uma_zfree(V_pf_tag_z, t);
665 
666 	free(ts->namehash, M_PFHASH);
667 	free(ts->taghash, M_PFHASH);
668 }
669 
670 static uint16_t
671 tagname2hashindex(const struct pf_tagset *ts, const char *tagname)
672 {
673 	size_t len;
674 
675 	len = strnlen(tagname, PF_TAG_NAME_SIZE - 1);
676 	return (murmur3_32_hash(tagname, len, ts->seed) & ts->mask);
677 }
678 
679 static uint16_t
680 tag2hashindex(const struct pf_tagset *ts, uint16_t tag)
681 {
682 
683 	return (tag & ts->mask);
684 }
685 
686 static u_int16_t
687 tagname2tag(struct pf_tagset *ts, const char *tagname)
688 {
689 	struct pf_tagname	*tag;
690 	u_int32_t		 index;
691 	u_int16_t		 new_tagid;
692 
693 	PF_RULES_WASSERT();
694 
695 	index = tagname2hashindex(ts, tagname);
696 	TAILQ_FOREACH(tag, &ts->namehash[index], namehash_entries)
697 		if (strcmp(tagname, tag->name) == 0) {
698 			tag->ref++;
699 			return (tag->tag);
700 		}
701 
702 	/*
703 	 * new entry
704 	 *
705 	 * to avoid fragmentation, we do a linear search from the beginning
706 	 * and take the first free slot we find.
707 	 */
708 	new_tagid = BIT_FFS(TAGID_MAX, &ts->avail);
709 	/*
710 	 * Tags are 1-based, with valid tags in the range [1..TAGID_MAX].
711 	 * BIT_FFS() returns a 1-based bit number, with 0 indicating no bits
712 	 * set.  It may also return a bit number greater than TAGID_MAX due
713 	 * to rounding of the number of bits in the vector up to a multiple
714 	 * of the vector word size at declaration/allocation time.
715 	 */
716 	if ((new_tagid == 0) || (new_tagid > TAGID_MAX))
717 		return (0);
718 
719 	/* Mark the tag as in use.  Bits are 0-based for BIT_CLR() */
720 	BIT_CLR(TAGID_MAX, new_tagid - 1, &ts->avail);
721 
722 	/* allocate and fill new struct pf_tagname */
723 	tag = uma_zalloc(V_pf_tag_z, M_NOWAIT);
724 	if (tag == NULL)
725 		return (0);
726 	strlcpy(tag->name, tagname, sizeof(tag->name));
727 	tag->tag = new_tagid;
728 	tag->ref = 1;
729 
730 	/* Insert into namehash */
731 	TAILQ_INSERT_TAIL(&ts->namehash[index], tag, namehash_entries);
732 
733 	/* Insert into taghash */
734 	index = tag2hashindex(ts, new_tagid);
735 	TAILQ_INSERT_TAIL(&ts->taghash[index], tag, taghash_entries);
736 
737 	return (tag->tag);
738 }
739 
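/*
 * Drop a reference on a tag; when the last reference goes away the tag is
 * removed from both hashes and its id is returned to the available set.
 */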
740 static void
741 tag_unref(struct pf_tagset *ts, u_int16_t tag)
742 {
743 	struct pf_tagname	*t;
744 	uint16_t		 index;
745 
746 	PF_RULES_WASSERT();
747 
748 	index = tag2hashindex(ts, tag);
749 	TAILQ_FOREACH(t, &ts->taghash[index], taghash_entries)
750 		if (tag == t->tag) {
751 			if (--t->ref == 0) {
752 				TAILQ_REMOVE(&ts->taghash[index], t,
753 				    taghash_entries);
754 				index = tagname2hashindex(ts, t->name);
755 				TAILQ_REMOVE(&ts->namehash[index], t,
756 				    namehash_entries);
757 				/* Bits are 0-based for BIT_SET() */
758 				BIT_SET(TAGID_MAX, tag - 1, &ts->avail);
759 				uma_zfree(V_pf_tag_z, t);
760 			}
761 			break;
762 		}
763 }
764 
765 static uint16_t
766 pf_tagname2tag(const char *tagname)
767 {
768 	return (tagname2tag(&V_pf_tags, tagname));
769 }
770 
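/*
 * Begin a transaction on the inactive Ethernet ruleset of the given anchor:
 * purge any leftover inactive rules and hand out a new ticket.
 */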
771 static int
772 pf_begin_eth(uint32_t *ticket, const char *anchor)
773 {
774 	struct pf_keth_rule *rule, *tmp;
775 	struct pf_keth_ruleset *rs;
776 
777 	PF_RULES_WASSERT();
778 
779 	rs = pf_find_or_create_keth_ruleset(anchor);
780 	if (rs == NULL)
781 		return (EINVAL);
782 
783 	/* Purge old inactive rules. */
784 	TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries,
785 	    tmp) {
786 		TAILQ_REMOVE(rs->inactive.rules, rule,
787 		    entries);
788 		pf_free_eth_rule(rule);
789 	}
790 
791 	*ticket = ++rs->inactive.ticket;
792 	rs->inactive.open = 1;
793 
794 	return (0);
795 }
796 
797 static int
798 pf_rollback_eth(uint32_t ticket, const char *anchor)
799 {
800 	struct pf_keth_rule *rule, *tmp;
801 	struct pf_keth_ruleset *rs;
802 
803 	PF_RULES_WASSERT();
804 
805 	rs = pf_find_keth_ruleset(anchor);
806 	if (rs == NULL)
807 		return (EINVAL);
808 
809 	if (!rs->inactive.open ||
810 	    ticket != rs->inactive.ticket)
811 		return (0);
812 
813 	/* Purge old inactive rules. */
814 	TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries,
815 	    tmp) {
816 		TAILQ_REMOVE(rs->inactive.rules, rule, entries);
817 		pf_free_eth_rule(rule);
818 	}
819 
820 	rs->inactive.open = 0;
821 
822 	pf_remove_if_empty_keth_ruleset(rs);
823 
824 	return (0);
825 }
826 
827 #define	PF_SET_SKIP_STEPS(i)					\
828 	do {							\
829 		while (head[i] != cur) {			\
830 			head[i]->skip[i].ptr = cur;		\
831 			head[i] = TAILQ_NEXT(head[i], entries);	\
832 		}						\
833 	} while (0)
834 
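/*
 * Compute the skip steps for an Ethernet ruleset: for each rule and each
 * skippable field, record the next rule that differs in that field so the
 * evaluation loop can jump over runs of rules that cannot match.
 */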
835 static void
836 pf_eth_calc_skip_steps(struct pf_keth_ruleq *rules)
837 {
838 	struct pf_keth_rule *cur, *prev, *head[PFE_SKIP_COUNT];
839 	int i;
840 
841 	cur = TAILQ_FIRST(rules);
842 	prev = cur;
843 	for (i = 0; i < PFE_SKIP_COUNT; ++i)
844 		head[i] = cur;
845 	while (cur != NULL) {
846 		if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
847 			PF_SET_SKIP_STEPS(PFE_SKIP_IFP);
848 		if (cur->direction != prev->direction)
849 			PF_SET_SKIP_STEPS(PFE_SKIP_DIR);
850 		if (cur->proto != prev->proto)
851 			PF_SET_SKIP_STEPS(PFE_SKIP_PROTO);
852 		if (memcmp(&cur->src, &prev->src, sizeof(cur->src)) != 0)
853 			PF_SET_SKIP_STEPS(PFE_SKIP_SRC_ADDR);
854 		if (memcmp(&cur->dst, &prev->dst, sizeof(cur->dst)) != 0)
855 			PF_SET_SKIP_STEPS(PFE_SKIP_DST_ADDR);
856 		if (cur->ipsrc.neg != prev->ipsrc.neg ||
857 		    pf_addr_wrap_neq(&cur->ipsrc.addr, &prev->ipsrc.addr))
858 			PF_SET_SKIP_STEPS(PFE_SKIP_SRC_IP_ADDR);
859 		if (cur->ipdst.neg != prev->ipdst.neg ||
860 		    pf_addr_wrap_neq(&cur->ipdst.addr, &prev->ipdst.addr))
861 			PF_SET_SKIP_STEPS(PFE_SKIP_DST_IP_ADDR);
862 
863 		prev = cur;
864 		cur = TAILQ_NEXT(cur, entries);
865 	}
866 	for (i = 0; i < PFE_SKIP_COUNT; ++i)
867 		PF_SET_SKIP_STEPS(i);
868 }
869 
870 static int
871 pf_commit_eth(uint32_t ticket, const char *anchor)
872 {
873 	struct pf_keth_ruleq *rules;
874 	struct pf_keth_ruleset *rs;
875 
876 	rs = pf_find_keth_ruleset(anchor);
877 	if (rs == NULL) {
878 		return (EINVAL);
879 	}
880 
881 	if (!rs->inactive.open ||
882 	    ticket != rs->inactive.ticket)
883 		return (EBUSY);
884 
885 	PF_RULES_WASSERT();
886 
887 	pf_eth_calc_skip_steps(rs->inactive.rules);
888 
889 	rules = rs->active.rules;
890 	atomic_store_ptr(&rs->active.rules, rs->inactive.rules);
891 	rs->inactive.rules = rules;
892 	rs->inactive.ticket = rs->active.ticket;
893 
894 	return (pf_rollback_eth(rs->inactive.ticket,
895 	    rs->anchor ? rs->anchor->path : ""));
896 }
897 
898 #ifdef ALTQ
899 static uint16_t
900 pf_qname2qid(const char *qname)
901 {
902 	return (tagname2tag(&V_pf_qids, qname));
903 }
904 
905 static void
906 pf_qid_unref(uint16_t qid)
907 {
908 	tag_unref(&V_pf_qids, qid);
909 }
910 
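/*
 * Begin an ALTQ transaction: purge the old inactive ALTQ lists (removing
 * any disciplines still attached) and hand out a new inactive ticket.
 */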
911 static int
912 pf_begin_altq(u_int32_t *ticket)
913 {
914 	struct pf_altq	*altq, *tmp;
915 	int		 error = 0;
916 
917 	PF_RULES_WASSERT();
918 
919 	/* Purge the old altq lists */
920 	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
921 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
922 			/* detach and destroy the discipline */
923 			error = altq_remove(altq);
924 		}
925 		free(altq, M_PFALTQ);
926 	}
927 	TAILQ_INIT(V_pf_altq_ifs_inactive);
928 	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
929 		pf_qid_unref(altq->qid);
930 		free(altq, M_PFALTQ);
931 	}
932 	TAILQ_INIT(V_pf_altqs_inactive);
933 	if (error)
934 		return (error);
935 	*ticket = ++V_ticket_altqs_inactive;
936 	V_altqs_inactive_open = 1;
937 	return (0);
938 }
939 
940 static int
941 pf_rollback_altq(u_int32_t ticket)
942 {
943 	struct pf_altq	*altq, *tmp;
944 	int		 error = 0;
945 
946 	PF_RULES_WASSERT();
947 
948 	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
949 		return (0);
950 	/* Purge the old altq lists */
951 	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
952 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
953 			/* detach and destroy the discipline */
954 			error = altq_remove(altq);
955 		}
956 		free(altq, M_PFALTQ);
957 	}
958 	TAILQ_INIT(V_pf_altq_ifs_inactive);
959 	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
960 		pf_qid_unref(altq->qid);
961 		free(altq, M_PFALTQ);
962 	}
963 	TAILQ_INIT(V_pf_altqs_inactive);
964 	V_altqs_inactive_open = 0;
965 	return (error);
966 }
967 
968 static int
969 pf_commit_altq(u_int32_t ticket)
970 {
971 	struct pf_altqqueue	*old_altqs, *old_altq_ifs;
972 	struct pf_altq		*altq, *tmp;
973 	int			 err, error = 0;
974 
975 	PF_RULES_WASSERT();
976 
977 	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
978 		return (EBUSY);
979 
980 	/* swap altqs, keep the old. */
981 	old_altqs = V_pf_altqs_active;
982 	old_altq_ifs = V_pf_altq_ifs_active;
983 	V_pf_altqs_active = V_pf_altqs_inactive;
984 	V_pf_altq_ifs_active = V_pf_altq_ifs_inactive;
985 	V_pf_altqs_inactive = old_altqs;
986 	V_pf_altq_ifs_inactive = old_altq_ifs;
987 	V_ticket_altqs_active = V_ticket_altqs_inactive;
988 
989 	/* Attach new disciplines */
990 	TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
991 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
992 			/* attach the discipline */
993 			error = altq_pfattach(altq);
994 			if (error == 0 && V_pf_altq_running)
995 				error = pf_enable_altq(altq);
996 			if (error != 0)
997 				return (error);
998 		}
999 	}
1000 
1001 	/* Purge the old altq lists */
1002 	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
1003 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
1004 			/* detach and destroy the discipline */
1005 			if (V_pf_altq_running)
1006 				error = pf_disable_altq(altq);
1007 			err = altq_pfdetach(altq);
1008 			if (err != 0 && error == 0)
1009 				error = err;
1010 			err = altq_remove(altq);
1011 			if (err != 0 && error == 0)
1012 				error = err;
1013 		}
1014 		free(altq, M_PFALTQ);
1015 	}
1016 	TAILQ_INIT(V_pf_altq_ifs_inactive);
1017 	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
1018 		pf_qid_unref(altq->qid);
1019 		free(altq, M_PFALTQ);
1020 	}
1021 	TAILQ_INIT(V_pf_altqs_inactive);
1022 
1023 	V_altqs_inactive_open = 0;
1024 	return (error);
1025 }
1026 
1027 static int
1028 pf_enable_altq(struct pf_altq *altq)
1029 {
1030 	struct ifnet		*ifp;
1031 	struct tb_profile	 tb;
1032 	int			 error = 0;
1033 
1034 	if ((ifp = ifunit(altq->ifname)) == NULL)
1035 		return (EINVAL);
1036 
1037 	if (ifp->if_snd.altq_type != ALTQT_NONE)
1038 		error = altq_enable(&ifp->if_snd);
1039 
1040 	/* set tokenbucket regulator */
1041 	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
1042 		tb.rate = altq->ifbandwidth;
1043 		tb.depth = altq->tbrsize;
1044 		error = tbr_set(&ifp->if_snd, &tb);
1045 	}
1046 
1047 	return (error);
1048 }
1049 
1050 static int
1051 pf_disable_altq(struct pf_altq *altq)
1052 {
1053 	struct ifnet		*ifp;
1054 	struct tb_profile	 tb;
1055 	int			 error;
1056 
1057 	if ((ifp = ifunit(altq->ifname)) == NULL)
1058 		return (EINVAL);
1059 
1060 	/*
1061 	 * If the discipline is no longer referenced, it was overridden
1062 	 * by a new one; if so, just return.
1063 	 */
1064 	if (altq->altq_disc != ifp->if_snd.altq_disc)
1065 		return (0);
1066 
1067 	error = altq_disable(&ifp->if_snd);
1068 
1069 	if (error == 0) {
1070 		/* clear tokenbucket regulator */
1071 		tb.rate = 0;
1072 		error = tbr_set(&ifp->if_snd, &tb);
1073 	}
1074 
1075 	return (error);
1076 }
1077 
1078 static int
1079 pf_altq_ifnet_event_add(struct ifnet *ifp, int remove, u_int32_t ticket,
1080     struct pf_altq *altq)
1081 {
1082 	struct ifnet	*ifp1;
1083 	int		 error = 0;
1084 
1085 	/* Deactivate the interface in question */
1086 	altq->local_flags &= ~PFALTQ_FLAG_IF_REMOVED;
1087 	if ((ifp1 = ifunit(altq->ifname)) == NULL ||
1088 	    (remove && ifp1 == ifp)) {
1089 		altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
1090 	} else {
1091 		error = altq_add(ifp1, altq);
1092 
1093 		if (ticket != V_ticket_altqs_inactive)
1094 			error = EBUSY;
1095 
1096 		if (error)
1097 			free(altq, M_PFALTQ);
1098 	}
1099 
1100 	return (error);
1101 }
1102 
1103 void
1104 pf_altq_ifnet_event(struct ifnet *ifp, int remove)
1105 {
1106 	struct pf_altq	*a1, *a2, *a3;
1107 	u_int32_t	 ticket;
1108 	int		 error = 0;
1109 
1110 	/*
1111 	 * No need to re-evaluate the configuration for events on interfaces
1112 	 * that do not support ALTQ, as it's not possible for such
1113 	 * interfaces to be part of the configuration.
1114 	 */
1115 	if (!ALTQ_IS_READY(&ifp->if_snd))
1116 		return;
1117 
1118 	/* Interrupt userland queue modifications */
1119 	if (V_altqs_inactive_open)
1120 		pf_rollback_altq(V_ticket_altqs_inactive);
1121 
1122 	/* Start new altq ruleset */
1123 	if (pf_begin_altq(&ticket))
1124 		return;
1125 
1126 	/* Copy the current active set */
1127 	TAILQ_FOREACH(a1, V_pf_altq_ifs_active, entries) {
1128 		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
1129 		if (a2 == NULL) {
1130 			error = ENOMEM;
1131 			break;
1132 		}
1133 		bcopy(a1, a2, sizeof(struct pf_altq));
1134 
1135 		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
1136 		if (error)
1137 			break;
1138 
1139 		TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, a2, entries);
1140 	}
1141 	if (error)
1142 		goto out;
1143 	TAILQ_FOREACH(a1, V_pf_altqs_active, entries) {
1144 		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
1145 		if (a2 == NULL) {
1146 			error = ENOMEM;
1147 			break;
1148 		}
1149 		bcopy(a1, a2, sizeof(struct pf_altq));
1150 
1151 		if ((a2->qid = pf_qname2qid(a2->qname)) == 0) {
1152 			error = EBUSY;
1153 			free(a2, M_PFALTQ);
1154 			break;
1155 		}
1156 		a2->altq_disc = NULL;
1157 		TAILQ_FOREACH(a3, V_pf_altq_ifs_inactive, entries) {
1158 			if (strncmp(a3->ifname, a2->ifname,
1159 				IFNAMSIZ) == 0) {
1160 				a2->altq_disc = a3->altq_disc;
1161 				break;
1162 			}
1163 		}
1164 		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
1165 		if (error)
1166 			break;
1167 
1168 		TAILQ_INSERT_TAIL(V_pf_altqs_inactive, a2, entries);
1169 	}
1170 
1171 out:
1172 	if (error != 0)
1173 		pf_rollback_altq(ticket);
1174 	else
1175 		pf_commit_altq(ticket);
1176 }
1177 #endif /* ALTQ */
1178 
1179 static struct pf_krule_global *
1180 pf_rule_tree_alloc(int flags)
1181 {
1182 	struct pf_krule_global *tree;
1183 
1184 	tree = malloc(sizeof(struct pf_krule_global), M_TEMP, flags);
1185 	if (tree == NULL)
1186 		return (NULL);
1187 	RB_INIT(tree);
1188 	return (tree);
1189 }
1190 
1191 static void
1192 pf_rule_tree_free(struct pf_krule_global *tree)
1193 {
1194 
1195 	free(tree, M_TEMP);
1196 }
1197 
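/*
 * Begin a rules transaction: replace the inactive rule tree, flush any
 * leftover inactive rules and hand out a new ticket for the given ruleset.
 */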
1198 static int
1199 pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
1200 {
1201 	struct pf_krule_global *tree;
1202 	struct pf_kruleset	*rs;
1203 	struct pf_krule		*rule;
1204 
1205 	PF_RULES_WASSERT();
1206 
1207 	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1208 		return (EINVAL);
1209 	tree = pf_rule_tree_alloc(M_NOWAIT);
1210 	if (tree == NULL)
1211 		return (ENOMEM);
1212 	rs = pf_find_or_create_kruleset(anchor);
1213 	if (rs == NULL) {
1214 		free(tree, M_TEMP);
1215 		return (EINVAL);
1216 	}
1217 	pf_rule_tree_free(rs->rules[rs_num].inactive.tree);
1218 	rs->rules[rs_num].inactive.tree = tree;
1219 
1220 	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
1221 		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
1222 		rs->rules[rs_num].inactive.rcount--;
1223 	}
1224 	*ticket = ++rs->rules[rs_num].inactive.ticket;
1225 	rs->rules[rs_num].inactive.open = 1;
1226 	return (0);
1227 }
1228 
1229 static int
1230 pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
1231 {
1232 	struct pf_kruleset	*rs;
1233 	struct pf_krule		*rule;
1234 
1235 	PF_RULES_WASSERT();
1236 
1237 	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1238 		return (EINVAL);
1239 	rs = pf_find_kruleset(anchor);
1240 	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
1241 	    rs->rules[rs_num].inactive.ticket != ticket)
1242 		return (0);
1243 	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
1244 		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
1245 		rs->rules[rs_num].inactive.rcount--;
1246 	}
1247 	rs->rules[rs_num].inactive.open = 0;
1248 	return (0);
1249 }
1250 
1251 #define PF_MD5_UPD(st, elm)						\
1252 		MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))
1253 
1254 #define PF_MD5_UPD_STR(st, elm)						\
1255 		MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))
1256 
1257 #define PF_MD5_UPD_HTONL(st, elm, stor) do {				\
1258 		(stor) = htonl((st)->elm);				\
1259 		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
1260 } while (0)
1261 
1262 #define PF_MD5_UPD_HTONS(st, elm, stor) do {				\
1263 		(stor) = htons((st)->elm);				\
1264 		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
1265 } while (0)
1266 
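/*
 * Feed the match-relevant fields of a rule (and its addresses) into an MD5
 * context.  The resulting digests are used for the pfsync ruleset checksum
 * and to recognize identical rules when preserving counters on commit.
 */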
1267 static void
1268 pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
1269 {
1270 	PF_MD5_UPD(pfr, addr.type);
1271 	switch (pfr->addr.type) {
1272 		case PF_ADDR_DYNIFTL:
1273 			PF_MD5_UPD(pfr, addr.v.ifname);
1274 			PF_MD5_UPD(pfr, addr.iflags);
1275 			break;
1276 		case PF_ADDR_TABLE:
1277 			PF_MD5_UPD(pfr, addr.v.tblname);
1278 			break;
1279 		case PF_ADDR_ADDRMASK:
1280 			/* XXX ignore af? */
1281 			PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
1282 			PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
1283 			break;
1284 	}
1285 
1286 	PF_MD5_UPD(pfr, port[0]);
1287 	PF_MD5_UPD(pfr, port[1]);
1288 	PF_MD5_UPD(pfr, neg);
1289 	PF_MD5_UPD(pfr, port_op);
1290 }
1291 
1292 static void
1293 pf_hash_rule_rolling(MD5_CTX *ctx, struct pf_krule *rule)
1294 {
1295 	u_int16_t x;
1296 	u_int32_t y;
1297 
1298 	pf_hash_rule_addr(ctx, &rule->src);
1299 	pf_hash_rule_addr(ctx, &rule->dst);
1300 	for (int i = 0; i < PF_RULE_MAX_LABEL_COUNT; i++)
1301 		PF_MD5_UPD_STR(rule, label[i]);
1302 	PF_MD5_UPD_STR(rule, ifname);
1303 	PF_MD5_UPD_STR(rule, rcv_ifname);
1304 	PF_MD5_UPD_STR(rule, match_tagname);
1305 	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
1306 	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
1307 	PF_MD5_UPD_HTONL(rule, prob, y);
1308 	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
1309 	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
1310 	PF_MD5_UPD(rule, uid.op);
1311 	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
1312 	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
1313 	PF_MD5_UPD(rule, gid.op);
1314 	PF_MD5_UPD_HTONL(rule, rule_flag, y);
1315 	PF_MD5_UPD(rule, action);
1316 	PF_MD5_UPD(rule, direction);
1317 	PF_MD5_UPD(rule, af);
1318 	PF_MD5_UPD(rule, quick);
1319 	PF_MD5_UPD(rule, ifnot);
1320 	PF_MD5_UPD(rule, rcvifnot);
1321 	PF_MD5_UPD(rule, match_tag_not);
1322 	PF_MD5_UPD(rule, natpass);
1323 	PF_MD5_UPD(rule, keep_state);
1324 	PF_MD5_UPD(rule, proto);
1325 	PF_MD5_UPD(rule, type);
1326 	PF_MD5_UPD(rule, code);
1327 	PF_MD5_UPD(rule, flags);
1328 	PF_MD5_UPD(rule, flagset);
1329 	PF_MD5_UPD(rule, allow_opts);
1330 	PF_MD5_UPD(rule, rt);
1331 	PF_MD5_UPD(rule, tos);
1332 	PF_MD5_UPD(rule, scrub_flags);
1333 	PF_MD5_UPD(rule, min_ttl);
1334 	PF_MD5_UPD(rule, set_tos);
1335 	if (rule->anchor != NULL)
1336 		PF_MD5_UPD_STR(rule, anchor->path);
1337 }
1338 
1339 static void
1340 pf_hash_rule(struct pf_krule *rule)
1341 {
1342 	MD5_CTX		ctx;
1343 
1344 	MD5Init(&ctx);
1345 	pf_hash_rule_rolling(&ctx, rule);
1346 	MD5Final(rule->md5sum, &ctx);
1347 }
1348 
1349 static int
1350 pf_krule_compare(struct pf_krule *a, struct pf_krule *b)
1351 {
1352 
1353 	return (memcmp(a->md5sum, b->md5sum, PF_MD5_DIGEST_LENGTH));
1354 }
1355 
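/*
 * Commit a rules transaction: swap the inactive and active rulesets, roll
 * old counters over into matching new rules when keep_counters is set,
 * recompute skip steps and purge the now-inactive old rules.
 */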
1356 static int
1357 pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
1358 {
1359 	struct pf_kruleset	*rs;
1360 	struct pf_krule		*rule, **old_array, *old_rule;
1361 	struct pf_krulequeue	*old_rules;
1362 	struct pf_krule_global  *old_tree;
1363 	int			 error;
1364 	u_int32_t		 old_rcount;
1365 
1366 	PF_RULES_WASSERT();
1367 
1368 	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1369 		return (EINVAL);
1370 	rs = pf_find_kruleset(anchor);
1371 	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
1372 	    ticket != rs->rules[rs_num].inactive.ticket)
1373 		return (EBUSY);
1374 
1375 	/* Calculate checksum for the main ruleset */
1376 	if (rs == &pf_main_ruleset) {
1377 		error = pf_setup_pfsync_matching(rs);
1378 		if (error != 0)
1379 			return (error);
1380 	}
1381 
1382 	/* Swap rules, keep the old. */
1383 	old_rules = rs->rules[rs_num].active.ptr;
1384 	old_rcount = rs->rules[rs_num].active.rcount;
1385 	old_array = rs->rules[rs_num].active.ptr_array;
1386 	old_tree = rs->rules[rs_num].active.tree;
1387 
1388 	rs->rules[rs_num].active.ptr =
1389 	    rs->rules[rs_num].inactive.ptr;
1390 	rs->rules[rs_num].active.ptr_array =
1391 	    rs->rules[rs_num].inactive.ptr_array;
1392 	rs->rules[rs_num].active.tree =
1393 	    rs->rules[rs_num].inactive.tree;
1394 	rs->rules[rs_num].active.rcount =
1395 	    rs->rules[rs_num].inactive.rcount;
1396 
1397 	/* Attempt to preserve counter information. */
1398 	if (V_pf_status.keep_counters && old_tree != NULL) {
1399 		TAILQ_FOREACH(rule, rs->rules[rs_num].active.ptr,
1400 		    entries) {
1401 			old_rule = RB_FIND(pf_krule_global, old_tree, rule);
1402 			if (old_rule == NULL) {
1403 				continue;
1404 			}
1405 			pf_counter_u64_critical_enter();
1406 			pf_counter_u64_rollup_protected(&rule->evaluations,
1407 			    pf_counter_u64_fetch(&old_rule->evaluations));
1408 			pf_counter_u64_rollup_protected(&rule->packets[0],
1409 			    pf_counter_u64_fetch(&old_rule->packets[0]));
1410 			pf_counter_u64_rollup_protected(&rule->packets[1],
1411 			    pf_counter_u64_fetch(&old_rule->packets[1]));
1412 			pf_counter_u64_rollup_protected(&rule->bytes[0],
1413 			    pf_counter_u64_fetch(&old_rule->bytes[0]));
1414 			pf_counter_u64_rollup_protected(&rule->bytes[1],
1415 			    pf_counter_u64_fetch(&old_rule->bytes[1]));
1416 			pf_counter_u64_critical_exit();
1417 		}
1418 	}
1419 
1420 	rs->rules[rs_num].inactive.ptr = old_rules;
1421 	rs->rules[rs_num].inactive.ptr_array = old_array;
1422 	rs->rules[rs_num].inactive.tree = NULL; /* important for pf_ioctl_addrule */
1423 	rs->rules[rs_num].inactive.rcount = old_rcount;
1424 
1425 	rs->rules[rs_num].active.ticket =
1426 	    rs->rules[rs_num].inactive.ticket;
1427 	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);
1428 
1429 	/* Purge the old rule list. */
1430 	PF_UNLNKDRULES_LOCK();
1431 	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
1432 		pf_unlink_rule_locked(old_rules, rule);
1433 	PF_UNLNKDRULES_UNLOCK();
1434 	if (rs->rules[rs_num].inactive.ptr_array)
1435 		free(rs->rules[rs_num].inactive.ptr_array, M_TEMP);
1436 	rs->rules[rs_num].inactive.ptr_array = NULL;
1437 	rs->rules[rs_num].inactive.rcount = 0;
1438 	rs->rules[rs_num].inactive.open = 0;
1439 	pf_remove_if_empty_kruleset(rs);
1440 	free(old_tree, M_TEMP);
1441 
1442 	return (0);
1443 }
1444 
1445 static int
1446 pf_setup_pfsync_matching(struct pf_kruleset *rs)
1447 {
1448 	MD5_CTX			 ctx;
1449 	struct pf_krule		*rule;
1450 	int			 rs_cnt;
1451 	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];
1452 
1453 	MD5Init(&ctx);
1454 	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
1455 		/* XXX PF_RULESET_SCRUB as well? */
1456 		if (rs_cnt == PF_RULESET_SCRUB)
1457 			continue;
1458 
1459 		if (rs->rules[rs_cnt].inactive.ptr_array)
1460 			free(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP);
1461 		rs->rules[rs_cnt].inactive.ptr_array = NULL;
1462 
1463 		if (rs->rules[rs_cnt].inactive.rcount) {
1464 			rs->rules[rs_cnt].inactive.ptr_array =
1465 			    mallocarray(rs->rules[rs_cnt].inactive.rcount,
1466 			    sizeof(struct pf_rule **),
1467 			    M_TEMP, M_NOWAIT);
1468 
1469 			if (!rs->rules[rs_cnt].inactive.ptr_array)
1470 				return (ENOMEM);
1471 		}
1472 
1473 		TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
1474 		    entries) {
1475 			pf_hash_rule_rolling(&ctx, rule);
1476 			(rs->rules[rs_cnt].inactive.ptr_array)[rule->nr] = rule;
1477 		}
1478 	}
1479 
1480 	MD5Final(digest, &ctx);
1481 	memcpy(V_pf_status.pf_chksum, digest, sizeof(V_pf_status.pf_chksum));
1482 	return (0);
1483 }
1484 
1485 static int
1486 pf_eth_addr_setup(struct pf_keth_ruleset *ruleset, struct pf_addr_wrap *addr)
1487 {
1488 	int error = 0;
1489 
1490 	switch (addr->type) {
1491 	case PF_ADDR_TABLE:
1492 		addr->p.tbl = pfr_eth_attach_table(ruleset, addr->v.tblname);
1493 		if (addr->p.tbl == NULL)
1494 			error = ENOMEM;
1495 		break;
1496 	default:
1497 		error = EINVAL;
1498 	}
1499 
1500 	return (error);
1501 }
1502 
1503 static int
1504 pf_addr_setup(struct pf_kruleset *ruleset, struct pf_addr_wrap *addr,
1505     sa_family_t af)
1506 {
1507 	int error = 0;
1508 
1509 	switch (addr->type) {
1510 	case PF_ADDR_TABLE:
1511 		addr->p.tbl = pfr_attach_table(ruleset, addr->v.tblname);
1512 		if (addr->p.tbl == NULL)
1513 			error = ENOMEM;
1514 		break;
1515 	case PF_ADDR_DYNIFTL:
1516 		error = pfi_dynaddr_setup(addr, af);
1517 		break;
1518 	}
1519 
1520 	return (error);
1521 }
1522 
1523 void
1524 pf_addr_copyout(struct pf_addr_wrap *addr)
1525 {
1526 
1527 	switch (addr->type) {
1528 	case PF_ADDR_DYNIFTL:
1529 		pfi_dynaddr_copyout(addr);
1530 		break;
1531 	case PF_ADDR_TABLE:
1532 		pf_tbladdr_copyout(addr);
1533 		break;
1534 	}
1535 }
1536 
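/*
 * Copy a kernel source node into its userland representation, converting
 * per-CPU counters and absolute timestamps into values relative to the
 * current uptime.
 */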
1537 static void
1538 pf_src_node_copy(const struct pf_ksrc_node *in, struct pf_src_node *out)
1539 {
1540 	int	secs = time_uptime;
1541 
1542 	bzero(out, sizeof(struct pf_src_node));
1543 
1544 	bcopy(&in->addr, &out->addr, sizeof(struct pf_addr));
1545 	bcopy(&in->raddr, &out->raddr, sizeof(struct pf_addr));
1546 
1547 	if (in->rule != NULL)
1548 		out->rule.nr = in->rule->nr;
1549 
1550 	for (int i = 0; i < 2; i++) {
1551 		out->bytes[i] = counter_u64_fetch(in->bytes[i]);
1552 		out->packets[i] = counter_u64_fetch(in->packets[i]);
1553 	}
1554 
1555 	out->states = in->states;
1556 	out->conn = in->conn;
1557 	out->af = in->af;
1558 	out->ruletype = in->ruletype;
1559 
1560 	out->creation = secs - in->creation;
1561 	if (out->expire > secs)
1562 		out->expire -= secs;
1563 	else
1564 		out->expire = 0;
1565 
1566 	/* Adjust the connection rate estimate. */
1567 	out->conn_rate.limit = in->conn_rate.limit;
1568 	out->conn_rate.seconds = in->conn_rate.seconds;
1569 	/* If there's no limit there's no counter_rate. */
1570 	if (in->conn_rate.cr != NULL)
1571 		out->conn_rate.count = counter_rate_get(in->conn_rate.cr);
1572 }
1573 
1574 #ifdef ALTQ
1575 /*
1576  * Handle export of struct pf_kaltq to user binaries that may be using any
1577  * version of struct pf_altq.
1578  */
1579 static int
1580 pf_export_kaltq(struct pf_altq *q, struct pfioc_altq_v1 *pa, size_t ioc_size)
1581 {
1582 	u_int32_t version;
1583 
1584 	if (ioc_size == sizeof(struct pfioc_altq_v0))
1585 		version = 0;
1586 	else
1587 		version = pa->version;
1588 
1589 	if (version > PFIOC_ALTQ_VERSION)
1590 		return (EINVAL);
1591 
1592 #define ASSIGN(x) exported_q->x = q->x
1593 #define COPY(x) \
1594 	bcopy(&q->x, &exported_q->x, min(sizeof(q->x), sizeof(exported_q->x)))
1595 #define SATU16(x) (u_int32_t)uqmin((x), USHRT_MAX)
1596 #define SATU32(x) (u_int32_t)uqmin((x), UINT_MAX)
1597 
1598 	switch (version) {
1599 	case 0: {
1600 		struct pf_altq_v0 *exported_q =
1601 		    &((struct pfioc_altq_v0 *)pa)->altq;
1602 
1603 		COPY(ifname);
1604 
1605 		ASSIGN(scheduler);
1606 		ASSIGN(tbrsize);
1607 		exported_q->tbrsize = SATU16(q->tbrsize);
1608 		exported_q->ifbandwidth = SATU32(q->ifbandwidth);
1609 
1610 		COPY(qname);
1611 		COPY(parent);
1612 		ASSIGN(parent_qid);
1613 		exported_q->bandwidth = SATU32(q->bandwidth);
1614 		ASSIGN(priority);
1615 		ASSIGN(local_flags);
1616 
1617 		ASSIGN(qlimit);
1618 		ASSIGN(flags);
1619 
1620 		if (q->scheduler == ALTQT_HFSC) {
1621 #define ASSIGN_OPT(x) exported_q->pq_u.hfsc_opts.x = q->pq_u.hfsc_opts.x
1622 #define ASSIGN_OPT_SATU32(x) exported_q->pq_u.hfsc_opts.x = \
1623 			    SATU32(q->pq_u.hfsc_opts.x)
1624 
1625 			ASSIGN_OPT_SATU32(rtsc_m1);
1626 			ASSIGN_OPT(rtsc_d);
1627 			ASSIGN_OPT_SATU32(rtsc_m2);
1628 
1629 			ASSIGN_OPT_SATU32(lssc_m1);
1630 			ASSIGN_OPT(lssc_d);
1631 			ASSIGN_OPT_SATU32(lssc_m2);
1632 
1633 			ASSIGN_OPT_SATU32(ulsc_m1);
1634 			ASSIGN_OPT(ulsc_d);
1635 			ASSIGN_OPT_SATU32(ulsc_m2);
1636 
1637 			ASSIGN_OPT(flags);
1638 
1639 #undef ASSIGN_OPT
1640 #undef ASSIGN_OPT_SATU32
1641 		} else
1642 			COPY(pq_u);
1643 
1644 		ASSIGN(qid);
1645 		break;
1646 	}
1647 	case 1:	{
1648 		struct pf_altq_v1 *exported_q =
1649 		    &((struct pfioc_altq_v1 *)pa)->altq;
1650 
1651 		COPY(ifname);
1652 
1653 		ASSIGN(scheduler);
1654 		ASSIGN(tbrsize);
1655 		ASSIGN(ifbandwidth);
1656 
1657 		COPY(qname);
1658 		COPY(parent);
1659 		ASSIGN(parent_qid);
1660 		ASSIGN(bandwidth);
1661 		ASSIGN(priority);
1662 		ASSIGN(local_flags);
1663 
1664 		ASSIGN(qlimit);
1665 		ASSIGN(flags);
1666 		COPY(pq_u);
1667 
1668 		ASSIGN(qid);
1669 		break;
1670 	}
1671 	default:
1672 		panic("%s: unhandled struct pfioc_altq version", __func__);
1673 		break;
1674 	}
1675 
1676 #undef ASSIGN
1677 #undef COPY
1678 #undef SATU16
1679 #undef SATU32
1680 
1681 	return (0);
1682 }
1683 
1684 /*
1685  * Handle import to struct pf_kaltq of struct pf_altq from user binaries
1686  * that may be using any version of it.
1687  */
1688 static int
1689 pf_import_kaltq(struct pfioc_altq_v1 *pa, struct pf_altq *q, size_t ioc_size)
1690 {
1691 	u_int32_t version;
1692 
1693 	if (ioc_size == sizeof(struct pfioc_altq_v0))
1694 		version = 0;
1695 	else
1696 		version = pa->version;
1697 
1698 	if (version > PFIOC_ALTQ_VERSION)
1699 		return (EINVAL);
1700 
1701 #define ASSIGN(x) q->x = imported_q->x
1702 #define COPY(x) \
1703 	bcopy(&imported_q->x, &q->x, min(sizeof(imported_q->x), sizeof(q->x)))
1704 
1705 	switch (version) {
1706 	case 0: {
1707 		struct pf_altq_v0 *imported_q =
1708 		    &((struct pfioc_altq_v0 *)pa)->altq;
1709 
1710 		COPY(ifname);
1711 
1712 		ASSIGN(scheduler);
1713 		ASSIGN(tbrsize); /* 16-bit -> 32-bit */
1714 		ASSIGN(ifbandwidth); /* 32-bit -> 64-bit */
1715 
1716 		COPY(qname);
1717 		COPY(parent);
1718 		ASSIGN(parent_qid);
1719 		ASSIGN(bandwidth); /* 32-bit -> 64-bit */
1720 		ASSIGN(priority);
1721 		ASSIGN(local_flags);
1722 
1723 		ASSIGN(qlimit);
1724 		ASSIGN(flags);
1725 
1726 		if (imported_q->scheduler == ALTQT_HFSC) {
1727 #define ASSIGN_OPT(x) q->pq_u.hfsc_opts.x = imported_q->pq_u.hfsc_opts.x
1728 
1729 			/*
1730 			 * The m1 and m2 parameters are being copied from
1731 			 * 32-bit to 64-bit.
1732 			 */
1733 			ASSIGN_OPT(rtsc_m1);
1734 			ASSIGN_OPT(rtsc_d);
1735 			ASSIGN_OPT(rtsc_m2);
1736 
1737 			ASSIGN_OPT(lssc_m1);
1738 			ASSIGN_OPT(lssc_d);
1739 			ASSIGN_OPT(lssc_m2);
1740 
1741 			ASSIGN_OPT(ulsc_m1);
1742 			ASSIGN_OPT(ulsc_d);
1743 			ASSIGN_OPT(ulsc_m2);
1744 
1745 			ASSIGN_OPT(flags);
1746 
1747 #undef ASSIGN_OPT
1748 		} else
1749 			COPY(pq_u);
1750 
1751 		ASSIGN(qid);
1752 		break;
1753 	}
1754 	case 1: {
1755 		struct pf_altq_v1 *imported_q =
1756 		    &((struct pfioc_altq_v1 *)pa)->altq;
1757 
1758 		COPY(ifname);
1759 
1760 		ASSIGN(scheduler);
1761 		ASSIGN(tbrsize);
1762 		ASSIGN(ifbandwidth);
1763 
1764 		COPY(qname);
1765 		COPY(parent);
1766 		ASSIGN(parent_qid);
1767 		ASSIGN(bandwidth);
1768 		ASSIGN(priority);
1769 		ASSIGN(local_flags);
1770 
1771 		ASSIGN(qlimit);
1772 		ASSIGN(flags);
1773 		COPY(pq_u);
1774 
1775 		ASSIGN(qid);
1776 		break;
1777 	}
1778 	default:
1779 		panic("%s: unhandled struct pfioc_altq version", __func__);
1780 		break;
1781 	}
1782 
1783 #undef ASSIGN
1784 #undef COPY
1785 
1786 	return (0);
1787 }
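
/*
 * Usage sketch (an illustration, not taken from this file): the ALTQ ioctls
 * exist in two ABI revisions.  DIOCGETALTQV0 passes struct pfioc_altq_v0,
 * which has no version field and only 32-bit bandwidth members, while
 * DIOCGETALTQV1 passes struct pfioc_altq_v1 and carries pa->version.
 * pf_export_kaltq() and pf_import_kaltq() translate between those layouts
 * and the kernel's struct pf_altq, saturating 64-bit values into the
 * narrower v0 fields on export.  A v1 caller would look roughly like what
 * pfctl does (details may differ between releases):
 *
 *	struct pfioc_altq_v1 pa;
 *	u_int32_t mnr, nr;
 *
 *	memset(&pa, 0, sizeof(pa));
 *	pa.version = PFIOC_ALTQ_VERSION;
 *	if (ioctl(dev, DIOCGETALTQSV1, &pa) == -1)
 *		err(1, "DIOCGETALTQSV1");
 *	mnr = pa.nr;
 *	for (nr = 0; nr < mnr; nr++) {
 *		pa.nr = nr;
 *		if (ioctl(dev, DIOCGETALTQV1, &pa) == -1)
 *			err(1, "DIOCGETALTQV1");
 *		... pa.altq now holds the nr'th queue ...
 *	}
 */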
1788 
1789 static struct pf_altq *
1790 pf_altq_get_nth_active(u_int32_t n)
1791 {
1792 	struct pf_altq		*altq;
1793 	u_int32_t		 nr;
1794 
1795 	nr = 0;
1796 	TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
1797 		if (nr == n)
1798 			return (altq);
1799 		nr++;
1800 	}
1801 
1802 	TAILQ_FOREACH(altq, V_pf_altqs_active, entries) {
1803 		if (nr == n)
1804 			return (altq);
1805 		nr++;
1806 	}
1807 
1808 	return (NULL);
1809 }
1810 #endif /* ALTQ */
1811 
1812 struct pf_krule *
1813 pf_krule_alloc(void)
1814 {
1815 	struct pf_krule *rule;
1816 
1817 	rule = malloc(sizeof(struct pf_krule), M_PFRULE, M_WAITOK | M_ZERO);
1818 	mtx_init(&rule->nat.mtx, "pf_krule_nat_pool", NULL, MTX_DEF);
1819 	mtx_init(&rule->rdr.mtx, "pf_krule_rdr_pool", NULL, MTX_DEF);
1820 	mtx_init(&rule->route.mtx, "pf_krule_route_pool", NULL, MTX_DEF);
1821 	rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
1822 	    M_WAITOK | M_ZERO);
1823 	return (rule);
1824 }
1825 
1826 void
1827 pf_krule_free(struct pf_krule *rule)
1828 {
1829 #ifdef PF_WANT_32_TO_64_COUNTER
1830 	bool wowned;
1831 #endif
1832 
1833 	if (rule == NULL)
1834 		return;
1835 
1836 #ifdef PF_WANT_32_TO_64_COUNTER
1837 	if (rule->allrulelinked) {
1838 		wowned = PF_RULES_WOWNED();
1839 		if (!wowned)
1840 			PF_RULES_WLOCK();
1841 		LIST_REMOVE(rule, allrulelist);
1842 		V_pf_allrulecount--;
1843 		if (!wowned)
1844 			PF_RULES_WUNLOCK();
1845 	}
1846 #endif
1847 
1848 	pf_counter_u64_deinit(&rule->evaluations);
1849 	for (int i = 0; i < 2; i++) {
1850 		pf_counter_u64_deinit(&rule->packets[i]);
1851 		pf_counter_u64_deinit(&rule->bytes[i]);
1852 	}
1853 	counter_u64_free(rule->states_cur);
1854 	counter_u64_free(rule->states_tot);
1855 	for (pf_sn_types_t sn_type = 0; sn_type < PF_SN_MAX; sn_type++)
1856 		counter_u64_free(rule->src_nodes[sn_type]);
1857 	uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp);
1858 
1859 	mtx_destroy(&rule->nat.mtx);
1860 	mtx_destroy(&rule->rdr.mtx);
1861 	mtx_destroy(&rule->route.mtx);
1862 	free(rule, M_PFRULE);
1863 }
1864 
1865 void
1866 pf_krule_clear_counters(struct pf_krule *rule)
1867 {
1868 	pf_counter_u64_zero(&rule->evaluations);
1869 	for (int i = 0; i < 2; i++) {
1870 		pf_counter_u64_zero(&rule->packets[i]);
1871 		pf_counter_u64_zero(&rule->bytes[i]);
1872 	}
1873 	counter_u64_zero(rule->states_tot);
1874 }
1875 
1876 static void
1877 pf_kpooladdr_to_pooladdr(const struct pf_kpooladdr *kpool,
1878     struct pf_pooladdr *pool)
1879 {
1880 
1881 	bzero(pool, sizeof(*pool));
1882 	bcopy(&kpool->addr, &pool->addr, sizeof(pool->addr));
1883 	strlcpy(pool->ifname, kpool->ifname, sizeof(pool->ifname));
1884 }
1885 
1886 static int
1887 pf_pooladdr_to_kpooladdr(const struct pf_pooladdr *pool,
1888     struct pf_kpooladdr *kpool)
1889 {
1890 	int ret;
1891 
1892 	bzero(kpool, sizeof(*kpool));
1893 	bcopy(&pool->addr, &kpool->addr, sizeof(kpool->addr));
1894 	ret = pf_user_strcpy(kpool->ifname, pool->ifname,
1895 	    sizeof(kpool->ifname));
1896 	return (ret);
1897 }
1898 
1899 static void
1900 pf_pool_to_kpool(const struct pf_pool *pool, struct pf_kpool *kpool)
1901 {
1902 	_Static_assert(sizeof(pool->key) == sizeof(kpool->key), "");
1903 	_Static_assert(sizeof(pool->counter) == sizeof(kpool->counter), "");
1904 
1905 	bcopy(&pool->key, &kpool->key, sizeof(kpool->key));
1906 	bcopy(&pool->counter, &kpool->counter, sizeof(kpool->counter));
1907 
1908 	kpool->tblidx = pool->tblidx;
1909 	kpool->proxy_port[0] = pool->proxy_port[0];
1910 	kpool->proxy_port[1] = pool->proxy_port[1];
1911 	kpool->opts = pool->opts;
1912 }
1913 
1914 static int
1915 pf_rule_to_krule(const struct pf_rule *rule, struct pf_krule *krule)
1916 {
1917 	int ret;
1918 
1919 #ifndef INET
1920 	if (rule->af == AF_INET) {
1921 		return (EAFNOSUPPORT);
1922 	}
1923 #endif /* INET */
1924 #ifndef INET6
1925 	if (rule->af == AF_INET6) {
1926 		return (EAFNOSUPPORT);
1927 	}
1928 #endif /* INET6 */
1929 
1930 	ret = pf_check_rule_addr(&rule->src);
1931 	if (ret != 0)
1932 		return (ret);
1933 	ret = pf_check_rule_addr(&rule->dst);
1934 	if (ret != 0)
1935 		return (ret);
1936 
1937 	bcopy(&rule->src, &krule->src, sizeof(rule->src));
1938 	bcopy(&rule->dst, &krule->dst, sizeof(rule->dst));
1939 
1940 	ret = pf_user_strcpy(krule->label[0], rule->label, sizeof(rule->label));
1941 	if (ret != 0)
1942 		return (ret);
1943 	ret = pf_user_strcpy(krule->ifname, rule->ifname, sizeof(rule->ifname));
1944 	if (ret != 0)
1945 		return (ret);
1946 	ret = pf_user_strcpy(krule->qname, rule->qname, sizeof(rule->qname));
1947 	if (ret != 0)
1948 		return (ret);
1949 	ret = pf_user_strcpy(krule->pqname, rule->pqname, sizeof(rule->pqname));
1950 	if (ret != 0)
1951 		return (ret);
1952 	ret = pf_user_strcpy(krule->tagname, rule->tagname,
1953 	    sizeof(rule->tagname));
1954 	if (ret != 0)
1955 		return (ret);
1956 	ret = pf_user_strcpy(krule->match_tagname, rule->match_tagname,
1957 	    sizeof(rule->match_tagname));
1958 	if (ret != 0)
1959 		return (ret);
1960 	ret = pf_user_strcpy(krule->overload_tblname, rule->overload_tblname,
1961 	    sizeof(rule->overload_tblname));
1962 	if (ret != 0)
1963 		return (ret);
1964 
1965 	pf_pool_to_kpool(&rule->rpool, &krule->rdr);
1966 
1967 	/* Don't allow userspace to set evaluations, packets or bytes. */
1968 	/* kif, anchor, overload_tbl are not copied over. */
1969 
1970 	krule->os_fingerprint = rule->os_fingerprint;
1971 
1972 	krule->rtableid = rule->rtableid;
1973 	/* pf_rule->timeout is smaller than pf_krule->timeout */
1974 	bcopy(rule->timeout, krule->timeout, sizeof(rule->timeout));
1975 	krule->max_states = rule->max_states;
1976 	krule->max_src_nodes = rule->max_src_nodes;
1977 	krule->max_src_states = rule->max_src_states;
1978 	krule->max_src_conn = rule->max_src_conn;
1979 	krule->max_src_conn_rate.limit = rule->max_src_conn_rate.limit;
1980 	krule->max_src_conn_rate.seconds = rule->max_src_conn_rate.seconds;
1981 	krule->qid = rule->qid;
1982 	krule->pqid = rule->pqid;
1983 	krule->nr = rule->nr;
1984 	krule->prob = rule->prob;
1985 	krule->cuid = rule->cuid;
1986 	krule->cpid = rule->cpid;
1987 
1988 	krule->return_icmp = rule->return_icmp;
1989 	krule->return_icmp6 = rule->return_icmp6;
1990 	krule->max_mss = rule->max_mss;
1991 	krule->tag = rule->tag;
1992 	krule->match_tag = rule->match_tag;
1993 	krule->scrub_flags = rule->scrub_flags;
1994 
1995 	bcopy(&rule->uid, &krule->uid, sizeof(krule->uid));
1996 	bcopy(&rule->gid, &krule->gid, sizeof(krule->gid));
1997 
1998 	krule->rule_flag = rule->rule_flag;
1999 	krule->action = rule->action;
2000 	krule->direction = rule->direction;
2001 	krule->log = rule->log;
2002 	krule->logif = rule->logif;
2003 	krule->quick = rule->quick;
2004 	krule->ifnot = rule->ifnot;
2005 	krule->match_tag_not = rule->match_tag_not;
2006 	krule->natpass = rule->natpass;
2007 
2008 	krule->keep_state = rule->keep_state;
2009 	krule->af = rule->af;
2010 	krule->proto = rule->proto;
2011 	krule->type = rule->type;
2012 	krule->code = rule->code;
2013 	krule->flags = rule->flags;
2014 	krule->flagset = rule->flagset;
2015 	krule->min_ttl = rule->min_ttl;
2016 	krule->allow_opts = rule->allow_opts;
2017 	krule->rt = rule->rt;
2018 	krule->return_ttl = rule->return_ttl;
2019 	krule->tos = rule->tos;
2020 	krule->set_tos = rule->set_tos;
2021 
2022 	krule->flush = rule->flush;
2023 	krule->prio = rule->prio;
2024 	krule->set_prio[0] = rule->set_prio[0];
2025 	krule->set_prio[1] = rule->set_prio[1];
2026 
2027 	bcopy(&rule->divert, &krule->divert, sizeof(krule->divert));
2028 
2029 	return (0);
2030 }
2031 
2032 int
2033 pf_ioctl_getrules(struct pfioc_rule *pr)
2034 {
2035 	struct pf_kruleset	*ruleset;
2036 	struct pf_krule		*tail;
2037 	int			 rs_num;
2038 
2039 	PF_RULES_WLOCK();
2040 	ruleset = pf_find_kruleset(pr->anchor);
2041 	if (ruleset == NULL) {
2042 		PF_RULES_WUNLOCK();
2043 		return (EINVAL);
2044 	}
2045 	rs_num = pf_get_ruleset_number(pr->rule.action);
2046 	if (rs_num >= PF_RULESET_MAX) {
2047 		PF_RULES_WUNLOCK();
2048 		return (EINVAL);
2049 	}
2050 	tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
2051 	    pf_krulequeue);
2052 	if (tail)
2053 		pr->nr = tail->nr + 1;
2054 	else
2055 		pr->nr = 0;
2056 	pr->ticket = ruleset->rules[rs_num].active.ticket;
2057 	PF_RULES_WUNLOCK();
2058 
2059 	return (0);
2060 }
2061 
2062 int
2063 pf_ioctl_addrule(struct pf_krule *rule, uint32_t ticket,
2064     uint32_t pool_ticket, const char *anchor, const char *anchor_call,
2065     uid_t uid, pid_t pid)
2066 {
2067 	struct pf_kruleset	*ruleset;
2068 	struct pf_krule		*tail;
2069 	struct pf_kpooladdr	*pa;
2070 	struct pfi_kkif		*kif = NULL, *rcv_kif = NULL;
2071 	int			 rs_num;
2072 	int			 error = 0;
2073 
2074 	if ((rule->return_icmp >> 8) > ICMP_MAXTYPE) {
2075 		error = EINVAL;
2076 		goto errout_unlocked;
2077 	}
2078 
2079 #define	ERROUT(x)	ERROUT_FUNCTION(errout, x)
2080 
2081 	if (rule->ifname[0])
2082 		kif = pf_kkif_create(M_WAITOK);
2083 	if (rule->rcv_ifname[0])
2084 		rcv_kif = pf_kkif_create(M_WAITOK);
2085 	pf_counter_u64_init(&rule->evaluations, M_WAITOK);
2086 	for (int i = 0; i < 2; i++) {
2087 		pf_counter_u64_init(&rule->packets[i], M_WAITOK);
2088 		pf_counter_u64_init(&rule->bytes[i], M_WAITOK);
2089 	}
2090 	rule->states_cur = counter_u64_alloc(M_WAITOK);
2091 	rule->states_tot = counter_u64_alloc(M_WAITOK);
2092 	for (pf_sn_types_t sn_type = 0; sn_type < PF_SN_MAX; sn_type++)
2093 		rule->src_nodes[sn_type] = counter_u64_alloc(M_WAITOK);
2094 	rule->cuid = uid;
2095 	rule->cpid = pid;
2096 	TAILQ_INIT(&rule->rdr.list);
2097 	TAILQ_INIT(&rule->nat.list);
2098 	TAILQ_INIT(&rule->route.list);
2099 
2100 	PF_CONFIG_LOCK();
2101 	PF_RULES_WLOCK();
2102 #ifdef PF_WANT_32_TO_64_COUNTER
2103 	LIST_INSERT_HEAD(&V_pf_allrulelist, rule, allrulelist);
2104 	MPASS(!rule->allrulelinked);
2105 	rule->allrulelinked = true;
2106 	V_pf_allrulecount++;
2107 #endif
2108 	ruleset = pf_find_kruleset(anchor);
2109 	if (ruleset == NULL)
2110 		ERROUT(EINVAL);
2111 	rs_num = pf_get_ruleset_number(rule->action);
2112 	if (rs_num >= PF_RULESET_MAX)
2113 		ERROUT(EINVAL);
2114 	if (ticket != ruleset->rules[rs_num].inactive.ticket) {
2115 		DPFPRINTF(PF_DEBUG_MISC,
2116 		    ("ticket: %d != [%d]%d\n", ticket, rs_num,
2117 		    ruleset->rules[rs_num].inactive.ticket));
2118 		ERROUT(EBUSY);
2119 	}
2120 	if (pool_ticket != V_ticket_pabuf) {
2121 		DPFPRINTF(PF_DEBUG_MISC,
2122 		    ("pool_ticket: %d != %d\n", pool_ticket,
2123 		    V_ticket_pabuf));
2124 		ERROUT(EBUSY);
2125 	}
2126 	/*
2127 	 * XXXMJG hack: there is no mechanism to ensure the caller started the
2128 	 * transaction. The ticket checked above may happen to match by
2129 	 * accident, even if nobody called DIOCXBEGIN, let alone this process.
2130 	 * Partially work around it by checking whether the RB tree got
2131 	 * allocated, see pf_begin_rules.
2132 	 */
2133 	if (ruleset->rules[rs_num].inactive.tree == NULL) {
2134 		ERROUT(EINVAL);
2135 	}
2136 
2137 	tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
2138 	    pf_krulequeue);
2139 	if (tail)
2140 		rule->nr = tail->nr + 1;
2141 	else
2142 		rule->nr = 0;
2143 	if (rule->ifname[0]) {
2144 		rule->kif = pfi_kkif_attach(kif, rule->ifname);
2145 		kif = NULL;
2146 		pfi_kkif_ref(rule->kif);
2147 	} else
2148 		rule->kif = NULL;
2149 
2150 	if (rule->rcv_ifname[0]) {
2151 		rule->rcv_kif = pfi_kkif_attach(rcv_kif, rule->rcv_ifname);
2152 		rcv_kif = NULL;
2153 		pfi_kkif_ref(rule->rcv_kif);
2154 	} else
2155 		rule->rcv_kif = NULL;
2156 
2157 	if (rule->rtableid > 0 && rule->rtableid >= rt_numfibs)
2158 		error = EBUSY;
2159 #ifdef ALTQ
2160 	/* set queue IDs */
2161 	if (rule->qname[0] != 0) {
2162 		if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
2163 			error = EBUSY;
2164 		else if (rule->pqname[0] != 0) {
2165 			if ((rule->pqid =
2166 			    pf_qname2qid(rule->pqname)) == 0)
2167 				error = EBUSY;
2168 		} else
2169 			rule->pqid = rule->qid;
2170 	}
2171 #endif
2172 	if (rule->tagname[0])
2173 		if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
2174 			error = EBUSY;
2175 	if (rule->match_tagname[0])
2176 		if ((rule->match_tag =
2177 		    pf_tagname2tag(rule->match_tagname)) == 0)
2178 			error = EBUSY;
2179 	if (rule->rt && !rule->direction)
2180 		error = EINVAL;
2181 	if (!rule->log)
2182 		rule->logif = 0;
2183 	if (! pf_init_threshold(&rule->pktrate, rule->pktrate.limit,
2184 	   rule->pktrate.seconds))
2185 		error = ENOMEM;
2186 	if (pf_addr_setup(ruleset, &rule->src.addr, rule->af))
2187 		error = ENOMEM;
2188 	if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af))
2189 		error = ENOMEM;
2190 	if (pf_kanchor_setup(rule, ruleset, anchor_call))
2191 		error = EINVAL;
2192 	if (rule->scrub_flags & PFSTATE_SETPRIO &&
2193 	    (rule->set_prio[0] > PF_PRIO_MAX ||
2194 	    rule->set_prio[1] > PF_PRIO_MAX))
2195 		error = EINVAL;
2196 	for (int i = 0; i < 3; i++) {
2197 		TAILQ_FOREACH(pa, &V_pf_pabuf[i], entries)
2198 			if (pa->addr.type == PF_ADDR_TABLE) {
2199 				pa->addr.p.tbl = pfr_attach_table(ruleset,
2200 				    pa->addr.v.tblname);
2201 				if (pa->addr.p.tbl == NULL)
2202 					error = ENOMEM;
2203 			}
2204 	}
2205 
2206 	rule->overload_tbl = NULL;
2207 	if (rule->overload_tblname[0]) {
2208 		if ((rule->overload_tbl = pfr_attach_table(ruleset,
2209 		    rule->overload_tblname)) == NULL)
2210 			error = EINVAL;
2211 		else
2212 			rule->overload_tbl->pfrkt_flags |=
2213 			    PFR_TFLAG_ACTIVE;
2214 	}
2215 
2216 	pf_mv_kpool(&V_pf_pabuf[0], &rule->nat.list);
2217 
2218 	/*
2219 	 * Old versions of pfctl provide route redirection pools in a single
2220 	 * common redirection pool, rdr. New versions use rdr only for
2221 	 * rdr-to rules.
2222 	 */
2223 	if (rule->rt > PF_NOPFROUTE && TAILQ_EMPTY(&V_pf_pabuf[2])) {
2224 		pf_mv_kpool(&V_pf_pabuf[1], &rule->route.list);
2225 	} else {
2226 		pf_mv_kpool(&V_pf_pabuf[1], &rule->rdr.list);
2227 		pf_mv_kpool(&V_pf_pabuf[2], &rule->route.list);
2228 	}
2229 
2230 	if (((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
2231 	    (rule->action == PF_BINAT))	&& rule->anchor == NULL &&
2232 	    TAILQ_FIRST(&rule->rdr.list) == NULL) {
2233 		error = EINVAL;
2234 	}
2235 
2236 	if (rule->rt > PF_NOPFROUTE && (TAILQ_FIRST(&rule->route.list) == NULL)) {
2237 		error = EINVAL;
2238 	}
2239 
2240 	if (rule->action == PF_PASS && (rule->rdr.opts & PF_POOL_STICKYADDR ||
2241 	    rule->nat.opts & PF_POOL_STICKYADDR) && !rule->keep_state) {
2242 		error = EINVAL;
2243 	}
2244 
2245 	if (error) {
2246 		pf_free_rule(rule);
2247 		rule = NULL;
2248 		ERROUT(error);
2249 	}
2250 
2251 	rule->nat.cur = TAILQ_FIRST(&rule->nat.list);
2252 	rule->rdr.cur = TAILQ_FIRST(&rule->rdr.list);
2253 	rule->route.cur = TAILQ_FIRST(&rule->route.list);
2254 	TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
2255 	    rule, entries);
2256 	ruleset->rules[rs_num].inactive.rcount++;
2257 
2258 	PF_RULES_WUNLOCK();
2259 	pf_hash_rule(rule);
2260 	if (RB_INSERT(pf_krule_global, ruleset->rules[rs_num].inactive.tree, rule) != NULL) {
2261 		PF_RULES_WLOCK();
2262 		TAILQ_REMOVE(ruleset->rules[rs_num].inactive.ptr, rule, entries);
2263 		ruleset->rules[rs_num].inactive.rcount--;
2264 		pf_free_rule(rule);
2265 		rule = NULL;
2266 		ERROUT(EEXIST);
2267 	}
2268 	PF_CONFIG_UNLOCK();
2269 
2270 	return (0);
2271 
2272 #undef ERROUT
2273 errout:
2274 	PF_RULES_WUNLOCK();
2275 	PF_CONFIG_UNLOCK();
2276 errout_unlocked:
2277 	pf_kkif_free(rcv_kif);
2278 	pf_kkif_free(kif);
2279 	pf_krule_free(rule);
2280 	return (error);
2281 }
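
/*
 * Usage sketch (an illustration of the expected caller, not a definitive
 * recipe): pf_ioctl_addrule() refuses to add a rule unless both tickets
 * name an open transaction.  DIOCXBEGIN fills te.ticket and DIOCBEGINADDRS
 * fills pp.ticket (matching V_ticket_pabuf above), so a minimal
 * DIOCADDRULE sequence from userland looks roughly like what pfctl does:
 *
 *	struct pfioc_trans t;
 *	struct pfioc_trans_e te;
 *	struct pfioc_pooladdr pp;
 *	struct pfioc_rule pr;
 *
 *	memset(&t, 0, sizeof(t));
 *	memset(&te, 0, sizeof(te));
 *	te.rs_num = PF_RULESET_FILTER;
 *	t.size = 1;
 *	t.esize = sizeof(te);
 *	t.array = &te;
 *	if (ioctl(dev, DIOCXBEGIN, &t) == -1)
 *		err(1, "DIOCXBEGIN");
 *
 *	memset(&pp, 0, sizeof(pp));
 *	if (ioctl(dev, DIOCBEGINADDRS, &pp) == -1)
 *		err(1, "DIOCBEGINADDRS");
 *
 *	memset(&pr, 0, sizeof(pr));
 *	pr.ticket = te.ticket;
 *	pr.pool_ticket = pp.ticket;
 *	... fill in pr.rule, pr.anchor ...
 *	if (ioctl(dev, DIOCADDRULE, &pr) == -1)
 *		err(1, "DIOCADDRULE");
 *
 *	if (ioctl(dev, DIOCXCOMMIT, &t) == -1)
 *		err(1, "DIOCXCOMMIT");
 */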
2282 
2283 static bool
2284 pf_label_match(const struct pf_krule *rule, const char *label)
2285 {
2286 	int i = 0;
2287 
2288 	while (*rule->label[i]) {
2289 		if (strcmp(rule->label[i], label) == 0)
2290 			return (true);
2291 		i++;
2292 	}
2293 
2294 	return (false);
2295 }
2296 
2297 static unsigned int
2298 pf_kill_matching_state(struct pf_state_key_cmp *key, int dir)
2299 {
2300 	struct pf_kstate *s;
2301 	int more = 0;
2302 
2303 	s = pf_find_state_all(key, dir, &more);
2304 	if (s == NULL)
2305 		return (0);
2306 
2307 	if (more) {
2308 		PF_STATE_UNLOCK(s);
2309 		return (0);
2310 	}
2311 
2312 	pf_remove_state(s);
2313 	return (1);
2314 }
2315 
2316 static int
2317 pf_killstates_row(struct pf_kstate_kill *psk, struct pf_idhash *ih)
2318 {
2319 	struct pf_kstate	*s;
2320 	struct pf_state_key	*sk;
2321 	struct pf_addr		*srcaddr, *dstaddr;
2322 	struct pf_state_key_cmp	 match_key;
2323 	int			 idx, killed = 0;
2324 	unsigned int		 dir;
2325 	u_int16_t		 srcport, dstport;
2326 	struct pfi_kkif		*kif;
2327 
2328 relock_DIOCKILLSTATES:
2329 	PF_HASHROW_LOCK(ih);
2330 	LIST_FOREACH(s, &ih->states, entry) {
2331 		/* For floating states look at the original kif. */
2332 		kif = s->kif == V_pfi_all ? s->orig_kif : s->kif;
2333 
2334 		sk = s->key[psk->psk_nat ? PF_SK_STACK : PF_SK_WIRE];
2335 		if (s->direction == PF_OUT) {
2336 			srcaddr = &sk->addr[1];
2337 			dstaddr = &sk->addr[0];
2338 			srcport = sk->port[1];
2339 			dstport = sk->port[0];
2340 		} else {
2341 			srcaddr = &sk->addr[0];
2342 			dstaddr = &sk->addr[1];
2343 			srcport = sk->port[0];
2344 			dstport = sk->port[1];
2345 		}
2346 
2347 		if (psk->psk_af && sk->af != psk->psk_af)
2348 			continue;
2349 
2350 		if (psk->psk_proto && psk->psk_proto != sk->proto)
2351 			continue;
2352 
2353 		if (! PF_MATCHA(psk->psk_src.neg, &psk->psk_src.addr.v.a.addr,
2354 		    &psk->psk_src.addr.v.a.mask, srcaddr, sk->af))
2355 			continue;
2356 
2357 		if (! PF_MATCHA(psk->psk_dst.neg, &psk->psk_dst.addr.v.a.addr,
2358 		    &psk->psk_dst.addr.v.a.mask, dstaddr, sk->af))
2359 			continue;
2360 
2361 		if (! PF_MATCHA(psk->psk_rt_addr.neg,
2362 		    &psk->psk_rt_addr.addr.v.a.addr,
2363 		    &psk->psk_rt_addr.addr.v.a.mask,
2364 		    &s->act.rt_addr, sk->af))
2365 			continue;
2366 
2367 		if (psk->psk_src.port_op != 0 &&
2368 		    ! pf_match_port(psk->psk_src.port_op,
2369 		    psk->psk_src.port[0], psk->psk_src.port[1], srcport))
2370 			continue;
2371 
2372 		if (psk->psk_dst.port_op != 0 &&
2373 		    ! pf_match_port(psk->psk_dst.port_op,
2374 		    psk->psk_dst.port[0], psk->psk_dst.port[1], dstport))
2375 			continue;
2376 
2377 		if (psk->psk_label[0] &&
2378 		    ! pf_label_match(s->rule, psk->psk_label))
2379 			continue;
2380 
2381 		if (psk->psk_ifname[0] && strcmp(psk->psk_ifname,
2382 		    kif->pfik_name))
2383 			continue;
2384 
2385 		if (psk->psk_kill_match) {
2386 			/* Create the key to find matching states, with lock
2387 			 * held. */
2388 
2389 			bzero(&match_key, sizeof(match_key));
2390 
2391 			if (s->direction == PF_OUT) {
2392 				dir = PF_IN;
2393 				idx = psk->psk_nat ? PF_SK_WIRE : PF_SK_STACK;
2394 			} else {
2395 				dir = PF_OUT;
2396 				idx = psk->psk_nat ? PF_SK_STACK : PF_SK_WIRE;
2397 			}
2398 
2399 			match_key.af = s->key[idx]->af;
2400 			match_key.proto = s->key[idx]->proto;
2401 			PF_ACPY(&match_key.addr[0],
2402 			    &s->key[idx]->addr[1], match_key.af);
2403 			match_key.port[0] = s->key[idx]->port[1];
2404 			PF_ACPY(&match_key.addr[1],
2405 			    &s->key[idx]->addr[0], match_key.af);
2406 			match_key.port[1] = s->key[idx]->port[0];
2407 		}
2408 
2409 		pf_remove_state(s);
2410 		killed++;
2411 
2412 		if (psk->psk_kill_match)
2413 			killed += pf_kill_matching_state(&match_key, dir);
2414 
2415 		goto relock_DIOCKILLSTATES;
2416 	}
2417 	PF_HASHROW_UNLOCK(ih);
2418 
2419 	return (killed);
2420 }
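
/*
 * Note on pf_killstates_row() above: when psk_kill_match is set, a second
 * lookup key is built from the matched state with its address and port
 * pairs swapped, and pf_kill_matching_state() is called with the opposite
 * direction, so the companion state for the reverse (translated) flow is
 * removed together with the state matched directly.
 */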
2421 
2422 void
2423 unhandled_af(int af)
2424 {
2425 	panic("unhandled af %d", af);
2426 }
2427 
2428 int
2429 pf_start(void)
2430 {
2431 	int error = 0;
2432 
2433 	sx_xlock(&V_pf_ioctl_lock);
2434 	if (V_pf_status.running)
2435 		error = EEXIST;
2436 	else {
2437 		hook_pf();
2438 		if (! TAILQ_EMPTY(V_pf_keth->active.rules))
2439 			hook_pf_eth();
2440 		V_pf_status.running = 1;
2441 		V_pf_status.since = time_uptime;
2442 		new_unrhdr64(&V_pf_stateid, time_second);
2443 
2444 		DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
2445 	}
2446 	sx_xunlock(&V_pf_ioctl_lock);
2447 
2448 	return (error);
2449 }
2450 
2451 int
2452 pf_stop(void)
2453 {
2454 	int error = 0;
2455 
2456 	sx_xlock(&V_pf_ioctl_lock);
2457 	if (!V_pf_status.running)
2458 		error = ENOENT;
2459 	else {
2460 		V_pf_status.running = 0;
2461 		dehook_pf();
2462 		dehook_pf_eth();
2463 		V_pf_status.since = time_uptime;
2464 		DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
2465 	}
2466 	sx_xunlock(&V_pf_ioctl_lock);
2467 
2468 	return (error);
2469 }
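
/*
 * Usage sketch (illustration only): pf_start() and pf_stop() back the
 * classic DIOCSTART/DIOCSTOP ioctls handled under COMPAT_FREEBSD14 in
 * pfioctl() below.  Neither ioctl takes an argument:
 *
 *	int dev = open("/dev/pf", O_RDWR);
 *
 *	if (dev == -1)
 *		err(1, "/dev/pf");
 *	if (ioctl(dev, DIOCSTART) == -1 && errno != EEXIST)
 *		err(1, "DIOCSTART");
 */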
2470 
2471 void
2472 pf_ioctl_clear_status(void)
2473 {
2474 	PF_RULES_WLOCK();
2475 	for (int i = 0; i < PFRES_MAX; i++)
2476 		counter_u64_zero(V_pf_status.counters[i]);
2477 	for (int i = 0; i < FCNT_MAX; i++)
2478 		pf_counter_u64_zero(&V_pf_status.fcounters[i]);
2479 	for (int i = 0; i < SCNT_MAX; i++)
2480 		counter_u64_zero(V_pf_status.scounters[i]);
2481 	for (int i = 0; i < KLCNT_MAX; i++)
2482 		counter_u64_zero(V_pf_status.lcounters[i]);
2483 	V_pf_status.since = time_uptime;
2484 	if (*V_pf_status.ifname)
2485 		pfi_update_status(V_pf_status.ifname, NULL);
2486 	PF_RULES_WUNLOCK();
2487 }
2488 
2489 int
2490 pf_ioctl_set_timeout(int timeout, int seconds, int *prev_seconds)
2491 {
2492 	uint32_t old;
2493 
2494 	if (timeout < 0 || timeout >= PFTM_MAX ||
2495 	    seconds < 0)
2496 		return (EINVAL);
2497 
2498 	PF_RULES_WLOCK();
2499 	old = V_pf_default_rule.timeout[timeout];
2500 	if (timeout == PFTM_INTERVAL && seconds == 0)
2501 		seconds = 1;
2502 	V_pf_default_rule.timeout[timeout] = seconds;
2503 	if (timeout == PFTM_INTERVAL && seconds < old)
2504 		wakeup(pf_purge_thread);
2505 
2506 	if (prev_seconds != NULL)
2507 		*prev_seconds = old;
2508 
2509 	PF_RULES_WUNLOCK();
2510 
2511 	return (0);
2512 }
2513 
2514 int
2515 pf_ioctl_get_timeout(int timeout, int *seconds)
2516 {
2517 	PF_RULES_RLOCK_TRACKER;
2518 
2519 	if (timeout < 0 || timeout >= PFTM_MAX)
2520 		return (EINVAL);
2521 
2522 	PF_RULES_RLOCK();
2523 	*seconds = V_pf_default_rule.timeout[timeout];
2524 	PF_RULES_RUNLOCK();
2525 
2526 	return (0);
2527 }
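
/*
 * Usage sketch (illustration only; struct layout per pfvar.h): the timeout
 * table is indexed by the PFTM_* constants and driven from userland through
 * DIOCSETTIMEOUT/DIOCGETTIMEOUT with struct pfioc_tm.  On a successful set
 * the previous value that pf_ioctl_set_timeout() reports via prev_seconds
 * is typically handed back in the same field:
 *
 *	struct pfioc_tm pt;
 *
 *	memset(&pt, 0, sizeof(pt));
 *	pt.timeout = PFTM_TCP_ESTABLISHED;
 *	pt.seconds = 3600;
 *	if (ioctl(dev, DIOCSETTIMEOUT, &pt) == -1)
 *		err(1, "DIOCSETTIMEOUT");
 */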
2528 
2529 int
2530 pf_ioctl_set_limit(int index, unsigned int limit, unsigned int *old_limit)
2531 {
2532 
2533 	PF_RULES_WLOCK();
2534 	if (index < 0 || index >= PF_LIMIT_MAX ||
2535 	    V_pf_limits[index].zone == NULL) {
2536 		PF_RULES_WUNLOCK();
2537 		return (EINVAL);
2538 	}
2539 	uma_zone_set_max(V_pf_limits[index].zone,
2540 	    limit == 0 ? INT_MAX : limit);
2541 	if (old_limit != NULL)
2542 		*old_limit = V_pf_limits[index].limit;
2543 	V_pf_limits[index].limit = limit;
2544 	PF_RULES_WUNLOCK();
2545 
2546 	return (0);
2547 }
2548 
2549 int
2550 pf_ioctl_get_limit(int index, unsigned int *limit)
2551 {
2552 	PF_RULES_RLOCK_TRACKER;
2553 
2554 	if (index < 0 || index >= PF_LIMIT_MAX)
2555 		return (EINVAL);
2556 
2557 	PF_RULES_RLOCK();
2558 	*limit = V_pf_limits[index].limit;
2559 	PF_RULES_RUNLOCK();
2560 
2561 	return (0);
2562 }
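
/*
 * Usage sketch (illustration only): the memory limits correspond to the
 * PF_LIMIT_* indices and are set with DIOCSETLIMIT using struct
 * pfioc_limit; a limit of 0 removes the cap (uma_zone_set_max() is given
 * INT_MAX above):
 *
 *	struct pfioc_limit pl;
 *
 *	memset(&pl, 0, sizeof(pl));
 *	pl.index = PF_LIMIT_STATES;
 *	pl.limit = 100000;
 *	if (ioctl(dev, DIOCSETLIMIT, &pl) == -1)
 *		err(1, "DIOCSETLIMIT");
 */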
2563 
2564 int
2565 pf_ioctl_begin_addrs(uint32_t *ticket)
2566 {
2567 	PF_RULES_WLOCK();
2568 	pf_empty_kpool(&V_pf_pabuf[0]);
2569 	pf_empty_kpool(&V_pf_pabuf[1]);
2570 	pf_empty_kpool(&V_pf_pabuf[2]);
2571 	*ticket = ++V_ticket_pabuf;
2572 	PF_RULES_WUNLOCK();
2573 
2574 	return (0);
2575 }
2576 
2577 int
2578 pf_ioctl_add_addr(struct pf_nl_pooladdr *pp)
2579 {
2580 	struct pf_kpooladdr	*pa = NULL;
2581 	struct pfi_kkif		*kif = NULL;
2582 	int error;
2583 
2584 	if (pp->which != PF_RDR && pp->which != PF_NAT &&
2585 	    pp->which != PF_RT)
2586 		return (EINVAL);
2587 
2588 	switch (pp->af) {
2589 #ifdef INET
2590 	case AF_INET:
2591 		/* FALLTHROUGH */
2592 #endif /* INET */
2593 #ifdef INET6
2594 	case AF_INET6:
2595 		/* FALLTHROUGH */
2596 #endif /* INET6 */
2597 	case AF_UNSPEC:
2598 		break;
2599 	default:
2600 		return (EAFNOSUPPORT);
2601 	}
2602 
2603 	if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
2604 	    pp->addr.addr.type != PF_ADDR_DYNIFTL &&
2605 	    pp->addr.addr.type != PF_ADDR_TABLE)
2606 		return (EINVAL);
2607 
2608 	if (pp->addr.addr.p.dyn != NULL)
2609 		return (EINVAL);
2610 
2611 	pa = malloc(sizeof(*pa), M_PFRULE, M_WAITOK);
2612 	error = pf_pooladdr_to_kpooladdr(&pp->addr, pa);
2613 	if (error != 0)
2614 		goto out;
2615 	if (pa->ifname[0])
2616 		kif = pf_kkif_create(M_WAITOK);
2617 	PF_RULES_WLOCK();
2618 	if (pp->ticket != V_ticket_pabuf) {
2619 		PF_RULES_WUNLOCK();
2620 		if (pa->ifname[0])
2621 			pf_kkif_free(kif);
2622 		error = EBUSY;
2623 		goto out;
2624 	}
2625 	if (pa->ifname[0]) {
2626 		pa->kif = pfi_kkif_attach(kif, pa->ifname);
2627 		kif = NULL;
2628 		pfi_kkif_ref(pa->kif);
2629 	} else
2630 		pa->kif = NULL;
2631 	if (pa->addr.type == PF_ADDR_DYNIFTL && ((error =
2632 	    pfi_dynaddr_setup(&pa->addr, pp->af)) != 0)) {
2633 		if (pa->ifname[0])
2634 			pfi_kkif_unref(pa->kif);
2635 		PF_RULES_WUNLOCK();
2636 		goto out;
2637 	}
2638 	switch (pp->which) {
2639 	case PF_NAT:
2640 		TAILQ_INSERT_TAIL(&V_pf_pabuf[0], pa, entries);
2641 		break;
2642 	case PF_RDR:
2643 		TAILQ_INSERT_TAIL(&V_pf_pabuf[1], pa, entries);
2644 		break;
2645 	case PF_RT:
2646 		TAILQ_INSERT_TAIL(&V_pf_pabuf[2], pa, entries);
2647 		break;
2648 	}
2649 	PF_RULES_WUNLOCK();
2650 
2651 	return (0);
2652 
2653 out:
2654 	free(pa, M_PFRULE);
2655 	return (error);
2656 }
2657 
2658 int
2659 pf_ioctl_get_addrs(struct pf_nl_pooladdr *pp)
2660 {
2661 	struct pf_kpool		*pool;
2662 	struct pf_kpooladdr	*pa;
2663 
2664 	PF_RULES_RLOCK_TRACKER;
2665 
2666 	if (pp->which != PF_RDR && pp->which != PF_NAT &&
2667 	    pp->which != PF_RT)
2668 		return (EINVAL);
2669 
2670 	pp->anchor[sizeof(pp->anchor) - 1] = 0;
2671 	pp->nr = 0;
2672 
2673 	PF_RULES_RLOCK();
2674 	pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
2675 	    pp->r_num, 0, 1, 0, pp->which);
2676 	if (pool == NULL) {
2677 		PF_RULES_RUNLOCK();
2678 		return (EBUSY);
2679 	}
2680 	TAILQ_FOREACH(pa, &pool->list, entries)
2681 		pp->nr++;
2682 	PF_RULES_RUNLOCK();
2683 
2684 	return (0);
2685 }
2686 
2687 int
2688 pf_ioctl_get_addr(struct pf_nl_pooladdr *pp)
2689 {
2690 	struct pf_kpool		*pool;
2691 	struct pf_kpooladdr	*pa;
2692 	u_int32_t		 nr = 0;
2693 
2694 	if (pp->which != PF_RDR && pp->which != PF_NAT &&
2695 	    pp->which != PF_RT)
2696 		return (EINVAL);
2697 
2698 	PF_RULES_RLOCK_TRACKER;
2699 
2700 	pp->anchor[sizeof(pp->anchor) - 1] = 0;
2701 
2702 	PF_RULES_RLOCK();
2703 	pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
2704 	    pp->r_num, 0, 1, 1, pp->which);
2705 	if (pool == NULL) {
2706 		PF_RULES_RUNLOCK();
2707 		return (EBUSY);
2708 	}
2709 	pa = TAILQ_FIRST(&pool->list);
2710 	while ((pa != NULL) && (nr < pp->nr)) {
2711 		pa = TAILQ_NEXT(pa, entries);
2712 		nr++;
2713 	}
2714 	if (pa == NULL) {
2715 		PF_RULES_RUNLOCK();
2716 		return (EBUSY);
2717 	}
2718 	pf_kpooladdr_to_pooladdr(pa, &pp->addr);
2719 	pf_addr_copyout(&pp->addr.addr);
2720 	PF_RULES_RUNLOCK();
2721 
2722 	return (0);
2723 }
2724 
2725 int
2726 pf_ioctl_get_rulesets(struct pfioc_ruleset *pr)
2727 {
2728 	struct pf_kruleset	*ruleset;
2729 	struct pf_kanchor	*anchor;
2730 
2731 	PF_RULES_RLOCK_TRACKER;
2732 
2733 	pr->path[sizeof(pr->path) - 1] = 0;
2734 
2735 	PF_RULES_RLOCK();
2736 	if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
2737 		PF_RULES_RUNLOCK();
2738 		return (ENOENT);
2739 	}
2740 	pr->nr = 0;
2741 	if (ruleset->anchor == NULL) {
2742 		/* XXX kludge for pf_main_ruleset */
2743 		RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
2744 			if (anchor->parent == NULL)
2745 				pr->nr++;
2746 	} else {
2747 		RB_FOREACH(anchor, pf_kanchor_node,
2748 		    &ruleset->anchor->children)
2749 			pr->nr++;
2750 	}
2751 	PF_RULES_RUNLOCK();
2752 
2753 	return (0);
2754 }
2755 
2756 int
2757 pf_ioctl_get_ruleset(struct pfioc_ruleset *pr)
2758 {
2759 	struct pf_kruleset	*ruleset;
2760 	struct pf_kanchor	*anchor;
2761 	u_int32_t		 nr = 0;
2762 	int			 error = 0;
2763 
2764 	PF_RULES_RLOCK_TRACKER;
2765 
2766 	PF_RULES_RLOCK();
2767 	if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
2768 		PF_RULES_RUNLOCK();
2769 		return (ENOENT);
2770 	}
2771 
2772 	pr->name[0] = 0;
2773 	if (ruleset->anchor == NULL) {
2774 		/* XXX kludge for pf_main_ruleset */
2775 		RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
2776 			if (anchor->parent == NULL && nr++ == pr->nr) {
2777 				strlcpy(pr->name, anchor->name,
2778 				    sizeof(pr->name));
2779 				break;
2780 			}
2781 	} else {
2782 		RB_FOREACH(anchor, pf_kanchor_node,
2783 		    &ruleset->anchor->children)
2784 			if (nr++ == pr->nr) {
2785 				strlcpy(pr->name, anchor->name,
2786 				    sizeof(pr->name));
2787 				break;
2788 			}
2789 	}
2790 	if (!pr->name[0])
2791 		error = EBUSY;
2792 	PF_RULES_RUNLOCK();
2793 
2794 	return (error);
2795 }
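
/*
 * Usage sketch (illustration only): anchors are enumerated in two steps,
 * the way pfctl -sA does it.  DIOCGETRULESETS returns the number of child
 * anchors under pr->path, then DIOCGETRULESET is called once per index to
 * fetch each name:
 *
 *	struct pfioc_ruleset prs;
 *	u_int32_t mnr, nr;
 *
 *	memset(&prs, 0, sizeof(prs));
 *	if (ioctl(dev, DIOCGETRULESETS, &prs) == -1)
 *		err(1, "DIOCGETRULESETS");
 *	mnr = prs.nr;
 *	for (nr = 0; nr < mnr; nr++) {
 *		prs.nr = nr;
 *		if (ioctl(dev, DIOCGETRULESET, &prs) == -1)
 *			err(1, "DIOCGETRULESET");
 *		... prs.name holds the nr'th child anchor ...
 *	}
 */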
2796 
2797 static int
2798 pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
2799 {
2800 	int			 error = 0;
2801 	PF_RULES_RLOCK_TRACKER;
2802 
2803 #define	ERROUT_IOCTL(target, x)					\
2804     do {								\
2805 	    error = (x);						\
2806 	    SDT_PROBE3(pf, ioctl, ioctl, error, cmd, error, __LINE__);	\
2807 	    goto target;						\
2808     } while (0)
2809 
2810 
2811 	/* XXX keep in sync with switch() below */
2812 	if (securelevel_gt(td->td_ucred, 2))
2813 		switch (cmd) {
2814 		case DIOCGETRULES:
2815 		case DIOCGETRULENV:
2816 		case DIOCGETADDRS:
2817 		case DIOCGETADDR:
2818 		case DIOCGETSTATE:
2819 		case DIOCGETSTATENV:
2820 		case DIOCSETSTATUSIF:
2821 		case DIOCGETSTATUSNV:
2822 		case DIOCCLRSTATUS:
2823 		case DIOCNATLOOK:
2824 		case DIOCSETDEBUG:
2825 #ifdef COMPAT_FREEBSD14
2826 		case DIOCGETSTATES:
2827 		case DIOCGETSTATESV2:
2828 #endif
2829 		case DIOCGETTIMEOUT:
2830 		case DIOCCLRRULECTRS:
2831 		case DIOCGETLIMIT:
2832 		case DIOCGETALTQSV0:
2833 		case DIOCGETALTQSV1:
2834 		case DIOCGETALTQV0:
2835 		case DIOCGETALTQV1:
2836 		case DIOCGETQSTATSV0:
2837 		case DIOCGETQSTATSV1:
2838 		case DIOCGETRULESETS:
2839 		case DIOCGETRULESET:
2840 		case DIOCRGETTABLES:
2841 		case DIOCRGETTSTATS:
2842 		case DIOCRCLRTSTATS:
2843 		case DIOCRCLRADDRS:
2844 		case DIOCRADDADDRS:
2845 		case DIOCRDELADDRS:
2846 		case DIOCRSETADDRS:
2847 		case DIOCRGETADDRS:
2848 		case DIOCRGETASTATS:
2849 		case DIOCRCLRASTATS:
2850 		case DIOCRTSTADDRS:
2851 		case DIOCOSFPGET:
2852 		case DIOCGETSRCNODES:
2853 		case DIOCCLRSRCNODES:
2854 		case DIOCGETSYNCOOKIES:
2855 		case DIOCIGETIFACES:
2856 		case DIOCGIFSPEEDV0:
2857 		case DIOCGIFSPEEDV1:
2858 		case DIOCSETIFFLAG:
2859 		case DIOCCLRIFFLAG:
2860 		case DIOCGETETHRULES:
2861 		case DIOCGETETHRULE:
2862 		case DIOCGETETHRULESETS:
2863 		case DIOCGETETHRULESET:
2864 			break;
2865 		case DIOCRCLRTABLES:
2866 		case DIOCRADDTABLES:
2867 		case DIOCRDELTABLES:
2868 		case DIOCRSETTFLAGS:
2869 			if (((struct pfioc_table *)addr)->pfrio_flags &
2870 			    PFR_FLAG_DUMMY)
2871 				break; /* dummy operation ok */
2872 			return (EPERM);
2873 		default:
2874 			return (EPERM);
2875 		}
2876 
2877 	if (!(flags & FWRITE))
2878 		switch (cmd) {
2879 		case DIOCGETRULES:
2880 		case DIOCGETADDRS:
2881 		case DIOCGETADDR:
2882 		case DIOCGETSTATE:
2883 		case DIOCGETSTATENV:
2884 		case DIOCGETSTATUSNV:
2885 #ifdef COMPAT_FREEBSD14
2886 		case DIOCGETSTATES:
2887 		case DIOCGETSTATESV2:
2888 #endif
2889 		case DIOCGETTIMEOUT:
2890 		case DIOCGETLIMIT:
2891 		case DIOCGETALTQSV0:
2892 		case DIOCGETALTQSV1:
2893 		case DIOCGETALTQV0:
2894 		case DIOCGETALTQV1:
2895 		case DIOCGETQSTATSV0:
2896 		case DIOCGETQSTATSV1:
2897 		case DIOCGETRULESETS:
2898 		case DIOCGETRULESET:
2899 		case DIOCNATLOOK:
2900 		case DIOCRGETTABLES:
2901 		case DIOCRGETTSTATS:
2902 		case DIOCRGETADDRS:
2903 		case DIOCRGETASTATS:
2904 		case DIOCRTSTADDRS:
2905 		case DIOCOSFPGET:
2906 		case DIOCGETSRCNODES:
2907 		case DIOCGETSYNCOOKIES:
2908 		case DIOCIGETIFACES:
2909 		case DIOCGIFSPEEDV1:
2910 		case DIOCGIFSPEEDV0:
2911 		case DIOCGETRULENV:
2912 		case DIOCGETETHRULES:
2913 		case DIOCGETETHRULE:
2914 		case DIOCGETETHRULESETS:
2915 		case DIOCGETETHRULESET:
2916 			break;
2917 		case DIOCRCLRTABLES:
2918 		case DIOCRADDTABLES:
2919 		case DIOCRDELTABLES:
2920 		case DIOCRCLRTSTATS:
2921 		case DIOCRCLRADDRS:
2922 		case DIOCRADDADDRS:
2923 		case DIOCRDELADDRS:
2924 		case DIOCRSETADDRS:
2925 		case DIOCRSETTFLAGS:
2926 			if (((struct pfioc_table *)addr)->pfrio_flags &
2927 			    PFR_FLAG_DUMMY) {
2928 				flags |= FWRITE; /* need write lock for dummy */
2929 				break; /* dummy operation ok */
2930 			}
2931 			return (EACCES);
2932 		default:
2933 			return (EACCES);
2934 		}
2935 
2936 	CURVNET_SET(TD_TO_VNET(td));
2937 
2938 	switch (cmd) {
2939 #ifdef COMPAT_FREEBSD14
2940 	case DIOCSTART:
2941 		error = pf_start();
2942 		break;
2943 
2944 	case DIOCSTOP:
2945 		error = pf_stop();
2946 		break;
2947 #endif
2948 
2949 	case DIOCGETETHRULES: {
2950 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
2951 		nvlist_t		*nvl;
2952 		void			*packed;
2953 		struct pf_keth_rule	*tail;
2954 		struct pf_keth_ruleset	*rs;
2955 		u_int32_t		 ticket, nr;
2956 		const char		*anchor = "";
2957 
2958 		nvl = NULL;
2959 		packed = NULL;
2960 
2961 #define	ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULES_error, x)
2962 
2963 		if (nv->len > pf_ioctl_maxcount)
2964 			ERROUT(ENOMEM);
2965 
2966 		/* Copy the request in */
2967 		packed = malloc(nv->len, M_NVLIST, M_WAITOK);
2968 		error = copyin(nv->data, packed, nv->len);
2969 		if (error)
2970 			ERROUT(error);
2971 
2972 		nvl = nvlist_unpack(packed, nv->len, 0);
2973 		if (nvl == NULL)
2974 			ERROUT(EBADMSG);
2975 
2976 		if (! nvlist_exists_string(nvl, "anchor"))
2977 			ERROUT(EBADMSG);
2978 
2979 		anchor = nvlist_get_string(nvl, "anchor");
2980 
2981 		rs = pf_find_keth_ruleset(anchor);
2982 
2983 		nvlist_destroy(nvl);
2984 		nvl = NULL;
2985 		free(packed, M_NVLIST);
2986 		packed = NULL;
2987 
2988 		if (rs == NULL)
2989 			ERROUT(ENOENT);
2990 
2991 		/* Reply */
2992 		nvl = nvlist_create(0);
2993 		if (nvl == NULL)
2994 			ERROUT(ENOMEM);
2995 
2996 		PF_RULES_RLOCK();
2997 
2998 		ticket = rs->active.ticket;
2999 		tail = TAILQ_LAST(rs->active.rules, pf_keth_ruleq);
3000 		if (tail)
3001 			nr = tail->nr + 1;
3002 		else
3003 			nr = 0;
3004 
3005 		PF_RULES_RUNLOCK();
3006 
3007 		nvlist_add_number(nvl, "ticket", ticket);
3008 		nvlist_add_number(nvl, "nr", nr);
3009 
3010 		packed = nvlist_pack(nvl, &nv->len);
3011 		if (packed == NULL)
3012 			ERROUT(ENOMEM);
3013 
3014 		if (nv->size == 0)
3015 			ERROUT(0);
3016 		else if (nv->size < nv->len)
3017 			ERROUT(ENOSPC);
3018 
3019 		error = copyout(packed, nv->data, nv->len);
3020 
3021 #undef ERROUT
3022 DIOCGETETHRULES_error:
3023 		free(packed, M_NVLIST);
3024 		nvlist_destroy(nvl);
3025 		break;
3026 	}
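
	/*
	 * Note on the nvlist-based ioctls in this switch: request and reply
	 * share one struct pfioc_nv.  The caller packs a request nvlist into
	 * nv->data, setting nv->len to the packed length and nv->size to the
	 * buffer capacity; the handler copies the request in, unpacks it,
	 * builds a reply nvlist, repacks it, updates nv->len and copies the
	 * result back out into nv->data.  A capacity of zero turns the call
	 * into a size probe (success with only nv->len filled in), and a
	 * reply larger than nv->size fails with ENOSPC so userland can grow
	 * the buffer and retry.
	 */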
3027 
3028 	case DIOCGETETHRULE: {
3029 		struct epoch_tracker	 et;
3030 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3031 		nvlist_t		*nvl = NULL;
3032 		void			*nvlpacked = NULL;
3033 		struct pf_keth_rule	*rule = NULL;
3034 		struct pf_keth_ruleset	*rs;
3035 		u_int32_t		 ticket, nr;
3036 		bool			 clear = false;
3037 		const char		*anchor;
3038 
3039 #define ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULE_error, x)
3040 
3041 		if (nv->len > pf_ioctl_maxcount)
3042 			ERROUT(ENOMEM);
3043 
3044 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3045 		error = copyin(nv->data, nvlpacked, nv->len);
3046 		if (error)
3047 			ERROUT(error);
3048 
3049 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3050 		if (nvl == NULL)
3051 			ERROUT(EBADMSG);
3052 		if (! nvlist_exists_number(nvl, "ticket"))
3053 			ERROUT(EBADMSG);
3054 		ticket = nvlist_get_number(nvl, "ticket");
3055 		if (! nvlist_exists_string(nvl, "anchor"))
3056 			ERROUT(EBADMSG);
3057 		anchor = nvlist_get_string(nvl, "anchor");
3058 
3059 		if (nvlist_exists_bool(nvl, "clear"))
3060 			clear = nvlist_get_bool(nvl, "clear");
3061 
3062 		if (clear && !(flags & FWRITE))
3063 			ERROUT(EACCES);
3064 
3065 		if (! nvlist_exists_number(nvl, "nr"))
3066 			ERROUT(EBADMSG);
3067 		nr = nvlist_get_number(nvl, "nr");
3068 
3069 		PF_RULES_RLOCK();
3070 		rs = pf_find_keth_ruleset(anchor);
3071 		if (rs == NULL) {
3072 			PF_RULES_RUNLOCK();
3073 			ERROUT(ENOENT);
3074 		}
3075 		if (ticket != rs->active.ticket) {
3076 			PF_RULES_RUNLOCK();
3077 			ERROUT(EBUSY);
3078 		}
3079 
3080 		nvlist_destroy(nvl);
3081 		nvl = NULL;
3082 		free(nvlpacked, M_NVLIST);
3083 		nvlpacked = NULL;
3084 
3085 		rule = TAILQ_FIRST(rs->active.rules);
3086 		while ((rule != NULL) && (rule->nr != nr))
3087 			rule = TAILQ_NEXT(rule, entries);
3088 		if (rule == NULL) {
3089 			PF_RULES_RUNLOCK();
3090 			ERROUT(ENOENT);
3091 		}
3092 		/* Make sure rule can't go away. */
3093 		NET_EPOCH_ENTER(et);
3094 		PF_RULES_RUNLOCK();
3095 		nvl = pf_keth_rule_to_nveth_rule(rule);
3096 		if (pf_keth_anchor_nvcopyout(rs, rule, nvl)) {
3097 			NET_EPOCH_EXIT(et);
3098 			ERROUT(EBUSY);
3099 		}
3100 		NET_EPOCH_EXIT(et);
3101 		if (nvl == NULL)
3102 			ERROUT(ENOMEM);
3103 
3104 		nvlpacked = nvlist_pack(nvl, &nv->len);
3105 		if (nvlpacked == NULL)
3106 			ERROUT(ENOMEM);
3107 
3108 		if (nv->size == 0)
3109 			ERROUT(0);
3110 		else if (nv->size < nv->len)
3111 			ERROUT(ENOSPC);
3112 
3113 		error = copyout(nvlpacked, nv->data, nv->len);
3114 		if (error == 0 && clear) {
3115 			counter_u64_zero(rule->evaluations);
3116 			for (int i = 0; i < 2; i++) {
3117 				counter_u64_zero(rule->packets[i]);
3118 				counter_u64_zero(rule->bytes[i]);
3119 			}
3120 		}
3121 
3122 #undef ERROUT
3123 DIOCGETETHRULE_error:
3124 		free(nvlpacked, M_NVLIST);
3125 		nvlist_destroy(nvl);
3126 		break;
3127 	}
3128 
3129 	case DIOCADDETHRULE: {
3130 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3131 		nvlist_t		*nvl = NULL;
3132 		void			*nvlpacked = NULL;
3133 		struct pf_keth_rule	*rule = NULL, *tail = NULL;
3134 		struct pf_keth_ruleset	*ruleset = NULL;
3135 		struct pfi_kkif		*kif = NULL, *bridge_to_kif = NULL;
3136 		const char		*anchor = "", *anchor_call = "";
3137 
3138 #define ERROUT(x)	ERROUT_IOCTL(DIOCADDETHRULE_error, x)
3139 
3140 		if (nv->len > pf_ioctl_maxcount)
3141 			ERROUT(ENOMEM);
3142 
3143 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3144 		error = copyin(nv->data, nvlpacked, nv->len);
3145 		if (error)
3146 			ERROUT(error);
3147 
3148 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3149 		if (nvl == NULL)
3150 			ERROUT(EBADMSG);
3151 
3152 		if (! nvlist_exists_number(nvl, "ticket"))
3153 			ERROUT(EBADMSG);
3154 
3155 		if (nvlist_exists_string(nvl, "anchor"))
3156 			anchor = nvlist_get_string(nvl, "anchor");
3157 		if (nvlist_exists_string(nvl, "anchor_call"))
3158 			anchor_call = nvlist_get_string(nvl, "anchor_call");
3159 
3160 		ruleset = pf_find_keth_ruleset(anchor);
3161 		if (ruleset == NULL)
3162 			ERROUT(EINVAL);
3163 
3164 		if (nvlist_get_number(nvl, "ticket") !=
3165 		    ruleset->inactive.ticket) {
3166 			DPFPRINTF(PF_DEBUG_MISC,
3167 			    ("ticket: %d != %d\n",
3168 			    (u_int32_t)nvlist_get_number(nvl, "ticket"),
3169 			    ruleset->inactive.ticket));
3170 			ERROUT(EBUSY);
3171 		}
3172 
3173 		rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK);
3174 		rule->timestamp = NULL;
3175 
3176 		error = pf_nveth_rule_to_keth_rule(nvl, rule);
3177 		if (error != 0)
3178 			ERROUT(error);
3179 
3180 		if (rule->ifname[0])
3181 			kif = pf_kkif_create(M_WAITOK);
3182 		if (rule->bridge_to_name[0])
3183 			bridge_to_kif = pf_kkif_create(M_WAITOK);
3184 		rule->evaluations = counter_u64_alloc(M_WAITOK);
3185 		for (int i = 0; i < 2; i++) {
3186 			rule->packets[i] = counter_u64_alloc(M_WAITOK);
3187 			rule->bytes[i] = counter_u64_alloc(M_WAITOK);
3188 		}
3189 		rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
3190 		    M_WAITOK | M_ZERO);
3191 
3192 		PF_RULES_WLOCK();
3193 
3194 		if (rule->ifname[0]) {
3195 			rule->kif = pfi_kkif_attach(kif, rule->ifname);
3196 			pfi_kkif_ref(rule->kif);
3197 		} else
3198 			rule->kif = NULL;
3199 		if (rule->bridge_to_name[0]) {
3200 			rule->bridge_to = pfi_kkif_attach(bridge_to_kif,
3201 			    rule->bridge_to_name);
3202 			pfi_kkif_ref(rule->bridge_to);
3203 		} else
3204 			rule->bridge_to = NULL;
3205 
3206 #ifdef ALTQ
3207 		/* set queue IDs */
3208 		if (rule->qname[0] != 0) {
3209 			if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
3210 				error = EBUSY;
3211 			else
3212 				rule->qid = rule->qid;
3213 		}
3214 #endif
3215 		if (rule->tagname[0])
3216 			if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
3217 				error = EBUSY;
3218 		if (rule->match_tagname[0])
3219 			if ((rule->match_tag = pf_tagname2tag(
3220 			    rule->match_tagname)) == 0)
3221 				error = EBUSY;
3222 
3223 		if (error == 0 && rule->ipdst.addr.type == PF_ADDR_TABLE)
3224 			error = pf_eth_addr_setup(ruleset, &rule->ipdst.addr);
3225 		if (error == 0 && rule->ipsrc.addr.type == PF_ADDR_TABLE)
3226 			error = pf_eth_addr_setup(ruleset, &rule->ipsrc.addr);
3227 
3228 		if (error) {
3229 			pf_free_eth_rule(rule);
3230 			PF_RULES_WUNLOCK();
3231 			ERROUT(error);
3232 		}
3233 
3234 		if (pf_keth_anchor_setup(rule, ruleset, anchor_call)) {
3235 			pf_free_eth_rule(rule);
3236 			PF_RULES_WUNLOCK();
3237 			ERROUT(EINVAL);
3238 		}
3239 
3240 		tail = TAILQ_LAST(ruleset->inactive.rules, pf_keth_ruleq);
3241 		if (tail)
3242 			rule->nr = tail->nr + 1;
3243 		else
3244 			rule->nr = 0;
3245 
3246 		TAILQ_INSERT_TAIL(ruleset->inactive.rules, rule, entries);
3247 
3248 		PF_RULES_WUNLOCK();
3249 
3250 #undef ERROUT
3251 DIOCADDETHRULE_error:
3252 		nvlist_destroy(nvl);
3253 		free(nvlpacked, M_NVLIST);
3254 		break;
3255 	}
3256 
3257 	case DIOCGETETHRULESETS: {
3258 		struct epoch_tracker	 et;
3259 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3260 		nvlist_t		*nvl = NULL;
3261 		void			*nvlpacked = NULL;
3262 		struct pf_keth_ruleset	*ruleset;
3263 		struct pf_keth_anchor	*anchor;
3264 		int			 nr = 0;
3265 
3266 #define ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULESETS_error, x)
3267 
3268 		if (nv->len > pf_ioctl_maxcount)
3269 			ERROUT(ENOMEM);
3270 
3271 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3272 		error = copyin(nv->data, nvlpacked, nv->len);
3273 		if (error)
3274 			ERROUT(error);
3275 
3276 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3277 		if (nvl == NULL)
3278 			ERROUT(EBADMSG);
3279 		if (! nvlist_exists_string(nvl, "path"))
3280 			ERROUT(EBADMSG);
3281 
3282 		NET_EPOCH_ENTER(et);
3283 
3284 		if ((ruleset = pf_find_keth_ruleset(
3285 		    nvlist_get_string(nvl, "path"))) == NULL) {
3286 			NET_EPOCH_EXIT(et);
3287 			ERROUT(ENOENT);
3288 		}
3289 
3290 		if (ruleset->anchor == NULL) {
3291 			RB_FOREACH(anchor, pf_keth_anchor_global, &V_pf_keth_anchors)
3292 				if (anchor->parent == NULL)
3293 					nr++;
3294 		} else {
3295 			RB_FOREACH(anchor, pf_keth_anchor_node,
3296 			    &ruleset->anchor->children)
3297 				nr++;
3298 		}
3299 
3300 		NET_EPOCH_EXIT(et);
3301 
3302 		nvlist_destroy(nvl);
3303 		nvl = NULL;
3304 		free(nvlpacked, M_NVLIST);
3305 		nvlpacked = NULL;
3306 
3307 		nvl = nvlist_create(0);
3308 		if (nvl == NULL)
3309 			ERROUT(ENOMEM);
3310 
3311 		nvlist_add_number(nvl, "nr", nr);
3312 
3313 		nvlpacked = nvlist_pack(nvl, &nv->len);
3314 		if (nvlpacked == NULL)
3315 			ERROUT(ENOMEM);
3316 
3317 		if (nv->size == 0)
3318 			ERROUT(0);
3319 		else if (nv->size < nv->len)
3320 			ERROUT(ENOSPC);
3321 
3322 		error = copyout(nvlpacked, nv->data, nv->len);
3323 
3324 #undef ERROUT
3325 DIOCGETETHRULESETS_error:
3326 		free(nvlpacked, M_NVLIST);
3327 		nvlist_destroy(nvl);
3328 		break;
3329 	}
3330 
3331 	case DIOCGETETHRULESET: {
3332 		struct epoch_tracker	 et;
3333 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3334 		nvlist_t		*nvl = NULL;
3335 		void			*nvlpacked = NULL;
3336 		struct pf_keth_ruleset	*ruleset;
3337 		struct pf_keth_anchor	*anchor;
3338 		int			 nr = 0, req_nr = 0;
3339 		bool			 found = false;
3340 
3341 #define ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULESET_error, x)
3342 
3343 		if (nv->len > pf_ioctl_maxcount)
3344 			ERROUT(ENOMEM);
3345 
3346 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3347 		error = copyin(nv->data, nvlpacked, nv->len);
3348 		if (error)
3349 			ERROUT(error);
3350 
3351 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3352 		if (nvl == NULL)
3353 			ERROUT(EBADMSG);
3354 		if (! nvlist_exists_string(nvl, "path"))
3355 			ERROUT(EBADMSG);
3356 		if (! nvlist_exists_number(nvl, "nr"))
3357 			ERROUT(EBADMSG);
3358 
3359 		req_nr = nvlist_get_number(nvl, "nr");
3360 
3361 		NET_EPOCH_ENTER(et);
3362 
3363 		if ((ruleset = pf_find_keth_ruleset(
3364 		    nvlist_get_string(nvl, "path"))) == NULL) {
3365 			NET_EPOCH_EXIT(et);
3366 			ERROUT(ENOENT);
3367 		}
3368 
3369 		nvlist_destroy(nvl);
3370 		nvl = NULL;
3371 		free(nvlpacked, M_NVLIST);
3372 		nvlpacked = NULL;
3373 
3374 		nvl = nvlist_create(0);
3375 		if (nvl == NULL) {
3376 			NET_EPOCH_EXIT(et);
3377 			ERROUT(ENOMEM);
3378 		}
3379 
3380 		if (ruleset->anchor == NULL) {
3381 			RB_FOREACH(anchor, pf_keth_anchor_global,
3382 			    &V_pf_keth_anchors) {
3383 				if (anchor->parent == NULL && nr++ == req_nr) {
3384 					found = true;
3385 					break;
3386 				}
3387 			}
3388 		} else {
3389 			RB_FOREACH(anchor, pf_keth_anchor_node,
3390 			     &ruleset->anchor->children) {
3391 				if (nr++ == req_nr) {
3392 					found = true;
3393 					break;
3394 				}
3395 			}
3396 		}
3397 
3398 		NET_EPOCH_EXIT(et);
3399 		if (found) {
3400 			nvlist_add_number(nvl, "nr", nr);
3401 			nvlist_add_string(nvl, "name", anchor->name);
3402 			if (ruleset->anchor)
3403 				nvlist_add_string(nvl, "path",
3404 				    ruleset->anchor->path);
3405 			else
3406 				nvlist_add_string(nvl, "path", "");
3407 		} else {
3408 			ERROUT(EBUSY);
3409 		}
3410 
3411 		nvlpacked = nvlist_pack(nvl, &nv->len);
3412 		if (nvlpacked == NULL)
3413 			ERROUT(ENOMEM);
3414 
3415 		if (nv->size == 0)
3416 			ERROUT(0);
3417 		else if (nv->size < nv->len)
3418 			ERROUT(ENOSPC);
3419 
3420 		error = copyout(nvlpacked, nv->data, nv->len);
3421 
3422 #undef ERROUT
3423 DIOCGETETHRULESET_error:
3424 		free(nvlpacked, M_NVLIST);
3425 		nvlist_destroy(nvl);
3426 		break;
3427 	}
3428 
3429 	case DIOCADDRULENV: {
3430 		struct pfioc_nv	*nv = (struct pfioc_nv *)addr;
3431 		nvlist_t	*nvl = NULL;
3432 		void		*nvlpacked = NULL;
3433 		struct pf_krule	*rule = NULL;
3434 		const char	*anchor = "", *anchor_call = "";
3435 		uint32_t	 ticket = 0, pool_ticket = 0;
3436 
3437 #define	ERROUT(x)	ERROUT_IOCTL(DIOCADDRULENV_error, x)
3438 
3439 		if (nv->len > pf_ioctl_maxcount)
3440 			ERROUT(ENOMEM);
3441 
3442 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3443 		error = copyin(nv->data, nvlpacked, nv->len);
3444 		if (error)
3445 			ERROUT(error);
3446 
3447 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3448 		if (nvl == NULL)
3449 			ERROUT(EBADMSG);
3450 
3451 		if (! nvlist_exists_number(nvl, "ticket"))
3452 			ERROUT(EINVAL);
3453 		ticket = nvlist_get_number(nvl, "ticket");
3454 
3455 		if (! nvlist_exists_number(nvl, "pool_ticket"))
3456 			ERROUT(EINVAL);
3457 		pool_ticket = nvlist_get_number(nvl, "pool_ticket");
3458 
3459 		if (! nvlist_exists_nvlist(nvl, "rule"))
3460 			ERROUT(EINVAL);
3461 
3462 		rule = pf_krule_alloc();
3463 		error = pf_nvrule_to_krule(nvlist_get_nvlist(nvl, "rule"),
3464 		    rule);
3465 		if (error)
3466 			ERROUT(error);
3467 
3468 		if (nvlist_exists_string(nvl, "anchor"))
3469 			anchor = nvlist_get_string(nvl, "anchor");
3470 		if (nvlist_exists_string(nvl, "anchor_call"))
3471 			anchor_call = nvlist_get_string(nvl, "anchor_call");
3472 
3473 		if ((error = nvlist_error(nvl)))
3474 			ERROUT(error);
3475 
3476 		/* Frees rule on error */
3477 		error = pf_ioctl_addrule(rule, ticket, pool_ticket, anchor,
3478 		    anchor_call, td->td_ucred->cr_ruid,
3479 		    td->td_proc ? td->td_proc->p_pid : 0);
3480 
3481 		nvlist_destroy(nvl);
3482 		free(nvlpacked, M_NVLIST);
3483 		break;
3484 #undef ERROUT
3485 DIOCADDRULENV_error:
3486 		pf_krule_free(rule);
3487 		nvlist_destroy(nvl);
3488 		free(nvlpacked, M_NVLIST);
3489 
3490 		break;
3491 	}
3492 	case DIOCADDRULE: {
3493 		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
3494 		struct pf_krule		*rule;
3495 
3496 		rule = pf_krule_alloc();
3497 		error = pf_rule_to_krule(&pr->rule, rule);
3498 		if (error != 0) {
3499 			pf_krule_free(rule);
3500 			break;
3501 		}
3502 
3503 		pr->anchor[sizeof(pr->anchor) - 1] = 0;
3504 
3505 		/* Frees rule on error */
3506 		error = pf_ioctl_addrule(rule, pr->ticket, pr->pool_ticket,
3507 		    pr->anchor, pr->anchor_call, td->td_ucred->cr_ruid,
3508 		    td->td_proc ? td->td_proc->p_pid : 0);
3509 		break;
3510 	}
3511 
3512 	case DIOCGETRULES: {
3513 		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
3514 
3515 		pr->anchor[sizeof(pr->anchor) - 1] = 0;
3516 
3517 		error = pf_ioctl_getrules(pr);
3518 
3519 		break;
3520 	}
3521 
3522 	case DIOCGETRULENV: {
3523 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3524 		nvlist_t		*nvrule = NULL;
3525 		nvlist_t		*nvl = NULL;
3526 		struct pf_kruleset	*ruleset;
3527 		struct pf_krule		*rule;
3528 		void			*nvlpacked = NULL;
3529 		int			 rs_num, nr;
3530 		bool			 clear_counter = false;
3531 
3532 #define	ERROUT(x)	ERROUT_IOCTL(DIOCGETRULENV_error, x)
3533 
3534 		if (nv->len > pf_ioctl_maxcount)
3535 			ERROUT(ENOMEM);
3536 
3537 		/* Copy the request in */
3538 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
3539 		error = copyin(nv->data, nvlpacked, nv->len);
3540 		if (error)
3541 			ERROUT(error);
3542 
3543 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
3544 		if (nvl == NULL)
3545 			ERROUT(EBADMSG);
3546 
3547 		if (! nvlist_exists_string(nvl, "anchor"))
3548 			ERROUT(EBADMSG);
3549 		if (! nvlist_exists_number(nvl, "ruleset"))
3550 			ERROUT(EBADMSG);
3551 		if (! nvlist_exists_number(nvl, "ticket"))
3552 			ERROUT(EBADMSG);
3553 		if (! nvlist_exists_number(nvl, "nr"))
3554 			ERROUT(EBADMSG);
3555 
3556 		if (nvlist_exists_bool(nvl, "clear_counter"))
3557 			clear_counter = nvlist_get_bool(nvl, "clear_counter");
3558 
3559 		if (clear_counter && !(flags & FWRITE))
3560 			ERROUT(EACCES);
3561 
3562 		nr = nvlist_get_number(nvl, "nr");
3563 
3564 		PF_RULES_WLOCK();
3565 		ruleset = pf_find_kruleset(nvlist_get_string(nvl, "anchor"));
3566 		if (ruleset == NULL) {
3567 			PF_RULES_WUNLOCK();
3568 			ERROUT(ENOENT);
3569 		}
3570 
3571 		rs_num = pf_get_ruleset_number(nvlist_get_number(nvl, "ruleset"));
3572 		if (rs_num >= PF_RULESET_MAX) {
3573 			PF_RULES_WUNLOCK();
3574 			ERROUT(EINVAL);
3575 		}
3576 
3577 		if (nvlist_get_number(nvl, "ticket") !=
3578 		    ruleset->rules[rs_num].active.ticket) {
3579 			PF_RULES_WUNLOCK();
3580 			ERROUT(EBUSY);
3581 		}
3582 
3583 		if ((error = nvlist_error(nvl))) {
3584 			PF_RULES_WUNLOCK();
3585 			ERROUT(error);
3586 		}
3587 
3588 		rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
3589 		while ((rule != NULL) && (rule->nr != nr))
3590 			rule = TAILQ_NEXT(rule, entries);
3591 		if (rule == NULL) {
3592 			PF_RULES_WUNLOCK();
3593 			ERROUT(EBUSY);
3594 		}
3595 
3596 		nvrule = pf_krule_to_nvrule(rule);
3597 
3598 		nvlist_destroy(nvl);
3599 		nvl = nvlist_create(0);
3600 		if (nvl == NULL) {
3601 			PF_RULES_WUNLOCK();
3602 			ERROUT(ENOMEM);
3603 		}
3604 		nvlist_add_number(nvl, "nr", nr);
3605 		nvlist_add_nvlist(nvl, "rule", nvrule);
3606 		nvlist_destroy(nvrule);
3607 		nvrule = NULL;
3608 		if (pf_kanchor_nvcopyout(ruleset, rule, nvl)) {
3609 			PF_RULES_WUNLOCK();
3610 			ERROUT(EBUSY);
3611 		}
3612 
3613 		free(nvlpacked, M_NVLIST);
3614 		nvlpacked = nvlist_pack(nvl, &nv->len);
3615 		if (nvlpacked == NULL) {
3616 			PF_RULES_WUNLOCK();
3617 			ERROUT(ENOMEM);
3618 		}
3619 
3620 		if (nv->size == 0) {
3621 			PF_RULES_WUNLOCK();
3622 			ERROUT(0);
3623 		}
3624 		else if (nv->size < nv->len) {
3625 			PF_RULES_WUNLOCK();
3626 			ERROUT(ENOSPC);
3627 		}
3628 
3629 		if (clear_counter)
3630 			pf_krule_clear_counters(rule);
3631 
3632 		PF_RULES_WUNLOCK();
3633 
3634 		error = copyout(nvlpacked, nv->data, nv->len);
3635 
3636 #undef ERROUT
3637 DIOCGETRULENV_error:
3638 		free(nvlpacked, M_NVLIST);
3639 		nvlist_destroy(nvrule);
3640 		nvlist_destroy(nvl);
3641 
3642 		break;
3643 	}
3644 
3645 	case DIOCCHANGERULE: {
3646 		struct pfioc_rule	*pcr = (struct pfioc_rule *)addr;
3647 		struct pf_kruleset	*ruleset;
3648 		struct pf_krule		*oldrule = NULL, *newrule = NULL;
3649 		struct pfi_kkif		*kif = NULL;
3650 		struct pf_kpooladdr	*pa;
3651 		u_int32_t		 nr = 0;
3652 		int			 rs_num;
3653 
3654 		pcr->anchor[sizeof(pcr->anchor) - 1] = 0;
3655 
3656 		if (pcr->action < PF_CHANGE_ADD_HEAD ||
3657 		    pcr->action > PF_CHANGE_GET_TICKET) {
3658 			error = EINVAL;
3659 			break;
3660 		}
3661 		if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
3662 			error = EINVAL;
3663 			break;
3664 		}
3665 
3666 		if (pcr->action != PF_CHANGE_REMOVE) {
3667 			newrule = pf_krule_alloc();
3668 			error = pf_rule_to_krule(&pcr->rule, newrule);
3669 			if (error != 0) {
3670 				pf_krule_free(newrule);
3671 				break;
3672 			}
3673 
3674 			if (newrule->ifname[0])
3675 				kif = pf_kkif_create(M_WAITOK);
3676 			pf_counter_u64_init(&newrule->evaluations, M_WAITOK);
3677 			for (int i = 0; i < 2; i++) {
3678 				pf_counter_u64_init(&newrule->packets[i], M_WAITOK);
3679 				pf_counter_u64_init(&newrule->bytes[i], M_WAITOK);
3680 			}
3681 			newrule->states_cur = counter_u64_alloc(M_WAITOK);
3682 			newrule->states_tot = counter_u64_alloc(M_WAITOK);
3683 			for (pf_sn_types_t sn_type = 0; sn_type < PF_SN_MAX; sn_type++)
3684 				newrule->src_nodes[sn_type] = counter_u64_alloc(M_WAITOK);
3685 			newrule->cuid = td->td_ucred->cr_ruid;
3686 			newrule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
3687 			TAILQ_INIT(&newrule->nat.list);
3688 			TAILQ_INIT(&newrule->rdr.list);
3689 			TAILQ_INIT(&newrule->route.list);
3690 		}
3691 #define	ERROUT(x)	ERROUT_IOCTL(DIOCCHANGERULE_error, x)
3692 
3693 		PF_CONFIG_LOCK();
3694 		PF_RULES_WLOCK();
3695 #ifdef PF_WANT_32_TO_64_COUNTER
3696 		if (newrule != NULL) {
3697 			LIST_INSERT_HEAD(&V_pf_allrulelist, newrule, allrulelist);
3698 			newrule->allrulelinked = true;
3699 			V_pf_allrulecount++;
3700 		}
3701 #endif
3702 
3703 		if (!(pcr->action == PF_CHANGE_REMOVE ||
3704 		    pcr->action == PF_CHANGE_GET_TICKET) &&
3705 		    pcr->pool_ticket != V_ticket_pabuf)
3706 			ERROUT(EBUSY);
3707 
3708 		ruleset = pf_find_kruleset(pcr->anchor);
3709 		if (ruleset == NULL)
3710 			ERROUT(EINVAL);
3711 
3712 		rs_num = pf_get_ruleset_number(pcr->rule.action);
3713 		if (rs_num >= PF_RULESET_MAX)
3714 			ERROUT(EINVAL);
3715 
3716 		/*
3717 		 * XXXMJG: there is no guarantee that the ruleset was
3718 		 * created by the usual route of calling DIOCXBEGIN.
3719 		 * As a result it is possible the rule tree will not
3720 		 * be allocated yet. Hack around it by doing it here.
3721 		 * Note it is fine to let the tree persist in case of
3722 		 * error as it will be freed down the road on future
3723 		 * updates (if need be).
3724 		 */
3725 		if (ruleset->rules[rs_num].active.tree == NULL) {
3726 			ruleset->rules[rs_num].active.tree = pf_rule_tree_alloc(M_NOWAIT);
3727 			if (ruleset->rules[rs_num].active.tree == NULL) {
3728 				ERROUT(ENOMEM);
3729 			}
3730 		}
3731 
3732 		if (pcr->action == PF_CHANGE_GET_TICKET) {
3733 			pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
3734 			ERROUT(0);
3735 		} else if (pcr->ticket !=
3736 			    ruleset->rules[rs_num].active.ticket)
3737 				ERROUT(EINVAL);
3738 
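		/*
		 * Resolve the new rule's interface, queue, tag, anchor and
		 * table references.  Failures only set 'error' here; it is
		 * checked once after all references have been processed.
		 */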
3739 		if (pcr->action != PF_CHANGE_REMOVE) {
3740 			if (newrule->ifname[0]) {
3741 				newrule->kif = pfi_kkif_attach(kif,
3742 				    newrule->ifname);
3743 				kif = NULL;
3744 				pfi_kkif_ref(newrule->kif);
3745 			} else
3746 				newrule->kif = NULL;
3747 
3748 			if (newrule->rtableid > 0 &&
3749 			    newrule->rtableid >= rt_numfibs)
3750 				error = EBUSY;
3751 
3752 #ifdef ALTQ
3753 			/* set queue IDs */
3754 			if (newrule->qname[0] != 0) {
3755 				if ((newrule->qid =
3756 				    pf_qname2qid(newrule->qname)) == 0)
3757 					error = EBUSY;
3758 				else if (newrule->pqname[0] != 0) {
3759 					if ((newrule->pqid =
3760 					    pf_qname2qid(newrule->pqname)) == 0)
3761 						error = EBUSY;
3762 				} else
3763 					newrule->pqid = newrule->qid;
3764 			}
3765 #endif /* ALTQ */
3766 			if (newrule->tagname[0])
3767 				if ((newrule->tag =
3768 				    pf_tagname2tag(newrule->tagname)) == 0)
3769 					error = EBUSY;
3770 			if (newrule->match_tagname[0])
3771 				if ((newrule->match_tag = pf_tagname2tag(
3772 				    newrule->match_tagname)) == 0)
3773 					error = EBUSY;
3774 			if (newrule->rt && !newrule->direction)
3775 				error = EINVAL;
3776 			if (!newrule->log)
3777 				newrule->logif = 0;
3778 			if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af))
3779 				error = ENOMEM;
3780 			if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af))
3781 				error = ENOMEM;
3782 			if (pf_kanchor_setup(newrule, ruleset, pcr->anchor_call))
3783 				error = EINVAL;
3784 			for (int i = 0; i < 3; i++) {
3785 				TAILQ_FOREACH(pa, &V_pf_pabuf[i], entries)
3786 					if (pa->addr.type == PF_ADDR_TABLE) {
3787 						pa->addr.p.tbl =
3788 						    pfr_attach_table(ruleset,
3789 						    pa->addr.v.tblname);
3790 						if (pa->addr.p.tbl == NULL)
3791 							error = ENOMEM;
3792 					}
3793 			}
3794 
3795 			newrule->overload_tbl = NULL;
3796 			if (newrule->overload_tblname[0]) {
3797 				if ((newrule->overload_tbl = pfr_attach_table(
3798 				    ruleset, newrule->overload_tblname)) ==
3799 				    NULL)
3800 					error = EINVAL;
3801 				else
3802 					newrule->overload_tbl->pfrkt_flags |=
3803 					    PFR_TFLAG_ACTIVE;
3804 			}
3805 
3806 			pf_mv_kpool(&V_pf_pabuf[0], &newrule->nat.list);
3807 			pf_mv_kpool(&V_pf_pabuf[1], &newrule->rdr.list);
3808 			pf_mv_kpool(&V_pf_pabuf[2], &newrule->route.list);
3809 			if ((newrule->action == PF_NAT ||
3810 			    newrule->action == PF_RDR ||
3811 			    newrule->action == PF_BINAT ||
3812 			    newrule->rt > PF_NOPFROUTE) &&
3813 			    !newrule->anchor &&
3814 			    TAILQ_FIRST(&newrule->rdr.list) == NULL)
3815 				error = EINVAL;
3816 
3817 			if (error) {
3818 				pf_free_rule(newrule);
3819 				PF_RULES_WUNLOCK();
3820 				PF_CONFIG_UNLOCK();
3821 				break;
3822 			}
3823 
3824 			newrule->nat.cur = TAILQ_FIRST(&newrule->nat.list);
3825 			newrule->rdr.cur = TAILQ_FIRST(&newrule->rdr.list);
3826 		}
3827 		pf_empty_kpool(&V_pf_pabuf[0]);
3828 		pf_empty_kpool(&V_pf_pabuf[1]);
3829 		pf_empty_kpool(&V_pf_pabuf[2]);
3830 
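		/*
		 * Find the rule the change is relative to: the head or tail
		 * of the active queue, or the rule numbered pcr->nr.
		 */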
3831 		if (pcr->action == PF_CHANGE_ADD_HEAD)
3832 			oldrule = TAILQ_FIRST(
3833 			    ruleset->rules[rs_num].active.ptr);
3834 		else if (pcr->action == PF_CHANGE_ADD_TAIL)
3835 			oldrule = TAILQ_LAST(
3836 			    ruleset->rules[rs_num].active.ptr, pf_krulequeue);
3837 		else {
3838 			oldrule = TAILQ_FIRST(
3839 			    ruleset->rules[rs_num].active.ptr);
3840 			while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
3841 				oldrule = TAILQ_NEXT(oldrule, entries);
3842 			if (oldrule == NULL) {
3843 				if (newrule != NULL)
3844 					pf_free_rule(newrule);
3845 				PF_RULES_WUNLOCK();
3846 				PF_CONFIG_UNLOCK();
3847 				error = EINVAL;
3848 				break;
3849 			}
3850 		}
3851 
3852 		if (pcr->action == PF_CHANGE_REMOVE) {
3853 			pf_unlink_rule(ruleset->rules[rs_num].active.ptr,
3854 			    oldrule);
3855 			RB_REMOVE(pf_krule_global,
3856 			    ruleset->rules[rs_num].active.tree, oldrule);
3857 			ruleset->rules[rs_num].active.rcount--;
3858 		} else {
3859 			pf_hash_rule(newrule);
3860 			if (RB_INSERT(pf_krule_global,
3861 			    ruleset->rules[rs_num].active.tree, newrule) != NULL) {
3862 				pf_free_rule(newrule);
3863 				PF_RULES_WUNLOCK();
3864 				PF_CONFIG_UNLOCK();
3865 				error = EEXIST;
3866 				break;
3867 			}
3868 
3869 			if (oldrule == NULL)
3870 				TAILQ_INSERT_TAIL(
3871 				    ruleset->rules[rs_num].active.ptr,
3872 				    newrule, entries);
3873 			else if (pcr->action == PF_CHANGE_ADD_HEAD ||
3874 			    pcr->action == PF_CHANGE_ADD_BEFORE)
3875 				TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
3876 			else
3877 				TAILQ_INSERT_AFTER(
3878 				    ruleset->rules[rs_num].active.ptr,
3879 				    oldrule, newrule, entries);
3880 			ruleset->rules[rs_num].active.rcount++;
3881 		}
3882 
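		/*
		 * Renumber the active queue, bump its ticket and recompute
		 * the skip steps so the modified ruleset stays consistent.
		 */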
3883 		nr = 0;
3884 		TAILQ_FOREACH(oldrule,
3885 		    ruleset->rules[rs_num].active.ptr, entries)
3886 			oldrule->nr = nr++;
3887 
3888 		ruleset->rules[rs_num].active.ticket++;
3889 
3890 		pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
3891 		pf_remove_if_empty_kruleset(ruleset);
3892 
3893 		PF_RULES_WUNLOCK();
3894 		PF_CONFIG_UNLOCK();
3895 		break;
3896 
3897 #undef ERROUT
3898 DIOCCHANGERULE_error:
3899 		PF_RULES_WUNLOCK();
3900 		PF_CONFIG_UNLOCK();
3901 		pf_krule_free(newrule);
3902 		pf_kkif_free(kif);
3903 		break;
3904 	}
3905 
3906 	case DIOCCLRSTATESNV: {
3907 		error = pf_clearstates_nv((struct pfioc_nv *)addr);
3908 		break;
3909 	}
3910 
3911 	case DIOCKILLSTATESNV: {
3912 		error = pf_killstates_nv((struct pfioc_nv *)addr);
3913 		break;
3914 	}
3915 
3916 	case DIOCADDSTATE: {
3917 		struct pfioc_state		*ps = (struct pfioc_state *)addr;
3918 		struct pfsync_state_1301	*sp = &ps->state;
3919 
3920 		if (sp->timeout >= PFTM_MAX) {
3921 			error = EINVAL;
3922 			break;
3923 		}
3924 		if (V_pfsync_state_import_ptr != NULL) {
3925 			PF_RULES_RLOCK();
3926 			error = V_pfsync_state_import_ptr(
3927 			    (union pfsync_state_union *)sp, PFSYNC_SI_IOCTL,
3928 			    PFSYNC_MSG_VERSION_1301);
3929 			PF_RULES_RUNLOCK();
3930 		} else
3931 			error = EOPNOTSUPP;
3932 		break;
3933 	}
3934 
3935 	case DIOCGETSTATE: {
3936 		struct pfioc_state	*ps = (struct pfioc_state *)addr;
3937 		struct pf_kstate	*s;
3938 
3939 		s = pf_find_state_byid(ps->state.id, ps->state.creatorid);
3940 		if (s == NULL) {
3941 			error = ENOENT;
3942 			break;
3943 		}
3944 
3945 		pfsync_state_export((union pfsync_state_union*)&ps->state,
3946 		    s, PFSYNC_MSG_VERSION_1301);
3947 		PF_STATE_UNLOCK(s);
3948 		break;
3949 	}
3950 
3951 	case DIOCGETSTATENV: {
3952 		error = pf_getstate((struct pfioc_nv *)addr);
3953 		break;
3954 	}
3955 
3956 #ifdef COMPAT_FREEBSD14
3957 	case DIOCGETSTATES: {
3958 		struct pfioc_states	*ps = (struct pfioc_states *)addr;
3959 		struct pf_kstate	*s;
3960 		struct pfsync_state_1301	*pstore, *p;
3961 		int			 i, nr;
3962 		size_t			 slice_count = 16, count;
3963 		void			*out;
3964 
3965 		if (ps->ps_len <= 0) {
3966 			nr = uma_zone_get_cur(V_pf_state_z);
3967 			ps->ps_len = sizeof(struct pfsync_state_1301) * nr;
3968 			break;
3969 		}
3970 
3971 		out = ps->ps_states;
3972 		pstore = mallocarray(slice_count,
3973 		    sizeof(struct pfsync_state_1301), M_TEMP, M_WAITOK | M_ZERO);
3974 		nr = 0;
3975 
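		/*
		 * Walk the state table bucket by bucket.  The temporary
		 * slice starts at 16 entries and is reallocated at twice the
		 * required size whenever a bucket does not fit.
		 */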
3976 		for (i = 0; i <= V_pf_hashmask; i++) {
3977 			struct pf_idhash *ih = &V_pf_idhash[i];
3978 
3979 DIOCGETSTATES_retry:
3980 			p = pstore;
3981 
3982 			if (LIST_EMPTY(&ih->states))
3983 				continue;
3984 
3985 			PF_HASHROW_LOCK(ih);
3986 			count = 0;
3987 			LIST_FOREACH(s, &ih->states, entry) {
3988 				if (s->timeout == PFTM_UNLINKED)
3989 					continue;
3990 				count++;
3991 			}
3992 
3993 			if (count > slice_count) {
3994 				PF_HASHROW_UNLOCK(ih);
3995 				free(pstore, M_TEMP);
3996 				slice_count = count * 2;
3997 				pstore = mallocarray(slice_count,
3998 				    sizeof(struct pfsync_state_1301), M_TEMP,
3999 				    M_WAITOK | M_ZERO);
4000 				goto DIOCGETSTATES_retry;
4001 			}
4002 
4003 			if ((nr+count) * sizeof(*p) > ps->ps_len) {
4004 				PF_HASHROW_UNLOCK(ih);
4005 				goto DIOCGETSTATES_full;
4006 			}
4007 
4008 			LIST_FOREACH(s, &ih->states, entry) {
4009 				if (s->timeout == PFTM_UNLINKED)
4010 					continue;
4011 
4012 				pfsync_state_export((union pfsync_state_union*)p,
4013 				    s, PFSYNC_MSG_VERSION_1301);
4014 				p++;
4015 				nr++;
4016 			}
4017 			PF_HASHROW_UNLOCK(ih);
4018 			error = copyout(pstore, out,
4019 			    sizeof(struct pfsync_state_1301) * count);
4020 			if (error)
4021 				break;
4022 			out = ps->ps_states + nr;
4023 		}
4024 DIOCGETSTATES_full:
4025 		ps->ps_len = sizeof(struct pfsync_state_1301) * nr;
4026 		free(pstore, M_TEMP);
4027 
4028 		break;
4029 	}
4030 
4031 	case DIOCGETSTATESV2: {
4032 		struct pfioc_states_v2	*ps = (struct pfioc_states_v2 *)addr;
4033 		struct pf_kstate	*s;
4034 		struct pf_state_export	*pstore, *p;
4035 		int i, nr;
4036 		size_t slice_count = 16, count;
4037 		void *out;
4038 
4039 		if (ps->ps_req_version > PF_STATE_VERSION) {
4040 			error = ENOTSUP;
4041 			break;
4042 		}
4043 
4044 		if (ps->ps_len <= 0) {
4045 			nr = uma_zone_get_cur(V_pf_state_z);
4046 			ps->ps_len = sizeof(struct pf_state_export) * nr;
4047 			break;
4048 		}
4049 
4050 		out = ps->ps_states;
4051 		pstore = mallocarray(slice_count,
4052 		    sizeof(struct pf_state_export), M_TEMP, M_WAITOK | M_ZERO);
4053 		nr = 0;
4054 
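		/*
		 * Same bucket walk and slice-doubling strategy as
		 * DIOCGETSTATES, but exporting the versioned pf_state_export
		 * records instead of the 13.01 pfsync format.
		 */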
4055 		for (i = 0; i <= V_pf_hashmask; i++) {
4056 			struct pf_idhash *ih = &V_pf_idhash[i];
4057 
4058 DIOCGETSTATESV2_retry:
4059 			p = pstore;
4060 
4061 			if (LIST_EMPTY(&ih->states))
4062 				continue;
4063 
4064 			PF_HASHROW_LOCK(ih);
4065 			count = 0;
4066 			LIST_FOREACH(s, &ih->states, entry) {
4067 				if (s->timeout == PFTM_UNLINKED)
4068 					continue;
4069 				count++;
4070 			}
4071 
4072 			if (count > slice_count) {
4073 				PF_HASHROW_UNLOCK(ih);
4074 				free(pstore, M_TEMP);
4075 				slice_count = count * 2;
4076 				pstore = mallocarray(slice_count,
4077 				    sizeof(struct pf_state_export), M_TEMP,
4078 				    M_WAITOK | M_ZERO);
4079 				goto DIOCGETSTATESV2_retry;
4080 			}
4081 
4082 			if ((nr+count) * sizeof(*p) > ps->ps_len) {
4083 				PF_HASHROW_UNLOCK(ih);
4084 				goto DIOCGETSTATESV2_full;
4085 			}
4086 
4087 			LIST_FOREACH(s, &ih->states, entry) {
4088 				if (s->timeout == PFTM_UNLINKED)
4089 					continue;
4090 
4091 				pf_state_export(p, s);
4092 				p++;
4093 				nr++;
4094 			}
4095 			PF_HASHROW_UNLOCK(ih);
4096 			error = copyout(pstore, out,
4097 			    sizeof(struct pf_state_export) * count);
4098 			if (error)
4099 				break;
4100 			out = ps->ps_states + nr;
4101 		}
4102 DIOCGETSTATESV2_full:
4103 		ps->ps_len = nr * sizeof(struct pf_state_export);
4104 		free(pstore, M_TEMP);
4105 
4106 		break;
4107 	}
4108 #endif
4109 	case DIOCGETSTATUSNV: {
4110 		error = pf_getstatus((struct pfioc_nv *)addr);
4111 		break;
4112 	}
4113 
4114 	case DIOCSETSTATUSIF: {
4115 		struct pfioc_if	*pi = (struct pfioc_if *)addr;
4116 
4117 		if (pi->ifname[0] == 0) {
4118 			bzero(V_pf_status.ifname, IFNAMSIZ);
4119 			break;
4120 		}
4121 		PF_RULES_WLOCK();
4122 		error = pf_user_strcpy(V_pf_status.ifname, pi->ifname, IFNAMSIZ);
4123 		PF_RULES_WUNLOCK();
4124 		break;
4125 	}
4126 
4127 	case DIOCCLRSTATUS: {
4128 		pf_ioctl_clear_status();
4129 		break;
4130 	}
4131 
4132 	case DIOCNATLOOK: {
4133 		struct pfioc_natlook	*pnl = (struct pfioc_natlook *)addr;
4134 		struct pf_state_key	*sk;
4135 		struct pf_kstate	*state;
4136 		struct pf_state_key_cmp	 key;
4137 		int			 m = 0, direction = pnl->direction;
4138 		int			 sidx, didx;
4139 
4140 		/* NATLOOK src and dst are reversed, so reverse sidx/didx */
4141 		sidx = (direction == PF_IN) ? 1 : 0;
4142 		didx = (direction == PF_IN) ? 0 : 1;
4143 
4144 		if (!pnl->proto ||
4145 		    PF_AZERO(&pnl->saddr, pnl->af) ||
4146 		    PF_AZERO(&pnl->daddr, pnl->af) ||
4147 		    ((pnl->proto == IPPROTO_TCP ||
4148 		    pnl->proto == IPPROTO_UDP) &&
4149 		    (!pnl->dport || !pnl->sport)))
4150 			error = EINVAL;
4151 		else {
4152 			bzero(&key, sizeof(key));
4153 			key.af = pnl->af;
4154 			key.proto = pnl->proto;
4155 			PF_ACPY(&key.addr[sidx], &pnl->saddr, pnl->af);
4156 			key.port[sidx] = pnl->sport;
4157 			PF_ACPY(&key.addr[didx], &pnl->daddr, pnl->af);
4158 			key.port[didx] = pnl->dport;
4159 
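			/*
			 * Exactly one matching state is required; multiple
			 * matches are ambiguous and reported as E2BIG.
			 */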
4160 			state = pf_find_state_all(&key, direction, &m);
4161 			if (state == NULL) {
4162 				error = ENOENT;
4163 			} else {
4164 				if (m > 1) {
4165 					PF_STATE_UNLOCK(state);
4166 					error = E2BIG;	/* more than one state */
4167 				} else {
4168 					sk = state->key[sidx];
4169 					PF_ACPY(&pnl->rsaddr, &sk->addr[sidx], sk->af);
4170 					pnl->rsport = sk->port[sidx];
4171 					PF_ACPY(&pnl->rdaddr, &sk->addr[didx], sk->af);
4172 					pnl->rdport = sk->port[didx];
4173 					PF_STATE_UNLOCK(state);
4174 				}
4175 			}
4176 		}
4177 		break;
4178 	}
4179 
4180 	case DIOCSETTIMEOUT: {
4181 		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;
4182 
4183 		error = pf_ioctl_set_timeout(pt->timeout, pt->seconds,
4184 		    &pt->seconds);
4185 		break;
4186 	}
4187 
4188 	case DIOCGETTIMEOUT: {
4189 		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;
4190 
4191 		error = pf_ioctl_get_timeout(pt->timeout, &pt->seconds);
4192 		break;
4193 	}
4194 
4195 	case DIOCGETLIMIT: {
4196 		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;
4197 
4198 		error = pf_ioctl_get_limit(pl->index, &pl->limit);
4199 		break;
4200 	}
4201 
4202 	case DIOCSETLIMIT: {
4203 		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;
4204 		unsigned int old_limit;
4205 
4206 		error = pf_ioctl_set_limit(pl->index, pl->limit, &old_limit);
4207 		pl->limit = old_limit;
4208 		break;
4209 	}
4210 
4211 	case DIOCSETDEBUG: {
4212 		u_int32_t	*level = (u_int32_t *)addr;
4213 
4214 		PF_RULES_WLOCK();
4215 		V_pf_status.debug = *level;
4216 		PF_RULES_WUNLOCK();
4217 		break;
4218 	}
4219 
4220 	case DIOCCLRRULECTRS: {
4221 		/* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
4222 		struct pf_kruleset	*ruleset = &pf_main_ruleset;
4223 		struct pf_krule		*rule;
4224 
4225 		PF_RULES_WLOCK();
4226 		TAILQ_FOREACH(rule,
4227 		    ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
4228 			pf_counter_u64_zero(&rule->evaluations);
4229 			for (int i = 0; i < 2; i++) {
4230 				pf_counter_u64_zero(&rule->packets[i]);
4231 				pf_counter_u64_zero(&rule->bytes[i]);
4232 			}
4233 		}
4234 		PF_RULES_WUNLOCK();
4235 		break;
4236 	}
4237 
4238 	case DIOCGIFSPEEDV0:
4239 	case DIOCGIFSPEEDV1: {
4240 		struct pf_ifspeed_v1	*psp = (struct pf_ifspeed_v1 *)addr;
4241 		struct pf_ifspeed_v1	ps;
4242 		struct ifnet		*ifp;
4243 
4244 		if (psp->ifname[0] == '\0') {
4245 			error = EINVAL;
4246 			break;
4247 		}
4248 
4249 		error = pf_user_strcpy(ps.ifname, psp->ifname, IFNAMSIZ);
4250 		if (error != 0)
4251 			break;
4252 		ifp = ifunit(ps.ifname);
4253 		if (ifp != NULL) {
4254 			psp->baudrate32 =
4255 			    (u_int32_t)uqmin(ifp->if_baudrate, UINT_MAX);
4256 			if (cmd == DIOCGIFSPEEDV1)
4257 				psp->baudrate = ifp->if_baudrate;
4258 		} else {
4259 			error = EINVAL;
4260 		}
4261 		break;
4262 	}
4263 
4264 #ifdef ALTQ
4265 	case DIOCSTARTALTQ: {
4266 		struct pf_altq		*altq;
4267 
4268 		PF_RULES_WLOCK();
4269 		/* enable all altq interfaces on active list */
4270 		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
4271 			if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
4272 				error = pf_enable_altq(altq);
4273 				if (error != 0)
4274 					break;
4275 			}
4276 		}
4277 		if (error == 0)
4278 			V_pf_altq_running = 1;
4279 		PF_RULES_WUNLOCK();
4280 		DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
4281 		break;
4282 	}
4283 
4284 	case DIOCSTOPALTQ: {
4285 		struct pf_altq		*altq;
4286 
4287 		PF_RULES_WLOCK();
4288 		/* disable all altq interfaces on active list */
4289 		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
4290 			if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
4291 				error = pf_disable_altq(altq);
4292 				if (error != 0)
4293 					break;
4294 			}
4295 		}
4296 		if (error == 0)
4297 			V_pf_altq_running = 0;
4298 		PF_RULES_WUNLOCK();
4299 		DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
4300 		break;
4301 	}
4302 
4303 	case DIOCADDALTQV0:
4304 	case DIOCADDALTQV1: {
4305 		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
4306 		struct pf_altq		*altq, *a;
4307 		struct ifnet		*ifp;
4308 
4309 		altq = malloc(sizeof(*altq), M_PFALTQ, M_WAITOK | M_ZERO);
4310 		error = pf_import_kaltq(pa, altq, IOCPARM_LEN(cmd));
4311 		if (error)
4312 			break;
4313 		altq->local_flags = 0;
4314 
4315 		PF_RULES_WLOCK();
4316 		if (pa->ticket != V_ticket_altqs_inactive) {
4317 			PF_RULES_WUNLOCK();
4318 			free(altq, M_PFALTQ);
4319 			error = EBUSY;
4320 			break;
4321 		}
4322 
4323 		/*
4324 		 * if this is for a queue, find the discipline and
4325 		 * copy the necessary fields
4326 		 */
4327 		if (altq->qname[0] != 0) {
4328 			if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
4329 				PF_RULES_WUNLOCK();
4330 				error = EBUSY;
4331 				free(altq, M_PFALTQ);
4332 				break;
4333 			}
4334 			altq->altq_disc = NULL;
4335 			TAILQ_FOREACH(a, V_pf_altq_ifs_inactive, entries) {
4336 				if (strncmp(a->ifname, altq->ifname,
4337 				    IFNAMSIZ) == 0) {
4338 					altq->altq_disc = a->altq_disc;
4339 					break;
4340 				}
4341 			}
4342 		}
4343 
4344 		if ((ifp = ifunit(altq->ifname)) == NULL)
4345 			altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
4346 		else
4347 			error = altq_add(ifp, altq);
4348 
4349 		if (error) {
4350 			PF_RULES_WUNLOCK();
4351 			free(altq, M_PFALTQ);
4352 			break;
4353 		}
4354 
4355 		if (altq->qname[0] != 0)
4356 			TAILQ_INSERT_TAIL(V_pf_altqs_inactive, altq, entries);
4357 		else
4358 			TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, altq, entries);
4359 		/* version error check done on import above */
4360 		pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
4361 		PF_RULES_WUNLOCK();
4362 		break;
4363 	}
4364 
4365 	case DIOCGETALTQSV0:
4366 	case DIOCGETALTQSV1: {
4367 		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
4368 		struct pf_altq		*altq;
4369 
4370 		PF_RULES_RLOCK();
4371 		pa->nr = 0;
4372 		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries)
4373 			pa->nr++;
4374 		TAILQ_FOREACH(altq, V_pf_altqs_active, entries)
4375 			pa->nr++;
4376 		pa->ticket = V_ticket_altqs_active;
4377 		PF_RULES_RUNLOCK();
4378 		break;
4379 	}
4380 
4381 	case DIOCGETALTQV0:
4382 	case DIOCGETALTQV1: {
4383 		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
4384 		struct pf_altq		*altq;
4385 
4386 		PF_RULES_RLOCK();
4387 		if (pa->ticket != V_ticket_altqs_active) {
4388 			PF_RULES_RUNLOCK();
4389 			error = EBUSY;
4390 			break;
4391 		}
4392 		altq = pf_altq_get_nth_active(pa->nr);
4393 		if (altq == NULL) {
4394 			PF_RULES_RUNLOCK();
4395 			error = EBUSY;
4396 			break;
4397 		}
4398 		pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
4399 		PF_RULES_RUNLOCK();
4400 		break;
4401 	}
4402 
4403 	case DIOCCHANGEALTQV0:
4404 	case DIOCCHANGEALTQV1:
4405 		/* CHANGEALTQ not supported yet! */
4406 		error = ENODEV;
4407 		break;
4408 
4409 	case DIOCGETQSTATSV0:
4410 	case DIOCGETQSTATSV1: {
4411 		struct pfioc_qstats_v1	*pq = (struct pfioc_qstats_v1 *)addr;
4412 		struct pf_altq		*altq;
4413 		int			 nbytes;
4414 		u_int32_t		 version;
4415 
4416 		PF_RULES_RLOCK();
4417 		if (pq->ticket != V_ticket_altqs_active) {
4418 			PF_RULES_RUNLOCK();
4419 			error = EBUSY;
4420 			break;
4421 		}
4422 		nbytes = pq->nbytes;
4423 		altq = pf_altq_get_nth_active(pq->nr);
4424 		if (altq == NULL) {
4425 			PF_RULES_RUNLOCK();
4426 			error = EBUSY;
4427 			break;
4428 		}
4429 
4430 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) != 0) {
4431 			PF_RULES_RUNLOCK();
4432 			error = ENXIO;
4433 			break;
4434 		}
4435 		PF_RULES_RUNLOCK();
4436 		if (cmd == DIOCGETQSTATSV0)
4437 			version = 0;  /* DIOCGETQSTATSV0 means stats struct v0 */
4438 		else
4439 			version = pq->version;
4440 		error = altq_getqstats(altq, pq->buf, &nbytes, version);
4441 		if (error == 0) {
4442 			pq->scheduler = altq->scheduler;
4443 			pq->nbytes = nbytes;
4444 		}
4445 		break;
4446 	}
4447 #endif /* ALTQ */
4448 
4449 	case DIOCBEGINADDRS: {
4450 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4451 
4452 		error = pf_ioctl_begin_addrs(&pp->ticket);
4453 		break;
4454 	}
4455 
4456 	case DIOCADDADDR: {
4457 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4458 		struct pf_nl_pooladdr npp = {};
4459 
4460 		npp.which = PF_RDR;
4461 		memcpy(&npp, pp, sizeof(*pp));
4462 		error = pf_ioctl_add_addr(&npp);
4463 		break;
4464 	}
4465 
4466 	case DIOCGETADDRS: {
4467 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4468 		struct pf_nl_pooladdr npp = {};
4469 
4470 		npp.which = PF_RDR;
4471 		memcpy(&npp, pp, sizeof(*pp));
4472 		error = pf_ioctl_get_addrs(&npp);
4473 		memcpy(pp, &npp, sizeof(*pp));
4474 
4475 		break;
4476 	}
4477 
4478 	case DIOCGETADDR: {
4479 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
4480 		struct pf_nl_pooladdr npp = {};
4481 
4482 		npp.which = PF_RDR;
4483 		memcpy(&npp, pp, sizeof(*pp));
4484 		error = pf_ioctl_get_addr(&npp);
4485 		memcpy(pp, &npp, sizeof(*pp));
4486 
4487 		break;
4488 	}
4489 
4490 	case DIOCCHANGEADDR: {
4491 		struct pfioc_pooladdr	*pca = (struct pfioc_pooladdr *)addr;
4492 		struct pf_kpool		*pool;
4493 		struct pf_kpooladdr	*oldpa = NULL, *newpa = NULL;
4494 		struct pf_kruleset	*ruleset;
4495 		struct pfi_kkif		*kif = NULL;
4496 
4497 		pca->anchor[sizeof(pca->anchor) - 1] = 0;
4498 
4499 		if (pca->action < PF_CHANGE_ADD_HEAD ||
4500 		    pca->action > PF_CHANGE_REMOVE) {
4501 			error = EINVAL;
4502 			break;
4503 		}
4504 		if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
4505 		    pca->addr.addr.type != PF_ADDR_DYNIFTL &&
4506 		    pca->addr.addr.type != PF_ADDR_TABLE) {
4507 			error = EINVAL;
4508 			break;
4509 		}
4510 		if (pca->addr.addr.p.dyn != NULL) {
4511 			error = EINVAL;
4512 			break;
4513 		}
4514 
4515 		if (pca->action != PF_CHANGE_REMOVE) {
4516 #ifndef INET
4517 			if (pca->af == AF_INET) {
4518 				error = EAFNOSUPPORT;
4519 				break;
4520 			}
4521 #endif /* INET */
4522 #ifndef INET6
4523 			if (pca->af == AF_INET6) {
4524 				error = EAFNOSUPPORT;
4525 				break;
4526 			}
4527 #endif /* INET6 */
4528 			newpa = malloc(sizeof(*newpa), M_PFRULE, M_WAITOK);
4529 			bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
4530 			if (newpa->ifname[0])
4531 				kif = pf_kkif_create(M_WAITOK);
4532 			newpa->kif = NULL;
4533 		}
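		/*
		 * As with DIOCCHANGERULE, the replacement pool address and
		 * its kif are allocated with M_WAITOK above, before the
		 * rules write lock is taken.
		 */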
4534 #define	ERROUT(x)	ERROUT_IOCTL(DIOCCHANGEADDR_error, x)
4535 		PF_RULES_WLOCK();
4536 		ruleset = pf_find_kruleset(pca->anchor);
4537 		if (ruleset == NULL)
4538 			ERROUT(EBUSY);
4539 
4540 		pool = pf_get_kpool(pca->anchor, pca->ticket, pca->r_action,
4541 		    pca->r_num, pca->r_last, 1, 1, PF_RDR);
4542 		if (pool == NULL)
4543 			ERROUT(EBUSY);
4544 
4545 		if (pca->action != PF_CHANGE_REMOVE) {
4546 			if (newpa->ifname[0]) {
4547 				newpa->kif = pfi_kkif_attach(kif, newpa->ifname);
4548 				pfi_kkif_ref(newpa->kif);
4549 				kif = NULL;
4550 			}
4551 
4552 			switch (newpa->addr.type) {
4553 			case PF_ADDR_DYNIFTL:
4554 				error = pfi_dynaddr_setup(&newpa->addr,
4555 				    pca->af);
4556 				break;
4557 			case PF_ADDR_TABLE:
4558 				newpa->addr.p.tbl = pfr_attach_table(ruleset,
4559 				    newpa->addr.v.tblname);
4560 				if (newpa->addr.p.tbl == NULL)
4561 					error = ENOMEM;
4562 				break;
4563 			}
4564 			if (error)
4565 				goto DIOCCHANGEADDR_error;
4566 		}
4567 
4568 		switch (pca->action) {
4569 		case PF_CHANGE_ADD_HEAD:
4570 			oldpa = TAILQ_FIRST(&pool->list);
4571 			break;
4572 		case PF_CHANGE_ADD_TAIL:
4573 			oldpa = TAILQ_LAST(&pool->list, pf_kpalist);
4574 			break;
4575 		default:
4576 			oldpa = TAILQ_FIRST(&pool->list);
4577 			for (int i = 0; oldpa && i < pca->nr; i++)
4578 				oldpa = TAILQ_NEXT(oldpa, entries);
4579 
4580 			if (oldpa == NULL)
4581 				ERROUT(EINVAL);
4582 		}
4583 
4584 		if (pca->action == PF_CHANGE_REMOVE) {
4585 			TAILQ_REMOVE(&pool->list, oldpa, entries);
4586 			switch (oldpa->addr.type) {
4587 			case PF_ADDR_DYNIFTL:
4588 				pfi_dynaddr_remove(oldpa->addr.p.dyn);
4589 				break;
4590 			case PF_ADDR_TABLE:
4591 				pfr_detach_table(oldpa->addr.p.tbl);
4592 				break;
4593 			}
4594 			if (oldpa->kif)
4595 				pfi_kkif_unref(oldpa->kif);
4596 			free(oldpa, M_PFRULE);
4597 		} else {
4598 			if (oldpa == NULL)
4599 				TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
4600 			else if (pca->action == PF_CHANGE_ADD_HEAD ||
4601 			    pca->action == PF_CHANGE_ADD_BEFORE)
4602 				TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
4603 			else
4604 				TAILQ_INSERT_AFTER(&pool->list, oldpa,
4605 				    newpa, entries);
4606 		}
4607 
4608 		pool->cur = TAILQ_FIRST(&pool->list);
4609 		PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr, pca->af);
4610 		PF_RULES_WUNLOCK();
4611 		break;
4612 
4613 #undef ERROUT
4614 DIOCCHANGEADDR_error:
4615 		if (newpa != NULL) {
4616 			if (newpa->kif)
4617 				pfi_kkif_unref(newpa->kif);
4618 			free(newpa, M_PFRULE);
4619 		}
4620 		PF_RULES_WUNLOCK();
4621 		pf_kkif_free(kif);
4622 		break;
4623 	}
4624 
4625 	case DIOCGETRULESETS: {
4626 		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
4627 
4628 		pr->path[sizeof(pr->path) - 1] = 0;
4629 
4630 		error = pf_ioctl_get_rulesets(pr);
4631 		break;
4632 	}
4633 
4634 	case DIOCGETRULESET: {
4635 		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
4636 
4637 		pr->path[sizeof(pr->path) - 1] = 0;
4638 
4639 		error = pf_ioctl_get_ruleset(pr);
4640 		break;
4641 	}
4642 
4643 	case DIOCRCLRTABLES: {
4644 		struct pfioc_table *io = (struct pfioc_table *)addr;
4645 
4646 		if (io->pfrio_esize != 0) {
4647 			error = ENODEV;
4648 			break;
4649 		}
4650 		PF_RULES_WLOCK();
4651 		error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
4652 		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
4653 		PF_RULES_WUNLOCK();
4654 		break;
4655 	}
4656 
4657 	case DIOCRADDTABLES: {
4658 		struct pfioc_table *io = (struct pfioc_table *)addr;
4659 		struct pfr_table *pfrts;
4660 		size_t totlen;
4661 
4662 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4663 			error = ENODEV;
4664 			break;
4665 		}
4666 
4667 		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4668 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4669 			error = ENOMEM;
4670 			break;
4671 		}
4672 
4673 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4674 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4675 		    M_TEMP, M_WAITOK);
4676 		error = copyin(io->pfrio_buffer, pfrts, totlen);
4677 		if (error) {
4678 			free(pfrts, M_TEMP);
4679 			break;
4680 		}
4681 		PF_RULES_WLOCK();
4682 		error = pfr_add_tables(pfrts, io->pfrio_size,
4683 		    &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4684 		PF_RULES_WUNLOCK();
4685 		free(pfrts, M_TEMP);
4686 		break;
4687 	}
4688 
4689 	case DIOCRDELTABLES: {
4690 		struct pfioc_table *io = (struct pfioc_table *)addr;
4691 		struct pfr_table *pfrts;
4692 		size_t totlen;
4693 
4694 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4695 			error = ENODEV;
4696 			break;
4697 		}
4698 
4699 		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4700 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4701 			error = ENOMEM;
4702 			break;
4703 		}
4704 
4705 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4706 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4707 		    M_TEMP, M_WAITOK);
4708 		error = copyin(io->pfrio_buffer, pfrts, totlen);
4709 		if (error) {
4710 			free(pfrts, M_TEMP);
4711 			break;
4712 		}
4713 		PF_RULES_WLOCK();
4714 		error = pfr_del_tables(pfrts, io->pfrio_size,
4715 		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4716 		PF_RULES_WUNLOCK();
4717 		free(pfrts, M_TEMP);
4718 		break;
4719 	}
4720 
4721 	case DIOCRGETTABLES: {
4722 		struct pfioc_table *io = (struct pfioc_table *)addr;
4723 		struct pfr_table *pfrts;
4724 		size_t totlen;
4725 		int n;
4726 
4727 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4728 			error = ENODEV;
4729 			break;
4730 		}
4731 		PF_RULES_RLOCK();
4732 		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4733 		if (n < 0) {
4734 			PF_RULES_RUNLOCK();
4735 			error = EINVAL;
4736 			break;
4737 		}
4738 		io->pfrio_size = min(io->pfrio_size, n);
4739 
4740 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4741 
4742 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4743 		    M_TEMP, M_NOWAIT | M_ZERO);
4744 		if (pfrts == NULL) {
4745 			error = ENOMEM;
4746 			PF_RULES_RUNLOCK();
4747 			break;
4748 		}
4749 		error = pfr_get_tables(&io->pfrio_table, pfrts,
4750 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4751 		PF_RULES_RUNLOCK();
4752 		if (error == 0)
4753 			error = copyout(pfrts, io->pfrio_buffer, totlen);
4754 		free(pfrts, M_TEMP);
4755 		break;
4756 	}
4757 
4758 	case DIOCRGETTSTATS: {
4759 		struct pfioc_table *io = (struct pfioc_table *)addr;
4760 		struct pfr_tstats *pfrtstats;
4761 		size_t totlen;
4762 		int n;
4763 
4764 		if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
4765 			error = ENODEV;
4766 			break;
4767 		}
4768 		PF_TABLE_STATS_LOCK();
4769 		PF_RULES_RLOCK();
4770 		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4771 		if (n < 0) {
4772 			PF_RULES_RUNLOCK();
4773 			PF_TABLE_STATS_UNLOCK();
4774 			error = EINVAL;
4775 			break;
4776 		}
4777 		io->pfrio_size = min(io->pfrio_size, n);
4778 
4779 		totlen = io->pfrio_size * sizeof(struct pfr_tstats);
4780 		pfrtstats = mallocarray(io->pfrio_size,
4781 		    sizeof(struct pfr_tstats), M_TEMP, M_NOWAIT | M_ZERO);
4782 		if (pfrtstats == NULL) {
4783 			error = ENOMEM;
4784 			PF_RULES_RUNLOCK();
4785 			PF_TABLE_STATS_UNLOCK();
4786 			break;
4787 		}
4788 		error = pfr_get_tstats(&io->pfrio_table, pfrtstats,
4789 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4790 		PF_RULES_RUNLOCK();
4791 		PF_TABLE_STATS_UNLOCK();
4792 		if (error == 0)
4793 			error = copyout(pfrtstats, io->pfrio_buffer, totlen);
4794 		free(pfrtstats, M_TEMP);
4795 		break;
4796 	}
4797 
4798 	case DIOCRCLRTSTATS: {
4799 		struct pfioc_table *io = (struct pfioc_table *)addr;
4800 		struct pfr_table *pfrts;
4801 		size_t totlen;
4802 
4803 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4804 			error = ENODEV;
4805 			break;
4806 		}
4807 
4808 		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
4809 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
4810 			/* We used to count tables and use the minimum required
4811 			 * size, so we didn't fail on overly large requests.
4812 			 * Keep doing so. */
4813 			io->pfrio_size = pf_ioctl_maxcount;
4814 			break;
4815 		}
4816 
4817 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4818 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4819 		    M_TEMP, M_WAITOK);
4820 		error = copyin(io->pfrio_buffer, pfrts, totlen);
4821 		if (error) {
4822 			free(pfrts, M_TEMP);
4823 			break;
4824 		}
4825 
4826 		PF_TABLE_STATS_LOCK();
4827 		PF_RULES_RLOCK();
4828 		error = pfr_clr_tstats(pfrts, io->pfrio_size,
4829 		    &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4830 		PF_RULES_RUNLOCK();
4831 		PF_TABLE_STATS_UNLOCK();
4832 		free(pfrts, M_TEMP);
4833 		break;
4834 	}
4835 
4836 	case DIOCRSETTFLAGS: {
4837 		struct pfioc_table *io = (struct pfioc_table *)addr;
4838 		struct pfr_table *pfrts;
4839 		size_t totlen;
4840 		int n;
4841 
4842 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
4843 			error = ENODEV;
4844 			break;
4845 		}
4846 
4847 		PF_RULES_RLOCK();
4848 		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
4849 		if (n < 0) {
4850 			PF_RULES_RUNLOCK();
4851 			error = EINVAL;
4852 			break;
4853 		}
4854 
4855 		io->pfrio_size = min(io->pfrio_size, n);
4856 		PF_RULES_RUNLOCK();
4857 
4858 		totlen = io->pfrio_size * sizeof(struct pfr_table);
4859 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
4860 		    M_TEMP, M_WAITOK);
4861 		error = copyin(io->pfrio_buffer, pfrts, totlen);
4862 		if (error) {
4863 			free(pfrts, M_TEMP);
4864 			break;
4865 		}
4866 		PF_RULES_WLOCK();
4867 		error = pfr_set_tflags(pfrts, io->pfrio_size,
4868 		    io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
4869 		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
4870 		PF_RULES_WUNLOCK();
4871 		free(pfrts, M_TEMP);
4872 		break;
4873 	}
4874 
4875 	case DIOCRCLRADDRS: {
4876 		struct pfioc_table *io = (struct pfioc_table *)addr;
4877 
4878 		if (io->pfrio_esize != 0) {
4879 			error = ENODEV;
4880 			break;
4881 		}
4882 		PF_RULES_WLOCK();
4883 		error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
4884 		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
4885 		PF_RULES_WUNLOCK();
4886 		break;
4887 	}
4888 
4889 	case DIOCRADDADDRS: {
4890 		struct pfioc_table *io = (struct pfioc_table *)addr;
4891 		struct pfr_addr *pfras;
4892 		size_t totlen;
4893 
4894 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4895 			error = ENODEV;
4896 			break;
4897 		}
4898 		if (io->pfrio_size < 0 ||
4899 		    io->pfrio_size > pf_ioctl_maxcount ||
4900 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4901 			error = EINVAL;
4902 			break;
4903 		}
4904 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
4905 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4906 		    M_TEMP, M_WAITOK);
4907 		error = copyin(io->pfrio_buffer, pfras, totlen);
4908 		if (error) {
4909 			free(pfras, M_TEMP);
4910 			break;
4911 		}
4912 		PF_RULES_WLOCK();
4913 		error = pfr_add_addrs(&io->pfrio_table, pfras,
4914 		    io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
4915 		    PFR_FLAG_USERIOCTL);
4916 		PF_RULES_WUNLOCK();
4917 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
4918 			error = copyout(pfras, io->pfrio_buffer, totlen);
4919 		free(pfras, M_TEMP);
4920 		break;
4921 	}
4922 
4923 	case DIOCRDELADDRS: {
4924 		struct pfioc_table *io = (struct pfioc_table *)addr;
4925 		struct pfr_addr *pfras;
4926 		size_t totlen;
4927 
4928 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4929 			error = ENODEV;
4930 			break;
4931 		}
4932 		if (io->pfrio_size < 0 ||
4933 		    io->pfrio_size > pf_ioctl_maxcount ||
4934 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
4935 			error = EINVAL;
4936 			break;
4937 		}
4938 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
4939 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
4940 		    M_TEMP, M_WAITOK);
4941 		error = copyin(io->pfrio_buffer, pfras, totlen);
4942 		if (error) {
4943 			free(pfras, M_TEMP);
4944 			break;
4945 		}
4946 		PF_RULES_WLOCK();
4947 		error = pfr_del_addrs(&io->pfrio_table, pfras,
4948 		    io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
4949 		    PFR_FLAG_USERIOCTL);
4950 		PF_RULES_WUNLOCK();
4951 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
4952 			error = copyout(pfras, io->pfrio_buffer, totlen);
4953 		free(pfras, M_TEMP);
4954 		break;
4955 	}
4956 
4957 	case DIOCRSETADDRS: {
4958 		struct pfioc_table *io = (struct pfioc_table *)addr;
4959 		struct pfr_addr *pfras;
4960 		size_t totlen, count;
4961 
4962 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
4963 			error = ENODEV;
4964 			break;
4965 		}
4966 		if (io->pfrio_size < 0 || io->pfrio_size2 < 0) {
4967 			error = EINVAL;
4968 			break;
4969 		}
4970 		count = max(io->pfrio_size, io->pfrio_size2);
4971 		if (count > pf_ioctl_maxcount ||
4972 		    WOULD_OVERFLOW(count, sizeof(struct pfr_addr))) {
4973 			error = EINVAL;
4974 			break;
4975 		}
4976 		totlen = count * sizeof(struct pfr_addr);
4977 		pfras = mallocarray(count, sizeof(struct pfr_addr), M_TEMP,
4978 		    M_WAITOK);
4979 		error = copyin(io->pfrio_buffer, pfras, totlen);
4980 		if (error) {
4981 			free(pfras, M_TEMP);
4982 			break;
4983 		}
4984 		PF_RULES_WLOCK();
4985 		error = pfr_set_addrs(&io->pfrio_table, pfras,
4986 		    io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
4987 		    &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
4988 		    PFR_FLAG_USERIOCTL, 0);
4989 		PF_RULES_WUNLOCK();
4990 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
4991 			error = copyout(pfras, io->pfrio_buffer, totlen);
4992 		free(pfras, M_TEMP);
4993 		break;
4994 	}
4995 
4996 	case DIOCRGETADDRS: {
4997 		struct pfioc_table *io = (struct pfioc_table *)addr;
4998 		struct pfr_addr *pfras;
4999 		size_t totlen;
5000 
5001 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
5002 			error = ENODEV;
5003 			break;
5004 		}
5005 		if (io->pfrio_size < 0 ||
5006 		    io->pfrio_size > pf_ioctl_maxcount ||
5007 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
5008 			error = EINVAL;
5009 			break;
5010 		}
5011 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
5012 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
5013 		    M_TEMP, M_WAITOK | M_ZERO);
5014 		PF_RULES_RLOCK();
5015 		error = pfr_get_addrs(&io->pfrio_table, pfras,
5016 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
5017 		PF_RULES_RUNLOCK();
5018 		if (error == 0)
5019 			error = copyout(pfras, io->pfrio_buffer, totlen);
5020 		free(pfras, M_TEMP);
5021 		break;
5022 	}
5023 
5024 	case DIOCRGETASTATS: {
5025 		struct pfioc_table *io = (struct pfioc_table *)addr;
5026 		struct pfr_astats *pfrastats;
5027 		size_t totlen;
5028 
5029 		if (io->pfrio_esize != sizeof(struct pfr_astats)) {
5030 			error = ENODEV;
5031 			break;
5032 		}
5033 		if (io->pfrio_size < 0 ||
5034 		    io->pfrio_size > pf_ioctl_maxcount ||
5035 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_astats))) {
5036 			error = EINVAL;
5037 			break;
5038 		}
5039 		totlen = io->pfrio_size * sizeof(struct pfr_astats);
5040 		pfrastats = mallocarray(io->pfrio_size,
5041 		    sizeof(struct pfr_astats), M_TEMP, M_WAITOK | M_ZERO);
5042 		PF_RULES_RLOCK();
5043 		error = pfr_get_astats(&io->pfrio_table, pfrastats,
5044 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
5045 		PF_RULES_RUNLOCK();
5046 		if (error == 0)
5047 			error = copyout(pfrastats, io->pfrio_buffer, totlen);
5048 		free(pfrastats, M_TEMP);
5049 		break;
5050 	}
5051 
5052 	case DIOCRCLRASTATS: {
5053 		struct pfioc_table *io = (struct pfioc_table *)addr;
5054 		struct pfr_addr *pfras;
5055 		size_t totlen;
5056 
5057 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
5058 			error = ENODEV;
5059 			break;
5060 		}
5061 		if (io->pfrio_size < 0 ||
5062 		    io->pfrio_size > pf_ioctl_maxcount ||
5063 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
5064 			error = EINVAL;
5065 			break;
5066 		}
5067 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
5068 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
5069 		    M_TEMP, M_WAITOK);
5070 		error = copyin(io->pfrio_buffer, pfras, totlen);
5071 		if (error) {
5072 			free(pfras, M_TEMP);
5073 			break;
5074 		}
5075 		PF_RULES_WLOCK();
5076 		error = pfr_clr_astats(&io->pfrio_table, pfras,
5077 		    io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
5078 		    PFR_FLAG_USERIOCTL);
5079 		PF_RULES_WUNLOCK();
5080 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
5081 			error = copyout(pfras, io->pfrio_buffer, totlen);
5082 		free(pfras, M_TEMP);
5083 		break;
5084 	}
5085 
5086 	case DIOCRTSTADDRS: {
5087 		struct pfioc_table *io = (struct pfioc_table *)addr;
5088 		struct pfr_addr *pfras;
5089 		size_t totlen;
5090 
5091 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
5092 			error = ENODEV;
5093 			break;
5094 		}
5095 		if (io->pfrio_size < 0 ||
5096 		    io->pfrio_size > pf_ioctl_maxcount ||
5097 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
5098 			error = EINVAL;
5099 			break;
5100 		}
5101 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
5102 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
5103 		    M_TEMP, M_WAITOK);
5104 		error = copyin(io->pfrio_buffer, pfras, totlen);
5105 		if (error) {
5106 			free(pfras, M_TEMP);
5107 			break;
5108 		}
5109 		PF_RULES_RLOCK();
5110 		error = pfr_tst_addrs(&io->pfrio_table, pfras,
5111 		    io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
5112 		    PFR_FLAG_USERIOCTL);
5113 		PF_RULES_RUNLOCK();
5114 		if (error == 0)
5115 			error = copyout(pfras, io->pfrio_buffer, totlen);
5116 		free(pfras, M_TEMP);
5117 		break;
5118 	}
5119 
5120 	case DIOCRINADEFINE: {
5121 		struct pfioc_table *io = (struct pfioc_table *)addr;
5122 		struct pfr_addr *pfras;
5123 		size_t totlen;
5124 
5125 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
5126 			error = ENODEV;
5127 			break;
5128 		}
5129 		if (io->pfrio_size < 0 ||
5130 		    io->pfrio_size > pf_ioctl_maxcount ||
5131 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
5132 			error = EINVAL;
5133 			break;
5134 		}
5135 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
5136 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
5137 		    M_TEMP, M_WAITOK);
5138 		error = copyin(io->pfrio_buffer, pfras, totlen);
5139 		if (error) {
5140 			free(pfras, M_TEMP);
5141 			break;
5142 		}
5143 		PF_RULES_WLOCK();
5144 		error = pfr_ina_define(&io->pfrio_table, pfras,
5145 		    io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
5146 		    io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
5147 		PF_RULES_WUNLOCK();
5148 		free(pfras, M_TEMP);
5149 		break;
5150 	}
5151 
5152 	case DIOCOSFPADD: {
5153 		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
5154 		PF_RULES_WLOCK();
5155 		error = pf_osfp_add(io);
5156 		PF_RULES_WUNLOCK();
5157 		break;
5158 	}
5159 
5160 	case DIOCOSFPGET: {
5161 		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
5162 		PF_RULES_RLOCK();
5163 		error = pf_osfp_get(io);
5164 		PF_RULES_RUNLOCK();
5165 		break;
5166 	}
5167 
5168 	case DIOCXBEGIN: {
5169 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
5170 		struct pfioc_trans_e	*ioes, *ioe;
5171 		size_t			 totlen;
5172 		int			 i;
5173 
5174 		if (io->esize != sizeof(*ioe)) {
5175 			error = ENODEV;
5176 			break;
5177 		}
5178 		if (io->size < 0 ||
5179 		    io->size > pf_ioctl_maxcount ||
5180 		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
5181 			error = EINVAL;
5182 			break;
5183 		}
5184 		totlen = sizeof(struct pfioc_trans_e) * io->size;
5185 		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
5186 		    M_TEMP, M_WAITOK);
5187 		error = copyin(io->array, ioes, totlen);
5188 		if (error) {
5189 			free(ioes, M_TEMP);
5190 			break;
5191 		}
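		/*
		 * Open an inactive copy (and ticket) for every ruleset,
		 * table or ALTQ entry in the transaction array; the first
		 * failure aborts the whole ioctl.
		 */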
5192 		PF_RULES_WLOCK();
5193 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5194 			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
5195 			switch (ioe->rs_num) {
5196 			case PF_RULESET_ETH:
5197 				if ((error = pf_begin_eth(&ioe->ticket, ioe->anchor))) {
5198 					PF_RULES_WUNLOCK();
5199 					free(ioes, M_TEMP);
5200 					goto fail;
5201 				}
5202 				break;
5203 #ifdef ALTQ
5204 			case PF_RULESET_ALTQ:
5205 				if (ioe->anchor[0]) {
5206 					PF_RULES_WUNLOCK();
5207 					free(ioes, M_TEMP);
5208 					error = EINVAL;
5209 					goto fail;
5210 				}
5211 				if ((error = pf_begin_altq(&ioe->ticket))) {
5212 					PF_RULES_WUNLOCK();
5213 					free(ioes, M_TEMP);
5214 					goto fail;
5215 				}
5216 				break;
5217 #endif /* ALTQ */
5218 			case PF_RULESET_TABLE:
5219 			    {
5220 				struct pfr_table table;
5221 
5222 				bzero(&table, sizeof(table));
5223 				strlcpy(table.pfrt_anchor, ioe->anchor,
5224 				    sizeof(table.pfrt_anchor));
5225 				if ((error = pfr_ina_begin(&table,
5226 				    &ioe->ticket, NULL, 0))) {
5227 					PF_RULES_WUNLOCK();
5228 					free(ioes, M_TEMP);
5229 					goto fail;
5230 				}
5231 				break;
5232 			    }
5233 			default:
5234 				if ((error = pf_begin_rules(&ioe->ticket,
5235 				    ioe->rs_num, ioe->anchor))) {
5236 					PF_RULES_WUNLOCK();
5237 					free(ioes, M_TEMP);
5238 					goto fail;
5239 				}
5240 				break;
5241 			}
5242 		}
5243 		PF_RULES_WUNLOCK();
5244 		error = copyout(ioes, io->array, totlen);
5245 		free(ioes, M_TEMP);
5246 		break;
5247 	}
5248 
5249 	case DIOCXROLLBACK: {
5250 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
5251 		struct pfioc_trans_e	*ioe, *ioes;
5252 		size_t			 totlen;
5253 		int			 i;
5254 
5255 		if (io->esize != sizeof(*ioe)) {
5256 			error = ENODEV;
5257 			break;
5258 		}
5259 		if (io->size < 0 ||
5260 		    io->size > pf_ioctl_maxcount ||
5261 		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
5262 			error = EINVAL;
5263 			break;
5264 		}
5265 		totlen = sizeof(struct pfioc_trans_e) * io->size;
5266 		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
5267 		    M_TEMP, M_WAITOK);
5268 		error = copyin(io->array, ioes, totlen);
5269 		if (error) {
5270 			free(ioes, M_TEMP);
5271 			break;
5272 		}
5273 		PF_RULES_WLOCK();
5274 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5275 			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
5276 			switch (ioe->rs_num) {
5277 			case PF_RULESET_ETH:
5278 				if ((error = pf_rollback_eth(ioe->ticket,
5279 				    ioe->anchor))) {
5280 					PF_RULES_WUNLOCK();
5281 					free(ioes, M_TEMP);
5282 					goto fail; /* really bad */
5283 				}
5284 				break;
5285 #ifdef ALTQ
5286 			case PF_RULESET_ALTQ:
5287 				if (ioe->anchor[0]) {
5288 					PF_RULES_WUNLOCK();
5289 					free(ioes, M_TEMP);
5290 					error = EINVAL;
5291 					goto fail;
5292 				}
5293 				if ((error = pf_rollback_altq(ioe->ticket))) {
5294 					PF_RULES_WUNLOCK();
5295 					free(ioes, M_TEMP);
5296 					goto fail; /* really bad */
5297 				}
5298 				break;
5299 #endif /* ALTQ */
5300 			case PF_RULESET_TABLE:
5301 			    {
5302 				struct pfr_table table;
5303 
5304 				bzero(&table, sizeof(table));
5305 				strlcpy(table.pfrt_anchor, ioe->anchor,
5306 				    sizeof(table.pfrt_anchor));
5307 				if ((error = pfr_ina_rollback(&table,
5308 				    ioe->ticket, NULL, 0))) {
5309 					PF_RULES_WUNLOCK();
5310 					free(ioes, M_TEMP);
5311 					goto fail; /* really bad */
5312 				}
5313 				break;
5314 			    }
5315 			default:
5316 				if ((error = pf_rollback_rules(ioe->ticket,
5317 				    ioe->rs_num, ioe->anchor))) {
5318 					PF_RULES_WUNLOCK();
5319 					free(ioes, M_TEMP);
5320 					goto fail; /* really bad */
5321 				}
5322 				break;
5323 			}
5324 		}
5325 		PF_RULES_WUNLOCK();
5326 		free(ioes, M_TEMP);
5327 		break;
5328 	}
5329 
5330 	case DIOCXCOMMIT: {
5331 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
5332 		struct pfioc_trans_e	*ioe, *ioes;
5333 		struct pf_kruleset	*rs;
5334 		struct pf_keth_ruleset	*ers;
5335 		size_t			 totlen;
5336 		int			 i;
5337 
5338 		if (io->esize != sizeof(*ioe)) {
5339 			error = ENODEV;
5340 			break;
5341 		}
5342 
5343 		if (io->size < 0 ||
5344 		    io->size > pf_ioctl_maxcount ||
5345 		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
5346 			error = EINVAL;
5347 			break;
5348 		}
5349 
5350 		totlen = sizeof(struct pfioc_trans_e) * io->size;
5351 		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
5352 		    M_TEMP, M_WAITOK);
5353 		error = copyin(io->array, ioes, totlen);
5354 		if (error) {
5355 			free(ioes, M_TEMP);
5356 			break;
5357 		}
5358 		PF_RULES_WLOCK();
5359 		/* First make sure everything will succeed. */
5360 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5361 			ioe->anchor[sizeof(ioe->anchor) - 1] = 0;
5362 			switch (ioe->rs_num) {
5363 			case PF_RULESET_ETH:
5364 				ers = pf_find_keth_ruleset(ioe->anchor);
5365 				if (ers == NULL || ioe->ticket == 0 ||
5366 				    ioe->ticket != ers->inactive.ticket) {
5367 					PF_RULES_WUNLOCK();
5368 					free(ioes, M_TEMP);
5369 					error = EINVAL;
5370 					goto fail;
5371 				}
5372 				break;
5373 #ifdef ALTQ
5374 			case PF_RULESET_ALTQ:
5375 				if (ioe->anchor[0]) {
5376 					PF_RULES_WUNLOCK();
5377 					free(ioes, M_TEMP);
5378 					error = EINVAL;
5379 					goto fail;
5380 				}
5381 				if (!V_altqs_inactive_open || ioe->ticket !=
5382 				    V_ticket_altqs_inactive) {
5383 					PF_RULES_WUNLOCK();
5384 					free(ioes, M_TEMP);
5385 					error = EBUSY;
5386 					goto fail;
5387 				}
5388 				break;
5389 #endif /* ALTQ */
5390 			case PF_RULESET_TABLE:
5391 				rs = pf_find_kruleset(ioe->anchor);
5392 				if (rs == NULL || !rs->topen || ioe->ticket !=
5393 				    rs->tticket) {
5394 					PF_RULES_WUNLOCK();
5395 					free(ioes, M_TEMP);
5396 					error = EBUSY;
5397 					goto fail;
5398 				}
5399 				break;
5400 			default:
5401 				if (ioe->rs_num < 0 || ioe->rs_num >=
5402 				    PF_RULESET_MAX) {
5403 					PF_RULES_WUNLOCK();
5404 					free(ioes, M_TEMP);
5405 					error = EINVAL;
5406 					goto fail;
5407 				}
5408 				rs = pf_find_kruleset(ioe->anchor);
5409 				if (rs == NULL ||
5410 				    !rs->rules[ioe->rs_num].inactive.open ||
5411 				    rs->rules[ioe->rs_num].inactive.ticket !=
5412 				    ioe->ticket) {
5413 					PF_RULES_WUNLOCK();
5414 					free(ioes, M_TEMP);
5415 					error = EBUSY;
5416 					goto fail;
5417 				}
5418 				break;
5419 			}
5420 		}
5421 		/* Now do the commit - no errors should happen here. */
5422 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
5423 			switch (ioe->rs_num) {
5424 			case PF_RULESET_ETH:
5425 				if ((error = pf_commit_eth(ioe->ticket, ioe->anchor))) {
5426 					PF_RULES_WUNLOCK();
5427 					free(ioes, M_TEMP);
5428 					goto fail; /* really bad */
5429 				}
5430 				break;
5431 #ifdef ALTQ
5432 			case PF_RULESET_ALTQ:
5433 				if ((error = pf_commit_altq(ioe->ticket))) {
5434 					PF_RULES_WUNLOCK();
5435 					free(ioes, M_TEMP);
5436 					goto fail; /* really bad */
5437 				}
5438 				break;
5439 #endif /* ALTQ */
5440 			case PF_RULESET_TABLE:
5441 			    {
5442 				struct pfr_table table;
5443 
5444 				bzero(&table, sizeof(table));
5445 				(void)strlcpy(table.pfrt_anchor, ioe->anchor,
5446 				    sizeof(table.pfrt_anchor));
5447 				if ((error = pfr_ina_commit(&table,
5448 				    ioe->ticket, NULL, NULL, 0))) {
5449 					PF_RULES_WUNLOCK();
5450 					free(ioes, M_TEMP);
5451 					goto fail; /* really bad */
5452 				}
5453 				break;
5454 			    }
5455 			default:
5456 				if ((error = pf_commit_rules(ioe->ticket,
5457 				    ioe->rs_num, ioe->anchor))) {
5458 					PF_RULES_WUNLOCK();
5459 					free(ioes, M_TEMP);
5460 					goto fail; /* really bad */
5461 				}
5462 				break;
5463 			}
5464 		}
5465 		PF_RULES_WUNLOCK();
5466 
5467 		/* Only hook into Ethernet traffic if we've got rules for it. */
5468 		if (! TAILQ_EMPTY(V_pf_keth->active.rules))
5469 			hook_pf_eth();
5470 		else
5471 			dehook_pf_eth();
5472 
5473 		free(ioes, M_TEMP);
5474 		break;
5475 	}
5476 
5477 	case DIOCGETSRCNODES: {
5478 		struct pfioc_src_nodes	*psn = (struct pfioc_src_nodes *)addr;
5479 		struct pf_srchash	*sh;
5480 		struct pf_ksrc_node	*n;
5481 		struct pf_src_node	*p, *pstore;
5482 		uint32_t		 i, nr = 0;
5483 
5484 		for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask;
5485 				i++, sh++) {
5486 			PF_HASHROW_LOCK(sh);
5487 			LIST_FOREACH(n, &sh->nodes, entry)
5488 				nr++;
5489 			PF_HASHROW_UNLOCK(sh);
5490 		}
5491 
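		/*
		 * The first pass above only counted the source nodes; clamp
		 * the caller's buffer length to that count (or report the
		 * required size when psn_len is 0) before copying them out.
		 */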
5492 		psn->psn_len = min(psn->psn_len,
5493 		    sizeof(struct pf_src_node) * nr);
5494 
5495 		if (psn->psn_len == 0) {
5496 			psn->psn_len = sizeof(struct pf_src_node) * nr;
5497 			break;
5498 		}
5499 
5500 		nr = 0;
5501 
5502 		p = pstore = malloc(psn->psn_len, M_TEMP, M_WAITOK | M_ZERO);
5503 		for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask;
5504 		    i++, sh++) {
5505 		    PF_HASHROW_LOCK(sh);
5506 		    LIST_FOREACH(n, &sh->nodes, entry) {
5507 
5508 			if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
5509 				break;
5510 
5511 			pf_src_node_copy(n, p);
5512 
5513 			p++;
5514 			nr++;
5515 		    }
5516 		    PF_HASHROW_UNLOCK(sh);
5517 		}
5518 		error = copyout(pstore, psn->psn_src_nodes,
5519 		    sizeof(struct pf_src_node) * nr);
5520 		if (error) {
5521 			free(pstore, M_TEMP);
5522 			break;
5523 		}
5524 		psn->psn_len = sizeof(struct pf_src_node) * nr;
5525 		free(pstore, M_TEMP);
5526 		break;
5527 	}
5528 
5529 	case DIOCCLRSRCNODES: {
5530 		pf_kill_srcnodes(NULL);
5531 		break;
5532 	}
5533 
5534 	case DIOCKILLSRCNODES:
5535 		pf_kill_srcnodes((struct pfioc_src_node_kill *)addr);
5536 		break;
5537 
5538 #ifdef COMPAT_FREEBSD13
5539 	case DIOCKEEPCOUNTERS_FREEBSD13:
5540 #endif
5541 	case DIOCKEEPCOUNTERS:
5542 		error = pf_keepcounters((struct pfioc_nv *)addr);
5543 		break;
5544 
5545 	case DIOCGETSYNCOOKIES:
5546 		error = pf_get_syncookies((struct pfioc_nv *)addr);
5547 		break;
5548 
5549 	case DIOCSETSYNCOOKIES:
5550 		error = pf_set_syncookies((struct pfioc_nv *)addr);
5551 		break;
5552 
5553 	case DIOCSETHOSTID: {
5554 		u_int32_t	*hostid = (u_int32_t *)addr;
5555 
5556 		PF_RULES_WLOCK();
5557 		if (*hostid == 0)
5558 			V_pf_status.hostid = arc4random();
5559 		else
5560 			V_pf_status.hostid = *hostid;
5561 		PF_RULES_WUNLOCK();
5562 		break;
5563 	}
5564 
5565 	case DIOCOSFPFLUSH:
5566 		PF_RULES_WLOCK();
5567 		pf_osfp_flush();
5568 		PF_RULES_WUNLOCK();
5569 		break;
5570 
5571 	case DIOCIGETIFACES: {
5572 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
5573 		struct pfi_kif *ifstore;
5574 		size_t bufsiz;
5575 
5576 		if (io->pfiio_esize != sizeof(struct pfi_kif)) {
5577 			error = ENODEV;
5578 			break;
5579 		}
5580 
5581 		if (io->pfiio_size < 0 ||
5582 		    io->pfiio_size > pf_ioctl_maxcount ||
5583 		    WOULD_OVERFLOW(io->pfiio_size, sizeof(struct pfi_kif))) {
5584 			error = EINVAL;
5585 			break;
5586 		}
5587 
5588 		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
5589 
5590 		bufsiz = io->pfiio_size * sizeof(struct pfi_kif);
5591 		ifstore = mallocarray(io->pfiio_size, sizeof(struct pfi_kif),
5592 		    M_TEMP, M_WAITOK | M_ZERO);
5593 
5594 		PF_RULES_RLOCK();
5595 		pfi_get_ifaces(io->pfiio_name, ifstore, &io->pfiio_size);
5596 		PF_RULES_RUNLOCK();
5597 		error = copyout(ifstore, io->pfiio_buffer, bufsiz);
5598 		free(ifstore, M_TEMP);
5599 		break;
5600 	}
5601 
5602 	case DIOCSETIFFLAG: {
5603 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
5604 
5605 		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
5606 
5607 		PF_RULES_WLOCK();
5608 		error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
5609 		PF_RULES_WUNLOCK();
5610 		break;
5611 	}
5612 
5613 	case DIOCCLRIFFLAG: {
5614 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
5615 
5616 		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
5617 
5618 		PF_RULES_WLOCK();
5619 		error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
5620 		PF_RULES_WUNLOCK();
5621 		break;
5622 	}
5623 
5624 	case DIOCSETREASS: {
5625 		u_int32_t	*reass = (u_int32_t *)addr;
5626 
5627 		V_pf_status.reass = *reass & (PF_REASS_ENABLED|PF_REASS_NODF);
5628 		/* Removal of the DF flag without reassembly enabled is not a
5629 		 * valid combination; disable reassembly in that case. */
5630 		if (!(V_pf_status.reass & PF_REASS_ENABLED))
5631 			V_pf_status.reass = 0;
5632 		break;
5633 	}
5634 
5635 	default:
5636 		error = ENODEV;
5637 		break;
5638 	}
5639 fail:
5640 	CURVNET_RESTORE();
5641 
5642 #undef ERROUT_IOCTL
5643 
5644 	return (error);
5645 }
5646 
5647 void
5648 pfsync_state_export(union pfsync_state_union *sp, struct pf_kstate *st, int msg_version)
5649 {
5650 	bzero(sp, sizeof(union pfsync_state_union));
5651 
5652 	/* copy from state key */
5653 	sp->pfs_1301.key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
5654 	sp->pfs_1301.key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
5655 	sp->pfs_1301.key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
5656 	sp->pfs_1301.key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
5657 	sp->pfs_1301.key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
5658 	sp->pfs_1301.key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
5659 	sp->pfs_1301.key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
5660 	sp->pfs_1301.key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
5661 	sp->pfs_1301.proto = st->key[PF_SK_WIRE]->proto;
5662 	sp->pfs_1301.af = st->key[PF_SK_WIRE]->af;
5663 
5664 	/* copy from state */
5665 	strlcpy(sp->pfs_1301.ifname, st->kif->pfik_name, sizeof(sp->pfs_1301.ifname));
5666 	bcopy(&st->act.rt_addr, &sp->pfs_1301.rt_addr, sizeof(sp->pfs_1301.rt_addr));
5667 	sp->pfs_1301.creation = htonl(time_uptime - (st->creation / 1000));
5668 	sp->pfs_1301.expire = pf_state_expires(st);
5669 	if (sp->pfs_1301.expire <= time_uptime)
5670 		sp->pfs_1301.expire = htonl(0);
5671 	else
5672 		sp->pfs_1301.expire = htonl(sp->pfs_1301.expire - time_uptime);
5673 
5674 	sp->pfs_1301.direction = st->direction;
5675 	sp->pfs_1301.log = st->act.log;
5676 	sp->pfs_1301.timeout = st->timeout;
5677 
5678 	switch (msg_version) {
5679 		case PFSYNC_MSG_VERSION_1301:
5680 			sp->pfs_1301.state_flags = st->state_flags;
5681 			break;
5682 		case PFSYNC_MSG_VERSION_1400:
5683 			sp->pfs_1400.state_flags = htons(st->state_flags);
5684 			sp->pfs_1400.qid = htons(st->act.qid);
5685 			sp->pfs_1400.pqid = htons(st->act.pqid);
5686 			sp->pfs_1400.dnpipe = htons(st->act.dnpipe);
5687 			sp->pfs_1400.dnrpipe = htons(st->act.dnrpipe);
5688 			sp->pfs_1400.rtableid = htonl(st->act.rtableid);
5689 			sp->pfs_1400.min_ttl = st->act.min_ttl;
5690 			sp->pfs_1400.set_tos = st->act.set_tos;
5691 			sp->pfs_1400.max_mss = htons(st->act.max_mss);
5692 			sp->pfs_1400.set_prio[0] = st->act.set_prio[0];
5693 			sp->pfs_1400.set_prio[1] = st->act.set_prio[1];
5694 			sp->pfs_1400.rt = st->act.rt;
5695 			if (st->act.rt_kif)
5696 				strlcpy(sp->pfs_1400.rt_ifname,
5697 				    st->act.rt_kif->pfik_name,
5698 				    sizeof(sp->pfs_1400.rt_ifname));
5699 			break;
5700 		default:
5701 			panic("%s: Unsupported pfsync_msg_version %d",
5702 			    __func__, msg_version);
5703 	}
5704 
5705 	/*
5706 	 * XXX Why do we bother pfsyncing source node information if source
5707 	 * nodes are not synced? Showing users that there is source tracking
5708 	 * when there is none seems useless.
5709 	 */
5710 	if (st->sns[PF_SN_LIMIT] != NULL)
5711 		sp->pfs_1301.sync_flags |= PFSYNC_FLAG_SRCNODE;
5712 	if (st->sns[PF_SN_NAT] != NULL || st->sns[PF_SN_ROUTE] != NULL)
5713 		sp->pfs_1301.sync_flags |= PFSYNC_FLAG_NATSRCNODE;
5714 
5715 	sp->pfs_1301.id = st->id;
5716 	sp->pfs_1301.creatorid = st->creatorid;
5717 	pf_state_peer_hton(&st->src, &sp->pfs_1301.src);
5718 	pf_state_peer_hton(&st->dst, &sp->pfs_1301.dst);
5719 
5720 	if (st->rule == NULL)
5721 		sp->pfs_1301.rule = htonl(-1);
5722 	else
5723 		sp->pfs_1301.rule = htonl(st->rule->nr);
5724 	if (st->anchor == NULL)
5725 		sp->pfs_1301.anchor = htonl(-1);
5726 	else
5727 		sp->pfs_1301.anchor = htonl(st->anchor->nr);
5728 	if (st->nat_rule == NULL)
5729 		sp->pfs_1301.nat_rule = htonl(-1);
5730 	else
5731 		sp->pfs_1301.nat_rule = htonl(st->nat_rule->nr);
5732 
5733 	pf_state_counter_hton(st->packets[0], sp->pfs_1301.packets[0]);
5734 	pf_state_counter_hton(st->packets[1], sp->pfs_1301.packets[1]);
5735 	pf_state_counter_hton(st->bytes[0], sp->pfs_1301.bytes[0]);
5736 	pf_state_counter_hton(st->bytes[1], sp->pfs_1301.bytes[1]);
5737 }
5738 
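/*
 * Export a kernel state into the pf_state_export structure handed to
 * userland (PF_STATE_VERSION), filling both the compat 8-bit and the
 * 16-bit state flag fields.
 */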
5739 void
5740 pf_state_export(struct pf_state_export *sp, struct pf_kstate *st)
5741 {
5742 	bzero(sp, sizeof(*sp));
5743 
5744 	sp->version = PF_STATE_VERSION;
5745 
5746 	/* copy from state key */
5747 	sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
5748 	sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
5749 	sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
5750 	sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
5751 	sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
5752 	sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
5753 	sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
5754 	sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
5755 	sp->proto = st->key[PF_SK_WIRE]->proto;
5756 	sp->af = st->key[PF_SK_WIRE]->af;
5757 
5758 	/* copy from state */
5759 	strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
5760 	strlcpy(sp->orig_ifname, st->orig_kif->pfik_name,
5761 	    sizeof(sp->orig_ifname));
5762 	memcpy(&sp->rt_addr, &st->act.rt_addr, sizeof(sp->rt_addr));
5763 	sp->creation = htonl(time_uptime - (st->creation / 1000));
5764 	sp->expire = pf_state_expires(st);
5765 	if (sp->expire <= time_uptime)
5766 		sp->expire = htonl(0);
5767 	else
5768 		sp->expire = htonl(sp->expire - time_uptime);
5769 
5770 	sp->direction = st->direction;
5771 	sp->log = st->act.log;
5772 	sp->timeout = st->timeout;
5773 	/* 8 bits for the old libpfctl, 16 bits for the new libpfctl */
5774 	sp->state_flags_compat = st->state_flags;
5775 	sp->state_flags = htons(st->state_flags);
5776 	if (st->sns[PF_SN_LIMIT] != NULL)
5777 		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
5778 	if (st->sns[PF_SN_NAT] != NULL || st->sns[PF_SN_ROUTE] != NULL)
5779 		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;
5780 	sp->id = st->id;
5781 	sp->creatorid = st->creatorid;
5782 	pf_state_peer_hton(&st->src, &sp->src);
5783 	pf_state_peer_hton(&st->dst, &sp->dst);
5784 
5785 	if (st->rule == NULL)
5786 		sp->rule = htonl(-1);
5787 	else
5788 		sp->rule = htonl(st->rule->nr);
5789 	if (st->anchor == NULL)
5790 		sp->anchor = htonl(-1);
5791 	else
5792 		sp->anchor = htonl(st->anchor->nr);
5793 	if (st->nat_rule == NULL)
5794 		sp->nat_rule = htonl(-1);
5795 	else
5796 		sp->nat_rule = htonl(st->nat_rule->nr);
5797 
5798 	sp->packets[0] = st->packets[0];
5799 	sp->packets[1] = st->packets[1];
5800 	sp->bytes[0] = st->bytes[0];
5801 	sp->bytes[1] = st->bytes[1];
5802 
5803 	sp->qid = htons(st->act.qid);
5804 	sp->pqid = htons(st->act.pqid);
5805 	sp->dnpipe = htons(st->act.dnpipe);
5806 	sp->dnrpipe = htons(st->act.dnrpipe);
5807 	sp->rtableid = htonl(st->act.rtableid);
5808 	sp->min_ttl = st->act.min_ttl;
5809 	sp->set_tos = st->act.set_tos;
5810 	sp->max_mss = htons(st->act.max_mss);
5811 	sp->rt = st->act.rt;
5812 	if (st->act.rt_kif)
5813 		strlcpy(sp->rt_ifname, st->act.rt_kif->pfik_name,
5814 		    sizeof(sp->rt_ifname));
5815 	sp->set_prio[0] = st->act.set_prio[0];
5816 	sp->set_prio[1] = st->act.set_prio[1];
5817 
5818 }
5819 
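/*
 * Replace the kernel table pointer in an address wrapper with the address
 * count userland expects: the count of the active table (or its root), or
 * -1 if the table is not active.
 */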
5820 static void
5821 pf_tbladdr_copyout(struct pf_addr_wrap *aw)
5822 {
5823 	struct pfr_ktable *kt;
5824 
5825 	KASSERT(aw->type == PF_ADDR_TABLE, ("%s: type %u", __func__, aw->type));
5826 
5827 	kt = aw->p.tbl;
5828 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
5829 		kt = kt->pfrkt_root;
5830 	aw->p.tbl = NULL;
5831 	aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
5832 		kt->pfrkt_cnt : -1;
5833 }
5834 
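/*
 * Pack an array of counters, together with their names and indices, into a
 * nested nvlist added to nvl under the given name.
 */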
5835 static int
5836 pf_add_status_counters(nvlist_t *nvl, const char *name, counter_u64_t *counters,
5837     size_t number, char **names)
5838 {
5839 	nvlist_t        *nvc;
5840 
5841 	nvc = nvlist_create(0);
5842 	if (nvc == NULL)
5843 		return (ENOMEM);
5844 
5845 	for (int i = 0; i < number; i++) {
5846 		nvlist_append_number_array(nvc, "counters",
5847 		    counter_u64_fetch(counters[i]));
5848 		nvlist_append_string_array(nvc, "names",
5849 		    names[i]);
5850 		nvlist_append_number_array(nvc, "ids",
5851 		    i);
5852 	}
5853 	nvlist_add_nvlist(nvl, name, nvc);
5854 	nvlist_destroy(nvc);
5855 
5856 	return (0);
5857 }
5858 
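/*
 * Gather the global pf status (flags, counters, interface statistics) into
 * a packed nvlist and copy it out to userland.
 */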
5859 static int
5860 pf_getstatus(struct pfioc_nv *nv)
5861 {
5862 	nvlist_t        *nvl = NULL, *nvc = NULL;
5863 	void            *nvlpacked = NULL;
5864 	int              error;
5865 	struct pf_status s;
5866 	char *pf_reasons[PFRES_MAX+1] = PFRES_NAMES;
5867 	char *pf_lcounter[KLCNT_MAX+1] = KLCNT_NAMES;
5868 	char *pf_fcounter[FCNT_MAX+1] = FCNT_NAMES;
5869 	time_t since;
5870 
5871 	PF_RULES_RLOCK_TRACKER;
5872 
5873 #define ERROUT(x)      ERROUT_FUNCTION(errout, x)
5874 
5875 	PF_RULES_RLOCK();
5876 
5877 	nvl = nvlist_create(0);
5878 	if (nvl == NULL)
5879 		ERROUT(ENOMEM);
5880 
5881 	since = time_second - (time_uptime - V_pf_status.since);
5882 
5883 	nvlist_add_bool(nvl, "running", V_pf_status.running);
5884 	nvlist_add_number(nvl, "since", since);
5885 	nvlist_add_number(nvl, "debug", V_pf_status.debug);
5886 	nvlist_add_number(nvl, "hostid", V_pf_status.hostid);
5887 	nvlist_add_number(nvl, "states", V_pf_status.states);
5888 	nvlist_add_number(nvl, "src_nodes", V_pf_status.src_nodes);
5889 	nvlist_add_number(nvl, "reass", V_pf_status.reass);
5890 	nvlist_add_bool(nvl, "syncookies_active",
5891 	    V_pf_status.syncookies_active);
5892 	nvlist_add_number(nvl, "halfopen_states", V_pf_status.states_halfopen);
5893 
5894 	/* counters */
5895 	error = pf_add_status_counters(nvl, "counters", V_pf_status.counters,
5896 	    PFRES_MAX, pf_reasons);
5897 	if (error != 0)
5898 		ERROUT(error);
5899 
5900 	/* lcounters */
5901 	error = pf_add_status_counters(nvl, "lcounters", V_pf_status.lcounters,
5902 	    KLCNT_MAX, pf_lcounter);
5903 	if (error != 0)
5904 		ERROUT(error);
5905 
5906 	/* fcounters */
5907 	nvc = nvlist_create(0);
5908 	if (nvc == NULL)
5909 		ERROUT(ENOMEM);
5910 
5911 	for (int i = 0; i < FCNT_MAX; i++) {
5912 		nvlist_append_number_array(nvc, "counters",
5913 		    pf_counter_u64_fetch(&V_pf_status.fcounters[i]));
5914 		nvlist_append_string_array(nvc, "names",
5915 		    pf_fcounter[i]);
5916 		nvlist_append_number_array(nvc, "ids",
5917 		    i);
5918 	}
5919 	nvlist_add_nvlist(nvl, "fcounters", nvc);
5920 	nvlist_destroy(nvc);
5921 	nvc = NULL;
5922 
5923 	/* scounters */
5924 	error = pf_add_status_counters(nvl, "scounters", V_pf_status.scounters,
5925 	    SCNT_MAX, pf_fcounter);
5926 	if (error != 0)
5927 		ERROUT(error);
5928 
5929 	nvlist_add_string(nvl, "ifname", V_pf_status.ifname);
5930 	nvlist_add_binary(nvl, "chksum", V_pf_status.pf_chksum,
5931 	    PF_MD5_DIGEST_LENGTH);
5932 
5933 	pfi_update_status(V_pf_status.ifname, &s);
5934 
5935 	/* pcounters / bcounters */
5936 	for (int i = 0; i < 2; i++) {
5937 		for (int j = 0; j < 2; j++) {
5938 			for (int k = 0; k < 2; k++) {
5939 				nvlist_append_number_array(nvl, "pcounters",
5940 				    s.pcounters[i][j][k]);
5941 			}
5942 			nvlist_append_number_array(nvl, "bcounters",
5943 			    s.bcounters[i][j]);
5944 		}
5945 	}
5946 
5947 	nvlpacked = nvlist_pack(nvl, &nv->len);
5948 	if (nvlpacked == NULL)
5949 		ERROUT(ENOMEM);
5950 
5951 	if (nv->size == 0)
5952 		ERROUT(0);
5953 	else if (nv->size < nv->len)
5954 		ERROUT(ENOSPC);
5955 
5956 	PF_RULES_RUNLOCK();
5957 	error = copyout(nvlpacked, nv->data, nv->len);
5958 	goto done;
5959 
5960 #undef ERROUT
5961 errout:
5962 	PF_RULES_RUNLOCK();
5963 done:
5964 	free(nvlpacked, M_NVLIST);
5965 	nvlist_destroy(nvc);
5966 	nvlist_destroy(nvl);
5967 
5968 	return (error);
5969 }
5970 
5971 /*
5972  * XXX - Check for version mismatch!!!
5973  */
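/*
 * Expire every state in every hash row.  PFSTATE_NOSYNC is set first so
 * that no individual pfsync delete messages are generated.
 */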
5974 static void
5975 pf_clear_all_states(void)
5976 {
5977 	struct epoch_tracker	 et;
5978 	struct pf_kstate	*s;
5979 	u_int i;
5980 
5981 	NET_EPOCH_ENTER(et);
5982 	for (i = 0; i <= V_pf_hashmask; i++) {
5983 		struct pf_idhash *ih = &V_pf_idhash[i];
5984 relock:
5985 		PF_HASHROW_LOCK(ih);
5986 		LIST_FOREACH(s, &ih->states, entry) {
5987 			s->timeout = PFTM_PURGE;
5988 			/* Don't send out individual delete messages. */
5989 			s->state_flags |= PFSTATE_NOSYNC;
5990 			pf_remove_state(s);
5991 			goto relock;
5992 		}
5993 		PF_HASHROW_UNLOCK(ih);
5994 	}
5995 	NET_EPOCH_EXIT(et);
5996 }
5997 
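/* Flush the tables of all rulesets (PFR_FLAG_ALLRSETS). */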
5998 static int
5999 pf_clear_tables(void)
6000 {
6001 	struct pfioc_table io;
6002 	int error;
6003 
6004 	bzero(&io, sizeof(io));
6005 	io.pfrio_flags |= PFR_FLAG_ALLRSETS;
6006 
6007 	error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel,
6008 	    io.pfrio_flags);
6009 
6010 	return (error);
6011 }
6012 
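/*
 * Unlink source nodes matching the given source/destination addresses (all
 * nodes if psnk is NULL), clear the references that states still hold on
 * them, free them and report how many were killed.
 */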
6013 static void
6014 pf_kill_srcnodes(struct pfioc_src_node_kill *psnk)
6015 {
6016 	struct pf_ksrc_node_list	 kill;
6017 	u_int 				 killed;
6018 
6019 	LIST_INIT(&kill);
6020 	for (int i = 0; i <= V_pf_srchashmask; i++) {
6021 		struct pf_srchash *sh = &V_pf_srchash[i];
6022 		struct pf_ksrc_node *sn, *tmp;
6023 
6024 		PF_HASHROW_LOCK(sh);
6025 		LIST_FOREACH_SAFE(sn, &sh->nodes, entry, tmp)
6026 			if (psnk == NULL ||
6027 			    (PF_MATCHA(psnk->psnk_src.neg,
6028 			      &psnk->psnk_src.addr.v.a.addr,
6029 			      &psnk->psnk_src.addr.v.a.mask,
6030 			      &sn->addr, sn->af) &&
6031 			    PF_MATCHA(psnk->psnk_dst.neg,
6032 			      &psnk->psnk_dst.addr.v.a.addr,
6033 			      &psnk->psnk_dst.addr.v.a.mask,
6034 			      &sn->raddr, sn->af))) {
6035 				pf_unlink_src_node(sn);
6036 				LIST_INSERT_HEAD(&kill, sn, entry);
6037 				sn->expire = 1;
6038 			}
6039 		PF_HASHROW_UNLOCK(sh);
6040 	}
6041 
6042 	for (int i = 0; i <= V_pf_hashmask; i++) {
6043 		struct pf_idhash *ih = &V_pf_idhash[i];
6044 		struct pf_kstate *s;
6045 
6046 		PF_HASHROW_LOCK(ih);
6047 		LIST_FOREACH(s, &ih->states, entry) {
6048 			for (pf_sn_types_t sn_type = 0; sn_type < PF_SN_MAX;
6049 			    sn_type++) {
6050 				if (s->sns[sn_type] &&
6051 				    s->sns[sn_type]->expire == 1) {
6052 					s->sns[sn_type] = NULL;
6053 				}
6054 			}
6055 		}
6056 		PF_HASHROW_UNLOCK(ih);
6057 	}
6058 
6059 	killed = pf_free_src_nodes(&kill);
6060 
6061 	if (psnk != NULL)
6062 		psnk->psnk_killed = killed;
6063 }
6064 
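/* Set V_pf_status.keep_counters from the "keep_counters" nvlist boolean. */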
6065 static int
6066 pf_keepcounters(struct pfioc_nv *nv)
6067 {
6068 	nvlist_t	*nvl = NULL;
6069 	void		*nvlpacked = NULL;
6070 	int		 error = 0;
6071 
6072 #define	ERROUT(x)	ERROUT_FUNCTION(on_error, x)
6073 
6074 	if (nv->len > pf_ioctl_maxcount)
6075 		ERROUT(ENOMEM);
6076 
6077 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
6078 	error = copyin(nv->data, nvlpacked, nv->len);
6079 	if (error)
6080 		ERROUT(error);
6081 
6082 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
6083 	if (nvl == NULL)
6084 		ERROUT(EBADMSG);
6085 
6086 	if (! nvlist_exists_bool(nvl, "keep_counters"))
6087 		ERROUT(EBADMSG);
6088 
6089 	V_pf_status.keep_counters = nvlist_get_bool(nvl, "keep_counters");
6090 
6091 on_error:
6092 	nvlist_destroy(nvl);
6093 	free(nvlpacked, M_NVLIST);
6094 	return (error);
6095 }
6096 
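/*
 * Remove states, optionally limited to one interface.  With psk_kill_match
 * set, the matching (reversed) state key is killed as well.  Returns the
 * number of states removed.
 */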
6097 unsigned int
6098 pf_clear_states(const struct pf_kstate_kill *kill)
6099 {
6100 	struct pf_state_key_cmp	 match_key;
6101 	struct pf_kstate	*s;
6102 	struct pfi_kkif	*kif;
6103 	int		 idx;
6104 	unsigned int	 killed = 0, dir;
6105 
6106 	NET_EPOCH_ASSERT();
6107 
6108 	for (unsigned int i = 0; i <= V_pf_hashmask; i++) {
6109 		struct pf_idhash *ih = &V_pf_idhash[i];
6110 
6111 relock_DIOCCLRSTATES:
6112 		PF_HASHROW_LOCK(ih);
6113 		LIST_FOREACH(s, &ih->states, entry) {
6114 			/* For floating states look at the original kif. */
6115 			kif = s->kif == V_pfi_all ? s->orig_kif : s->kif;
6116 
6117 			if (kill->psk_ifname[0] &&
6118 			    strcmp(kill->psk_ifname,
6119 			    kif->pfik_name))
6120 				continue;
6121 
6122 			if (kill->psk_kill_match) {
6123 				bzero(&match_key, sizeof(match_key));
6124 
6125 				if (s->direction == PF_OUT) {
6126 					dir = PF_IN;
6127 					idx = PF_SK_STACK;
6128 				} else {
6129 					dir = PF_OUT;
6130 					idx = PF_SK_WIRE;
6131 				}
6132 
6133 				match_key.af = s->key[idx]->af;
6134 				match_key.proto = s->key[idx]->proto;
6135 				PF_ACPY(&match_key.addr[0],
6136 				    &s->key[idx]->addr[1], match_key.af);
6137 				match_key.port[0] = s->key[idx]->port[1];
6138 				PF_ACPY(&match_key.addr[1],
6139 				    &s->key[idx]->addr[0], match_key.af);
6140 				match_key.port[1] = s->key[idx]->port[0];
6141 			}
6142 
6143 			/*
6144 			 * Don't send out individual
6145 			 * delete messages.
6146 			 */
6147 			s->state_flags |= PFSTATE_NOSYNC;
6148 			pf_remove_state(s);
6149 			killed++;
6150 
6151 			if (kill->psk_kill_match)
6152 				killed += pf_kill_matching_state(&match_key,
6153 				    dir);
6154 
6155 			goto relock_DIOCCLRSTATES;
6156 		}
6157 		PF_HASHROW_UNLOCK(ih);
6158 	}
6159 
6160 	if (V_pfsync_clear_states_ptr != NULL)
6161 		V_pfsync_clear_states_ptr(V_pf_status.hostid, kill->psk_ifname);
6162 
6163 	return (killed);
6164 }
6165 
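/*
 * Kill a single state identified by id/creatorid, or, if no id is given,
 * all states matching the kill criteria in every hash row.
 */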
6166 void
6167 pf_killstates(struct pf_kstate_kill *kill, unsigned int *killed)
6168 {
6169 	struct pf_kstate	*s;
6170 
6171 	NET_EPOCH_ASSERT();
6172 	if (kill->psk_pfcmp.id) {
6173 		if (kill->psk_pfcmp.creatorid == 0)
6174 			kill->psk_pfcmp.creatorid = V_pf_status.hostid;
6175 		if ((s = pf_find_state_byid(kill->psk_pfcmp.id,
6176 		    kill->psk_pfcmp.creatorid))) {
6177 			pf_remove_state(s);
6178 			*killed = 1;
6179 		}
6180 		return;
6181 	}
6182 
6183 	for (unsigned int i = 0; i <= V_pf_hashmask; i++)
6184 		*killed += pf_killstates_row(kill, &V_pf_idhash[i]);
6185 }
6186 
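/*
 * nvlist ioctl backend: unpack the kill request, run pf_killstates() within
 * a network epoch and return the number of killed states as "killed".
 */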
6187 static int
6188 pf_killstates_nv(struct pfioc_nv *nv)
6189 {
6190 	struct pf_kstate_kill	 kill;
6191 	struct epoch_tracker	 et;
6192 	nvlist_t		*nvl = NULL;
6193 	void			*nvlpacked = NULL;
6194 	int			 error = 0;
6195 	unsigned int		 killed = 0;
6196 
6197 #define ERROUT(x)	ERROUT_FUNCTION(on_error, x)
6198 
6199 	if (nv->len > pf_ioctl_maxcount)
6200 		ERROUT(ENOMEM);
6201 
6202 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
6203 	error = copyin(nv->data, nvlpacked, nv->len);
6204 	if (error)
6205 		ERROUT(error);
6206 
6207 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
6208 	if (nvl == NULL)
6209 		ERROUT(EBADMSG);
6210 
6211 	error = pf_nvstate_kill_to_kstate_kill(nvl, &kill);
6212 	if (error)
6213 		ERROUT(error);
6214 
6215 	NET_EPOCH_ENTER(et);
6216 	pf_killstates(&kill, &killed);
6217 	NET_EPOCH_EXIT(et);
6218 
6219 	free(nvlpacked, M_NVLIST);
6220 	nvlpacked = NULL;
6221 	nvlist_destroy(nvl);
6222 	nvl = nvlist_create(0);
6223 	if (nvl == NULL)
6224 		ERROUT(ENOMEM);
6225 
6226 	nvlist_add_number(nvl, "killed", killed);
6227 
6228 	nvlpacked = nvlist_pack(nvl, &nv->len);
6229 	if (nvlpacked == NULL)
6230 		ERROUT(ENOMEM);
6231 
6232 	if (nv->size == 0)
6233 		ERROUT(0);
6234 	else if (nv->size < nv->len)
6235 		ERROUT(ENOSPC);
6236 
6237 	error = copyout(nvlpacked, nv->data, nv->len);
6238 
6239 on_error:
6240 	nvlist_destroy(nvl);
6241 	free(nvlpacked, M_NVLIST);
6242 	return (error);
6243 }
6244 
6245 static int
6246 pf_clearstates_nv(struct pfioc_nv *nv)
6247 {
6248 	struct pf_kstate_kill	 kill;
6249 	struct epoch_tracker	 et;
6250 	nvlist_t		*nvl = NULL;
6251 	void			*nvlpacked = NULL;
6252 	int			 error = 0;
6253 	unsigned int		 killed;
6254 
6255 #define ERROUT(x)	ERROUT_FUNCTION(on_error, x)
6256 
6257 	if (nv->len > pf_ioctl_maxcount)
6258 		ERROUT(ENOMEM);
6259 
6260 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
6261 	error = copyin(nv->data, nvlpacked, nv->len);
6262 	if (error)
6263 		ERROUT(error);
6264 
6265 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
6266 	if (nvl == NULL)
6267 		ERROUT(EBADMSG);
6268 
6269 	error = pf_nvstate_kill_to_kstate_kill(nvl, &kill);
6270 	if (error)
6271 		ERROUT(error);
6272 
6273 	NET_EPOCH_ENTER(et);
6274 	killed = pf_clear_states(&kill);
6275 	NET_EPOCH_EXIT(et);
6276 
6277 	free(nvlpacked, M_NVLIST);
6278 	nvlpacked = NULL;
6279 	nvlist_destroy(nvl);
6280 	nvl = nvlist_create(0);
6281 	if (nvl == NULL)
6282 		ERROUT(ENOMEM);
6283 
6284 	nvlist_add_number(nvl, "killed", killed);
6285 
6286 	nvlpacked = nvlist_pack(nvl, &nv->len);
6287 	if (nvlpacked == NULL)
6288 		ERROUT(ENOMEM);
6289 
6290 	if (nv->size == 0)
6291 		ERROUT(0);
6292 	else if (nv->size < nv->len)
6293 		ERROUT(ENOSPC);
6294 
6295 	error = copyout(nvlpacked, nv->data, nv->len);
6296 
6297 #undef ERROUT
6298 on_error:
6299 	nvlist_destroy(nvl);
6300 	free(nvlpacked, M_NVLIST);
6301 	return (error);
6302 }
6303 
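/*
 * Look up a single state by id/creatorid and return it to userland as a
 * packed nvlist.
 */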
6304 static int
6305 pf_getstate(struct pfioc_nv *nv)
6306 {
6307 	nvlist_t		*nvl = NULL, *nvls;
6308 	void			*nvlpacked = NULL;
6309 	struct pf_kstate	*s = NULL;
6310 	int			 error = 0;
6311 	uint64_t		 id, creatorid;
6312 
6313 #define ERROUT(x)	ERROUT_FUNCTION(errout, x)
6314 
6315 	if (nv->len > pf_ioctl_maxcount)
6316 		ERROUT(ENOMEM);
6317 
6318 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
6319 	error = copyin(nv->data, nvlpacked, nv->len);
6320 	if (error)
6321 		ERROUT(error);
6322 
6323 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
6324 	if (nvl == NULL)
6325 		ERROUT(EBADMSG);
6326 
6327 	PFNV_CHK(pf_nvuint64(nvl, "id", &id));
6328 	PFNV_CHK(pf_nvuint64(nvl, "creatorid", &creatorid));
6329 
6330 	s = pf_find_state_byid(id, creatorid);
6331 	if (s == NULL)
6332 		ERROUT(ENOENT);
6333 
6334 	free(nvlpacked, M_NVLIST);
6335 	nvlpacked = NULL;
6336 	nvlist_destroy(nvl);
6337 	nvl = nvlist_create(0);
6338 	if (nvl == NULL)
6339 		ERROUT(ENOMEM);
6340 
6341 	nvls = pf_state_to_nvstate(s);
6342 	if (nvls == NULL)
6343 		ERROUT(ENOMEM);
6344 
6345 	nvlist_add_nvlist(nvl, "state", nvls);
6346 	nvlist_destroy(nvls);
6347 
6348 	nvlpacked = nvlist_pack(nvl, &nv->len);
6349 	if (nvlpacked == NULL)
6350 		ERROUT(ENOMEM);
6351 
6352 	if (nv->size == 0)
6353 		ERROUT(0);
6354 	else if (nv->size < nv->len)
6355 		ERROUT(ENOSPC);
6356 
6357 	error = copyout(nvlpacked, nv->data, nv->len);
6358 
6359 #undef ERROUT
6360 errout:
6361 	if (s != NULL)
6362 		PF_STATE_UNLOCK(s);
6363 	free(nvlpacked, M_NVLIST);
6364 	nvlist_destroy(nvl);
6365 	return (error);
6366 }
6367 
6368 /*
6369  * XXX - Check for version mismatch!!!
6370  */
6371 
6372 /*
6373  * Duplicate pfctl -Fa operation to get rid of as much as we can.
6374  */
6375 static int
6376 shutdown_pf(void)
6377 {
6378 	int error = 0;
6379 	u_int32_t t[5];
6380 	char nn = '\0';
6381 	struct pf_kanchor *anchor;
6382 	struct pf_keth_anchor *eth_anchor;
6383 	int rs_num;
6384 
6385 	do {
6386 		/* Unlink rules of all user defined anchors */
6387 		RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors) {
6388 			/* Wildcard-based anchors may not have a corresponding
6389 			 * explicit anchor rule, or they may be left empty
6390 			 * without rules. This leads to anchor.refcnt=0, which
6391 			 * the rest of the logic does not expect. */
6392 			if (anchor->refcnt == 0)
6393 				anchor->refcnt = 1;
6394 			for (rs_num = 0; rs_num < PF_RULESET_MAX; ++rs_num) {
6395 				if ((error = pf_begin_rules(&t[rs_num], rs_num,
6396 				    anchor->path)) != 0) {
6397 					DPFPRINTF(PF_DEBUG_MISC, ("%s: "
6398 					    "anchor.path=%s rs_num=%d\n",
6399 					    __func__, anchor->path, rs_num));
6400 					goto error;	/* XXX: rollback? */
6401 				}
6402 			}
6403 			for (rs_num = 0; rs_num < PF_RULESET_MAX; ++rs_num) {
6404 				error = pf_commit_rules(t[rs_num], rs_num,
6405 				    anchor->path);
6406 				MPASS(error == 0);
6407 			}
6408 		}
6409 
6410 		/* Unlink rules of all user defined ether anchors */
6411 		RB_FOREACH(eth_anchor, pf_keth_anchor_global,
6412 		    &V_pf_keth_anchors) {
6413 			/* Wildcard-based anchors may not have a corresponding
6414 			 * explicit anchor rule, or they may be left empty
6415 			 * without rules. This leads to anchor.refcnt=0, which
6416 			 * the rest of the logic does not expect. */
6417 			if (eth_anchor->refcnt == 0)
6418 				eth_anchor->refcnt = 1;
6419 			if ((error = pf_begin_eth(&t[0], eth_anchor->path))
6420 			    != 0) {
6421 				DPFPRINTF(PF_DEBUG_MISC, ("%s: eth "
6422 				    "anchor.path=%s\n", __func__,
6423 				    eth_anchor->path));
6424 				goto error;
6425 			}
6426 			error = pf_commit_eth(t[0], eth_anchor->path);
6427 			MPASS(error == 0);
6428 		}
6429 
6430 		if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn))
6431 		    != 0) {
6432 			DPFPRINTF(PF_DEBUG_MISC, ("%s: SCRUB\n", __func__));
6433 			break;
6434 		}
6435 		if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn))
6436 		    != 0) {
6437 			DPFPRINTF(PF_DEBUG_MISC, ("%s: FILTER\n", __func__));
6438 			break;		/* XXX: rollback? */
6439 		}
6440 		if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn))
6441 		    != 0) {
6442 			DPFPRINTF(PF_DEBUG_MISC, ("%s: NAT\n", __func__));
6443 			break;		/* XXX: rollback? */
6444 		}
6445 		if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn))
6446 		    != 0) {
6447 			DPFPRINTF(PF_DEBUG_MISC, ("%s: BINAT\n", __func__));
6448 			break;		/* XXX: rollback? */
6449 		}
6450 		if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn))
6451 		    != 0) {
6452 			DPFPRINTF(PF_DEBUG_MISC, ("%s: RDR\n", __func__));
6453 			break;		/* XXX: rollback? */
6454 		}
6455 
6456 		error = pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn);
6457 		MPASS(error == 0);
6458 		error = pf_commit_rules(t[1], PF_RULESET_FILTER, &nn);
6459 		MPASS(error == 0);
6460 		error = pf_commit_rules(t[2], PF_RULESET_NAT, &nn);
6461 		MPASS(error == 0);
6462 		error = pf_commit_rules(t[3], PF_RULESET_BINAT, &nn);
6463 		MPASS(error == 0);
6464 		error = pf_commit_rules(t[4], PF_RULESET_RDR, &nn);
6465 		MPASS(error == 0);
6466 
6467 		if ((error = pf_clear_tables()) != 0)
6468 			break;
6469 
6470 		if ((error = pf_begin_eth(&t[0], &nn)) != 0) {
6471 			DPFPRINTF(PF_DEBUG_MISC, ("%s: eth\n", __func__));
6472 			break;
6473 		}
6474 		error = pf_commit_eth(t[0], &nn);
6475 		MPASS(error == 0);
6476 
6477 #ifdef ALTQ
6478 		if ((error = pf_begin_altq(&t[0])) != 0) {
6479 			DPFPRINTF(PF_DEBUG_MISC, ("%s: ALTQ\n", __func__));
6480 			break;
6481 		}
6482 		pf_commit_altq(t[0]);
6483 #endif
6484 
6485 		pf_clear_all_states();
6486 
6487 		pf_kill_srcnodes(NULL);
6488 
6489 		/* status does not use malloc'ed memory, so nothing to clean up */
6490 		/* fingerprints and interfaces have their own cleanup code */
6491 	} while(0);
6492 
6493 error:
6494 	return (error);
6495 }
6496 
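/*
 * Translate a pf_test*() verdict into a pfil(9) return value: PF_PASS with
 * a consumed mbuf becomes PFIL_CONSUMED, any other verdict frees the mbuf
 * and reports PFIL_DROPPED.
 */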
6497 static pfil_return_t
6498 pf_check_return(int chk, struct mbuf **m)
6499 {
6500 
6501 	switch (chk) {
6502 	case PF_PASS:
6503 		if (*m == NULL)
6504 			return (PFIL_CONSUMED);
6505 		else
6506 			return (PFIL_PASS);
6507 		break;
6508 	default:
6509 		if (*m != NULL) {
6510 			m_freem(*m);
6511 			*m = NULL;
6512 		}
6513 		return (PFIL_DROPPED);
6514 	}
6515 }
6516 
6517 static pfil_return_t
6518 pf_eth_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
6519     void *ruleset __unused, struct inpcb *inp)
6520 {
6521 	int chk;
6522 
6523 	CURVNET_ASSERT_SET();
6524 
6525 	chk = pf_test_eth(PF_IN, flags, ifp, m, inp);
6526 
6527 	return (pf_check_return(chk, m));
6528 }
6529 
6530 static pfil_return_t
6531 pf_eth_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
6532     void *ruleset __unused, struct inpcb *inp)
6533 {
6534 	int chk;
6535 
6536 	CURVNET_ASSERT_SET();
6537 
6538 	chk = pf_test_eth(PF_OUT, flags, ifp, m, inp);
6539 
6540 	return (pf_check_return(chk, m));
6541 }
6542 
6543 #ifdef INET
6544 static pfil_return_t
6545 pf_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
6546     void *ruleset __unused, struct inpcb *inp)
6547 {
6548 	int chk;
6549 
6550 	CURVNET_ASSERT_SET();
6551 
6552 	chk = pf_test(AF_INET, PF_IN, flags, ifp, m, inp, NULL);
6553 
6554 	return (pf_check_return(chk, m));
6555 }
6556 
6557 static pfil_return_t
6558 pf_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
6559     void *ruleset __unused,  struct inpcb *inp)
6560 {
6561 	int chk;
6562 
6563 	CURVNET_ASSERT_SET();
6564 
6565 	chk = pf_test(AF_INET, PF_OUT, flags, ifp, m, inp, NULL);
6566 
6567 	return (pf_check_return(chk, m));
6568 }
6569 #endif
6570 
6571 #ifdef INET6
6572 static pfil_return_t
6573 pf_check6_in(struct mbuf **m, struct ifnet *ifp, int flags,
6574     void *ruleset __unused,  struct inpcb *inp)
6575 {
6576 	int chk;
6577 
6578 	CURVNET_ASSERT_SET();
6579 
6580 	/*
6581 	 * In case of loopback traffic IPv6 uses the real interface in
6582 	 * order to support scoped addresses. In order to support stateful
6583 	 * filtering we have to change this to lo0, as is the case for IPv4.
6584 	 */
6585 	chk = pf_test(AF_INET6, PF_IN, flags, (*m)->m_flags & M_LOOP ? V_loif : ifp,
6586 	    m, inp, NULL);
6587 
6588 	return (pf_check_return(chk, m));
6589 }
6590 
6591 static pfil_return_t
6592 pf_check6_out(struct mbuf **m, struct ifnet *ifp, int flags,
6593     void *ruleset __unused,  struct inpcb *inp)
6594 {
6595 	int chk;
6596 
6597 	CURVNET_ASSERT_SET();
6598 
6599 	chk = pf_test(AF_INET6, PF_OUT, flags, ifp, m, inp, NULL);
6600 
6601 	return (pf_check_return(chk, m));
6602 }
6603 #endif /* INET6 */
6604 
6605 VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_in_hook);
6606 VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_out_hook);
6607 #define	V_pf_eth_in_hook	VNET(pf_eth_in_hook)
6608 #define	V_pf_eth_out_hook	VNET(pf_eth_out_hook)
6609 
6610 #ifdef INET
6611 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_in_hook);
6612 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_out_hook);
6613 #define	V_pf_ip4_in_hook	VNET(pf_ip4_in_hook)
6614 #define	V_pf_ip4_out_hook	VNET(pf_ip4_out_hook)
6615 #endif
6616 #ifdef INET6
6617 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_in_hook);
6618 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_out_hook);
6619 #define	V_pf_ip6_in_hook	VNET(pf_ip6_in_hook)
6620 #define	V_pf_ip6_out_hook	VNET(pf_ip6_out_hook)
6621 #endif
6622 
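/*
 * Register the Ethernet-layer pf hooks and link them into the link-layer
 * pfil head for both directions.
 */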
6623 static void
6624 hook_pf_eth(void)
6625 {
6626 	struct pfil_hook_args pha = {
6627 		.pa_version = PFIL_VERSION,
6628 		.pa_modname = "pf",
6629 		.pa_type = PFIL_TYPE_ETHERNET,
6630 	};
6631 	struct pfil_link_args pla = {
6632 		.pa_version = PFIL_VERSION,
6633 	};
6634 	int ret __diagused;
6635 
6636 	if (atomic_load_bool(&V_pf_pfil_eth_hooked))
6637 		return;
6638 
6639 	pha.pa_mbuf_chk = pf_eth_check_in;
6640 	pha.pa_flags = PFIL_IN;
6641 	pha.pa_rulname = "eth-in";
6642 	V_pf_eth_in_hook = pfil_add_hook(&pha);
6643 	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
6644 	pla.pa_head = V_link_pfil_head;
6645 	pla.pa_hook = V_pf_eth_in_hook;
6646 	ret = pfil_link(&pla);
6647 	MPASS(ret == 0);
6648 	pha.pa_mbuf_chk = pf_eth_check_out;
6649 	pha.pa_flags = PFIL_OUT;
6650 	pha.pa_rulname = "eth-out";
6651 	V_pf_eth_out_hook = pfil_add_hook(&pha);
6652 	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6653 	pla.pa_head = V_link_pfil_head;
6654 	pla.pa_hook = V_pf_eth_out_hook;
6655 	ret = pfil_link(&pla);
6656 	MPASS(ret == 0);
6657 
6658 	atomic_store_bool(&V_pf_pfil_eth_hooked, true);
6659 }
6660 
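/*
 * Register the IPv4/IPv6 pf hooks on the inet/inet6 pfil heads; when
 * V_pf_filter_local is set the output hooks are additionally linked to the
 * local (loopback) pfil heads.
 */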
6661 static void
6662 hook_pf(void)
6663 {
6664 	struct pfil_hook_args pha = {
6665 		.pa_version = PFIL_VERSION,
6666 		.pa_modname = "pf",
6667 	};
6668 	struct pfil_link_args pla = {
6669 		.pa_version = PFIL_VERSION,
6670 	};
6671 	int ret __diagused;
6672 
6673 	if (atomic_load_bool(&V_pf_pfil_hooked))
6674 		return;
6675 
6676 #ifdef INET
6677 	pha.pa_type = PFIL_TYPE_IP4;
6678 	pha.pa_mbuf_chk = pf_check_in;
6679 	pha.pa_flags = PFIL_IN;
6680 	pha.pa_rulname = "default-in";
6681 	V_pf_ip4_in_hook = pfil_add_hook(&pha);
6682 	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
6683 	pla.pa_head = V_inet_pfil_head;
6684 	pla.pa_hook = V_pf_ip4_in_hook;
6685 	ret = pfil_link(&pla);
6686 	MPASS(ret == 0);
6687 	pha.pa_mbuf_chk = pf_check_out;
6688 	pha.pa_flags = PFIL_OUT;
6689 	pha.pa_rulname = "default-out";
6690 	V_pf_ip4_out_hook = pfil_add_hook(&pha);
6691 	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6692 	pla.pa_head = V_inet_pfil_head;
6693 	pla.pa_hook = V_pf_ip4_out_hook;
6694 	ret = pfil_link(&pla);
6695 	MPASS(ret == 0);
6696 	if (V_pf_filter_local) {
6697 		pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6698 		pla.pa_head = V_inet_local_pfil_head;
6699 		pla.pa_hook = V_pf_ip4_out_hook;
6700 		ret = pfil_link(&pla);
6701 		MPASS(ret == 0);
6702 	}
6703 #endif
6704 #ifdef INET6
6705 	pha.pa_type = PFIL_TYPE_IP6;
6706 	pha.pa_mbuf_chk = pf_check6_in;
6707 	pha.pa_flags = PFIL_IN;
6708 	pha.pa_rulname = "default-in6";
6709 	V_pf_ip6_in_hook = pfil_add_hook(&pha);
6710 	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
6711 	pla.pa_head = V_inet6_pfil_head;
6712 	pla.pa_hook = V_pf_ip6_in_hook;
6713 	ret = pfil_link(&pla);
6714 	MPASS(ret == 0);
6715 	pha.pa_mbuf_chk = pf_check6_out;
6716 	pha.pa_rulname = "default-out6";
6717 	pha.pa_flags = PFIL_OUT;
6718 	V_pf_ip6_out_hook = pfil_add_hook(&pha);
6719 	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6720 	pla.pa_head = V_inet6_pfil_head;
6721 	pla.pa_hook = V_pf_ip6_out_hook;
6722 	ret = pfil_link(&pla);
6723 	MPASS(ret == 0);
6724 	if (V_pf_filter_local) {
6725 		pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
6726 		pla.pa_head = V_inet6_local_pfil_head;
6727 		pla.pa_hook = V_pf_ip6_out_hook;
6728 		ret = pfil_link(&pla);
6729 		MPASS(ret == 0);
6730 	}
6731 #endif
6732 
6733 	atomic_store_bool(&V_pf_pfil_hooked, true);
6734 }
6735 
6736 static void
6737 dehook_pf_eth(void)
6738 {
6739 
6740 	if (!atomic_load_bool(&V_pf_pfil_eth_hooked))
6741 		return;
6742 
6743 	pfil_remove_hook(V_pf_eth_in_hook);
6744 	pfil_remove_hook(V_pf_eth_out_hook);
6745 
6746 	atomic_store_bool(&V_pf_pfil_eth_hooked, false);
6747 }
6748 
6749 static void
6750 dehook_pf(void)
6751 {
6752 
6753 	if (!atomic_load_bool(&V_pf_pfil_hooked))
6754 		return;
6755 
6756 #ifdef INET
6757 	pfil_remove_hook(V_pf_ip4_in_hook);
6758 	pfil_remove_hook(V_pf_ip4_out_hook);
6759 #endif
6760 #ifdef INET6
6761 	pfil_remove_hook(V_pf_ip6_in_hook);
6762 	pfil_remove_hook(V_pf_ip6_out_hook);
6763 #endif
6764 
6765 	atomic_store_bool(&V_pf_pfil_hooked, false);
6766 }
6767 
6768 static void
6769 pf_load_vnet(void)
6770 {
6771 	V_pf_tag_z = uma_zcreate("pf tags", sizeof(struct pf_tagname),
6772 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
6773 
6774 	rm_init_flags(&V_pf_rules_lock, "pf rulesets", RM_RECURSE);
6775 	sx_init(&V_pf_ioctl_lock, "pf ioctl");
6776 
6777 	pf_init_tagset(&V_pf_tags, &pf_rule_tag_hashsize,
6778 	    PF_RULE_TAG_HASH_SIZE_DEFAULT);
6779 #ifdef ALTQ
6780 	pf_init_tagset(&V_pf_qids, &pf_queue_tag_hashsize,
6781 	    PF_QUEUE_TAG_HASH_SIZE_DEFAULT);
6782 #endif
6783 
6784 	V_pf_keth = &V_pf_main_keth_anchor.ruleset;
6785 
6786 	pfattach_vnet();
6787 	V_pf_vnet_active = 1;
6788 }
6789 
6790 static int
6791 pf_load(void)
6792 {
6793 	int error;
6794 
6795 	sx_init(&pf_end_lock, "pf end thread");
6796 
6797 	pf_mtag_initialize();
6798 
6799 	pf_dev = make_dev(&pf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, PF_NAME);
6800 	if (pf_dev == NULL)
6801 		return (ENOMEM);
6802 
6803 	pf_end_threads = 0;
6804 	error = kproc_create(pf_purge_thread, NULL, &pf_purge_proc, 0, 0, "pf purge");
6805 	if (error != 0)
6806 		return (error);
6807 
6808 	pfi_initialize();
6809 
6810 	return (0);
6811 }
6812 
6813 static void
6814 pf_unload_vnet(void)
6815 {
6816 	int ret __diagused;
6817 
6818 	V_pf_vnet_active = 0;
6819 	V_pf_status.running = 0;
6820 	dehook_pf();
6821 	dehook_pf_eth();
6822 
6823 	PF_RULES_WLOCK();
6824 	pf_syncookies_cleanup();
6825 	shutdown_pf();
6826 	PF_RULES_WUNLOCK();
6827 
6828 	ret = swi_remove(V_pf_swi_cookie);
6829 	MPASS(ret == 0);
6830 	ret = intr_event_destroy(V_pf_swi_ie);
6831 	MPASS(ret == 0);
6832 
6833 	pf_unload_vnet_purge();
6834 
6835 	pf_normalize_cleanup();
6836 	PF_RULES_WLOCK();
6837 	pfi_cleanup_vnet();
6838 	PF_RULES_WUNLOCK();
6839 	pfr_cleanup();
6840 	pf_osfp_flush();
6841 	pf_cleanup();
6842 	if (IS_DEFAULT_VNET(curvnet))
6843 		pf_mtag_cleanup();
6844 
6845 	pf_cleanup_tagset(&V_pf_tags);
6846 #ifdef ALTQ
6847 	pf_cleanup_tagset(&V_pf_qids);
6848 #endif
6849 	uma_zdestroy(V_pf_tag_z);
6850 
6851 #ifdef PF_WANT_32_TO_64_COUNTER
6852 	PF_RULES_WLOCK();
6853 	LIST_REMOVE(V_pf_kifmarker, pfik_allkiflist);
6854 
6855 	MPASS(LIST_EMPTY(&V_pf_allkiflist));
6856 	MPASS(V_pf_allkifcount == 0);
6857 
6858 	LIST_REMOVE(&V_pf_default_rule, allrulelist);
6859 	V_pf_allrulecount--;
6860 	LIST_REMOVE(V_pf_rulemarker, allrulelist);
6861 
6862 	MPASS(LIST_EMPTY(&V_pf_allrulelist));
6863 	MPASS(V_pf_allrulecount == 0);
6864 
6865 	PF_RULES_WUNLOCK();
6866 
6867 	free(V_pf_kifmarker, PFI_MTYPE);
6868 	free(V_pf_rulemarker, M_PFRULE);
6869 #endif
6870 
6871 	/* Free counters last as we updated them during shutdown. */
6872 	pf_counter_u64_deinit(&V_pf_default_rule.evaluations);
6873 	for (int i = 0; i < 2; i++) {
6874 		pf_counter_u64_deinit(&V_pf_default_rule.packets[i]);
6875 		pf_counter_u64_deinit(&V_pf_default_rule.bytes[i]);
6876 	}
6877 	counter_u64_free(V_pf_default_rule.states_cur);
6878 	counter_u64_free(V_pf_default_rule.states_tot);
6879 	for (pf_sn_types_t sn_type = 0; sn_type < PF_SN_MAX; sn_type++)
6880 		counter_u64_free(V_pf_default_rule.src_nodes[sn_type]);
6881 	uma_zfree_pcpu(pf_timestamp_pcpu_zone, V_pf_default_rule.timestamp);
6882 
6883 	for (int i = 0; i < PFRES_MAX; i++)
6884 		counter_u64_free(V_pf_status.counters[i]);
6885 	for (int i = 0; i < KLCNT_MAX; i++)
6886 		counter_u64_free(V_pf_status.lcounters[i]);
6887 	for (int i = 0; i < FCNT_MAX; i++)
6888 		pf_counter_u64_deinit(&V_pf_status.fcounters[i]);
6889 	for (int i = 0; i < SCNT_MAX; i++)
6890 		counter_u64_free(V_pf_status.scounters[i]);
6891 
6892 	rm_destroy(&V_pf_rules_lock);
6893 	sx_destroy(&V_pf_ioctl_lock);
6894 }
6895 
6896 static void
6897 pf_unload(void)
6898 {
6899 
6900 	sx_xlock(&pf_end_lock);
6901 	pf_end_threads = 1;
6902 	while (pf_end_threads < 2) {
6903 		wakeup_one(pf_purge_thread);
6904 		sx_sleep(pf_purge_proc, &pf_end_lock, 0, "pftmo", 0);
6905 	}
6906 	sx_xunlock(&pf_end_lock);
6907 
6908 	pf_nl_unregister();
6909 
6910 	if (pf_dev != NULL)
6911 		destroy_dev(pf_dev);
6912 
6913 	pfi_cleanup();
6914 
6915 	sx_destroy(&pf_end_lock);
6916 }
6917 
6918 static void
6919 vnet_pf_init(void *unused __unused)
6920 {
6921 
6922 	pf_load_vnet();
6923 }
6924 VNET_SYSINIT(vnet_pf_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
6925     vnet_pf_init, NULL);
6926 
6927 static void
6928 vnet_pf_uninit(const void *unused __unused)
6929 {
6930 
6931 	pf_unload_vnet();
6932 }
6933 SYSUNINIT(pf_unload, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND, pf_unload, NULL);
6934 VNET_SYSUNINIT(vnet_pf_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
6935     vnet_pf_uninit, NULL);
6936 
6937 static int
6938 pf_modevent(module_t mod, int type, void *data)
6939 {
6940 	int error = 0;
6941 
6942 	switch (type) {
6943 	case MOD_LOAD:
6944 		error = pf_load();
6945 		pf_nl_register();
6946 		break;
6947 	case MOD_UNLOAD:
6948 		/* Handled in SYSUNINIT(pf_unload) to ensure it's done after
6949 		 * the vnet_pf_uninit()s */
6950 		break;
6951 	default:
6952 		error = EINVAL;
6953 		break;
6954 	}
6955 
6956 	return (error);
6957 }
6958 
6959 static moduledata_t pf_mod = {
6960 	"pf",
6961 	pf_modevent,
6962 	0
6963 };
6964 
6965 DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND);
6966 MODULE_DEPEND(pf, netlink, 1, 1, 1);
6967 MODULE_DEPEND(pf, crypto, 1, 1, 1);
6968 MODULE_VERSION(pf, PF_MODVER);
6969